diff --git a/.azure-pipelines/README.md b/.azure-pipelines/README.md index 385e70bac5..9e8ad74104 100644 --- a/.azure-pipelines/README.md +++ b/.azure-pipelines/README.md @@ -1,3 +1,9 @@ + + ## Azure Pipelines Configuration Please see the [Documentation](https://github.com/ansible/community/wiki/Testing:-Azure-Pipelines) for more information. diff --git a/.azure-pipelines/azure-pipelines.yml b/.azure-pipelines/azure-pipelines.yml index 289108c04d..e9bfa6f8e4 100644 --- a/.azure-pipelines/azure-pipelines.yml +++ b/.azure-pipelines/azure-pipelines.yml @@ -1,3 +1,8 @@ +--- +# Copyright (c) Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + trigger: batch: true branches: @@ -24,22 +29,20 @@ schedules: always: true branches: include: - - stable-5 - - stable-4 + - stable-11 + - stable-10 - cron: 0 11 * * 0 displayName: Weekly (old stable branches) always: true branches: include: - - stable-3 + - stable-9 variables: - name: checkoutPath value: ansible_collections/community/general - name: coverageBranches value: main - - name: pipelinesCoverage - value: coverage - name: entryPoint value: tests/utils/shippable/shippable.sh - name: fetchDepth @@ -48,7 +51,7 @@ variables: resources: containers: - container: default - image: quay.io/ansible/azure-pipelines-test-container:1.9.0 + image: quay.io/ansible/azure-pipelines-test-container:7.0.0 pool: Standard @@ -67,41 +70,40 @@ stages: - test: 2 - test: 3 - test: 4 - - test: extra - - stage: Sanity_2_13 - displayName: Sanity 2.13 + - stage: Sanity_2_20 + displayName: Sanity 2.20 dependsOn: [] jobs: - template: templates/matrix.yml parameters: nameFormat: Test {0} - testFormat: 2.13/sanity/{0} + testFormat: 2.20/sanity/{0} targets: - test: 1 - test: 2 - test: 3 - test: 4 - - stage: Sanity_2_12 - displayName: Sanity 2.12 + - stage: Sanity_2_19 + displayName: Sanity 2.19 dependsOn: [] jobs: - template: 
templates/matrix.yml parameters: nameFormat: Test {0} - testFormat: 2.12/sanity/{0} + testFormat: 2.19/sanity/{0} targets: - test: 1 - test: 2 - test: 3 - test: 4 - - stage: Sanity_2_11 - displayName: Sanity 2.11 + - stage: Sanity_2_18 + displayName: Sanity 2.18 dependsOn: [] jobs: - template: templates/matrix.yml parameters: nameFormat: Test {0} - testFormat: 2.11/sanity/{0} + testFormat: 2.18/sanity/{0} targets: - test: 1 - test: 2 @@ -117,54 +119,68 @@ stages: nameFormat: Python {0} testFormat: devel/units/{0}/1 targets: - - test: 2.7 - - test: 3.5 - - test: 3.6 - - test: 3.7 - - test: 3.8 - test: 3.9 - test: '3.10' - - stage: Units_2_13 - displayName: Units 2.13 + - test: '3.11' + - test: '3.12' + - test: '3.13' + - test: '3.14' + - stage: Units_2_20 + displayName: Units 2.20 dependsOn: [] jobs: - template: templates/matrix.yml parameters: nameFormat: Python {0} - testFormat: 2.13/units/{0}/1 + testFormat: 2.20/units/{0}/1 targets: - - test: 2.7 - - test: 3.6 - - test: 3.8 - test: 3.9 - - stage: Units_2_12 - displayName: Units 2.12 + - test: "3.12" + - test: "3.14" + - stage: Units_2_19 + displayName: Units 2.19 dependsOn: [] jobs: - template: templates/matrix.yml parameters: nameFormat: Python {0} - testFormat: 2.12/units/{0}/1 + testFormat: 2.19/units/{0}/1 targets: - - test: 2.6 - - test: 3.5 - test: 3.8 - - stage: Units_2_11 - displayName: Units 2.11 + - test: "3.11" + - test: "3.13" + - stage: Units_2_18 + displayName: Units 2.18 dependsOn: [] jobs: - template: templates/matrix.yml parameters: nameFormat: Python {0} - testFormat: 2.11/units/{0}/1 + testFormat: 2.18/units/{0}/1 targets: - - test: 2.6 - - test: 2.7 - - test: 3.5 - - test: 3.6 - - test: 3.9 + - test: 3.8 + - test: "3.11" + - test: "3.13" ## Remote + - stage: Remote_devel_extra_vms + displayName: Remote devel extra VMs + dependsOn: [] + jobs: + - template: templates/matrix.yml + parameters: + testFormat: devel/{0} + targets: + - name: Alpine 3.22 + test: alpine/3.22 + # - name: Fedora 42 + # 
test: fedora/42 + - name: Ubuntu 22.04 + test: ubuntu/22.04 + - name: Ubuntu 24.04 + test: ubuntu/24.04 + groups: + - vm - stage: Remote_devel displayName: Remote devel dependsOn: [] @@ -173,68 +189,68 @@ stages: parameters: testFormat: devel/{0} targets: - - name: macOS 12.0 - test: macos/12.0 - - name: RHEL 7.9 - test: rhel/7.9 - - name: RHEL 9.0 - test: rhel/9.0 - - name: FreeBSD 12.3 - test: freebsd/12.3 - - name: FreeBSD 13.1 - test: freebsd/13.1 + - name: macOS 15.3 + test: macos/15.3 + - name: RHEL 10.0 + test: rhel/10.0 + - name: RHEL 9.6 + test: rhel/9.6 + - name: FreeBSD 14.3 + test: freebsd/14.3 + - name: FreeBSD 13.5 + test: freebsd/13.5 groups: - 1 - 2 - 3 - - stage: Remote_2_13 - displayName: Remote 2.13 + - stage: Remote_2_20 + displayName: Remote 2.20 dependsOn: [] jobs: - template: templates/matrix.yml parameters: - testFormat: 2.13/{0} + testFormat: 2.20/{0} targets: - - name: macOS 12.0 - test: macos/12.0 - - name: RHEL 8.5 - test: rhel/8.5 + - name: RHEL 10.0 + test: rhel/10.0 + - name: FreeBSD 14.3 + test: freebsd/14.3 groups: - 1 - 2 - 3 - - stage: Remote_2_12 - displayName: Remote 2.12 + - stage: Remote_2_19 + displayName: Remote 2.19 dependsOn: [] jobs: - template: templates/matrix.yml parameters: - testFormat: 2.12/{0} + testFormat: 2.19/{0} targets: - - name: macOS 11.1 - test: macos/11.1 - - name: RHEL 8.4 - test: rhel/8.4 - - name: FreeBSD 13.0 - test: freebsd/13.0 + - name: RHEL 9.5 + test: rhel/9.5 + - name: RHEL 10.0 + test: rhel/10.0 + - name: FreeBSD 14.2 + test: freebsd/14.2 groups: - 1 - 2 - 3 - - stage: Remote_2_11 - displayName: Remote 2.11 + - stage: Remote_2_18 + displayName: Remote 2.18 dependsOn: [] jobs: - template: templates/matrix.yml parameters: - testFormat: 2.11/{0} + testFormat: 2.18/{0} targets: - - name: RHEL 7.9 - test: rhel/7.9 - - name: RHEL 8.3 - test: rhel/8.3 - #- name: FreeBSD 12.2 - # test: freebsd/12.2 + - name: macOS 14.3 + test: macos/14.3 + - name: RHEL 9.4 + test: rhel/9.4 + - name: FreeBSD 14.1 + test: 
freebsd/14.1 groups: - 1 - 2 @@ -249,74 +265,64 @@ stages: parameters: testFormat: devel/linux/{0} targets: - - name: CentOS 7 - test: centos7 - - name: Fedora 35 - test: fedora35 - - name: Fedora 36 - test: fedora36 - - name: openSUSE 15 - test: opensuse15 - - name: Ubuntu 20.04 - test: ubuntu2004 + - name: Fedora 42 + test: fedora42 + - name: Alpine 3.22 + test: alpine322 - name: Ubuntu 22.04 test: ubuntu2204 - - name: Alpine 3 - test: alpine3 + - name: Ubuntu 24.04 + test: ubuntu2404 groups: - 1 - 2 - 3 - - stage: Docker_2_13 - displayName: Docker 2.13 + - stage: Docker_2_20 + displayName: Docker 2.20 dependsOn: [] jobs: - template: templates/matrix.yml parameters: - testFormat: 2.13/linux/{0} + testFormat: 2.20/linux/{0} targets: - - name: Fedora 35 - test: fedora35 - - name: openSUSE 15 py2 - test: opensuse15py2 - - name: Alpine 3 - test: alpine3 + - name: Fedora 42 + test: fedora42 + - name: Alpine 3.22 + test: alpine322 groups: - 1 - 2 - 3 - - stage: Docker_2_12 - displayName: Docker 2.12 + - stage: Docker_2_19 + displayName: Docker 2.19 dependsOn: [] jobs: - template: templates/matrix.yml parameters: - testFormat: 2.12/linux/{0} + testFormat: 2.19/linux/{0} targets: - - name: CentOS 6 - test: centos6 - - name: Fedora 34 - test: fedora34 - - name: Ubuntu 18.04 - test: ubuntu1804 + - name: Fedora 41 + test: fedora41 + - name: Alpine 3.21 + test: alpine321 groups: - 1 - 2 - 3 - - stage: Docker_2_11 - displayName: Docker 2.11 + - stage: Docker_2_18 + displayName: Docker 2.18 dependsOn: [] jobs: - template: templates/matrix.yml parameters: - testFormat: 2.11/linux/{0} + testFormat: 2.18/linux/{0} targets: - - name: Fedora 32 - test: fedora32 - - name: Fedora 33 - test: fedora33 - - name: Alpine 3 - test: alpine3 + - name: Fedora 40 + test: fedora40 + - name: Alpine 3.20 + test: alpine320 + - name: Ubuntu 24.04 + test: ubuntu2404 groups: - 1 - 2 @@ -331,84 +337,92 @@ stages: parameters: testFormat: devel/linux-community/{0} targets: - - name: Debian Bullseye + - 
name: Debian 11 Bullseye test: debian-bullseye/3.9 + - name: Debian 12 Bookworm + test: debian-bookworm/3.11 + - name: Debian 13 Trixie + test: debian-13-trixie/3.13 - name: ArchLinux - test: archlinux/3.10 - - name: CentOS Stream 8 - test: centos-stream8/3.8 + test: archlinux/3.13 groups: - 1 - 2 - 3 -### Cloud - - stage: Cloud_devel - displayName: Cloud devel - dependsOn: [] - jobs: - - template: templates/matrix.yml - parameters: - nameFormat: Python {0} - testFormat: devel/cloud/{0}/1 - targets: - - test: 2.7 - - test: '3.10' - - stage: Cloud_2_13 - displayName: Cloud 2.13 - dependsOn: [] - jobs: - - template: templates/matrix.yml - parameters: - nameFormat: Python {0} - testFormat: 2.13/cloud/{0}/1 - targets: - - test: 3.9 - - stage: Cloud_2_12 - displayName: Cloud 2.12 - dependsOn: [] - jobs: - - template: templates/matrix.yml - parameters: - nameFormat: Python {0} - testFormat: 2.12/cloud/{0}/1 - targets: - - test: 3.8 - - stage: Cloud_2_11 - displayName: Cloud 2.11 - dependsOn: [] - jobs: - - template: templates/matrix.yml - parameters: - nameFormat: Python {0} - testFormat: 2.11/cloud/{0}/1 - targets: - - test: 2.7 - - test: 3.5 +### Generic +# Right now all generic tests are disabled. Uncomment when at least one of them is re-enabled. 
+# - stage: Generic_devel +# displayName: Generic devel +# dependsOn: [] +# jobs: +# - template: templates/matrix.yml +# parameters: +# nameFormat: Python {0} +# testFormat: devel/generic/{0}/1 +# targets: +# - test: '3.9' +# - test: '3.12' +# - test: '3.14' +# - stage: Generic_2_20 +# displayName: Generic 2.20 +# dependsOn: [] +# jobs: +# - template: templates/matrix.yml +# parameters: +# nameFormat: Python {0} +# testFormat: 2.20/generic/{0}/1 +# targets: +# - test: '3.10' +# - test: '3.14' +# - stage: Generic_2_19 +# displayName: Generic 2.19 +# dependsOn: [] +# jobs: +# - template: templates/matrix.yml +# parameters: +# nameFormat: Python {0} +# testFormat: 2.19/generic/{0}/1 +# targets: +# - test: '3.9' +# - test: '3.13' +# - stage: Generic_2_18 +# displayName: Generic 2.18 +# dependsOn: [] +# jobs: +# - template: templates/matrix.yml +# parameters: +# nameFormat: Python {0} +# testFormat: 2.18/generic/{0}/1 +# targets: +# - test: '3.8' +# - test: '3.13' - stage: Summary condition: succeededOrFailed() dependsOn: - Sanity_devel - - Sanity_2_11 - - Sanity_2_12 - - Sanity_2_13 + - Sanity_2_20 + - Sanity_2_19 + - Sanity_2_18 - Units_devel - - Units_2_11 - - Units_2_12 - - Units_2_13 + - Units_2_20 + - Units_2_19 + - Units_2_18 + - Remote_devel_extra_vms - Remote_devel - - Remote_2_11 - - Remote_2_12 - - Remote_2_13 + - Remote_2_20 + - Remote_2_19 + - Remote_2_18 - Docker_devel - - Docker_2_11 - - Docker_2_12 - - Docker_2_13 + - Docker_2_20 + - Docker_2_19 + - Docker_2_18 - Docker_community_devel - - Cloud_devel - - Cloud_2_11 - - Cloud_2_12 - - Cloud_2_13 +# Right now all generic tests are disabled. Uncomment when at least one of them is re-enabled. 
+# - Generic_devel +# - Generic_2_20 +# - Generic_2_19 +# - Generic_2_18 jobs: - template: templates/coverage.yml diff --git a/.azure-pipelines/scripts/aggregate-coverage.sh b/.azure-pipelines/scripts/aggregate-coverage.sh index 1ccfcf2073..ca2b19de97 100755 --- a/.azure-pipelines/scripts/aggregate-coverage.sh +++ b/.azure-pipelines/scripts/aggregate-coverage.sh @@ -1,4 +1,8 @@ #!/usr/bin/env bash +# Copyright (c) Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + # Aggregate code coverage results for later processing. set -o pipefail -eu diff --git a/.azure-pipelines/scripts/combine-coverage.py b/.azure-pipelines/scripts/combine-coverage.py index 506ade6460..3b2fd993db 100755 --- a/.azure-pipelines/scripts/combine-coverage.py +++ b/.azure-pipelines/scripts/combine-coverage.py @@ -1,4 +1,8 @@ #!/usr/bin/env python +# Copyright (c) Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + """ Combine coverage data from multiple jobs, keeping the data only from the most recent attempt from each job. Coverage artifacts must be named using the format: "Coverage $(System.JobAttempt) {StableUniqueNameForEachJob}" diff --git a/.azure-pipelines/scripts/process-results.sh b/.azure-pipelines/scripts/process-results.sh index f3f1d1bae8..1f4b8e4f10 100755 --- a/.azure-pipelines/scripts/process-results.sh +++ b/.azure-pipelines/scripts/process-results.sh @@ -1,4 +1,8 @@ #!/usr/bin/env bash +# Copyright (c) Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + # Check the test results and set variables for use in later steps. 
set -o pipefail -eu diff --git a/.azure-pipelines/scripts/publish-codecov.py b/.azure-pipelines/scripts/publish-codecov.py index ab947f9810..58e32f6d37 100755 --- a/.azure-pipelines/scripts/publish-codecov.py +++ b/.azure-pipelines/scripts/publish-codecov.py @@ -1,4 +1,8 @@ #!/usr/bin/env python +# Copyright (c) Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + """ Upload code coverage reports to codecov.io. Multiple coverage files from multiple languages are accepted and aggregated after upload. diff --git a/.azure-pipelines/scripts/report-coverage.sh b/.azure-pipelines/scripts/report-coverage.sh index c039f7dcbd..c08154b6f8 100755 --- a/.azure-pipelines/scripts/report-coverage.sh +++ b/.azure-pipelines/scripts/report-coverage.sh @@ -1,4 +1,8 @@ #!/usr/bin/env bash +# Copyright (c) Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + # Generate code coverage reports for uploading to Azure Pipelines and codecov.io. set -o pipefail -eu diff --git a/.azure-pipelines/scripts/run-tests.sh b/.azure-pipelines/scripts/run-tests.sh index a947fdf013..2cfdcf61ef 100755 --- a/.azure-pipelines/scripts/run-tests.sh +++ b/.azure-pipelines/scripts/run-tests.sh @@ -1,4 +1,8 @@ #!/usr/bin/env bash +# Copyright (c) Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + # Configure the test environment and run the tests. 
set -o pipefail -eu diff --git a/.azure-pipelines/scripts/time-command.py b/.azure-pipelines/scripts/time-command.py index 5e8eb8d4c8..85a7c3c171 100755 --- a/.azure-pipelines/scripts/time-command.py +++ b/.azure-pipelines/scripts/time-command.py @@ -1,4 +1,8 @@ #!/usr/bin/env python +# Copyright (c) Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + """Prepends a relative timestamp to each input line from stdin and writes it to stdout.""" from __future__ import (absolute_import, division, print_function) diff --git a/.azure-pipelines/templates/coverage.yml b/.azure-pipelines/templates/coverage.yml index 1b36ea45a4..1bf17e053a 100644 --- a/.azure-pipelines/templates/coverage.yml +++ b/.azure-pipelines/templates/coverage.yml @@ -1,3 +1,8 @@ +--- +# Copyright (c) Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + # This template adds a job for processing code coverage data. # It will upload results to Azure Pipelines and codecov.io. # Use it from a job stage that completes after all other jobs have completed. @@ -23,16 +28,6 @@ jobs: - bash: .azure-pipelines/scripts/report-coverage.sh displayName: Generate Coverage Report condition: gt(variables.coverageFileCount, 0) - - task: PublishCodeCoverageResults@1 - inputs: - codeCoverageTool: Cobertura - # Azure Pipelines only accepts a single coverage data file. - # That means only Python or PowerShell coverage can be uploaded, but not both. - # Set the "pipelinesCoverage" variable to determine which type is uploaded. - # Use "coverage" for Python and "coverage-powershell" for PowerShell. 
- summaryFileLocation: "$(outputPath)/reports/$(pipelinesCoverage).xml" - displayName: Publish to Azure Pipelines - condition: gt(variables.coverageFileCount, 0) - bash: .azure-pipelines/scripts/publish-codecov.py "$(outputPath)" displayName: Publish to codecov.io condition: gt(variables.coverageFileCount, 0) diff --git a/.azure-pipelines/templates/matrix.yml b/.azure-pipelines/templates/matrix.yml index 4e9555dd3b..49f5d8595a 100644 --- a/.azure-pipelines/templates/matrix.yml +++ b/.azure-pipelines/templates/matrix.yml @@ -1,3 +1,8 @@ +--- +# Copyright (c) Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + # This template uses the provided targets and optional groups to generate a matrix which is then passed to the test template. # If this matrix template does not provide the required functionality, consider using the test template directly instead. @@ -45,11 +50,11 @@ jobs: parameters: jobs: - ${{ if eq(length(parameters.groups), 0) }}: - - ${{ each target in parameters.targets }}: - - name: ${{ format(parameters.nameFormat, coalesce(target.name, target.test)) }} - test: ${{ format(parameters.testFormat, coalesce(target.test, target.name)) }} - - ${{ if not(eq(length(parameters.groups), 0)) }}: - - ${{ each group in parameters.groups }}: - ${{ each target in parameters.targets }}: - - name: ${{ format(format(parameters.nameGroupFormat, parameters.nameFormat), coalesce(target.name, target.test), group) }} - test: ${{ format(format(parameters.testGroupFormat, parameters.testFormat), coalesce(target.test, target.name), group) }} + - name: ${{ format(parameters.nameFormat, coalesce(target.name, target.test)) }} + test: ${{ format(parameters.testFormat, coalesce(target.test, target.name)) }} + - ${{ if not(eq(length(parameters.groups), 0)) }}: + - ${{ each group in parameters.groups }}: + - ${{ each target in parameters.targets }}: + - name: 
${{ format(format(parameters.nameGroupFormat, parameters.nameFormat), coalesce(target.name, target.test), group) }} + test: ${{ format(format(parameters.testGroupFormat, parameters.testFormat), coalesce(target.test, target.name), group) }} diff --git a/.azure-pipelines/templates/test.yml b/.azure-pipelines/templates/test.yml index 5250ed8023..b263379c06 100644 --- a/.azure-pipelines/templates/test.yml +++ b/.azure-pipelines/templates/test.yml @@ -1,3 +1,8 @@ +--- +# Copyright (c) Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + # This template uses the provided list of jobs to create test one or more test jobs. # It can be used directly if needed, or through the matrix template. @@ -9,37 +14,37 @@ parameters: jobs: - ${{ each job in parameters.jobs }}: - - job: test_${{ replace(replace(replace(job.test, '/', '_'), '.', '_'), '-', '_') }} - displayName: ${{ job.name }} - container: default - workspace: - clean: all - steps: - - checkout: self - fetchDepth: $(fetchDepth) - path: $(checkoutPath) - - bash: .azure-pipelines/scripts/run-tests.sh "$(entryPoint)" "${{ job.test }}" "$(coverageBranches)" - displayName: Run Tests - - bash: .azure-pipelines/scripts/process-results.sh - condition: succeededOrFailed() - displayName: Process Results - - bash: .azure-pipelines/scripts/aggregate-coverage.sh "$(Agent.TempDirectory)" - condition: eq(variables.haveCoverageData, 'true') - displayName: Aggregate Coverage Data - - task: PublishTestResults@2 - condition: eq(variables.haveTestResults, 'true') - inputs: - testResultsFiles: "$(outputPath)/junit/*.xml" - displayName: Publish Test Results - - task: PublishPipelineArtifact@1 - condition: eq(variables.haveBotResults, 'true') - displayName: Publish Bot Results - inputs: - targetPath: "$(outputPath)/bot/" - artifactName: "Bot $(System.JobAttempt) $(System.StageDisplayName) $(System.JobDisplayName)" - - 
task: PublishPipelineArtifact@1 - condition: eq(variables.haveCoverageData, 'true') - displayName: Publish Coverage Data - inputs: - targetPath: "$(Agent.TempDirectory)/coverage/" - artifactName: "Coverage $(System.JobAttempt) $(System.StageDisplayName) $(System.JobDisplayName)" + - job: test_${{ replace(replace(replace(job.test, '/', '_'), '.', '_'), '-', '_') }} + displayName: ${{ job.name }} + container: default + workspace: + clean: all + steps: + - checkout: self + fetchDepth: $(fetchDepth) + path: $(checkoutPath) + - bash: .azure-pipelines/scripts/run-tests.sh "$(entryPoint)" "${{ job.test }}" "$(coverageBranches)" + displayName: Run Tests + - bash: .azure-pipelines/scripts/process-results.sh + condition: succeededOrFailed() + displayName: Process Results + - bash: .azure-pipelines/scripts/aggregate-coverage.sh "$(Agent.TempDirectory)" + condition: eq(variables.haveCoverageData, 'true') + displayName: Aggregate Coverage Data + - task: PublishTestResults@2 + condition: eq(variables.haveTestResults, 'true') + inputs: + testResultsFiles: "$(outputPath)/junit/*.xml" + displayName: Publish Test Results + - task: PublishPipelineArtifact@1 + condition: eq(variables.haveBotResults, 'true') + displayName: Publish Bot Results + inputs: + targetPath: "$(outputPath)/bot/" + artifactName: "Bot $(System.JobAttempt) $(System.StageDisplayName) $(System.JobDisplayName)" + - task: PublishPipelineArtifact@1 + condition: eq(variables.haveCoverageData, 'true') + displayName: Publish Coverage Data + inputs: + targetPath: "$(Agent.TempDirectory)/coverage/" + artifactName: "Coverage $(System.JobAttempt) $(System.StageDisplayName) $(System.JobDisplayName)" diff --git a/.git-blame-ignore-revs b/.git-blame-ignore-revs new file mode 100644 index 0000000000..cd4bdfee65 --- /dev/null +++ b/.git-blame-ignore-revs @@ -0,0 +1,9 @@ +# Copyright (c) Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# 
SPDX-License-Identifier: GPL-3.0-or-later + +# YAML reformatting +d032de3b16eed11ea3a31cd3d96d78f7c46a2ee0 +e8f965fbf8154ea177c6622da149f2ae8533bd3c +e938ca5f20651abc160ee6aba10014013d04dcc1 +eaa5e07b2866e05b6c7b5628ca92e9cb1142d008 diff --git a/.github/BOTMETA.yml b/.github/BOTMETA.yml index ecedff1e81..d9d291f3b1 100644 --- a/.github/BOTMETA.yml +++ b/.github/BOTMETA.yml @@ -1,3 +1,8 @@ +--- +# Copyright (c) Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + notifications: true automerge: true files: @@ -8,9 +13,9 @@ files: support: community $actions: labels: action - $actions/system/iptables_state.py: + $actions/iptables_state.py: maintainers: quidame - $actions/system/shutdown.py: + $actions/shutdown.py: maintainers: nitzmahone samdoran aminvakil $becomes/: labels: become @@ -28,6 +33,8 @@ files: maintainers: $team_ansible_core $becomes/pmrun.py: maintainers: $team_ansible_core + $becomes/run0.py: + maintainers: konstruktoid $becomes/sesu.py: maintainers: nekonyuu $becomes/sudosu.py: @@ -45,20 +52,21 @@ files: $callbacks/cgroup_memory_recap.py: {} $callbacks/context_demo.py: {} $callbacks/counter_enabled.py: {} + $callbacks/default_without_diff.py: + maintainers: felixfontein $callbacks/dense.py: maintainers: dagwieers $callbacks/diy.py: maintainers: theque5t $callbacks/elastic.py: - maintainers: v1v keywords: apm observability - $callbacks/hipchat.py: {} + maintainers: v1v $callbacks/jabber.py: {} + $callbacks/log_plays.py: {} $callbacks/loganalytics.py: maintainers: zhcli $callbacks/logdna.py: {} $callbacks/logentries.py: {} - $callbacks/log_plays.py: {} $callbacks/logstash.py: maintainers: ujenmr $callbacks/mail.py: @@ -67,65 +75,85 @@ files: maintainers: rverchere $callbacks/null.py: {} $callbacks/opentelemetry.py: - maintainers: v1v keywords: opentelemetry observability + maintainers: v1v + $callbacks/print_task.py: + 
maintainers: demonpig $callbacks/say.py: - notify: chris-short - maintainers: $team_macos - labels: macos say keywords: brew cask darwin homebrew macosx macports osx + labels: macos say + maintainers: $team_macos + notify: chris-short $callbacks/selective.py: {} $callbacks/slack.py: {} $callbacks/splunk.py: {} $callbacks/sumologic.py: - maintainers: ryancurrah labels: sumologic + maintainers: ryancurrah $callbacks/syslog_json.py: maintainers: imjoseangel + $callbacks/tasks_only.py: + maintainers: felixfontein + $callbacks/timestamp.py: + maintainers: kurokobo $callbacks/unixy.py: - maintainers: akatch labels: unixy - $callbacks/yaml.py: {} + maintainers: akatch $connections/: labels: connections $connections/chroot.py: {} $connections/funcd.py: maintainers: mscherer $connections/iocage.py: {} + $connections/incus.py: + labels: incus + maintainers: stgraber $connections/jail.py: maintainers: $team_ansible_core $connections/lxc.py: {} $connections/lxd.py: - maintainers: mattclay labels: lxd + maintainers: mattclay $connections/qubes.py: maintainers: kushaldas $connections/saltstack.py: - maintainers: mscherer labels: saltstack + maintainers: mscherer + $connections/wsl.py: + maintainers: rgl $connections/zone.py: maintainers: $team_ansible_core $doc_fragments/: labels: docs_fragments + $doc_fragments/django.py: + maintainers: russoz $doc_fragments/hpe3par.py: - maintainers: farhan7500 gautamphegde labels: hpe3par + maintainers: farhan7500 gautamphegde $doc_fragments/hwc.py: - maintainers: $team_huawei labels: hwc + maintainers: $team_huawei $doc_fragments/nomad.py: - maintainers: chris93111 + maintainers: chris93111 apecnascimento + $doc_fragments/pipx.py: + maintainers: russoz $doc_fragments/xenserver.py: - maintainers: bvitnik labels: xenserver + maintainers: bvitnik + $filters/accumulate.py: + maintainers: VannTen $filters/counter.py: maintainers: keilr + $filters/crc32.py: + maintainers: jouir $filters/dict.py: maintainers: felixfontein $filters/dict_kv.py: 
maintainers: giner $filters/from_csv.py: maintainers: Ajpantuso + $filters/from_ini.py: + maintainers: sscheib $filters/groupby_as_dict.py: maintainers: felixfontein $filters/hashids.py: @@ -136,32 +164,68 @@ files: maintainers: Ajpantuso $filters/jc.py: maintainers: kellyjonbrazil + $filters/json_diff.yml: + maintainers: numo68 + $filters/json_patch.py: + maintainers: numo68 + $filters/json_patch.yml: + maintainers: numo68 + $filters/json_patch_recipe.yml: + maintainers: numo68 $filters/json_query.py: {} + $filters/keep_keys.py: + maintainers: vbotka + $filters/lists.py: + maintainers: cfiehe + $filters/lists_difference.yml: + maintainers: cfiehe + $filters/lists_intersect.yml: + maintainers: cfiehe $filters/lists_mergeby.py: maintainers: vbotka + $filters/lists_symmetric_difference.yml: + maintainers: cfiehe + $filters/lists_union.yml: + maintainers: cfiehe $filters/random_mac.py: {} + $filters/remove_keys.py: + maintainers: vbotka + $filters/replace_keys.py: + maintainers: vbotka + $filters/reveal_ansible_type.py: + maintainers: vbotka $filters/time.py: maintainers: resmo - $filters/unicode_normalize.py: - maintainers: Ajpantuso $filters/to_days.yml: maintainers: resmo $filters/to_hours.yml: maintainers: resmo + $filters/to_ini.py: + maintainers: sscheib $filters/to_milliseconds.yml: maintainers: resmo $filters/to_minutes.yml: maintainers: resmo $filters/to_months.yml: maintainers: resmo + $filters/to_nice_yaml.yml: + maintainers: felixfontein + $filters/to_prettytable.py: + maintainers: tgadiev $filters/to_seconds.yml: maintainers: resmo $filters/to_time_unit.yml: maintainers: resmo $filters/to_weeks.yml: maintainers: resmo + $filters/to_yaml.py: + maintainers: felixfontein + $filters/to_yaml.yml: + maintainers: felixfontein $filters/to_years.yml: maintainers: resmo + $filters/unicode_normalize.py: + maintainers: Ajpantuso $filters/version_sort.py: maintainers: ericzolf $inventories/: @@ -170,33 +234,37 @@ files: maintainers: opoplawski 
$inventories/gitlab_runners.py: maintainers: morph027 + $inventories/iocage.py: + maintainers: vbotka + $inventories/icinga2.py: + maintainers: BongoEADGC6 $inventories/linode.py: - maintainers: $team_linode - labels: cloud linode keywords: linode dynamic inventory script + labels: cloud linode + maintainers: $team_linode $inventories/lxd.py: maintainers: conloos $inventories/nmap.py: {} $inventories/online.py: maintainers: remyleone $inventories/opennebula.py: - maintainers: feldsam - labels: cloud opennebula keywords: opennebula dynamic inventory script - $inventories/proxmox.py: - maintainers: $team_virt ilijamt + labels: cloud opennebula + maintainers: feldsam + $inventories/scaleway.py: + labels: cloud scaleway + maintainers: $team_scaleway + $inventories/virtualbox.py: {} $inventories/xen_orchestra.py: maintainers: ddelnano shinuza - $inventories/icinga2.py: - maintainers: BongoEADGC6 - $inventories/scaleway.py: - maintainers: $team_scaleway - labels: cloud scaleway - $inventories/stackpath_compute.py: - maintainers: shayrybak - $inventories/virtualbox.py: {} $lookups/: labels: lookups + $lookups/binary_file.py: + maintainers: felixfontein + $lookups/bitwarden_secrets_manager.py: + maintainers: jantari + $lookups/bitwarden.py: + maintainers: lungj $lookups/cartesian.py: {} $lookups/chef_databag.py: {} $lookups/collection_version.py: @@ -204,40 +272,46 @@ files: $lookups/consul_kv.py: {} $lookups/credstash.py: {} $lookups/cyberarkpassword.py: - notify: cyberark-bizdev labels: cyberarkpassword + notify: cyberark-bizdev $lookups/dependent.py: maintainers: felixfontein $lookups/dig.py: - maintainers: jpmens labels: dig + maintainers: jpmens $lookups/dnstxt.py: maintainers: jpmens $lookups/dsv.py: - maintainers: amigus endlesstrax - $lookups/etcd3.py: - maintainers: eric-belhomme + ignore: amigus + maintainers: delineaKrehl tylerezimmerman $lookups/etcd.py: maintainers: jpmens + $lookups/etcd3.py: + maintainers: eric-belhomme $lookups/filetree.py: maintainers: 
dagwieers $lookups/flattened.py: {} + $lookups/github_app_access_token.py: + maintainers: weisheng-p blavoie $lookups/hiera.py: maintainers: jparrill $lookups/keyring.py: {} $lookups/lastpass.py: {} $lookups/lmdb_kv.py: maintainers: jpmens - $lookups/manifold.py: - maintainers: galanoff - labels: manifold + $lookups/merge_variables.py: + maintainers: rlenferink m-a-r-k-e alpex8 $lookups/onepass: - maintainers: samdoran labels: onepassword + maintainers: samdoran $lookups/onepassword.py: - maintainers: azenk scottsb + ignore: scottsb + maintainers: azenk $lookups/onepassword_raw.py: - maintainers: azenk scottsb + ignore: scottsb + maintainers: azenk + $lookups/onepassword_ssh_key.py: + maintainers: mohammedbabelly20 $lookups/passwordstore.py: {} $lookups/random_pet.py: maintainers: Akasurde @@ -251,1003 +325,1254 @@ files: maintainers: RevBits $lookups/shelvefile.py: {} $lookups/tss.py: - maintainers: amigus endlesstrax + ignore: amigus + maintainers: delineaKrehl tylerezimmerman $module_utils/: labels: module_utils + $module_utils/android_sdkmanager.py: + maintainers: shamilovstas + $module_utils/btrfs.py: + maintainers: gnfzdz + $module_utils/cmd_runner_fmt.py: + maintainers: russoz + $module_utils/cmd_runner.py: + maintainers: russoz + $module_utils/deps.py: + maintainers: russoz + $module_utils/django.py: + maintainers: russoz + $module_utils/gconftool2.py: + labels: gconftool2 + maintainers: russoz + $module_utils/gio_mime.py: + maintainers: russoz $module_utils/gitlab.py: - notify: jlozadad - maintainers: $team_gitlab - labels: gitlab keywords: gitlab source_control + labels: gitlab + maintainers: $team_gitlab + notify: jlozadad $module_utils/hwc_utils.py: - maintainers: $team_huawei - labels: huawei hwc_utils networking keywords: cloud huawei hwc + labels: huawei hwc_utils networking + maintainers: $team_huawei $module_utils/identity/keycloak/keycloak.py: maintainers: $team_keycloak + $module_utils/identity/keycloak/keycloak_clientsecret.py: + maintainers: 
$team_keycloak fynncfchen johncant $module_utils/ipa.py: - maintainers: $team_ipa labels: ipa + maintainers: $team_ipa + $module_utils/jenkins.py: + labels: jenkins + maintainers: russoz $module_utils/manageiq.py: - maintainers: $team_manageiq labels: manageiq + maintainers: $team_manageiq $module_utils/memset.py: - maintainers: glitchcrab labels: cloud memset $module_utils/mh/: - maintainers: russoz labels: module_helper + maintainers: russoz $module_utils/module_helper.py: - maintainers: russoz labels: module_helper + maintainers: russoz $module_utils/net_tools/pritunl/: maintainers: Lowess $module_utils/oracle/oci_utils.py: - maintainers: $team_oracle labels: cloud - $module_utils/pure.py: - maintainers: $team_purestorage - labels: pure pure_storage + maintainers: $team_oracle + $module_utils/pacemaker.py: + maintainers: munchtoast + $module_utils/pipx.py: + labels: pipx + maintainers: russoz + $module_utils/pkg_req.py: + maintainers: russoz + $module_utils/python_runner.py: + maintainers: russoz + $module_utils/puppet.py: + labels: puppet + maintainers: russoz $module_utils/redfish_utils.py: - maintainers: $team_redfish labels: redfish_utils + maintainers: $team_redfish $module_utils/remote_management/lxca/common.py: maintainers: navalkp prabhosa $module_utils/scaleway.py: - maintainers: $team_scaleway labels: cloud scaleway + maintainers: $team_scaleway + $module_utils/snap.py: + labels: snap + maintainers: russoz + $module_utils/ssh.py: + maintainers: russoz + $module_utils/systemd.py: + maintainers: NomakCooper $module_utils/storage/hpe3par/hpe3par.py: maintainers: farhan7500 gautamphegde $module_utils/utm_utils.py: - maintainers: $team_e_spirit labels: utm_utils + maintainers: $team_e_spirit + $module_utils/vardict.py: + labels: vardict + maintainers: russoz + $module_utils/wdc_redfish_utils.py: + labels: wdc_redfish_utils + maintainers: $team_wdc + $module_utils/xdg_mime.py: + maintainers: mhalano $module_utils/xenserver.py: - maintainers: bvitnik labels: 
xenserver - $modules/cloud/alicloud/: - maintainers: xiaozhu36 - $modules/cloud/atomic/atomic_container.py: - maintainers: giuseppe krsacme - $modules/cloud/atomic/: - maintainers: krsacme - $modules/cloud/centurylink/: - maintainers: clc-runner - $modules/cloud/dimensiondata/dimensiondata_network.py: - maintainers: aimonb tintoy - labels: dimensiondata_network - $modules/cloud/dimensiondata/dimensiondata_vlan.py: - maintainers: tintoy - $modules/cloud/heroku/heroku_collaborator.py: - maintainers: marns93 - $modules/cloud/huawei/: - maintainers: $team_huawei huaweicloud - keywords: cloud huawei hwc - $modules/cloud/linode/: - maintainers: $team_linode - $modules/cloud/linode/linode.py: - maintainers: zbal - $modules/cloud/lxc/lxc_container.py: - maintainers: cloudnull - $modules/cloud/lxd/: - ignore: hnakamur - $modules/cloud/lxd/lxd_profile.py: - maintainers: conloos - $modules/cloud/lxd/lxd_project.py: - maintainers: we10710aa - $modules/cloud/memset/: - maintainers: glitchcrab - $modules/cloud/misc/cloud_init_data_facts.py: - maintainers: resmo - $modules/cloud/misc/proxmox: - maintainers: $team_virt - labels: proxmox virt - keywords: kvm libvirt proxmox qemu - $modules/cloud/misc/proxmox.py: - maintainers: UnderGreen - ignore: skvidal - $modules/cloud/misc/proxmox_kvm.py: - maintainers: helldorado - ignore: skvidal - $modules/cloud/misc/proxmox_nic.py: - maintainers: Kogelvis - $modules/cloud/misc/proxmox_tasks_info: - maintainers: paginabianca - $modules/cloud/misc/proxmox_template.py: - maintainers: UnderGreen - ignore: skvidal - $modules/cloud/misc/rhevm.py: - maintainers: $team_virt TimothyVandenbrande - labels: rhevm virt - ignore: skvidal - keywords: kvm libvirt proxmox qemu - $modules/cloud/misc/: - ignore: ryansb - $modules/cloud/misc/terraform.py: - maintainers: m-yosefpor rainerleber - $modules/cloud/misc/xenserver_facts.py: - maintainers: caphrim007 cheese - labels: xenserver_facts - ignore: andyhky - $modules/cloud/oneandone/: - maintainers: 
aajdinov edevenport - $modules/cloud/online/: - maintainers: remyleone - $modules/cloud/opennebula/: - maintainers: $team_opennebula - $modules/cloud/opennebula/one_host.py: - maintainers: rvalle - $modules/cloud/oracle/oci_vcn.py: - maintainers: $team_oracle rohitChaware - $modules/cloud/ovh/: - maintainers: pascalheraud - $modules/cloud/ovh/ovh_monthly_billing.py: - maintainers: fraff - $modules/cloud/packet/packet_device.py: - maintainers: baldwinSPC t0mk teebes - $modules/cloud/packet/: - maintainers: nurfet-becirevic t0mk - $modules/cloud/packet/packet_sshkey.py: - maintainers: t0mk - $modules/cloud/profitbricks/: - maintainers: baldwinSPC - $modules/cloud/pubnub/pubnub_blocks.py: - maintainers: parfeon pubnub - $modules/cloud/rackspace/rax.py: - maintainers: omgjlk sivel - $modules/cloud/rackspace/: - ignore: ryansb sivel - $modules/cloud/rackspace/rax_cbs.py: - maintainers: claco - $modules/cloud/rackspace/rax_cbs_attachments.py: - maintainers: claco - $modules/cloud/rackspace/rax_cdb.py: - maintainers: jails - $modules/cloud/rackspace/rax_cdb_user.py: - maintainers: jails - $modules/cloud/rackspace/rax_cdb_database.py: - maintainers: jails - $modules/cloud/rackspace/rax_clb.py: - maintainers: claco - $modules/cloud/rackspace/rax_clb_nodes.py: - maintainers: neuroid - $modules/cloud/rackspace/rax_clb_ssl.py: - maintainers: smashwilson - $modules/cloud/rackspace/rax_files.py: - maintainers: angstwad - $modules/cloud/rackspace/rax_files_objects.py: - maintainers: angstwad - $modules/cloud/rackspace/rax_identity.py: - maintainers: claco - $modules/cloud/rackspace/rax_network.py: - maintainers: claco omgjlk - $modules/cloud/rackspace/rax_mon_alarm.py: - maintainers: smashwilson - $modules/cloud/rackspace/rax_mon_check.py: - maintainers: smashwilson - $modules/cloud/rackspace/rax_mon_entity.py: - maintainers: smashwilson - $modules/cloud/rackspace/rax_mon_notification.py: - maintainers: smashwilson - $modules/cloud/rackspace/rax_mon_notification_plan.py: - 
maintainers: smashwilson - $modules/cloud/rackspace/rax_queue.py: - maintainers: claco - $modules/cloud/scaleway/: - maintainers: $team_scaleway - $modules/cloud/scaleway/scaleway_database_backup.py: - maintainers: guillaume_ro_fr - $modules/cloud/scaleway/scaleway_image_info.py: - maintainers: Spredzy - $modules/cloud/scaleway/scaleway_ip_info.py: - maintainers: Spredzy - $modules/cloud/scaleway/scaleway_organization_info.py: - maintainers: Spredzy - $modules/cloud/scaleway/scaleway_private_network.py: - maintainers: pastral - $modules/cloud/scaleway/scaleway_security_group.py: - maintainers: DenBeke - $modules/cloud/scaleway/scaleway_security_group_info.py: - maintainers: Spredzy - $modules/cloud/scaleway/scaleway_security_group_rule.py: - maintainers: DenBeke - $modules/cloud/scaleway/scaleway_server_info.py: - maintainers: Spredzy - $modules/cloud/scaleway/scaleway_snapshot_info.py: - maintainers: Spredzy - $modules/cloud/scaleway/scaleway_volume.py: - labels: scaleway_volume - ignore: hekonsek - $modules/cloud/scaleway/scaleway_volume_info.py: - maintainers: Spredzy - $modules/cloud/smartos/: - maintainers: $team_solaris - labels: solaris - keywords: beadm dladm illumos ipadm nexenta omnios openindiana pfexec smartos solaris sunos zfs zpool - $modules/cloud/smartos/nictagadm.py: - maintainers: SmithX10 - $modules/cloud/softlayer/sl_vm.py: - maintainers: mcltn - $modules/cloud/spotinst/spotinst_aws_elastigroup.py: - maintainers: talzur - $modules/cloud/univention/: - maintainers: keachi - $modules/cloud/webfaction/: - maintainers: quentinsf - $modules/cloud/xenserver/: maintainers: bvitnik - $modules/clustering/consul/: - maintainers: $team_consul - ignore: colin-nolan - $modules/clustering/etcd3.py: - maintainers: evrardjp - ignore: vfauth - $modules/clustering/nomad/: - maintainers: chris93111 - $modules/clustering/pacemaker_cluster.py: - maintainers: matbu - $modules/clustering/znode.py: - maintainers: treyperry - 
$modules/database/aerospike/aerospike_migrations.py: + $module_utils/xfconf.py: + labels: xfconf + maintainers: russoz + $modules/aerospike_migrations.py: maintainers: Alb0t - $modules/database/influxdb/: - maintainers: kamsz - $modules/database/influxdb/influxdb_query.py: - maintainers: resmo - $modules/database/influxdb/influxdb_user.py: - maintainers: zhhuta - $modules/database/influxdb/influxdb_write.py: - maintainers: resmo - $modules/database/misc/elasticsearch_plugin.py: - maintainers: ThePixelDeveloper samdoran - $modules/database/misc/kibana_plugin.py: - maintainers: barryib - $modules/database/misc/odbc.py: - maintainers: john-westcott-iv - $modules/database/misc/redis.py: - maintainers: slok - $modules/database/misc/redis_info.py: - maintainers: levonet - $modules/database/misc/redis_data_info.py: - maintainers: paginabianca - $modules/database/misc/redis_data.py: - maintainers: paginabianca - $modules/database/misc/redis_data_incr.py: - maintainers: paginabianca - $modules/database/misc/riak.py: - maintainers: drewkerrigan jsmartin - $modules/database/mssql/mssql_db.py: - maintainers: vedit Jmainguy kenichi-ogawa-1988 - labels: mssql_db - $modules/database/mssql/mssql_script.py: - maintainers: kbudde - labels: mssql_script - $modules/database/saphana/hana_query.py: - maintainers: rainerleber - $modules/database/vertica/: - maintainers: dareko - $modules/files/archive.py: - maintainers: bendoh - $modules/files/filesize.py: - maintainers: quidame - $modules/files/ini_file.py: - maintainers: jpmens noseka1 - $modules/files/iso_create.py: - maintainers: Tomorrow9 - $modules/files/iso_extract.py: - maintainers: dagwieers jhoekx ribbons - $modules/files/read_csv.py: - maintainers: dagwieers - $modules/files/sapcar_extract.py: - maintainers: RainerLeber - $modules/files/xattr.py: - maintainers: bcoca - labels: xattr - $modules/files/xml.py: - maintainers: dagwieers magnus919 tbielawa cmprescott sm4rk0 - labels: m:xml xml - ignore: magnus919 - 
$modules/identity/ipa/: - maintainers: $team_ipa - $modules/identity/ipa/ipa_pwpolicy.py: - maintainers: adralioh - $modules/identity/ipa/ipa_service.py: - maintainers: cprh - $modules/identity/ipa/ipa_vault.py: - maintainers: jparrill - $modules/identity/keycloak/: - maintainers: $team_keycloak - $modules/identity/keycloak/keycloak_authentication.py: - maintainers: elfelip Gaetan2907 - $modules/identity/keycloak/keycloak_clientscope.py: - maintainers: Gaetan2907 - $modules/identity/keycloak/keycloak_client_rolemapping.py: - maintainers: Gaetan2907 - $modules/identity/keycloak/keycloak_group.py: - maintainers: adamgoossens - $modules/identity/keycloak/keycloak_identity_provider.py: - maintainers: laurpaum - $modules/identity/keycloak/keycloak_realm_info.py: - maintainers: fynncfchen - $modules/identity/keycloak/keycloak_realm.py: - maintainers: kris2kris - $modules/identity/keycloak/keycloak_role.py: - maintainers: laurpaum - $modules/identity/keycloak/keycloak_user_federation.py: - maintainers: laurpaum - $modules/identity/onepassword_info.py: - maintainers: Rylon - $modules/identity/opendj/opendj_backendprop.py: - maintainers: dj-wasabi - $modules/monitoring/airbrake_deployment.py: - maintainers: phumpal + $modules/airbrake_deployment.py: + ignore: bpennypacker labels: airbrake_deployment - ignore: bpennypacker - $modules/monitoring/alerta_customer.py: - maintainers: cwollinger - $modules/monitoring/bigpanda.py: - maintainers: hkariti - $modules/monitoring/circonus_annotation.py: - maintainers: NickatEpic - $modules/monitoring/datadog/datadog_event.py: - maintainers: n0ts - labels: datadog_event - ignore: arturaz - $modules/monitoring/datadog/datadog_downtime.py: - maintainers: Datadog - $modules/monitoring/datadog/datadog_monitor.py: - maintainers: skornehl - $modules/monitoring/honeybadger_deployment.py: - maintainers: stympy - $modules/monitoring/icinga2_feature.py: - maintainers: nerzhul - $modules/monitoring/icinga2_host.py: - maintainers: t794104 - 
$modules/monitoring/librato_annotation.py: - maintainers: Sedward - $modules/monitoring/logentries.py: - labels: logentries - ignore: ivanvanderbyl - $modules/monitoring/logstash_plugin.py: - maintainers: nerzhul - $modules/monitoring/monit.py: - maintainers: dstoflet brian-brazil snopoke - labels: monit - $modules/monitoring/nagios.py: - maintainers: tbielawa tgoetheyn - $modules/monitoring/newrelic_deployment.py: - maintainers: mcodd - $modules/monitoring/pagerduty.py: - maintainers: suprememoocow thaumos - labels: pagerduty - ignore: bpennypacker - $modules/monitoring/pagerduty_alert.py: - maintainers: ApsOps - $modules/monitoring/pagerduty_change.py: - maintainers: adamvaughan - $modules/monitoring/pagerduty_user.py: - maintainers: zanssa - $modules/monitoring/pingdom.py: - maintainers: thaumos - $modules/monitoring/rollbar_deployment.py: - maintainers: kavu - $modules/monitoring/sensu/sensu_check.py: - maintainers: andsens - $modules/monitoring/sensu/: - maintainers: dmsimard - $modules/monitoring/sensu/sensu_silence.py: - maintainers: smbambling - $modules/monitoring/sensu/sensu_subscription.py: - maintainers: andsens - $modules/monitoring/spectrum_device.py: - maintainers: orgito - $modules/monitoring/spectrum_model_attrs.py: - maintainers: tgates81 - $modules/monitoring/stackdriver.py: - maintainers: bwhaley - $modules/monitoring/statsd.py: - maintainers: mamercad - $modules/monitoring/statusio_maintenance.py: - maintainers: bhcopeland - $modules/monitoring/uptimerobot.py: - maintainers: nate-kingsley - $modules/net_tools/cloudflare_dns.py: - maintainers: mgruener - labels: cloudflare_dns - $modules/net_tools/dnsimple.py: - maintainers: drcapulet - $modules/net_tools/dnsimple_info.py: - maintainers: edhilgendorf - $modules/net_tools/dnsmadeeasy.py: - maintainers: briceburg - $modules/net_tools/gandi_livedns.py: - maintainers: gthiemonge - $modules/net_tools/haproxy.py: - maintainers: ravibhure Normo - $modules/net_tools/infinity/infinity.py: - maintainers: 
MeganLiu - $modules/net_tools/ip_netns.py: - maintainers: bregman-arie - $modules/net_tools/ipify_facts.py: - maintainers: resmo - $modules/net_tools/ipinfoio_facts.py: - maintainers: akostyuk - $modules/net_tools/ipwcli_dns.py: - maintainers: cwollinger - $modules/net_tools/ldap/ldap_attrs.py: - maintainers: drybjed jtyr noles - $modules/net_tools/ldap/ldap_entry.py: - maintainers: jtyr - $modules/net_tools/ldap/ldap_passwd.py: - maintainers: KellerFuchs jtyr - $modules/net_tools/ldap/ldap_search.py: - maintainers: eryx12o45 jtyr - $modules/net_tools/lldp.py: - labels: lldp - ignore: andyhky - $modules/net_tools/netcup_dns.py: - maintainers: nbuchwitz - $modules/net_tools/nsupdate.py: - maintainers: nerzhul - $modules/net_tools/omapi_host.py: - maintainers: amasolov nerzhul - $modules/net_tools/pritunl/: - maintainers: Lowess - $modules/net_tools/nmcli.py: - maintainers: alcamie101 - $modules/net_tools/snmp_facts.py: - maintainers: ogenstad ujwalkomarla - $modules/notification/bearychat.py: - maintainers: tonyseek - $modules/notification/campfire.py: - maintainers: fabulops - $modules/notification/catapult.py: - maintainers: Jmainguy - $modules/notification/cisco_webex.py: - maintainers: drew-russell - $modules/notification/discord.py: - maintainers: cwollinger - $modules/notification/flowdock.py: - maintainers: mcodd - $modules/notification/grove.py: - maintainers: zimbatm - $modules/notification/hipchat.py: - maintainers: pb8226 shirou - $modules/notification/irc.py: - maintainers: jpmens sivel - $modules/notification/jabber.py: - maintainers: bcoca - $modules/notification/logentries_msg.py: - maintainers: jcftang - $modules/notification/mail.py: - maintainers: dagwieers - $modules/notification/matrix.py: - maintainers: jcgruenhage - $modules/notification/mattermost.py: - maintainers: bjolivot - $modules/notification/mqtt.py: - maintainers: jpmens - $modules/notification/nexmo.py: - maintainers: sivel - $modules/notification/office_365_connector_card.py: - 
maintainers: marc-sensenich - $modules/notification/pushbullet.py: - maintainers: willybarro - $modules/notification/pushover.py: - maintainers: weaselkeeper wopfel - $modules/notification/rocketchat.py: - maintainers: Deepakkothandan - labels: rocketchat - ignore: ramondelafuente - $modules/notification/say.py: - maintainers: $team_ansible_core mpdehaan - $modules/notification/sendgrid.py: - maintainers: makaimc - $modules/notification/slack.py: - maintainers: ramondelafuente - $modules/notification/syslogger.py: - maintainers: garbled1 - $modules/notification/telegram.py: - maintainers: tyouxa loms lomserman - $modules/notification/twilio.py: - maintainers: makaimc - $modules/notification/typetalk.py: - maintainers: tksmd - $modules/packaging/language/ansible_galaxy_install.py: - maintainers: russoz - $modules/packaging/language/bower.py: - maintainers: mwarkentin - $modules/packaging/language/bundler.py: - maintainers: thoiberg - $modules/packaging/language/cargo.py: - maintainers: radek-sprta - $modules/packaging/language/composer.py: - maintainers: dmtrs - ignore: resmo - $modules/packaging/language/cpanm.py: - maintainers: fcuny russoz - $modules/packaging/language/easy_install.py: - maintainers: mattupstate - $modules/packaging/language/gem.py: - maintainers: $team_ansible_core johanwiren - labels: gem - $modules/packaging/language/maven_artifact.py: - maintainers: tumbl3w33d turb - labels: maven_artifact - ignore: chrisisbeef - $modules/packaging/language/npm.py: - maintainers: shane-walker xcambar - labels: npm - ignore: chrishoffman - $modules/packaging/language/pear.py: - labels: pear - ignore: jle64 - $modules/packaging/language/pip_package_info.py: - maintainers: bcoca matburt maxamillion - $modules/packaging/language/pipx.py: - maintainers: russoz - $modules/packaging/language/yarn.py: - maintainers: chrishoffman verkaufer - $modules/packaging/os/apk.py: - maintainers: tdtrask - labels: apk - ignore: kbrebanov - $modules/packaging/os/apt_repo.py: - 
maintainers: obirvalger - $modules/packaging/os/apt_rpm.py: - maintainers: evgkrsk - $modules/packaging/os/copr.py: - maintainers: schlupov - $modules/packaging/os/dnf_versionlock.py: - maintainers: moreda - $modules/packaging/os/flatpak.py: - maintainers: $team_flatpak - $modules/packaging/os/flatpak_remote.py: - maintainers: $team_flatpak - $modules/packaging/os/pkg5: - maintainers: $team_solaris mavit - labels: pkg5 solaris - keywords: beadm dladm illumos ipadm nexenta omnios openindiana pfexec smartos solaris sunos zfs zpool - $modules/packaging/os/homebrew.py: - notify: chris-short - maintainers: $team_macos andrew-d - labels: homebrew macos - ignore: ryansb - keywords: brew cask darwin homebrew macosx macports osx - $modules/packaging/os/homebrew_cask.py: - notify: chris-short - maintainers: $team_macos enriclluelles - labels: homebrew_ macos - ignore: ryansb - keywords: brew cask darwin homebrew macosx macports osx - $modules/packaging/os/homebrew_tap.py: - notify: chris-short - maintainers: $team_macos - labels: homebrew_ macos - ignore: ryansb - keywords: brew cask darwin homebrew macosx macports osx - $modules/packaging/os/installp.py: - maintainers: $team_aix kairoaraujo - labels: aix installp + maintainers: phumpal + $modules/aix: keywords: aix efix lpar wpar - $modules/packaging/os/layman.py: - maintainers: jirutka - $modules/packaging/os/macports.py: - notify: chris-short - maintainers: $team_macos jcftang - labels: macos macports - ignore: ryansb - keywords: brew cask darwin homebrew macosx macports osx - $modules/packaging/os/mas.py: - maintainers: lukasbestle mheap - $modules/packaging/os/openbsd_pkg.py: - maintainers: $team_bsd eest - labels: bsd openbsd_pkg - ignore: ryansb - keywords: doas dragonfly freebsd iocage jail netbsd openbsd opnsense pfsense - $modules/packaging/os/opkg.py: - maintainers: skinp - $modules/packaging/os/pacman.py: - maintainers: elasticdog indrajitr tchernomax jraby - labels: pacman - ignore: elasticdog - 
$modules/packaging/os/pacman_key.py: - maintainers: grawlinson - labels: pacman - $modules/packaging/os/pkgin.py: - maintainers: $team_solaris L2G jasperla szinck martinm82 - labels: pkgin solaris - $modules/packaging/os/pkgng.py: - maintainers: $team_bsd bleader - labels: bsd pkgng - ignore: bleader - keywords: doas dragonfly freebsd iocage jail netbsd openbsd opnsense pfsense - $modules/packaging/os/pkgutil.py: - maintainers: $team_solaris dermute - labels: pkgutil solaris - $modules/packaging/os/portage.py: - maintainers: Tatsh wltjr - labels: portage - ignore: sayap - $modules/packaging/os/portinstall.py: - maintainers: $team_bsd berenddeboer - labels: bsd portinstall - ignore: ryansb - keywords: doas dragonfly freebsd iocage jail netbsd openbsd opnsense pfsense - $modules/packaging/os/pulp_repo.py: - maintainers: sysadmind - $modules/packaging/os/redhat_subscription.py: - maintainers: barnabycourt alikins kahowell - labels: redhat_subscription - $modules/packaging/os/rhn_channel.py: - maintainers: vincentvdk alikins $team_rhn - labels: rhn_channel - $modules/packaging/os/rhn_register.py: - maintainers: jlaska $team_rhn - labels: rhn_register - $modules/packaging/os/rhsm_release.py: - maintainers: seandst - $modules/packaging/os/rhsm_repository.py: - maintainers: giovannisciortino - $modules/packaging/os/rpm_ostree_pkg.py: - maintainers: dustymabe Akasurde - $modules/packaging/os/slackpkg.py: - maintainers: KimNorgaard - $modules/packaging/os/snap.py: - maintainers: angristan vcarceler - labels: snap - $modules/packaging/os/snap_alias.py: - maintainers: russoz - labels: snap - $modules/packaging/os/sorcery.py: - maintainers: vaygr - $modules/packaging/os/svr4pkg.py: - maintainers: $team_solaris brontitall - labels: solaris svr4pkg - $modules/packaging/os/swdepot.py: - maintainers: $team_hpux melodous - labels: hpux swdepot - keywords: hp-ux - $modules/packaging/os/swupd.py: - maintainers: hnanni albertomurillo - labels: swupd - $modules/packaging/os/urpmi.py: - 
maintainers: pmakowski - $modules/packaging/os/xbps.py: - maintainers: dinoocch the-maldridge - $modules/packaging/os/yum_versionlock.py: - maintainers: florianpaulhoberg aminvakil - $modules/packaging/os/zypper.py: - maintainers: $team_suse - labels: zypper - ignore: dirtyharrycallahan robinro - $modules/packaging/os/zypper_repository.py: - maintainers: $team_suse - labels: zypper - ignore: matze - $modules/remote_management/cobbler/: - maintainers: dagwieers - $modules/remote_management/hpilo/: - maintainers: haad - ignore: dagwieers - $modules/remote_management/imc/imc_rest.py: - maintainers: dagwieers - labels: cisco - $modules/remote_management/ipmi/: - maintainers: bgaifullin cloudnull - $modules/remote_management/lenovoxcc/: - maintainers: panyy3 renxulei - $modules/remote_management/lxca/: - maintainers: navalkp prabhosa - $modules/remote_management/manageiq/: - labels: manageiq - maintainers: $team_manageiq - $modules/remote_management/manageiq/manageiq_alert_profiles.py: - maintainers: elad661 - $modules/remote_management/manageiq/manageiq_alerts.py: - maintainers: elad661 - $modules/remote_management/manageiq/manageiq_group.py: - maintainers: evertmulder - $modules/remote_management/manageiq/manageiq_tenant.py: - maintainers: evertmulder - $modules/remote_management/oneview/: - maintainers: adriane-cardozo fgbulsoni tmiotto - $modules/remote_management/oneview/oneview_datacenter_info.py: - maintainers: aalexmonteiro madhav-bharadwaj ricardogpsf soodpr - $modules/remote_management/oneview/oneview_fc_network.py: - maintainers: fgbulsoni - $modules/remote_management/oneview/oneview_fcoe_network.py: - maintainers: fgbulsoni - $modules/remote_management/redfish/: - maintainers: $team_redfish - ignore: jose-delarosa - $modules/remote_management/stacki/stacki_host.py: - maintainers: bsanders bbyhuy - labels: stacki_host - $modules/remote_management/wakeonlan.py: - maintainers: dagwieers - $modules/source_control/bitbucket/: - maintainers: catcombo - 
$modules/source_control/bzr.py: - maintainers: andreparames - $modules/source_control/git_config.py: - maintainers: djmattyg007 mgedmin - $modules/source_control/github/github_deploy_key.py: - maintainers: bincyber - $modules/source_control/github/github_issue.py: - maintainers: Akasurde - $modules/source_control/github/github_key.py: - maintainers: erydo - labels: github_key - ignore: erydo - $modules/source_control/github/github_release.py: - maintainers: adrianmoisey - $modules/source_control/github/github_repo.py: - maintainers: atorrescogollo - $modules/source_control/github/: - maintainers: stpierre - $modules/source_control/gitlab/: - notify: jlozadad - maintainers: $team_gitlab - keywords: gitlab source_control - $modules/source_control/gitlab/gitlab_project_variable.py: - maintainers: markuman - $modules/source_control/gitlab/gitlab_runner.py: - maintainers: SamyCoenen - $modules/source_control/gitlab/gitlab_user.py: - maintainers: LennertMertens stgrace - $modules/source_control/gitlab/gitlab_branch.py: - maintainers: paytroff - $modules/source_control/hg.py: - maintainers: yeukhon - $modules/storage/emc/emc_vnx_sg_member.py: - maintainers: remixtj - $modules/storage/hpe3par/ss_3par_cpg.py: - maintainers: farhan7500 gautamphegde - $modules/storage/ibm/: - maintainers: tzure - $modules/storage/pmem/pmem.py: - maintainers: mizumm - $modules/storage/vexata/: - maintainers: vexata - $modules/storage/zfs/: - maintainers: $team_solaris - labels: solaris - keywords: beadm dladm illumos ipadm nexenta omnios openindiana pfexec smartos solaris sunos zfs zpool - $modules/storage/zfs/zfs.py: - maintainers: johanwiren - $modules/storage/zfs/zfs_delegate_admin.py: - maintainers: natefoo - $modules/system/aix: - maintainers: $team_aix labels: aix - keywords: aix efix lpar wpar - $modules/system/alternatives.py: - maintainers: mulby - labels: alternatives - ignore: DavidWittman - $modules/system/aix_lvol.py: - maintainers: adejoux - $modules/system/awall.py: - 
maintainers: tdtrask - $modules/system/beadm.py: - maintainers: $team_solaris - labels: beadm solaris - keywords: beadm dladm illumos ipadm nexenta omnios openindiana pfexec smartos solaris sunos zfs zpool - $modules/system/capabilities.py: - maintainers: natefoo - $modules/system/cronvar.py: - maintainers: dougluce - $modules/system/crypttab.py: - maintainers: groks - $modules/system/dconf.py: - maintainers: azaghal - $modules/system/dpkg_divert.py: - maintainers: quidame - $modules/system/facter.py: - maintainers: $team_ansible_core gamethis - labels: facter - $modules/system/filesystem.py: - maintainers: pilou- abulimov quidame - labels: filesystem - $modules/system/gconftool2.py: - maintainers: Akasurde kevensen - labels: gconftool2 - $modules/system/homectl.py: - maintainers: jameslivulpi - $modules/system/interfaces_file.py: - maintainers: obourdon hryamzik - labels: interfaces_file - $modules/system/iptables_state.py: - maintainers: quidame - $modules/system/shutdown.py: - maintainers: nitzmahone samdoran aminvakil - $modules/system/java_cert.py: - maintainers: haad absynth76 - $modules/system/java_keystore.py: - maintainers: Mogztter quidame - $modules/system/kernel_blacklist.py: - maintainers: matze - $modules/system/launchd.py: - maintainers: martinm82 - $modules/system/lbu.py: - maintainers: kunkku - $modules/system/listen_ports_facts.py: - maintainers: ndavison - $modules/system/locale_gen.py: - maintainers: AugustusKling - $modules/system/lvg.py: - maintainers: abulimov - $modules/system/lvol.py: - maintainers: abulimov jhoekx zigaSRC unkaputtbar112 - $modules/system/make.py: - maintainers: LinusU - $modules/system/mksysb.py: maintainers: $team_aix - labels: aix mksysb - $modules/system/modprobe.py: - maintainers: jdauphant mattjeffery - labels: modprobe - ignore: stygstra - $modules/system/nosh.py: - maintainers: tacatac - $modules/system/ohai.py: - maintainers: $team_ansible_core mpdehaan - labels: ohai - $modules/system/open_iscsi.py: - maintainers: 
srvg - $modules/system/openwrt_init.py: - maintainers: agaffney - $modules/system/osx_defaults.py: - notify: chris-short - maintainers: $team_macos notok - labels: macos osx_defaults - keywords: brew cask darwin homebrew macosx macports osx - $modules/system/pam_limits.py: - maintainers: giovannisciortino - labels: pam_limits - ignore: usawa - $modules/system/pamd.py: - maintainers: kevensen - $modules/system/parted.py: - maintainers: ColOfAbRiX rosowiecki jake2184 - $modules/system/pids.py: - maintainers: saranyasridharan - $modules/system/puppet.py: - maintainers: nibalizer emonty - labels: puppet - $modules/system/python_requirements_info.py: - maintainers: willthames - ignore: ryansb - $modules/system/runit.py: - maintainers: jsumners - $modules/system/sap_task_list_execute: - maintainers: rainerleber - $modules/system/sefcontext.py: - maintainers: dagwieers - $modules/system/selinux_permissive.py: - maintainers: mscherer - $modules/system/selogin.py: - maintainers: bachradsusi dankeder jamescassell - $modules/system/seport.py: - maintainers: dankeder - $modules/system/solaris_zone.py: - maintainers: $team_solaris pmarkham - labels: solaris - keywords: beadm dladm illumos ipadm nexenta omnios openindiana pfexec smartos solaris sunos zfs zpool - $modules/system/ssh_config.py: - maintainers: gaqzi Akasurde - $modules/system/sudoers.py: - maintainers: JonEllis - $modules/system/svc.py: - maintainers: bcoca - $modules/system/syspatch.py: - maintainers: precurse - $modules/system/sysrc.py: - maintainers: dlundgren - $modules/system/sysupgrade.py: - maintainers: precurse - $modules/system/timezone.py: - maintainers: indrajitr jasperla tmshn - $modules/system/ufw.py: - notify: felixfontein - maintainers: ahtik ovcharenko pyykkis - labels: ufw - $modules/system/vdo.py: - maintainers: rhawalsh bgurney-rh - $modules/system/xfconf.py: - maintainers: russoz jbenden - labels: xfconf - $modules/system/xfconf_info.py: + $modules/aix_lvol.py: + maintainers: adejoux + 
$modules/alerta_customer.py: + maintainers: cwollinger + $modules/ali_: + maintainers: xiaozhu36 + $modules/alternatives.py: + ignore: DavidWittman jiuka + labels: alternatives + maintainers: mulby + $modules/android_sdk.py: + maintainers: shamilovstas + $modules/ansible_galaxy_install.py: maintainers: russoz - labels: xfconf - $modules/system/xfs_quota.py: - maintainers: bushvin - $modules/web_infrastructure/apache2_mod_proxy.py: + $modules/apache2_mod_proxy.py: maintainers: oboukili - $modules/web_infrastructure/apache2_module.py: - maintainers: berendt n0trax + $modules/apache2_module.py: ignore: robinro - $modules/web_infrastructure/deploy_helper.py: + maintainers: berendt n0trax + $modules/apk.py: + ignore: kbrebanov + labels: apk + maintainers: tdtrask + $modules/apt_repo.py: + maintainers: obirvalger + $modules/apt_rpm.py: + maintainers: evgkrsk + $modules/archive.py: + maintainers: bendoh + $modules/atomic_: + maintainers: krsacme + $modules/atomic_container.py: + maintainers: giuseppe krsacme + $modules/awall.py: + maintainers: tdtrask + $modules/beadm.py: + keywords: beadm dladm illumos ipadm nexenta omnios openindiana pfexec smartos solaris sunos zfs zpool + labels: beadm solaris + maintainers: $team_solaris + $modules/bigpanda.py: + ignore: hkariti + $modules/bitbucket_: + maintainers: catcombo + $modules/bootc_manage.py: + maintainers: cooktheryan + $modules/bower.py: + maintainers: mwarkentin + $modules/btrfs_: + maintainers: gnfzdz + $modules/bundler.py: + maintainers: thoiberg + $modules/bzr.py: + maintainers: andreparames + $modules/campfire.py: + maintainers: fabulops + $modules/capabilities.py: + maintainers: natefoo + $modules/cargo.py: + maintainers: radek-sprta + $modules/catapult.py: + maintainers: Jmainguy + $modules/circonus_annotation.py: + maintainers: NickatEpic + $modules/cisco_webex.py: + maintainers: drew-russell + $modules/cloud_init_data_facts.py: + maintainers: resmo + $modules/cloudflare_dns.py: + labels: cloudflare_dns + 
maintainers: mgruener + $modules/cobbler_: + maintainers: dagwieers + $modules/composer.py: + ignore: resmo + maintainers: dmtrs + $modules/consul: + ignore: colin-nolan Hakon + maintainers: $team_consul + $modules/copr.py: + maintainers: schlupov + $modules/cpanm.py: + maintainers: fcuny russoz + $modules/cronvar.py: + maintainers: dougluce + $modules/crypttab.py: + maintainers: groks + $modules/datadog_downtime.py: + maintainers: Datadog + $modules/datadog_event.py: + ignore: arturaz + labels: datadog_event + maintainers: n0ts + $modules/datadog_monitor.py: + ignore: skornehl + $modules/dconf.py: + maintainers: azaghal + $modules/decompress.py: + maintainers: shamilovstas + $modules/deploy_helper.py: maintainers: ramondelafuente - $modules/web_infrastructure/django_manage.py: + $modules/dimensiondata_network.py: + labels: dimensiondata_network + maintainers: aimonb tintoy + $modules/dimensiondata_vlan.py: + maintainers: tintoy + $modules/discord.py: + maintainers: cwollinger + $modules/django_check.py: maintainers: russoz + $modules/django_command.py: + maintainers: russoz + $modules/django_createcachetable.py: + maintainers: russoz + $modules/django_dumpdata.py: + maintainers: russoz + $modules/django_loaddata.py: + maintainers: russoz + $modules/django_manage.py: ignore: scottanderson42 tastychutney labels: django_manage - $modules/web_infrastructure/ejabberd_user.py: + maintainers: russoz + $modules/dnf_versionlock.py: + maintainers: moreda + $modules/dnf_config_manager.py: + maintainers: ahyattdev + $modules/dnsimple.py: + maintainers: drcapulet + $modules/dnsimple_info.py: + maintainers: edhilgendorf + $modules/dnsmadeeasy.py: + maintainers: briceburg + $modules/dpkg_divert.py: + maintainers: quidame + $modules/easy_install.py: + maintainers: mattupstate + $modules/ejabberd_user.py: maintainers: privateip - $modules/web_infrastructure/gunicorn.py: - maintainers: agmezr - $modules/web_infrastructure/htpasswd.py: - maintainers: $team_ansible_core - labels: 
htpasswd - $modules/web_infrastructure/jboss.py: - maintainers: $team_jboss jhoekx - labels: jboss - $modules/web_infrastructure/jenkins_build.py: - maintainers: brettmilford unnecessary-username - $modules/web_infrastructure/jenkins_job.py: - maintainers: sermilrod - $modules/web_infrastructure/jenkins_job_info.py: + $modules/elasticsearch_plugin.py: + maintainers: ThePixelDeveloper samdoran + $modules/emc_vnx_sg_member.py: + maintainers: remixtj + $modules/etcd3.py: + ignore: vfauth + maintainers: evrardjp + $modules/facter_facts.py: + labels: facter + maintainers: russoz $team_ansible_core gamethis + $modules/filesize.py: + maintainers: quidame + $modules/filesystem.py: + labels: filesystem + maintainers: pilou- abulimov quidame + $modules/flatpak.py: + maintainers: $team_flatpak + $modules/flatpak_remote.py: + maintainers: $team_flatpak + $modules/gandi_livedns.py: + maintainers: gthiemonge + $modules/gconftool2.py: + labels: gconftool2 + maintainers: Akasurde kevensen + $modules/gconftool2_info.py: + labels: gconftool2 + maintainers: russoz + $modules/gem.py: + labels: gem + maintainers: $team_ansible_core johanwiren + $modules/gio_mime.py: + maintainers: russoz + $modules/git_config.py: + maintainers: djmattyg007 mgedmin + $modules/git_config_info.py: + maintainers: guenhter + $modules/github_: maintainers: stpierre - $modules/web_infrastructure/jenkins_plugin.py: - maintainers: jtyr - $modules/web_infrastructure/jenkins_script.py: - maintainers: hogarthj - $modules/web_infrastructure/jira.py: - maintainers: Slezhuk tarka pertoft - ignore: DWSR - labels: jira - $modules/web_infrastructure/nginx_status_info.py: + $modules/github_deploy_key.py: + maintainers: bincyber + $modules/github_issue.py: + maintainers: Akasurde + $modules/github_key.py: + ignore: erydo + labels: github_key + maintainers: erydo + $modules/github_release.py: + maintainers: adrianmoisey + $modules/github_repo.py: + maintainers: atorrescogollo + $modules/gitlab_: + keywords: gitlab 
source_control + maintainers: $team_gitlab + notify: jlozadad + ignore: dj-wasabi + $modules/gitlab_branch.py: + maintainers: paytroff + $modules/gitlab_issue.py: + maintainers: zvaraondrej + $modules/gitlab_label.py: + maintainers: gpongelli + $modules/gitlab_merge_request.py: + maintainers: zvaraondrej + $modules/gitlab_milestone.py: + maintainers: gpongelli + $modules/gitlab_project_variable.py: + maintainers: markuman + $modules/gitlab_instance_variable.py: + maintainers: benibr + $modules/gitlab_runner.py: + maintainers: SamyCoenen + $modules/gitlab_user.py: + maintainers: LennertMertens stgrace + $modules/gitlab_group_access_token.py: + maintainers: pixslx + $modules/gitlab_project_access_token.py: + maintainers: pixslx + $modules/grove.py: + maintainers: zimbatm + $modules/gunicorn.py: + maintainers: agmezr + $modules/haproxy.py: + maintainers: ravibhure Normo + $modules/heroku_collaborator.py: + maintainers: marns93 + $modules/hg.py: + maintainers: yeukhon + $modules/homebrew.py: + ignore: ryansb + keywords: brew cask darwin homebrew macosx macports osx + labels: homebrew macos + maintainers: $team_macos andrew-d + notify: chris-short + $modules/homebrew_cask.py: + ignore: ryansb + keywords: brew cask darwin homebrew macosx macports osx + labels: homebrew_ macos + maintainers: $team_macos enriclluelles + notify: chris-short + $modules/homebrew_tap.py: + ignore: ryansb + keywords: brew cask darwin homebrew macosx macports osx + labels: homebrew_ macos + maintainers: $team_macos + notify: chris-short + $modules/homebrew_services.py: + ignore: ryansb + keywords: brew cask services darwin homebrew macosx macports osx + labels: homebrew_ macos + maintainers: $team_macos kitizz + $modules/homectl.py: + maintainers: jameslivulpi + $modules/honeybadger_deployment.py: + maintainers: stympy + $modules/hpilo_: + ignore: dagwieers + maintainers: haad + $modules/hponcfg.py: + ignore: dagwieers + maintainers: haad + $modules/htpasswd.py: + labels: htpasswd + maintainers: 
$team_ansible_core + $modules/hwc_: + keywords: cloud huawei hwc + maintainers: $team_huawei huaweicloud + $modules/ibm_sa_: + maintainers: tzure + $modules/icinga2_feature.py: + maintainers: nerzhul + $modules/icinga2_host.py: + maintainers: t794104 + $modules/idrac_: + ignore: jose-delarosa + maintainers: $team_redfish + $modules/ilo_: + ignore: jose-delarosa varini-hp + maintainers: $team_redfish + $modules/imc_rest.py: + labels: cisco + maintainers: dagwieers + $modules/imgadm.py: + keywords: beadm dladm illumos ipadm nexenta omnios openindiana pfexec smartos solaris sunos zfs zpool + labels: solaris + maintainers: $team_solaris + $modules/infinity.py: + maintainers: MeganLiu + $modules/influxdb_: + maintainers: kamsz + $modules/influxdb_query.py: maintainers: resmo - $modules/web_infrastructure/rundeck_acl_policy.py: + $modules/influxdb_user.py: + maintainers: zhhuta + $modules/influxdb_write.py: + maintainers: resmo + $modules/ini_file.py: + maintainers: jpmens noseka1 + $modules/installp.py: + keywords: aix efix lpar wpar + labels: aix installp + maintainers: $team_aix kairoaraujo + $modules/interfaces_file.py: + labels: interfaces_file + maintainers: obourdon hryamzik + $modules/ip_netns.py: + maintainers: bregman-arie + $modules/ipa_: + maintainers: $team_ipa + ignore: fxfitz + $modules/ipa_getkeytab.py: + maintainers: abakanovskii + $modules/ipa_dnsrecord.py: + maintainers: $team_ipa jwbernin + $modules/ipbase_info.py: + maintainers: dominikkukacka + $modules/ipa_pwpolicy.py: + maintainers: adralioh + $modules/ipa_service.py: + maintainers: cprh + $modules/ipa_vault.py: + maintainers: jparrill + $modules/ipify_facts.py: + maintainers: resmo + $modules/ipinfoio_facts.py: + maintainers: akostyuk + $modules/ipmi_: + maintainers: bgaifullin cloudnull + $modules/iptables_state.py: + maintainers: quidame + $modules/ipwcli_dns.py: + maintainers: cwollinger + $modules/irc.py: + maintainers: jpmens sivel + $modules/iso_create.py: + maintainers: Tomorrow9 + 
$modules/iso_customize.py: + maintainers: ZouYuhua + $modules/iso_extract.py: + maintainers: dagwieers jhoekx ribbons + $modules/jabber.py: + maintainers: bcoca + $modules/java_cert.py: + maintainers: haad absynth76 + $modules/java_keystore.py: + maintainers: Mogztter quidame + $modules/jboss.py: + labels: jboss + maintainers: $team_jboss jhoekx + $modules/jenkins_build.py: + maintainers: brettmilford unnecessary-username juanmcasanova + $modules/jenkins_build_info.py: + maintainers: juanmcasanova + $modules/jenkins_credential.py: + maintainers: YoussefKhalidAli + $modules/jenkins_job.py: + maintainers: sermilrod + $modules/jenkins_job_info.py: + maintainers: stpierre + $modules/jenkins_node.py: + maintainers: phyrwork + $modules/jenkins_plugin.py: + maintainers: jtyr + $modules/jenkins_script.py: + maintainers: hogarthj + $modules/jira.py: + ignore: DWSR tarka + labels: jira + maintainers: Slezhuk pertoft + $modules/kdeconfig.py: + maintainers: smeso + $modules/kernel_blacklist.py: + maintainers: matze + $modules/keycloak_: + maintainers: $team_keycloak + $modules/keycloak_authentication.py: + maintainers: elfelip Gaetan2907 + $modules/keycloak_authentication_required_actions.py: + maintainers: Skrekulko + $modules/keycloak_authz_authorization_scope.py: + maintainers: mattock + $modules/keycloak_authz_permission.py: + maintainers: mattock + $modules/keycloak_authz_custom_policy.py: + maintainers: mattock + $modules/keycloak_authz_permission_info.py: + maintainers: mattock + $modules/keycloak_client_rolemapping.py: + maintainers: Gaetan2907 + $modules/keycloak_clientscope.py: + maintainers: Gaetan2907 + $modules/keycloak_clientscope_type.py: + maintainers: simonpahl + $modules/keycloak_clientsecret_info.py: + maintainers: fynncfchen johncant + $modules/keycloak_clientsecret_regenerate.py: + maintainers: fynncfchen johncant + $modules/keycloak_component.py: + maintainers: fivetide + $modules/keycloak_group.py: + maintainers: adamgoossens + 
$modules/keycloak_identity_provider.py: + maintainers: laurpaum + $modules/keycloak_realm.py: + maintainers: kris2kris + $modules/keycloak_realm_info.py: + maintainers: fynncfchen + $modules/keycloak_realm_key.py: + maintainers: mattock + $modules/keycloak_role.py: + maintainers: laurpaum + $modules/keycloak_user.py: + maintainers: elfelip + $modules/keycloak_user_federation.py: + maintainers: laurpaum + $modules/keycloak_userprofile.py: + maintainers: yeoldegrove + $modules/keycloak_component_info.py: + maintainers: desand01 + $modules/keycloak_client_rolescope.py: + maintainers: desand01 + $modules/keycloak_user_rolemapping.py: + maintainers: bratwurzt + $modules/keycloak_realm_rolemapping.py: + maintainers: agross mhuysamen Gaetan2907 + $modules/keyring.py: + maintainers: ahussey-redhat + $modules/keyring_info.py: + maintainers: ahussey-redhat + $modules/kibana_plugin.py: + maintainers: barryib + $modules/krb_ticket.py: + maintainers: abakanovskii + $modules/launchd.py: + maintainers: martinm82 + $modules/layman.py: + maintainers: jirutka + $modules/lbu.py: + maintainers: kunkku + $modules/ldap_attrs.py: + maintainers: drybjed jtyr noles + $modules/ldap_entry.py: + maintainers: jtyr + $modules/ldap_inc.py: + maintainers: pduveau + $modules/ldap_passwd.py: + maintainers: KellerFuchs jtyr + $modules/ldap_search.py: + maintainers: eryx12o45 jtyr + $modules/librato_annotation.py: + maintainers: Sedward + $modules/linode: + maintainers: $team_linode + $modules/linode.py: + maintainers: zbal + $modules/listen_ports_facts.py: + maintainers: ndavison + $modules/lldp.py: + ignore: andyhky + labels: lldp + $modules/locale_gen.py: + maintainers: AugustusKling + $modules/logentries.py: + ignore: ivanvanderbyl + labels: logentries + $modules/logentries_msg.py: + maintainers: jcftang + $modules/logstash_plugin.py: maintainers: nerzhul - $modules/web_infrastructure/rundeck_project.py: + $modules/lvg.py: + maintainers: abulimov + $modules/lvm_pv.py: + maintainers: klention + 
$modules/lvm_pv_move_data.py: + maintainers: klention + $modules/lvg_rename.py: + maintainers: lszomor + $modules/lvol.py: + maintainers: abulimov jhoekx zigaSRC unkaputtbar112 + $modules/lxc_container.py: + maintainers: cloudnull + $modules/lxca_: + maintainers: navalkp prabhosa + $modules/lxd_: + ignore: hnakamur + $modules/lxd_profile.py: + maintainers: conloos + $modules/lxd_project.py: + maintainers: we10710aa + $modules/macports.py: + ignore: ryansb + keywords: brew cask darwin homebrew macosx macports osx + labels: macos macports + maintainers: $team_macos jcftang + notify: chris-short + $modules/mail.py: + maintainers: dagwieers + $modules/make.py: + maintainers: LinusU + $modules/manageiq_: + labels: manageiq + maintainers: $team_manageiq + $modules/manageiq_alert_profiles.py: + maintainers: elad661 + $modules/manageiq_alerts.py: + maintainers: elad661 + $modules/manageiq_group.py: + maintainers: evertmulder + $modules/manageiq_policies_info.py: + maintainers: russoz $team_manageiq + $modules/manageiq_tags_info.py: + maintainers: russoz $team_manageiq + $modules/manageiq_tenant.py: + maintainers: evertmulder + $modules/mas.py: + maintainers: lukasbestle mheap + $modules/matrix.py: + maintainers: jcgruenhage + $modules/mattermost.py: + maintainers: bjolivot + $modules/maven_artifact.py: + ignore: chrisisbeef + labels: maven_artifact + maintainers: tumbl3w33d turb + $modules/memset_: + ignore: glitchcrab + $modules/mksysb.py: + labels: aix mksysb + maintainers: $team_aix + $modules/modprobe.py: + ignore: stygstra + labels: modprobe + maintainers: jdauphant mattjeffery + $modules/monit.py: + labels: monit + maintainers: dstoflet brian-brazil snopoke + $modules/mqtt.py: + maintainers: jpmens + $modules/mssql_db.py: + labels: mssql_db + maintainers: vedit Jmainguy kenichi-ogawa-1988 + $modules/mssql_script.py: + labels: mssql_script + maintainers: kbudde + $modules/nagios.py: + maintainers: tbielawa tgoetheyn + $modules/netcup_dns.py: + maintainers: nbuchwitz + 
$modules/newrelic_deployment.py: + ignore: mcodd + $modules/nexmo.py: + maintainers: sivel + $modules/nginx_status_info.py: + maintainers: resmo + $modules/nictagadm.py: + keywords: beadm dladm illumos ipadm nexenta omnios openindiana pfexec smartos solaris sunos zfs zpool + labels: solaris + maintainers: $team_solaris SmithX10 + $modules/nmcli.py: + maintainers: alcamie101 + $modules/nomad_: + maintainers: chris93111 apecnascimento + $modules/nosh.py: + maintainers: tacatac + $modules/npm.py: + ignore: chrishoffman + labels: npm + maintainers: shane-walker xcambar + $modules/nsupdate.py: maintainers: nerzhul - $modules/web_infrastructure/rundeck_job_run.py: + $modules/ocapi_command.py: + maintainers: $team_wdc + $modules/ocapi_info.py: + maintainers: $team_wdc + $modules/oci_vcn.py: + maintainers: $team_oracle rohitChaware + $modules/odbc.py: + maintainers: john-westcott-iv + $modules/office_365_connector_card.py: + maintainers: marc-sensenich + $modules/ohai.py: + labels: ohai + maintainers: $team_ansible_core + ignore: mpdehaan + $modules/omapi_host.py: + maintainers: amasolov nerzhul + $modules/one_: + maintainers: $team_opennebula + $modules/one_host.py: + maintainers: rvalle + $modules/one_vnet.py: + maintainers: abakanovskii + $modules/oneandone_: + maintainers: aajdinov edevenport + $modules/onepassword_info.py: + maintainers: Rylon + $modules/oneview_: + maintainers: adriane-cardozo fgbulsoni tmiotto + $modules/oneview_datacenter_info.py: + maintainers: aalexmonteiro madhav-bharadwaj ricardogpsf soodpr + $modules/oneview_fc_network.py: + maintainers: fgbulsoni + $modules/oneview_fcoe_network.py: + maintainers: fgbulsoni + $modules/online_: + maintainers: remyleone + $modules/open_iscsi.py: + maintainers: srvg + $modules/openbsd_pkg.py: + ignore: ryansb + keywords: doas dragonfly freebsd iocage jail netbsd openbsd opnsense pfsense + labels: bsd openbsd_pkg + maintainers: $team_bsd eest + $modules/opendj_backendprop.py: + maintainers: dj-wasabi + 
$modules/openwrt_init.py: + maintainers: agaffney + $modules/opkg.py: + maintainers: skinp + $modules/osx_defaults.py: + keywords: brew cask darwin homebrew macosx macports osx + labels: macos osx_defaults + maintainers: $team_macos notok + notify: chris-short + $modules/ovh_: + maintainers: pascalheraud + $modules/ovh_monthly_billing.py: + maintainers: fraff + $modules/pacemaker_cluster.py: + maintainers: matbu munchtoast + $modules/pacemaker_info.py: + maintainers: munchtoast + $modules/pacemaker_resource.py: + maintainers: munchtoast + $modules/pacemaker_stonith.py: + maintainers: munchtoast + $modules/packet_: + maintainers: nurfet-becirevic t0mk + $modules/packet_device.py: + maintainers: baldwinSPC t0mk teebes + $modules/packet_sshkey.py: + maintainers: t0mk + $modules/pacman.py: + ignore: elasticdog + labels: pacman + maintainers: elasticdog indrajitr tchernomax jraby + $modules/pacman_key.py: + labels: pacman + maintainers: grawlinson + $modules/pagerduty.py: + ignore: bpennypacker + labels: pagerduty + maintainers: suprememoocow thaumos + $modules/pagerduty_alert.py: + maintainers: ApsOps xshen1 + $modules/pagerduty_change.py: + maintainers: adamvaughan + $modules/pagerduty_user.py: + maintainers: zanssa + $modules/pam_limits.py: + ignore: usawa + labels: pam_limits + maintainers: giovannisciortino + $modules/pamd.py: + maintainers: kevensen + $modules/parted.py: + maintainers: ColOfAbRiX jake2184 + $modules/pear.py: + ignore: jle64 + labels: pear + $modules/pids.py: + maintainers: saranyasridharan + $modules/pingdom.py: + maintainers: thaumos + $modules/pip_package_info.py: + maintainers: bcoca matburt maxamillion + $modules/pipx.py: + maintainers: russoz + $modules/pipx_info.py: + maintainers: russoz + $modules/pkg5: + keywords: beadm dladm illumos ipadm nexenta omnios openindiana pfexec smartos solaris sunos zfs zpool + labels: pkg5 solaris + maintainers: $team_solaris mavit + $modules/pkgin.py: + labels: pkgin solaris + maintainers: $team_solaris L2G 
jasperla szinck martinm82 + $modules/pkgng.py: + ignore: bleader + keywords: doas dragonfly freebsd iocage jail netbsd openbsd opnsense pfsense + labels: bsd pkgng + maintainers: $team_bsd bleader + $modules/pkgutil.py: + labels: pkgutil solaris + maintainers: $team_solaris dermute + $modules/pmem.py: + maintainers: mizumm + $modules/pnpm.py: + ignore: chrishoffman + maintainers: aretrosen + $modules/portage.py: + ignore: sayap + labels: portage + maintainers: Tatsh wltjr + $modules/portinstall.py: + ignore: ryansb + keywords: doas dragonfly freebsd iocage jail netbsd openbsd opnsense pfsense + labels: bsd portinstall + maintainers: $team_bsd berenddeboer + $modules/pritunl_: + maintainers: Lowess + $modules/pubnub_blocks.py: + maintainers: parfeon pubnub + $modules/pulp_repo.py: + maintainers: sysadmind + $modules/puppet.py: + labels: puppet + maintainers: emonty + $modules/pushbullet.py: + maintainers: willybarro + $modules/pushover.py: + maintainers: weaselkeeper wopfel + $modules/python_requirements_info.py: + ignore: ryansb + maintainers: willthames + $modules/read_csv.py: + maintainers: dagwieers + $modules/redfish_: + ignore: jose-delarosa + maintainers: $team_redfish TSKushal + $modules/redhat_subscription.py: + labels: redhat_subscription + maintainers: $team_rhsm + ignore: barnabycourt alikins kahowell + $modules/redis.py: + maintainers: slok + $modules/redis_data.py: + maintainers: paginabianca + $modules/redis_data_incr.py: + maintainers: paginabianca + $modules/redis_data_info.py: + maintainers: paginabianca + $modules/redis_info.py: + maintainers: levonet + $modules/rhevm.py: + ignore: skvidal + keywords: kvm libvirt proxmox qemu + labels: rhevm virt + maintainers: $team_virt TimothyVandenbrande + $modules/rhsm_release.py: + maintainers: seandst $team_rhsm + $modules/rhsm_repository.py: + maintainers: giovannisciortino $team_rhsm + $modules/riak.py: + maintainers: drewkerrigan jsmartin + $modules/rocketchat.py: + ignore: ramondelafuente + labels: 
rocketchat + maintainers: Deepakkothandan + $modules/rollbar_deployment.py: + maintainers: kavu + $modules/rpm_ostree_pkg.py: + maintainers: dustymabe Akasurde + $modules/rundeck_acl_policy.py: + maintainers: nerzhul + $modules/rundeck_job_executions_info.py: maintainers: phsmith - $modules/web_infrastructure/rundeck_job_executions_info.py: + $modules/rundeck_job_run.py: maintainers: phsmith - $modules/web_infrastructure/sophos_utm/: - maintainers: $team_e_spirit - keywords: sophos utm - $modules/web_infrastructure/sophos_utm/utm_proxy_auth_profile.py: - maintainers: $team_e_spirit stearz - keywords: sophos utm - $modules/web_infrastructure/sophos_utm/utm_proxy_exception.py: - maintainers: $team_e_spirit RickS-C137 - keywords: sophos utm - $modules/web_infrastructure/sophos_utm/utm_ca_host_key_cert.py: - maintainers: stearz - $modules/web_infrastructure/sophos_utm/utm_ca_host_key_cert_info.py: - maintainers: stearz - $modules/web_infrastructure/sophos_utm/utm_network_interface_address.py: - maintainers: steamx - $modules/web_infrastructure/sophos_utm/utm_network_interface_address_info.py: - maintainers: steamx - $modules/web_infrastructure/supervisorctl.py: + $modules/rundeck_project.py: + maintainers: nerzhul + $modules/runit.py: + maintainers: jsumners + $modules/say.py: + maintainers: $team_ansible_core + ignore: mpdehaan + $modules/scaleway_: + maintainers: $team_scaleway + $modules/scaleway_compute_private_network.py: + maintainers: pastral + $modules/scaleway_container.py: + maintainers: Lunik + $modules/scaleway_container_info.py: + maintainers: Lunik + $modules/scaleway_container_namespace.py: + maintainers: Lunik + $modules/scaleway_container_namespace_info.py: + maintainers: Lunik + $modules/scaleway_container_registry.py: + maintainers: Lunik + $modules/scaleway_container_registry_info.py: + maintainers: Lunik + $modules/scaleway_database_backup.py: + maintainers: guillaume_ro_fr + $modules/scaleway_function.py: + maintainers: Lunik + 
$modules/scaleway_function_info.py: + maintainers: Lunik + $modules/scaleway_function_namespace.py: + maintainers: Lunik + $modules/scaleway_function_namespace_info.py: + maintainers: Lunik + $modules/scaleway_image_info.py: + maintainers: Spredzy + $modules/scaleway_ip_info.py: + maintainers: Spredzy + $modules/scaleway_organization_info.py: + maintainers: Spredzy + $modules/scaleway_private_network.py: + maintainers: pastral + $modules/scaleway_security_group.py: + maintainers: DenBeke + $modules/scaleway_security_group_info.py: + maintainers: Spredzy + $modules/scaleway_security_group_rule.py: + maintainers: DenBeke + $modules/scaleway_server_info.py: + maintainers: Spredzy + $modules/scaleway_snapshot_info.py: + maintainers: Spredzy + $modules/scaleway_volume.py: + ignore: hekonsek + labels: scaleway_volume + $modules/scaleway_volume_info.py: + maintainers: Spredzy + $modules/sefcontext.py: + maintainers: dagwieers + $modules/selinux_permissive.py: + maintainers: mscherer + $modules/selogin.py: + maintainers: bachradsusi dankeder jamescassell + $modules/sendgrid.py: + maintainers: makaimc + $modules/sensu_: + maintainers: dmsimard + $modules/sensu_check.py: + maintainers: andsens + $modules/sensu_silence.py: + maintainers: smbambling + $modules/sensu_subscription.py: + maintainers: andsens + $modules/seport.py: + maintainers: dankeder + $modules/serverless.py: + ignore: ryansb + $modules/shutdown.py: + maintainers: nitzmahone samdoran aminvakil + $modules/simpleinit_msb.py: + maintainers: vaygr + $modules/sl_vm.py: + maintainers: mcltn + $modules/slack.py: + maintainers: ramondelafuente + $modules/slackpkg.py: + maintainers: KimNorgaard + $modules/smartos_image_info.py: + keywords: beadm dladm illumos ipadm nexenta omnios openindiana pfexec smartos solaris sunos zfs zpool + labels: solaris + maintainers: $team_solaris + $modules/snap.py: + labels: snap + maintainers: angristan vcarceler russoz + $modules/snap_alias.py: + labels: snap + maintainers: russoz + 
$modules/snmp_facts.py: + maintainers: ogenstad ujwalkomarla + $modules/solaris_zone.py: + keywords: beadm dladm illumos ipadm nexenta omnios openindiana pfexec smartos solaris sunos zfs zpool + labels: solaris + maintainers: $team_solaris pmarkham + $modules/sorcery.py: + maintainers: vaygr + $modules/spectrum_device.py: + maintainers: orgito + $modules/spectrum_model_attrs.py: + maintainers: tgates81 + $modules/spotinst_aws_elastigroup.py: + maintainers: talzur + $modules/ss_3par_cpg.py: + maintainers: farhan7500 gautamphegde + $modules/ssh_config.py: + maintainers: gaqzi Akasurde + $modules/stacki_host.py: + labels: stacki_host + maintainers: bsanders bbyhuy + $modules/statsd.py: + maintainers: mamercad + $modules/statusio_maintenance.py: + maintainers: bhcopeland + $modules/sudoers.py: + maintainers: JonEllis + $modules/supervisorctl.py: maintainers: inetfuture mattupstate - $modules/web_infrastructure/taiga_issue.py: + $modules/svc.py: + maintainers: bcoca + $modules/svr4pkg.py: + labels: solaris svr4pkg + maintainers: $team_solaris brontitall + $modules/swdepot.py: + keywords: hp-ux + labels: hpux swdepot + maintainers: $team_hpux melodous + $modules/swupd.py: + labels: swupd + maintainers: hnanni albertomurillo + $modules/syslogger.py: + maintainers: garbled1 + $modules/syspatch.py: + maintainers: precurse + $modules/sysrc.py: + maintainers: dlundgren + $modules/systemd_creds_decrypt.py: + maintainers: konstruktoid + $modules/systemd_creds_encrypt.py: + maintainers: konstruktoid + $modules/systemd_info.py: + maintainers: NomakCooper + $modules/sysupgrade.py: + maintainers: precurse + $modules/taiga_issue.py: maintainers: lekum + $modules/telegram.py: + maintainers: tyouxa loms lomserman + $modules/terraform.py: + ignore: ryansb + maintainers: m-yosefpor rainerleber + $modules/timezone.py: + maintainers: indrajitr jasperla tmshn + $modules/twilio.py: + maintainers: makaimc + $modules/typetalk.py: + maintainers: tksmd + $modules/udm_: + maintainers: keachi + 
$modules/ufw.py: + labels: ufw + maintainers: ahtik ovcharenko pyykkis + notify: felixfontein + $modules/uptimerobot.py: + maintainers: nate-kingsley + $modules/urpmi.py: + maintainers: pmakowski + $modules/usb_facts.py: + maintainers: maxopoly + $modules/utm_: + keywords: sophos utm + maintainers: $team_e_spirit + $modules/utm_ca_host_key_cert.py: + ignore: stearz + maintainers: $team_e_spirit + $modules/utm_ca_host_key_cert_info.py: + ignore: stearz + maintainers: $team_e_spirit + $modules/utm_network_interface_address.py: + maintainers: steamx + $modules/utm_network_interface_address_info.py: + maintainers: steamx + $modules/utm_proxy_auth_profile.py: + keywords: sophos utm + ignore: stearz + maintainers: $team_e_spirit + $modules/utm_proxy_exception.py: + keywords: sophos utm + maintainers: $team_e_spirit RickS-C137 + $modules/vdo.py: + maintainers: rhawalsh bgurney-rh + $modules/vertica_: + maintainers: dareko + $modules/vexata_: + maintainers: vexata + $modules/vmadm.py: + keywords: beadm dladm illumos ipadm nexenta omnios openindiana pfexec smartos solaris sunos zfs zpool + labels: solaris + maintainers: $team_solaris + $modules/wakeonlan.py: + maintainers: dagwieers + $modules/wdc_: + ignore: jose-delarosa + maintainers: $team_redfish + $modules/wdc_redfish_command.py: + maintainers: $team_wdc + $modules/wdc_redfish_info.py: + maintainers: $team_wdc + $modules/xattr.py: + labels: xattr + maintainers: bcoca + $modules/xbps.py: + maintainers: dinoocch the-maldridge + $modules/xcc_: + maintainers: panyy3 renxulei + $modules/xdg_mime.py: + maintainers: mhalano + $modules/xenserver_: + maintainers: bvitnik + $modules/xenserver_facts.py: + ignore: andyhky ryansb + labels: xenserver_facts + maintainers: caphrim007 cheese + $modules/xfconf.py: + labels: xfconf + maintainers: russoz jbenden + $modules/xfconf_info.py: + labels: xfconf + maintainers: russoz + $modules/xfs_quota.py: + maintainers: bushvin + $modules/xml.py: + ignore: magnus919 + labels: m:xml xml + 
maintainers: dagwieers magnus919 tbielawa cmprescott sm4rk0 + $modules/yarn.py: + ignore: chrishoffman verkaufer + $modules/yum_versionlock.py: + maintainers: gyptazy aminvakil + $modules/zfs: + keywords: beadm dladm illumos ipadm nexenta omnios openindiana pfexec smartos solaris sunos zfs zpool + labels: solaris + maintainers: $team_solaris + $modules/zfs.py: + maintainers: johanwiren + $modules/zfs_delegate_admin.py: + maintainers: natefoo + $modules/znode.py: + maintainers: treyperry + $modules/zpool.py: + maintainers: tomhesse + $modules/zpool_facts: + keywords: beadm dladm illumos ipadm nexenta omnios openindiana pfexec smartos solaris sunos zfs zpool + labels: solaris + maintainers: $team_solaris + $modules/zypper.py: + ignore: dirtyharrycallahan robinro + labels: zypper + maintainers: $team_suse + $modules/zypper_repository.py: + ignore: matze + labels: zypper + maintainers: $team_suse + $plugin_utils/ansible_type.py: + maintainers: vbotka + $modules/zypper_repository_info.py: + labels: zypper + maintainers: $team_suse TobiasZeuch181 + $plugin_utils/keys_filter.py: + maintainers: vbotka + $plugin_utils/unsafe.py: + maintainers: felixfontein $tests/a_module.py: maintainers: felixfontein + $tests/ansible_type.py: + maintainers: vbotka + $tests/fqdn_valid.py: + maintainers: vbotka +######################### + docs/docsite/rst/filter_guide.rst: {} + docs/docsite/rst/filter_guide_abstract_informations.rst: {} + docs/docsite/rst/filter_guide_abstract_informations_counting_elements_in_sequence.rst: + maintainers: keilr + docs/docsite/rst/filter_guide_abstract_informations_dictionaries.rst: + maintainers: felixfontein giner + docs/docsite/rst/filter_guide_abstract_informations_grouping.rst: + maintainers: felixfontein + docs/docsite/rst/filter_guide_abstract_informations_lists_helper.rst: + maintainers: cfiehe + docs/docsite/rst/filter_guide-abstract_informations-lists_of_dictionaries-keep_keys.rst: + maintainers: vbotka + 
docs/docsite/rst/filter_guide-abstract_informations-lists_of_dictionaries-remove_keys.rst: + maintainers: vbotka + docs/docsite/rst/filter_guide-abstract_informations-lists_of_dictionaries-replace_keys.rst: + maintainers: vbotka + docs/docsite/rst/filter_guide-abstract_informations-lists_of_dictionaries.rst: + maintainers: vbotka + docs/docsite/rst/filter_guide_abstract_informations_merging_lists_of_dictionaries.rst: + maintainers: vbotka + docs/docsite/rst/filter_guide_conversions.rst: + maintainers: Ajpantuso kellyjonbrazil + docs/docsite/rst/filter_guide_creating_identifiers.rst: + maintainers: Ajpantuso + docs/docsite/rst/filter_guide_paths.rst: {} + docs/docsite/rst/filter_guide_selecting_json_data.rst: {} + docs/docsite/rst/filter_guide_working_with_times.rst: + maintainers: resmo + docs/docsite/rst/filter_guide_working_with_unicode.rst: + maintainers: Ajpantuso + docs/docsite/rst/filter_guide_working_with_versions.rst: + maintainers: ericzolf + docs/docsite/rst/guide_alicloud.rst: + maintainers: xiaozhu36 + docs/docsite/rst/guide_cmdrunner.rst: + maintainers: russoz + docs/docsite/rst/guide_deps.rst: + maintainers: russoz + docs/docsite/rst/guide_iocage.rst: + maintainers: russoz felixfontein + docs/docsite/rst/guide_iocage_inventory.rst: + maintainers: vbotka + docs/docsite/rst/guide_iocage_inventory_aliases.rst: + maintainers: vbotka + docs/docsite/rst/guide_iocage_inventory_basics.rst: + maintainers: vbotka + docs/docsite/rst/guide_iocage_inventory_dhcp.rst: + maintainers: vbotka + docs/docsite/rst/guide_iocage_inventory_hooks.rst: + maintainers: vbotka + docs/docsite/rst/guide_iocage_inventory_properties.rst: + maintainers: vbotka + docs/docsite/rst/guide_iocage_inventory_tags.rst: + maintainers: vbotka + docs/docsite/rst/guide_modulehelper.rst: + maintainers: russoz + docs/docsite/rst/guide_online.rst: + maintainers: remyleone + docs/docsite/rst/guide_packet.rst: + maintainers: baldwinSPC nurfet-becirevic t0mk teebes + 
docs/docsite/rst/guide_scaleway.rst: + maintainers: $team_scaleway + docs/docsite/rst/guide_uthelper.rst: + maintainers: russoz + docs/docsite/rst/guide_vardict.rst: + maintainers: russoz + docs/docsite/rst/test_guide.rst: + maintainers: felixfontein ######################### tests/: labels: tests - tests/unit/: - labels: unit - support: community tests/integration: labels: integration support: community - tests/utils/: - maintainers: gundalow + tests/unit/: labels: unit + support: community + tests/utils/: + labels: unit + maintainers: gundalow macros: actions: plugins/action becomes: plugins/become caches: plugins/cache callbacks: plugins/callback - cliconfs: plugins/cliconf connections: plugins/connection doc_fragments: plugins/doc_fragments filters: plugins/filter @@ -1255,31 +1580,31 @@ macros: lookups: plugins/lookup module_utils: plugins/module_utils modules: plugins/modules - terminals: plugins/terminal + plugin_utils: plugins/plugin_utils tests: plugins/test team_ansible_core: team_aix: MorrisA bcoca d-little flynn1973 gforster kairoaraujo marvin-sinister mator molekuul ramooncamacho wtcross team_bsd: JoergFiedler MacLemon bcoca dch jasperla mekanix opoplawski overhacked tuxillo - team_consul: sgargan + team_consul: sgargan apollo13 Ilgmi team_cyberark_conjur: jvanderhoof ryanprior team_e_spirit: MatrixCrawler getjack team_flatpak: JayKayy oolongbrothers - team_gitlab: Lunik Shaps dj-wasabi marwatk waheedi zanssa scodeman metanovii sh0shin nejch lgatellier suukit + team_gitlab: Lunik Shaps marwatk waheedi zanssa scodeman metanovii sh0shin nejch lgatellier suukit team_hpux: bcoca davx8342 team_huawei: QijunPan TommyLike edisonxiang freesky-edward hwDCN niuzhenguo xuxiaowei0512 yanzhangi zengchen1024 zhongjun2 - team_ipa: Akasurde Nosmoht fxfitz justchris1 + team_ipa: Akasurde Nosmoht justchris1 team_jboss: Wolfant jairojunior wbrefvem - team_keycloak: eikef ndclt + team_keycloak: eikef ndclt mattock thomasbach-dev team_linode: InTheCloudDan decentral1se 
displague rmcintosh Charliekenney23 LBGarber team_macos: Akasurde kyleabenson martinm82 danieljaouen indrajitr team_manageiq: abellotti cben gtanzillo yaacov zgalor dkorn evertmulder team_networking: NilashishC Qalthos danielmellado ganeshrn justjais trishnaguha sganesh-infoblox privateip team_opennebula: ilicmilan meerkampdvv rsmontero xorel nilsding team_oracle: manojmeda mross22 nalsaber - team_purestorage: bannaych dnix101 genegr lionmax opslounge raekins sdodsley sile16 - team_redfish: mraineri tomasg2012 xmadsen renxulei rajeevkallur bhavya06 - team_rhn: FlossWare alikins barnabycourt vritant + team_redfish: mraineri tomasg2012 xmadsen renxulei rajeevkallur bhavya06 jyundt + team_rhsm: cnsnyder ptoscano team_scaleway: remyleone abarbare team_solaris: bcoca fishman jasperla jpdasma mator scathatheworm troy2914 xen0l - team_suse: commel dcermak evrardjp lrupp toabctl AnderEnder alxgu andytom sealor - team_virt: joshainglis karmab tleguern Thulium-Drake Ajpantuso + team_suse: commel evrardjp lrupp AnderEnder alxgu andytom sealor + team_virt: joshainglis karmab Thulium-Drake Ajpantuso + team_wdc: mikemoerk diff --git a/.github/ISSUE_TEMPLATE/bug_report.yml b/.github/ISSUE_TEMPLATE/bug_report.yml index d640b9aae4..4b1c1bfb95 100644 --- a/.github/ISSUE_TEMPLATE/bug_report.yml +++ b/.github/ISSUE_TEMPLATE/bug_report.yml @@ -1,149 +1,153 @@ --- +# Copyright (c) Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + name: Bug report description: Create a report to help us improve body: -- type: markdown - attributes: - value: | - ⚠ - Verify first that your issue is not [already reported on GitHub][issue search]. - Also test if the latest release and devel branch are affected too. 
- *Complete **all** sections as described, this form is processed automatically.* + - type: markdown + attributes: + value: | + ⚠ + Verify first that your issue is not [already reported on GitHub][issue search]. + Also test if the latest release and devel branch are affected too. + *Complete **all** sections as described, this form is processed automatically.* - [issue search]: https://github.com/ansible-collections/community.general/search?q=is%3Aissue&type=issues + [issue search]: https://github.com/ansible-collections/community.general/search?q=is%3Aissue&type=issues -- type: textarea - attributes: - label: Summary - description: Explain the problem briefly below. - placeholder: >- - When I try to do X with the collection from the main branch on GitHub, Y - breaks in a way Z under the env E. Here are all the details I know - about this problem... - validations: - required: true - -- type: dropdown - attributes: - label: Issue Type - # FIXME: Once GitHub allows defining the default choice, update this - options: - - Bug Report - validations: - required: true - -- type: textarea - attributes: - # For smaller collections we could use a multi-select and hardcode the list - # May generate this list via GitHub action and walking files under https://github.com/ansible-collections/community.general/tree/main/plugins - # Select from list, filter as you type (`mysql` would only show the 3 mysql components) - # OR freeform - doesn't seem to be supported in adaptivecards - label: Component Name - description: >- - Write the short name of the module, plugin, task or feature below, - *use your best guess if unsure*. - placeholder: dnf, apt, yum, pip, user etc. - validations: - required: true - -- type: textarea - attributes: - label: Ansible Version - description: >- - Paste verbatim output from `ansible --version` between - tripple backticks. 
- value: | - ```console (paste below) - $ ansible --version - - ``` - validations: - required: true - -- type: textarea - attributes: - label: Community.general Version - description: >- - Paste verbatim output from "ansible-galaxy collection list community.general" - between tripple backticks. - value: | - ```console (paste below) - $ ansible-galaxy collection list community.general - - ``` - validations: - required: true - -- type: textarea - attributes: - label: Configuration - description: >- - If this issue has an example piece of YAML that can help to reproduce this problem, please provide it. - This can be a piece of YAML from, e.g., an automation, script, scene or configuration. - Paste verbatim output from `ansible-config dump --only-changed` between quotes - value: | - ```console (paste below) - $ ansible-config dump --only-changed - - ``` - - -- type: textarea - attributes: - label: OS / Environment - description: >- - Provide all relevant information below, e.g. target OS versions, - network device firmware, etc. - placeholder: RHEL 8, CentOS Stream etc. - validations: - required: false - - -- type: textarea - attributes: - label: Steps to Reproduce - description: | - Describe exactly how to reproduce the problem, using a minimal test-case. It would *really* help us understand your problem if you could also pased any playbooks, configs and commands you used. - - **HINT:** You can paste https://gist.github.com links for larger files. - value: | - - ```yaml (paste below) - - ``` - validations: - required: true - -- type: textarea - attributes: - label: Expected Results - description: >- - Describe what you expected to happen when running the steps above. - placeholder: >- - I expected X to happen because I assumed Y. - that it did not. - validations: - required: true - -- type: textarea - attributes: - label: Actual Results - description: | - Describe what actually happened. If possible run with extra verbosity (`-vvvv`). 
- - Paste verbatim command output between quotes. - value: | - ```console (paste below) - - ``` -- type: checkboxes - attributes: - label: Code of Conduct - description: | - Read the [Ansible Code of Conduct](https://docs.ansible.com/ansible/latest/community/code_of_conduct.html?utm_medium=github&utm_source=issue_form--ansible-collections) first. - options: - - label: I agree to follow the Ansible Code of Conduct + - type: textarea + attributes: + label: Summary + description: Explain the problem briefly below. + placeholder: >- + When I try to do X with the collection from the main branch on GitHub, Y + breaks in a way Z under the env E. Here are all the details I know + about this problem... + validations: required: true + + - type: dropdown + attributes: + label: Issue Type + # FIXME: Once GitHub allows defining the default choice, update this + options: + - Bug Report + validations: + required: true + + - type: textarea + attributes: + # For smaller collections we could use a multi-select and hardcode the list + # May generate this list via GitHub action and walking files under https://github.com/ansible-collections/community.general/tree/main/plugins + # Select from list, filter as you type (`mysql` would only show the 3 mysql components) + # OR freeform - doesn't seem to be supported in adaptivecards + label: Component Name + description: >- + Write the short name of the module, plugin, task or feature below, + *use your best guess if unsure*. Do not include `community.general.`! + placeholder: dnf, apt, yum, pip, user etc. + validations: + required: true + + - type: textarea + attributes: + label: Ansible Version + description: >- + Paste verbatim output from `ansible --version` between + tripple backticks. 
+ value: | + ```console (paste below) + $ ansible --version + + ``` + validations: + required: true + + - type: textarea + attributes: + label: Community.general Version + description: >- + Paste verbatim output from "ansible-galaxy collection list community.general" + between tripple backticks. + value: | + ```console (paste below) + $ ansible-galaxy collection list community.general + + ``` + validations: + required: true + + - type: textarea + attributes: + label: Configuration + description: >- + If this issue has an example piece of YAML that can help to reproduce this problem, please provide it. + This can be a piece of YAML from, e.g., an automation, script, scene or configuration. + Paste verbatim output from `ansible-config dump --only-changed` between quotes + value: | + ```console (paste below) + $ ansible-config dump --only-changed + + ``` + + + - type: textarea + attributes: + label: OS / Environment + description: >- + Provide all relevant information below, e.g. target OS versions, + network device firmware, etc. + placeholder: RHEL 8, CentOS Stream etc. + validations: + required: false + + + - type: textarea + attributes: + label: Steps to Reproduce + description: | + Describe exactly how to reproduce the problem, using a minimal test-case. It would *really* help us understand your problem if you could also passed any playbooks, configs and commands you used. + + **HINT:** You can paste https://gist.github.com links for larger files. + value: | + + ```yaml (paste below) + + ``` + validations: + required: true + + - type: textarea + attributes: + label: Expected Results + description: >- + Describe what you expected to happen when running the steps above. + placeholder: >- + I expected X to happen because I assumed Y. + that it did not. + validations: + required: true + + - type: textarea + attributes: + label: Actual Results + description: | + Describe what actually happened. If possible run with extra verbosity (`-vvvv`). 
+ + Paste verbatim command output between quotes. + value: | + ```console (paste below) + + ``` + - type: checkboxes + attributes: + label: Code of Conduct + description: | + Read the [Ansible Code of Conduct](https://docs.ansible.com/ansible/latest/community/code_of_conduct.html?utm_medium=github&utm_source=issue_form--ansible-collections) first. + options: + - label: I agree to follow the Ansible Code of Conduct + required: true ... diff --git a/.github/ISSUE_TEMPLATE/config.yml b/.github/ISSUE_TEMPLATE/config.yml index f90bd1ad86..476eed516e 100644 --- a/.github/ISSUE_TEMPLATE/config.yml +++ b/.github/ISSUE_TEMPLATE/config.yml @@ -1,27 +1,31 @@ --- +# Copyright (c) Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + # Ref: https://help.github.com/en/github/building-a-strong-community/configuring-issue-templates-for-your-repository#configuring-the-template-chooser blank_issues_enabled: false # default: true contact_links: -- name: Security bug report - url: https://docs.ansible.com/ansible-core/devel/community/reporting_bugs_and_features.html?utm_medium=github&utm_source=issue_template_chooser_ansible_collections - about: | - Please learn how to report security vulnerabilities here. + - name: Security bug report + url: https://docs.ansible.com/ansible-core/devel/community/reporting_bugs_and_features.html?utm_medium=github&utm_source=issue_template_chooser_ansible_collections + about: | + Please learn how to report security vulnerabilities here. - For all security related bugs, email security@ansible.com - instead of using this issue tracker and you will receive - a prompt response. + For all security related bugs, email security@ansible.com + instead of using this issue tracker and you will receive + a prompt response. 
- For more information, see - https://docs.ansible.com/ansible/latest/community/reporting_bugs_and_features.html -- name: Ansible Code of Conduct - url: https://docs.ansible.com/ansible/latest/community/code_of_conduct.html?utm_medium=github&utm_source=issue_template_chooser_ansible_collections - about: Be nice to other members of the community. -- name: Talks to the community - url: https://docs.ansible.com/ansible/latest/community/communication.html?utm_medium=github&utm_source=issue_template_chooser#mailing-list-information - about: Please ask and answer usage questions here -- name: Working groups - url: https://github.com/ansible/community/wiki - about: Interested in improving a specific area? Become a part of a working group! -- name: For Enterprise - url: https://www.ansible.com/products/engine?utm_medium=github&utm_source=issue_template_chooser_ansible_collections - about: Red Hat offers support for the Ansible Automation Platform + For more information, see + https://docs.ansible.com/ansible/latest/community/reporting_bugs_and_features.html + - name: Ansible Code of Conduct + url: https://docs.ansible.com/ansible/latest/community/code_of_conduct.html?utm_medium=github&utm_source=issue_template_chooser_ansible_collections + about: Be nice to other members of the community. + - name: Talks to the community + url: https://docs.ansible.com/ansible/latest/community/communication.html?utm_medium=github&utm_source=issue_template_chooser#mailing-list-information + about: Please ask and answer usage questions here + - name: Working groups + url: https://github.com/ansible/community/wiki + about: Interested in improving a specific area? Become a part of a working group! 
+ - name: For Enterprise + url: https://www.ansible.com/products/engine?utm_medium=github&utm_source=issue_template_chooser_ansible_collections + about: Red Hat offers support for the Ansible Automation Platform diff --git a/.github/ISSUE_TEMPLATE/documentation_report.yml b/.github/ISSUE_TEMPLATE/documentation_report.yml index cd88343d06..2ad4bce44a 100644 --- a/.github/ISSUE_TEMPLATE/documentation_report.yml +++ b/.github/ISSUE_TEMPLATE/documentation_report.yml @@ -1,125 +1,129 @@ --- +# Copyright (c) Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + name: Documentation Report description: Ask us about docs # NOTE: issue body is enabled to allow screenshots body: -- type: markdown - attributes: - value: | - ⚠ - Verify first that your issue is not [already reported on GitHub][issue search]. - Also test if the latest release and devel branch are affected too. - *Complete **all** sections as described, this form is processed automatically.* + - type: markdown + attributes: + value: | + ⚠ + Verify first that your issue is not [already reported on GitHub][issue search]. + Also test if the latest release and devel branch are affected too. + *Complete **all** sections as described, this form is processed automatically.* - [issue search]: https://github.com/ansible-collections/community.general/search?q=is%3Aissue&type=issues + [issue search]: https://github.com/ansible-collections/community.general/search?q=is%3Aissue&type=issues -- type: textarea - attributes: - label: Summary - description: | - Explain the problem briefly below, add suggestions to wording or structure. + - type: textarea + attributes: + label: Summary + description: | + Explain the problem briefly below, add suggestions to wording or structure. - **HINT:** Did you know the documentation has an `Edit on GitHub` link on every page? 
- placeholder: >- - I was reading the Collection documentation of version X and I'm having - problems understanding Y. It would be very helpful if that got - rephrased as Z. - validations: - required: true - -- type: dropdown - attributes: - label: Issue Type - # FIXME: Once GitHub allows defining the default choice, update this - options: - - Documentation Report - validations: - required: true - -- type: input - attributes: - label: Component Name - description: >- - Write the short name of the rst file, module, plugin, task or - feature below, *use your best guess if unsure*. - placeholder: mysql_user - validations: - required: true - -- type: textarea - attributes: - label: Ansible Version - description: >- - Paste verbatim output from `ansible --version` between - tripple backticks. - value: | - ```console (paste below) - $ ansible --version - - ``` - validations: - required: false - -- type: textarea - attributes: - label: Community.general Version - description: >- - Paste verbatim output from "ansible-galaxy collection list community.general" - between tripple backticks. - value: | - ```console (paste below) - $ ansible-galaxy collection list community.general - - ``` - validations: - required: true - -- type: textarea - attributes: - label: Configuration - description: >- - Paste verbatim output from `ansible-config dump --only-changed` between quotes. - value: | - ```console (paste below) - $ ansible-config dump --only-changed - - ``` - validations: - required: false - -- type: textarea - attributes: - label: OS / Environment - description: >- - Provide all relevant information below, e.g. OS version, - browser, etc. - placeholder: Fedora 33, Firefox etc. - validations: - required: false - -- type: textarea - attributes: - label: Additional Information - description: | - Describe how this improves the documentation, e.g. before/after situation or screenshots. 
- - **Tip:** It's not possible to upload the screenshot via this field directly but you can use the last textarea in this form to attach them. - - **HINT:** You can paste https://gist.github.com links for larger files. - placeholder: >- - When the improvement is applied, it makes it more straightforward - to understand X. - validations: - required: false - -- type: checkboxes - attributes: - label: Code of Conduct - description: | - Read the [Ansible Code of Conduct](https://docs.ansible.com/ansible/latest/community/code_of_conduct.html?utm_medium=github&utm_source=issue_form--ansible-collections) first. - options: - - label: I agree to follow the Ansible Code of Conduct + **HINT:** Did you know the documentation has an `Edit on GitHub` link on every page? + placeholder: >- + I was reading the Collection documentation of version X and I'm having + problems understanding Y. It would be very helpful if that got + rephrased as Z. + validations: required: true + + - type: dropdown + attributes: + label: Issue Type + # FIXME: Once GitHub allows defining the default choice, update this + options: + - Documentation Report + validations: + required: true + + - type: input + attributes: + label: Component Name + description: >- + Write the short name of the file, module, plugin, task or feature below, + *use your best guess if unsure*. Do not include `community.general.`! + placeholder: mysql_user + validations: + required: true + + - type: textarea + attributes: + label: Ansible Version + description: >- + Paste verbatim output from `ansible --version` between + tripple backticks. + value: | + ```console (paste below) + $ ansible --version + + ``` + validations: + required: false + + - type: textarea + attributes: + label: Community.general Version + description: >- + Paste verbatim output from "ansible-galaxy collection list community.general" + between tripple backticks. 
+ value: | + ```console (paste below) + $ ansible-galaxy collection list community.general + + ``` + validations: + required: true + + - type: textarea + attributes: + label: Configuration + description: >- + Paste verbatim output from `ansible-config dump --only-changed` between quotes. + value: | + ```console (paste below) + $ ansible-config dump --only-changed + + ``` + validations: + required: false + + - type: textarea + attributes: + label: OS / Environment + description: >- + Provide all relevant information below, e.g. OS version, + browser, etc. + placeholder: Fedora 33, Firefox etc. + validations: + required: false + + - type: textarea + attributes: + label: Additional Information + description: | + Describe how this improves the documentation, e.g. before/after situation or screenshots. + + **Tip:** It's not possible to upload the screenshot via this field directly but you can use the last textarea in this form to attach them. + + **HINT:** You can paste https://gist.github.com links for larger files. + placeholder: >- + When the improvement is applied, it makes it more straightforward + to understand X. + validations: + required: false + + - type: checkboxes + attributes: + label: Code of Conduct + description: | + Read the [Ansible Code of Conduct](https://docs.ansible.com/ansible/latest/community/code_of_conduct.html?utm_medium=github&utm_source=issue_form--ansible-collections) first. + options: + - label: I agree to follow the Ansible Code of Conduct + required: true ... 
diff --git a/.github/ISSUE_TEMPLATE/feature_request.yml b/.github/ISSUE_TEMPLATE/feature_request.yml index e676ff25ef..dc62f94c5c 100644 --- a/.github/ISSUE_TEMPLATE/feature_request.yml +++ b/.github/ISSUE_TEMPLATE/feature_request.yml @@ -1,69 +1,73 @@ --- +# Copyright (c) Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + name: Feature request description: Suggest an idea for this project body: -- type: markdown - attributes: - value: | - ⚠ - Verify first that your issue is not [already reported on GitHub][issue search]. - Also test if the latest release and devel branch are affected too. - *Complete **all** sections as described, this form is processed automatically.* + - type: markdown + attributes: + value: | + ⚠ + Verify first that your issue is not [already reported on GitHub][issue search]. + Also test if the latest release and devel branch are affected too. + *Complete **all** sections as described, this form is processed automatically.* - [issue search]: https://github.com/ansible-collections/community.general/search?q=is%3Aissue&type=issues + [issue search]: https://github.com/ansible-collections/community.general/search?q=is%3Aissue&type=issues -- type: textarea - attributes: - label: Summary - description: Describe the new feature/improvement briefly below. - placeholder: >- - I am trying to do X with the collection from the main branch on GitHub and - I think that implementing a feature Y would be very helpful for me and - every other user of community.general because of Z. 
- validations: - required: true - -- type: dropdown - attributes: - label: Issue Type - # FIXME: Once GitHub allows defining the default choice, update this - options: - - Feature Idea - validations: - required: true - -- type: input - attributes: - label: Component Name - description: >- - Write the short name of the module, plugin, task or feature below, - *use your best guess if unsure*. - placeholder: dnf, apt, yum, pip, user etc. - validations: - required: true - -- type: textarea - attributes: - label: Additional Information - description: | - Describe how the feature would be used, why it is needed and what it would solve. - - **HINT:** You can paste https://gist.github.com links for larger files. - value: | - - ```yaml (paste below) - - ``` - validations: - required: false -- type: checkboxes - attributes: - label: Code of Conduct - description: | - Read the [Ansible Code of Conduct](https://docs.ansible.com/ansible/latest/community/code_of_conduct.html?utm_medium=github&utm_source=issue_form--ansible-collections) first. - options: - - label: I agree to follow the Ansible Code of Conduct + - type: textarea + attributes: + label: Summary + description: Describe the new feature/improvement briefly below. + placeholder: >- + I am trying to do X with the collection from the main branch on GitHub and + I think that implementing a feature Y would be very helpful for me and + every other user of community.general because of Z. + validations: required: true + + - type: dropdown + attributes: + label: Issue Type + # FIXME: Once GitHub allows defining the default choice, update this + options: + - Feature Idea + validations: + required: true + + - type: input + attributes: + label: Component Name + description: >- + Write the short name of the module or plugin, or which other part(s) of the collection this feature affects. + *use your best guess if unsure*. Do not include `community.general.`! + placeholder: dnf, apt, yum, pip, user etc. 
+ validations: + required: true + + - type: textarea + attributes: + label: Additional Information + description: | + Describe how the feature would be used, why it is needed and what it would solve. + + **HINT:** You can paste https://gist.github.com links for larger files. + value: | + + ```yaml (paste below) + + ``` + validations: + required: false + - type: checkboxes + attributes: + label: Code of Conduct + description: | + Read the [Ansible Code of Conduct](https://docs.ansible.com/ansible/latest/community/code_of_conduct.html?utm_medium=github&utm_source=issue_form--ansible-collections) first. + options: + - label: I agree to follow the Ansible Code of Conduct + required: true ... diff --git a/.github/dependabot.yml b/.github/dependabot.yml index 23c4cb3b50..f71b322d2a 100644 --- a/.github/dependabot.yml +++ b/.github/dependabot.yml @@ -1,7 +1,15 @@ --- +# Copyright (c) Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + version: 2 updates: - package-ecosystem: "github-actions" directory: "/" schedule: interval: "weekly" + groups: + ci: + patterns: + - "*" diff --git a/.github/patchback.yml b/.github/patchback.yml index 33ad6e84a6..5ee7812edb 100644 --- a/.github/patchback.yml +++ b/.github/patchback.yml @@ -1,4 +1,8 @@ --- +# Copyright (c) Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + backport_branch_prefix: patchback/backports/ backport_label_prefix: backport- target_branch_prefix: stable- diff --git a/.github/pull_request_template.md b/.github/pull_request_template.md new file mode 100644 index 0000000000..29a2d2e36a --- /dev/null +++ b/.github/pull_request_template.md @@ -0,0 +1,32 @@ +##### SUMMARY + + + + + + +##### ISSUE TYPE + +- Bugfix Pull Request +- Docs Pull Request +- Feature Pull Request 
+- New Module/Plugin Pull Request +- Refactoring Pull Request +- Test Pull Request + +##### COMPONENT NAME + + +##### ADDITIONAL INFORMATION + + + + +```paste below + +``` diff --git a/.github/pull_request_template.md.license b/.github/pull_request_template.md.license new file mode 100644 index 0000000000..a1390a69ed --- /dev/null +++ b/.github/pull_request_template.md.license @@ -0,0 +1,3 @@ +Copyright (c) Ansible Project +GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +SPDX-License-Identifier: GPL-3.0-or-later diff --git a/.github/settings.yml b/.github/settings.yml index 8a5b8d32f2..3e8a5f9ad8 100644 --- a/.github/settings.yml +++ b/.github/settings.yml @@ -1,3 +1,8 @@ +--- +# Copyright (c) Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + # DO NOT MODIFY # Settings: https://probot.github.io/apps/settings/ diff --git a/.github/workflows/ansible-test.yml b/.github/workflows/ansible-test.yml new file mode 100644 index 0000000000..616c7a843c --- /dev/null +++ b/.github/workflows/ansible-test.yml @@ -0,0 +1,176 @@ +--- +# Copyright (c) Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +# For the comprehensive list of the inputs supported by the ansible-community/ansible-test-gh-action GitHub Action, see +# https://github.com/marketplace/actions/ansible-test + +name: EOL CI +"on": + # Run EOL CI against all pushes (direct commits, also merged PRs), Pull Requests + push: + branches: + - main + - stable-* + pull_request: + # Run EOL CI once per day (at 08:00 UTC) + schedule: + - cron: '0 8 * * *' + +concurrency: + # Make sure there is at most one active run per PR, but do not cancel any non-PR runs + group: ${{ github.workflow }}-${{ (github.head_ref 
&& github.event.number) || github.run_id }} + cancel-in-progress: true + +jobs: + sanity: + name: EOL Sanity (Ⓐ${{ matrix.ansible }}) + strategy: + matrix: + ansible: + - '2.17' + runs-on: ubuntu-latest + steps: + - name: Perform sanity testing + uses: felixfontein/ansible-test-gh-action@main + with: + ansible-core-version: stable-${{ matrix.ansible }} + codecov-token: ${{ secrets.CODECOV_TOKEN }} + coverage: ${{ github.event_name == 'schedule' && 'always' || 'never' }} + pull-request-change-detection: 'true' + testing-type: sanity + pre-test-cmd: >- + git clone --depth=1 --single-branch https://github.com/ansible-collections/community.internal_test_tools.git ../../community/internal_test_tools + + units: + runs-on: ubuntu-latest + name: EOL Units (Ⓐ${{ matrix.ansible }}+py${{ matrix.python }}) + strategy: + # As soon as the first unit test fails, cancel the others to free up the CI queue + fail-fast: true + matrix: + ansible: + - '' + python: + - '' + exclude: + - ansible: '' + include: + - ansible: '2.17' + python: '3.7' + - ansible: '2.17' + python: '3.10' + - ansible: '2.17' + python: '3.12' + + steps: + - name: >- + Perform unit testing against + Ansible version ${{ matrix.ansible }} + uses: felixfontein/ansible-test-gh-action@main + with: + ansible-core-version: stable-${{ matrix.ansible }} + codecov-token: ${{ secrets.CODECOV_TOKEN }} + coverage: ${{ github.event_name == 'schedule' && 'always' || 'never' }} + pre-test-cmd: >- + mkdir -p ../../ansible + ; + git clone --depth=1 --single-branch https://github.com/ansible-collections/community.internal_test_tools.git ../../community/internal_test_tools + pull-request-change-detection: 'true' + target-python-version: ${{ matrix.python }} + testing-type: units + + integration: + runs-on: ubuntu-latest + name: EOL I (Ⓐ${{ matrix.ansible }}+${{ matrix.docker }}+py${{ matrix.python }}:${{ matrix.target }}) + strategy: + fail-fast: false + matrix: + ansible: + - '' + docker: + - '' + python: + - '' + target: + - '' + 
exclude: + - ansible: '' + include: + # 2.17 + - ansible: '2.17' + docker: fedora39 + python: '' + target: azp/posix/1/ + - ansible: '2.17' + docker: fedora39 + python: '' + target: azp/posix/2/ + - ansible: '2.17' + docker: fedora39 + python: '' + target: azp/posix/3/ + - ansible: '2.17' + docker: ubuntu2004 + python: '' + target: azp/posix/1/ + - ansible: '2.17' + docker: ubuntu2004 + python: '' + target: azp/posix/2/ + - ansible: '2.17' + docker: ubuntu2004 + python: '' + target: azp/posix/3/ + - ansible: '2.17' + docker: alpine319 + python: '' + target: azp/posix/1/ + - ansible: '2.17' + docker: alpine319 + python: '' + target: azp/posix/2/ + - ansible: '2.17' + docker: alpine319 + python: '' + target: azp/posix/3/ + # Right now all generic tests are disabled. Uncomment when at least one of them is re-enabled. + # - ansible: '2.17' + # docker: default + # python: '3.7' + # target: azp/generic/1/ + # - ansible: '2.17' + # docker: default + # python: '3.12' + # target: azp/generic/1/ + + steps: + - name: >- + Perform integration testing against + Ansible version ${{ matrix.ansible }} + under Python ${{ matrix.python }} + uses: felixfontein/ansible-test-gh-action@main + with: + ansible-core-version: stable-${{ matrix.ansible }} + codecov-token: ${{ secrets.CODECOV_TOKEN }} + coverage: ${{ github.event_name == 'schedule' && 'always' || 'never' }} + docker-image: ${{ matrix.docker }} + integration-continue-on-error: 'false' + integration-diff: 'false' + integration-retry-on-error: 'true' + # TODO: remove "--branch stable-2" from community.crypto install once we're only using ansible-core 2.17 or newer! 
+ pre-test-cmd: >- + mkdir -p ../../ansible + ; + git clone --depth=1 --single-branch https://github.com/ansible-collections/ansible.posix.git ../../ansible/posix + ; + git clone --depth=1 --single-branch --branch stable-2 https://github.com/ansible-collections/community.crypto.git ../../community/crypto + ; + git clone --depth=1 --single-branch https://github.com/ansible-collections/community.docker.git ../../community/docker + ; + git clone --depth=1 --single-branch https://github.com/ansible-collections/community.internal_test_tools.git ../../community/internal_test_tools + pull-request-change-detection: 'true' + target: ${{ matrix.target }} + target-python-version: ${{ matrix.python }} + testing-type: integration diff --git a/.github/workflows/codeql-analysis.yml b/.github/workflows/codeql-analysis.yml index dfaf617752..3c6776929d 100644 --- a/.github/workflows/codeql-analysis.yml +++ b/.github/workflows/codeql-analysis.yml @@ -1,8 +1,14 @@ +--- +# Copyright (c) Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + name: "Code scanning - action" -on: +"on": schedule: - cron: '26 19 * * 1' + workflow_dispatch: permissions: contents: read @@ -17,40 +23,16 @@ jobs: runs-on: ubuntu-latest steps: - - name: Checkout repository - uses: actions/checkout@v3 - with: - # We must fetch at least the immediate parents so that if this is - # a pull request then we can checkout the head. - fetch-depth: 2 + - name: Checkout repository + uses: actions/checkout@v5 + with: + persist-credentials: false - # If this run was triggered by a pull request event, then checkout - # the head of the pull request instead of the merge commit. - - run: git checkout HEAD^2 - if: ${{ github.event_name == 'pull_request' }} + # Initializes the CodeQL tools for scanning. 
+ - name: Initialize CodeQL + uses: github/codeql-action/init@v4 + with: + languages: python - # Initializes the CodeQL tools for scanning. - - name: Initialize CodeQL - uses: github/codeql-action/init@v2 - # Override language selection by uncommenting this and choosing your languages - # with: - # languages: go, javascript, csharp, python, cpp, java - - # Autobuild attempts to build any compiled languages (C/C++, C#, or Java). - # If this step fails, then you should remove it and run the build manually (see below) - - name: Autobuild - uses: github/codeql-action/autobuild@v2 - - # ℹ️ Command-line programs to run using the OS shell. - # 📚 https://git.io/JvXDl - - # ✏️ If the Autobuild fails above, remove it and uncomment the following three lines - # and modify them (or add more) to build your code if your project - # uses a compiled language - - #- run: | - # make bootstrap - # make release - - - name: Perform CodeQL Analysis - uses: github/codeql-action/analyze@v2 + - name: Perform CodeQL Analysis + uses: github/codeql-action/analyze@v4 diff --git a/.github/workflows/nox.yml b/.github/workflows/nox.yml new file mode 100644 index 0000000000..81c6563811 --- /dev/null +++ b/.github/workflows/nox.yml @@ -0,0 +1,28 @@ +--- +# Copyright (c) Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +name: nox +'on': + push: + branches: + - main + - stable-* + pull_request: + # Run CI once per day (at 08:00 UTC) + schedule: + - cron: '0 8 * * *' + workflow_dispatch: + +jobs: + nox: + runs-on: ubuntu-latest + name: "Run extra sanity tests" + steps: + - name: Check out collection + uses: actions/checkout@v5 + with: + persist-credentials: false + - name: Run nox + uses: ansible-community/antsibull-nox@main diff --git a/.gitignore b/.gitignore index b95546f623..e427699798 100644 --- a/.gitignore +++ b/.gitignore @@ -1,3 +1,6 @@ +# Copyright (c) Ansible 
Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later # Created by https://www.toptal.com/developers/gitignore/api/vim,git,macos,linux,pydev,emacs,dotenv,python,windows,webstorm,pycharm+all,jupyternotebooks # Edit at https://www.toptal.com/developers/gitignore?templates=vim,git,macos,linux,pydev,emacs,dotenv,python,windows,webstorm,pycharm+all,jupyternotebooks @@ -380,6 +383,16 @@ cython_debug/ # option (not recommended) you can uncomment the following to ignore the entire idea folder. #.idea/ +### Python Patch ### +# Poetry local configuration file - https://python-poetry.org/docs/configuration/#local-configuration +poetry.toml + +# ruff +.ruff_cache/ + +# LSP config files +pyrightconfig.json + ### Vim ### # Swap [._]*.s[a-v][a-z] @@ -479,6 +492,10 @@ tags # https://plugins.jetbrains.com/plugin/12206-codestream .idea/codestream.xml +# Azure Toolkit for IntelliJ plugin +# https://plugins.jetbrains.com/plugin/8053-azure-toolkit-for-intellij +.idea/**/azureSettings.xml + ### Windows ### # Windows thumbnail cache files Thumbs.db @@ -505,4 +522,12 @@ $RECYCLE.BIN/ # Windows shortcuts *.lnk -# End of https://www.toptal.com/developers/gitignore/api/vim,git,macos,linux,pydev,emacs,dotenv,python,windows,webstorm,pycharm+all,jupyternotebooks \ No newline at end of file +# End of https://www.toptal.com/developers/gitignore/api/vim,git,macos,linux,pydev,emacs,dotenv,python,windows,webstorm,pycharm+all,jupyternotebooks + +# Integration tests cloud configs +tests/integration/cloud-config-*.ini + + +# VSCode specific extensions +.vscode/settings.json +.ansible diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml deleted file mode 100644 index 0342e8054a..0000000000 --- a/.pre-commit-config.yaml +++ /dev/null @@ -1,19 +0,0 @@ ---- -repos: - - repo: https://github.com/pre-commit/pre-commit-hooks - rev: v4.0.1 - hooks: - - id: trailing-whitespace - - id: 
end-of-file-fixer - - id: mixed-line-ending - args: [--fix=lf] - - id: fix-encoding-pragma - - id: check-ast - - id: check-merge-conflict - - id: check-symlinks - - repo: https://github.com/pre-commit/pygrep-hooks - rev: v1.9.0 - hooks: - - id: rst-backticks - types: [file] - files: changelogs/fragments/.*\.(yml|yaml)$ diff --git a/.yamllint b/.yamllint new file mode 100644 index 0000000000..c10d86ab19 --- /dev/null +++ b/.yamllint @@ -0,0 +1,52 @@ +--- +# Copyright (c) Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +extends: default + +ignore: | + /changelogs/ + +rules: + line-length: + max: 1000 + level: error + document-start: disable + document-end: disable + truthy: + level: error + allowed-values: + - 'true' + - 'false' + indentation: + spaces: 2 + indent-sequences: true + key-duplicates: enable + trailing-spaces: enable + new-line-at-end-of-file: disable + hyphens: + max-spaces-after: 1 + empty-lines: + max: 2 + max-start: 0 + max-end: 0 + commas: + max-spaces-before: 0 + min-spaces-after: 1 + max-spaces-after: 1 + colons: + max-spaces-before: 0 + max-spaces-after: 1 + brackets: + min-spaces-inside: 0 + max-spaces-inside: 0 + braces: + min-spaces-inside: 0 + max-spaces-inside: 1 + octal-values: + forbid-implicit-octal: true + forbid-explicit-octal: true + comments: + min-spaces-from-content: 1 + comments-indentation: false diff --git a/CHANGELOG.md b/CHANGELOG.md new file mode 100644 index 0000000000..b35c52441b --- /dev/null +++ b/CHANGELOG.md @@ -0,0 +1,5 @@ +# Placeholder changelog + +This file is a placeholder; a version-specific `CHANGELOG-vX.md` will be generated during releases from fragments +under `changelogs/fragments`. On release branches once a release has been created, consult the branch's version-specific +file for changes that have occurred in that branch. 
diff --git a/CHANGELOG.md.license b/CHANGELOG.md.license new file mode 100644 index 0000000000..edff8c7685 --- /dev/null +++ b/CHANGELOG.md.license @@ -0,0 +1,3 @@ +GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +SPDX-License-Identifier: GPL-3.0-or-later +SPDX-FileCopyrightText: Ansible Project diff --git a/CHANGELOG.rst b/CHANGELOG.rst index f9538c247a..119e04e170 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -1,352 +1,6 @@ -=============================== -Community General Release Notes -=============================== +Placeholder changelog +===================== -.. contents:: Topics - -This changelog describes changes after version 4.0.0. - -v5.0.1 -====== - -Release Summary ---------------- - -Regular bugfix release for inclusion in Ansible 6.0.0. - -Minor Changes -------------- - -- cpanm - using ``do_raise()`` to raise exceptions in ``ModuleHelper`` derived modules (https://github.com/ansible-collections/community.general/pull/4674). -- mksysb - using ``do_raise()`` to raise exceptions in ``ModuleHelper`` derived modules (https://github.com/ansible-collections/community.general/pull/4674). -- pipx - using ``do_raise()`` to raise exceptions in ``ModuleHelper`` derived modules (https://github.com/ansible-collections/community.general/pull/4674). -- snap - using ``do_raise()`` to raise exceptions in ``ModuleHelper`` derived modules (https://github.com/ansible-collections/community.general/pull/4674). -- xfconf - using ``do_raise()`` to raise exceptions in ``ModuleHelper`` derived modules (https://github.com/ansible-collections/community.general/pull/4674). - -Bugfixes --------- - -- consul - fixed bug introduced in PR 4590 (https://github.com/ansible-collections/community.general/issues/4680). -- filesystem - handle ``fatresize --info`` output lines without ``:`` (https://github.com/ansible-collections/community.general/pull/4700). 
-- filesystem - improve error messages when output cannot be parsed by including newlines in escaped form (https://github.com/ansible-collections/community.general/pull/4700). -- keycloak_realm - fix default groups and roles (https://github.com/ansible-collections/community.general/issues/4241). -- redis* modules - fix call to ``module.fail_json`` when failing because of missing Python libraries (https://github.com/ansible-collections/community.general/pull/4733). -- xcc_redfish_command - for compatibility due to Redfish spec changes the virtualMedia resource location changed from Manager to System (https://github.com/ansible-collections/community.general/pull/4682). -- zfs - fix wrong quoting of properties (https://github.com/ansible-collections/community.general/issues/4707, https://github.com/ansible-collections/community.general/pull/4726). - -v5.0.0 -====== - -Release Summary ---------------- - -This is release 5.0.0 of ``community.general``, released on 2022-05-17. - -Major Changes -------------- - -- The community.general collection no longer supports Ansible 2.9 and ansible-base 2.10. While we take no active measures to prevent usage, we will remove a lot of compatibility code and other compatility measures that will effectively prevent using most content from this collection with Ansible 2.9, and some content of this collection with ansible-base 2.10. Both Ansible 2.9 and ansible-base 2.10 will very soon be End of Life and if you are still using them, you should consider upgrading to ansible-core 2.11 or later as soon as possible (https://github.com/ansible-collections/community.general/pull/4548). - -Minor Changes -------------- - -- Avoid internal ansible-core module_utils in favor of equivalent public API available since at least Ansible 2.9. This fixes some instances added since the last time this was fixed (https://github.com/ansible-collections/community.general/pull/4232). 
-- ModuleHelper module utils - ``ModuleHelperBase` now delegates the attributes ``check_mode``, ``get_bin_path``, ``warn``, and ``deprecate`` to the underlying ``AnsibleModule`` instance (https://github.com/ansible-collections/community.general/pull/4600). -- ModuleHelper module utils - ``ModuleHelperBase`` now has a convenience method ``do_raise`` (https://github.com/ansible-collections/community.general/pull/4660). -- Remove vendored copy of ``distutils.version`` in favor of vendored copy included with ansible-core 2.12+. For ansible-core 2.11, uses ``distutils.version`` for Python < 3.12. There is no support for ansible-core 2.11 with Python 3.12+ (https://github.com/ansible-collections/community.general/pull/3988). -- aix_filesystem - calling ``run_command`` with arguments as ``list`` instead of ``str`` (https://github.com/ansible-collections/community.general/pull/3833). -- aix_lvg - calling ``run_command`` with arguments as ``list`` instead of ``str`` (https://github.com/ansible-collections/community.general/pull/3834). -- alternatives - add ``state`` parameter, which provides control over whether the alternative should be set as the active selection for its alternatives group (https://github.com/ansible-collections/community.general/issues/4543, https://github.com/ansible-collections/community.general/pull/4557). -- ansible_galaxy_install - added option ``no_deps`` to the module (https://github.com/ansible-collections/community.general/issues/4174). -- atomic_container - minor refactoring (https://github.com/ansible-collections/community.general/pull/4567). -- clc_alert_policy - minor refactoring (https://github.com/ansible-collections/community.general/pull/4556). -- clc_group - minor refactoring (https://github.com/ansible-collections/community.general/pull/4556). -- clc_loadbalancer - minor refactoring (https://github.com/ansible-collections/community.general/pull/4556). 
-- clc_server - minor refactoring (https://github.com/ansible-collections/community.general/pull/4556). -- cmd_runner module util - reusable command runner with consistent argument formatting and sensible defaults (https://github.com/ansible-collections/community.general/pull/4476). -- cobbler inventory plugin - add ``include_profiles`` option (https://github.com/ansible-collections/community.general/pull/4068). -- datadog_monitor - support new datadog event monitor of type `event-v2 alert` (https://github.com/ansible-collections/community.general/pull/4457) -- filesystem - add support for resizing btrfs (https://github.com/ansible-collections/community.general/issues/4465). -- gitlab - add more token authentication support with the new options ``api_oauth_token`` and ``api_job_token`` (https://github.com/ansible-collections/community.general/issues/705). -- gitlab - clean up modules and utils (https://github.com/ansible-collections/community.general/pull/3694). -- gitlab_group, gitlab_project - add new option ``avatar_path`` (https://github.com/ansible-collections/community.general/pull/3792). -- gitlab_group_variable - new ``variables`` parameter (https://github.com/ansible-collections/community.general/pull/4038 and https://github.com/ansible-collections/community.general/issues/4074). -- gitlab_project - add new option ``default_branch`` to gitlab_project (if ``readme = true``) (https://github.com/ansible-collections/community.general/pull/3792). -- gitlab_project_variable - new ``variables`` parameter (https://github.com/ansible-collections/community.general/issues/4038). -- hponcfg - revamped module using ModuleHelper (https://github.com/ansible-collections/community.general/pull/3840). -- icinga2 inventory plugin - added the ``display_name`` field to variables (https://github.com/ansible-collections/community.general/issues/3875, https://github.com/ansible-collections/community.general/pull/3906). 
-- icinga2 inventory plugin - implemented constructed interface (https://github.com/ansible-collections/community.general/pull/4088). -- icinga2 inventory plugin - inventory object names are changable using ``inventory_attr`` in your config file to the host object name, address, or display_name fields (https://github.com/ansible-collections/community.general/issues/3875, https://github.com/ansible-collections/community.general/pull/3906). -- ip_netns - calling ``run_command`` with arguments as ``list`` instead of ``str`` (https://github.com/ansible-collections/community.general/pull/3822). -- ipa_dnsrecord - add new argument ``record_values``, mutually exclusive to ``record_value``, which supports multiple values for one record (https://github.com/ansible-collections/community.general/pull/4578). -- ipa_dnszone - ``dynamicupdate`` is now a boolean parameter, instead of a string parameter accepting ``"true"`` and ``"false"``. Also the module is now idempotent with respect to ``dynamicupdate`` (https://github.com/ansible-collections/community.general/pull/3374). -- ipa_dnszone - add DNS zone synchronization support (https://github.com/ansible-collections/community.general/pull/3374). -- ipa_service - add ``skip_host_check`` parameter. (https://github.com/ansible-collections/community.general/pull/4417). -- ipmi_boot - add support for user-specified IPMI encryption key (https://github.com/ansible-collections/community.general/issues/3698). -- ipmi_power - add ``machine`` option to ensure the power state via the remote target address (https://github.com/ansible-collections/community.general/pull/3968). -- ipmi_power - add support for user-specified IPMI encryption key (https://github.com/ansible-collections/community.general/issues/3698). -- iso_extract - calling ``run_command`` with arguments as ``list`` instead of ``str`` (https://github.com/ansible-collections/community.general/pull/3805). 
-- java_cert - calling ``run_command`` with arguments as ``list`` instead of ``str`` (https://github.com/ansible-collections/community.general/pull/3835). -- jira - add support for Bearer token auth (https://github.com/ansible-collections/community.general/pull/3838). -- jira - when creating a comment, ``fields`` now is used for additional data (https://github.com/ansible-collections/community.general/pull/4304). -- keycloak_* modules - added connection timeout parameter when calling server (https://github.com/ansible-collections/community.general/pull/4168). -- keycloak_client - add ``always_display_in_console`` parameter (https://github.com/ansible-collections/community.general/issues/4390). -- keycloak_client - add ``default_client_scopes`` and ``optional_client_scopes`` parameters. (https://github.com/ansible-collections/community.general/pull/4385). -- keycloak_user_federation - add sssd user federation support (https://github.com/ansible-collections/community.general/issues/3767). -- ldap_entry - add support for recursive deletion (https://github.com/ansible-collections/community.general/issues/3613). -- linode inventory plugin - add support for caching inventory results (https://github.com/ansible-collections/community.general/pull/4179). -- linode inventory plugin - allow templating of ``access_token`` variable in Linode inventory plugin (https://github.com/ansible-collections/community.general/pull/4040). -- listen_ports_facts - add support for ``ss`` command besides ``netstat`` (https://github.com/ansible-collections/community.general/pull/3708). -- lists_mergeby filter plugin - add parameters ``list_merge`` and ``recursive``. These are only supported when used with ansible-base 2.10 or ansible-core, but not with Ansible 2.9 (https://github.com/ansible-collections/community.general/pull/4058). -- logentries - calling ``run_command`` with arguments as ``list`` instead of ``str`` (https://github.com/ansible-collections/community.general/pull/3807). 
-- logstash_plugin - calling ``run_command`` with arguments as ``list`` instead of ``str`` (https://github.com/ansible-collections/community.general/pull/3808). -- lxc_container - added ``wait_for_container`` parameter. If ``true`` the module will wait until the running task reports success as the status (https://github.com/ansible-collections/community.general/pull/4039). -- lxc_container - calling ``run_command`` with arguments as ``list`` instead of ``str`` (https://github.com/ansible-collections/community.general/pull/3851). -- lxd connection plugin - make sure that ``ansible_lxd_host``, ``ansible_executable``, and ``ansible_lxd_executable`` work (https://github.com/ansible-collections/community.general/pull/3798). -- lxd inventory plugin - support virtual machines (https://github.com/ansible-collections/community.general/pull/3519). -- lxd_container - adds ``project`` option to allow selecting project for LXD instance (https://github.com/ansible-collections/community.general/pull/4479). -- lxd_container - adds ``type`` option which also allows to operate on virtual machines and not just containers (https://github.com/ansible-collections/community.general/pull/3661). -- lxd_profile - adds ``project`` option to allow selecting project for LXD profile (https://github.com/ansible-collections/community.general/pull/4479). -- mail callback plugin - add ``Message-ID`` and ``Date`` headers (https://github.com/ansible-collections/community.general/issues/4055, https://github.com/ansible-collections/community.general/pull/4056). -- mail callback plugin - properly use Ansible's option handling to split lists (https://github.com/ansible-collections/community.general/pull/4140). -- mattermost - add the possibility to send attachments instead of text messages (https://github.com/ansible-collections/community.general/pull/3946). -- mksysb - revamped the module using ``ModuleHelper`` (https://github.com/ansible-collections/community.general/pull/3295). 
-- module_helper module utils - added decorators ``check_mode_skip`` and ``check_mode_skip_returns`` for skipping methods when ``check_mode=True`` (https://github.com/ansible-collections/community.general/pull/3849). -- monit - calling ``run_command`` with arguments as ``list`` instead of ``str`` (https://github.com/ansible-collections/community.general/pull/3821). -- nmap inventory plugin - add ``sudo`` option in plugin in order to execute ``sudo nmap`` so that ``nmap`` runs with elevated privileges (https://github.com/ansible-collections/community.general/pull/4506). -- nmcli - add ``wireguard`` connection type (https://github.com/ansible-collections/community.general/pull/3985). -- nmcli - add missing connection aliases ``802-3-ethernet`` and ``802-11-wireless`` (https://github.com/ansible-collections/community.general/pull/4108). -- nmcli - add multiple addresses support for ``ip4`` parameter (https://github.com/ansible-collections/community.general/issues/1088, https://github.com/ansible-collections/community.general/pull/3738). -- nmcli - add multiple addresses support for ``ip6`` parameter (https://github.com/ansible-collections/community.general/issues/1088). -- nmcli - add support for ``eui64`` and ``ipv6privacy`` parameters (https://github.com/ansible-collections/community.general/issues/3357). -- nmcli - adds ``routes6`` and ``route_metric6`` parameters for supporting IPv6 routes (https://github.com/ansible-collections/community.general/issues/4059). -- nmcli - remove nmcli modify dependency on ``type`` parameter (https://github.com/ansible-collections/community.general/issues/2858). -- nomad_job - minor refactoring (https://github.com/ansible-collections/community.general/pull/4567). -- nomad_job_info - minor refactoring (https://github.com/ansible-collections/community.general/pull/4567). -- npm - add ability to use ``production`` flag when ``ci`` is set (https://github.com/ansible-collections/community.general/pull/4299). 
-- open_iscsi - extended module to allow rescanning of established session for one or all targets (https://github.com/ansible-collections/community.general/issues/3763). -- opennebula - add the release action for VMs in the ``HOLD`` state (https://github.com/ansible-collections/community.general/pull/4036). -- opentelemetry_plugin - enrich service when using the ``docker_login`` (https://github.com/ansible-collections/community.general/pull/4104). -- opentelemetry_plugin - enrich service when using the ``jenkins``, ``hetzner`` or ``jira`` modules (https://github.com/ansible-collections/community.general/pull/4105). -- packet_device - minor refactoring (https://github.com/ansible-collections/community.general/pull/4567). -- packet_sshkey - minor refactoring (https://github.com/ansible-collections/community.general/pull/4567). -- packet_volume - minor refactoring (https://github.com/ansible-collections/community.general/pull/4567). -- pacman - add ``remove_nosave`` parameter to avoid saving modified configuration files as ``.pacsave`` files. (https://github.com/ansible-collections/community.general/pull/4316, https://github.com/ansible-collections/community.general/issues/4315). -- pacman - add ``stdout`` and ``stderr`` as return values (https://github.com/ansible-collections/community.general/pull/3758). -- pacman - now implements proper change detection for ``update_cache=true``. Adds ``cache_updated`` return value to when ``update_cache=true`` to report this result independently of the module's overall changed return value (https://github.com/ansible-collections/community.general/pull/4337). -- pacman - the module has been rewritten and is now much faster when using ``state=latest``. Operations are now done all packages at once instead of package per package and the configured output format of ``pacman`` no longer affect the module's operation. 
(https://github.com/ansible-collections/community.general/pull/3907, https://github.com/ansible-collections/community.general/issues/3783, https://github.com/ansible-collections/community.general/issues/4079) -- passwordstore lookup plugin - add configurable ``lock`` and ``locktimeout`` options to avoid race conditions in itself and in the ``pass`` utility it calls. By default, the plugin now locks on write operations (https://github.com/ansible-collections/community.general/pull/4194). -- pipx - added options ``editable`` and ``pip_args`` (https://github.com/ansible-collections/community.general/issues/4300). -- pritunl_user - add ``mac_addresses`` parameter (https://github.com/ansible-collections/community.general/pull/4535). -- profitbricks - minor refactoring (https://github.com/ansible-collections/community.general/pull/4567). -- proxmox - add ``clone`` parameter (https://github.com/ansible-collections/community.general/pull/3930). -- proxmox - minor refactoring (https://github.com/ansible-collections/community.general/pull/4567). -- proxmox inventory plugin - add support for client-side jinja filters (https://github.com/ansible-collections/community.general/issues/3553). -- proxmox inventory plugin - add support for templating the ``url``, ``user``, and ``password`` options (https://github.com/ansible-collections/community.general/pull/4418). -- proxmox inventory plugin - add token authentication as an alternative to username/password (https://github.com/ansible-collections/community.general/pull/4540). -- proxmox inventory plugin - parse LXC configs returned by the proxmox API (https://github.com/ansible-collections/community.general/pull/4472). -- proxmox modules - move ``HAS_PROXMOXER`` check into ``module_utils`` (https://github.com/ansible-collections/community.general/pull/4030). -- proxmox modules - move common code into ``module_utils`` (https://github.com/ansible-collections/community.general/pull/4029). 
-- proxmox_kvm - added EFI disk support when creating VM with OVMF UEFI BIOS with new ``efidisk0`` option (https://github.com/ansible-collections/community.general/pull/4106, https://github.com/ansible-collections/community.general/issues/1638). -- proxmox_kwm - add ``win11`` to ``ostype`` parameter for Windows 11 and Windows Server 2022 support (https://github.com/ansible-collections/community.general/issues/4023, https://github.com/ansible-collections/community.general/pull/4191). -- proxmox_snap - add restore snapshot option (https://github.com/ansible-collections/community.general/pull/4377). -- proxmox_snap - fixed timeout value to correctly reflect time in seconds. The timeout was off by one second (https://github.com/ansible-collections/community.general/pull/4377). -- puppet - remove deprecation for ``show_diff`` parameter. Its alias ``show-diff`` is still deprecated and will be removed in community.general 7.0.0 (https://github.com/ansible-collections/community.general/pull/3980). -- python_requirements_info - returns python version broken down into its components, and some minor refactoring (https://github.com/ansible-collections/community.general/pull/3797). -- rax_files_objects - minor refactoring improving code quality (https://github.com/ansible-collections/community.general/pull/4649). -- redfish_* modules - the contents of ``@Message.ExtendedInfo`` will be returned as a string in the event that ``@Message.ExtendedInfo.Messages`` does not exist. This is likely more useful than the standard HTTP error (https://github.com/ansible-collections/community.general/pull/4596). -- redfish_command - add ``GetHostInterfaces`` command to enable reporting Redfish Host Interface information (https://github.com/ansible-collections/community.general/issues/3693). 
-- redfish_command - add ``IndicatorLedOn``, ``IndicatorLedOff``, and ``IndicatorLedBlink`` commands to the Systems category for controling system LEDs (https://github.com/ansible-collections/community.general/issues/4084). -- redfish_command - add ``SetHostInterface`` command to enable configuring the Redfish Host Interface (https://github.com/ansible-collections/community.general/issues/3632). -- redis - add authentication parameters ``login_user``, ``tls``, ``validate_certs``, and ``ca_certs`` (https://github.com/ansible-collections/community.general/pull/4207). -- scaleway inventory plugin - add profile parameter ``scw_profile`` (https://github.com/ansible-collections/community.general/pull/4049). -- scaleway_compute - add possibility to use project identifier (new ``project`` option) instead of deprecated organization identifier (https://github.com/ansible-collections/community.general/pull/3951). -- scaleway_volume - all volumes are systematically created on par1 (https://github.com/ansible-collections/community.general/pull/3964). -- seport - minor refactoring (https://github.com/ansible-collections/community.general/pull/4471). -- smartos_image_info - minor refactoring (https://github.com/ansible-collections/community.general/pull/4567). -- snap - add option ``options`` permitting to set options using the ``snap set`` command (https://github.com/ansible-collections/community.general/pull/3943). -- sudoers - add support for ``runas`` parameter (https://github.com/ansible-collections/community.general/issues/4379). -- svc - calling ``run_command`` with arguments as ``list`` instead of ``str`` (https://github.com/ansible-collections/community.general/pull/3829). -- syslog_json - add option to skip logging of ``gather_facts`` playbook tasks; use v2 callback API (https://github.com/ansible-collections/community.general/pull/4223). 
-- terraform - adds ``terraform_upgrade`` parameter which allows ``terraform init`` to satisfy new provider constraints in an existing Terraform project (https://github.com/ansible-collections/community.general/issues/4333). -- to_time_unit filter plugins - the time filters has been extended to also allow ``0`` as input (https://github.com/ansible-collections/community.general/pull/4612). -- udm_group - minor refactoring (https://github.com/ansible-collections/community.general/pull/4556). -- udm_share - minor refactoring (https://github.com/ansible-collections/community.general/pull/4556). -- vmadm - minor refactoring (https://github.com/ansible-collections/community.general/pull/4567). -- vmadm - minor refactoring and improvement on the module (https://github.com/ansible-collections/community.general/pull/4581). -- vmadm - minor refactoring and improvement on the module (https://github.com/ansible-collections/community.general/pull/4648). -- webfaction_app - minor refactoring (https://github.com/ansible-collections/community.general/pull/4567). -- webfaction_db - minor refactoring (https://github.com/ansible-collections/community.general/pull/4567). -- xattr - calling ``run_command`` with arguments as ``list`` instead of ``str`` (https://github.com/ansible-collections/community.general/pull/3806). -- xfconf - added missing value types ``char``, ``uchar``, ``int64`` and ``uint64`` (https://github.com/ansible-collections/community.general/pull/4534). -- xfconf - minor refactor on the base class for the module (https://github.com/ansible-collections/community.general/pull/3919). -- zfs - minor refactoring in the code (https://github.com/ansible-collections/community.general/pull/4650). -- zypper - add support for ``--clean-deps`` option to remove packages that depend on a package being removed (https://github.com/ansible-collections/community.general/pull/4195). 
- -Breaking Changes / Porting Guide --------------------------------- - -- Parts of this collection do not work with ansible-core 2.11 on Python 3.12+. Please either upgrade to ansible-core 2.12+, or use Python 3.11 or earlier (https://github.com/ansible-collections/community.general/pull/3988). -- The symbolic links used to implement flatmapping for all modules were removed and replaced by ``meta/runtime.yml`` redirects. This effectively breaks compatibility with Ansible 2.9 for all modules (without using their "long" names, which is discouraged and which can change without previous notice since they are considered an implementation detail) (https://github.com/ansible-collections/community.general/pull/4548). -- a_module test plugin - remove Ansible 2.9 compatibility code (https://github.com/ansible-collections/community.general/pull/4548). -- archive - remove Ansible 2.9 compatibility code (https://github.com/ansible-collections/community.general/pull/4548). -- git_config - remove Ansible 2.9 and early ansible-base 2.10 compatibility code (https://github.com/ansible-collections/community.general/pull/4548). -- java_keystore - remove Ansible 2.9 compatibility code (https://github.com/ansible-collections/community.general/pull/4548). -- lists_mergeby and groupby_as_dict filter plugins - adjust filter plugin filename. This change is not visible to end-users, it only affects possible other collections importing Python paths (https://github.com/ansible-collections/community.general/pull/4625). -- lists_mergeby filter plugin - remove Ansible 2.9 compatibility code (https://github.com/ansible-collections/community.general/pull/4548). -- maven_artifact - remove Ansible 2.9 compatibility code (https://github.com/ansible-collections/community.general/pull/4548). -- memcached cache plugin - remove Ansible 2.9 compatibility code (https://github.com/ansible-collections/community.general/pull/4548). 
-- path_join filter plugin shim - remove Ansible 2.9 compatibility code (https://github.com/ansible-collections/community.general/pull/4548). -- redis cache plugin - remove Ansible 2.9 compatibility code (https://github.com/ansible-collections/community.general/pull/4548). -- yarn - remove unsupported and unnecessary ``--no-emoji`` flag (https://github.com/ansible-collections/community.general/pull/4662). - -Deprecated Features -------------------- - -- ansible_galaxy_install - deprecated support for ``ansible`` 2.9 and ``ansible-base`` 2.10 (https://github.com/ansible-collections/community.general/pull/4601). -- dig lookup plugin - the ``DLV`` record type has been decommissioned in 2017 and support for it will be removed from community.general 6.0.0 (https://github.com/ansible-collections/community.general/pull/4618). -- gem - the default of the ``norc`` option has been deprecated and will change to ``true`` in community.general 6.0.0. Explicitly specify a value to avoid a deprecation warning (https://github.com/ansible-collections/community.general/pull/4517). -- mail callback plugin - not specifying ``sender`` is deprecated and will be disallowed in community.general 6.0.0 (https://github.com/ansible-collections/community.general/pull/4140). -- module_helper module utils - deprecated the attribute ``ModuleHelper.VarDict`` (https://github.com/ansible-collections/community.general/pull/3801). -- nmcli - deprecate default hairpin mode for a bridge. This so we can change it to ``false`` in community.general 7.0.0, as this is also the default in ``nmcli`` (https://github.com/ansible-collections/community.general/pull/4334). -- pacman - from community.general 5.0.0 on, the ``changed`` status of ``update_cache`` will no longer be ignored if ``name`` or ``upgrade`` is specified. 
To keep the old behavior, add something like ``register: result`` and ``changed_when: result.packages | length > 0`` to your task (https://github.com/ansible-collections/community.general/pull/4329). -- proxmox inventory plugin - the current default ``true`` of the ``want_proxmox_nodes_ansible_host`` option has been deprecated. The default will change to ``false`` in community.general 6.0.0. To keep the current behavior, explicitly set ``want_proxmox_nodes_ansible_host`` to ``true`` in your inventory configuration. We suggest to already switch to the new behavior by explicitly setting it to ``false``, and by using ``compose:`` to set ``ansible_host`` to the correct value. See the examples in the plugin documentation for details (https://github.com/ansible-collections/community.general/pull/4466). -- vmadm - deprecated module parameter ``debug`` that was not used anywhere (https://github.com/ansible-collections/community.general/pull/4580). - -Removed Features (previously deprecated) ----------------------------------------- - -- ali_instance_info - removed the options ``availability_zone``, ``instance_ids``, and ``instance_names``. Use filter item ``zone_id`` instead of ``availability_zone``, filter item ``instance_ids`` instead of ``instance_ids``, and filter item ``instance_name`` instead of ``instance_names`` (https://github.com/ansible-collections/community.general/pull/4516). -- apt_rpm - removed the deprecated alias ``update-cache`` of ``update_cache`` (https://github.com/ansible-collections/community.general/pull/4516). -- compose - removed various deprecated aliases. Use the version with ``_`` instead of ``-`` instead (https://github.com/ansible-collections/community.general/pull/4516). -- dnsimple - remove support for dnsimple < 2.0.0 (https://github.com/ansible-collections/community.general/pull/4516). -- github_deploy_key - removed the deprecated alias ``2fa_token`` of ``otp`` (https://github.com/ansible-collections/community.general/pull/4516). 
-- homebrew, homebrew_cask - removed the deprecated alias ``update-brew`` of ``update_brew`` (https://github.com/ansible-collections/community.general/pull/4516). -- linode - removed the ``backupsenabled`` option. Use ``backupweeklyday`` or ``backupwindow`` to enable backups (https://github.com/ansible-collections/community.general/pull/4516). -- opkg - removed the deprecated alias ``update-cache`` of ``update_cache`` (https://github.com/ansible-collections/community.general/pull/4516). -- pacman - if ``update_cache=true`` is used with ``name`` or ``upgrade``, the changed state will now also indicate if only the cache was updated. To keep the old behavior - only indicate ``changed`` when a package was installed/upgraded -, use ``changed_when`` as indicated in the module examples (https://github.com/ansible-collections/community.general/pull/4516). -- pacman - removed the deprecated alias ``update-cache`` of ``update_cache`` (https://github.com/ansible-collections/community.general/pull/4516). -- proxmox, proxmox_kvm, proxmox_snap - no longer allow to specify a VM name that matches multiple VMs. If this happens, the modules now fail (https://github.com/ansible-collections/community.general/pull/4516). -- serverless - removed the ``functions`` option. It was not used by the module (https://github.com/ansible-collections/community.general/pull/4516). -- slackpkg - removed the deprecated alias ``update-cache`` of ``update_cache`` (https://github.com/ansible-collections/community.general/pull/4516). -- urpmi - removed the deprecated alias ``no-recommends`` of ``no_recommends`` (https://github.com/ansible-collections/community.general/pull/4516). -- urpmi - removed the deprecated alias ``update-cache`` of ``update_cache`` (https://github.com/ansible-collections/community.general/pull/4516). -- xbps - removed the deprecated alias ``update-cache`` of ``update_cache`` (https://github.com/ansible-collections/community.general/pull/4516). 
-- xfconf - the ``get`` state has been removed. Use the ``xfconf_info`` module instead (https://github.com/ansible-collections/community.general/pull/4516). - -Bugfixes --------- - -- Various modules and plugins - use vendored version of ``distutils.version`` instead of the deprecated Python standard library ``distutils`` (https://github.com/ansible-collections/community.general/pull/3936). -- a_module test plugin - fix crash when testing a module name that was tombstoned (https://github.com/ansible-collections/community.general/pull/3660). -- alternatives - fix output parsing for alternatives groups (https://github.com/ansible-collections/community.general/pull/3976). -- cargo - fix detection of outdated packages when ``state=latest`` (https://github.com/ansible-collections/community.general/pull/4052). -- cargo - fix incorrectly reported changed status for packages with a name containing a hyphen (https://github.com/ansible-collections/community.general/issues/4044, https://github.com/ansible-collections/community.general/pull/4052). -- consul - fixed bug where class ``ConsulService`` was overwriting the field ``checks``, preventing the addition of checks to a service (https://github.com/ansible-collections/community.general/pull/4590). -- counter_enabled callback plugin - fix output to correctly display host and task counters in serial mode (https://github.com/ansible-collections/community.general/pull/3709). -- dconf - skip processes that disappeared while we inspected them (https://github.com/ansible-collections/community.general/issues/4151). -- dnsmadeeasy - fix failure on deleting DNS entries when API response does not contain monitor value (https://github.com/ansible-collections/community.general/issues/3620). -- dsv lookup plugin - raise an Ansible error if the wrong ``python-dsv-sdk`` version is installed (https://github.com/ansible-collections/community.general/pull/4422). 
-- filesize - add support for busybox dd implementation, that is used by default on Alpine linux (https://github.com/ansible-collections/community.general/pull/4288, https://github.com/ansible-collections/community.general/issues/4259). -- gconftool2 - properly escape values when passing them to ``gconftool-2`` (https://github.com/ansible-collections/community.general/pull/4647). -- git_branch - remove deprecated and unnecessary branch ``unprotect`` method (https://github.com/ansible-collections/community.general/pull/4496). -- github_repo - ``private`` and ``description`` attributes should not be set to default values when the repo already exists (https://github.com/ansible-collections/community.general/pull/2386). -- gitlab_group - improve searching for projects inside group on deletion (https://github.com/ansible-collections/community.general/pull/4491). -- gitlab_group_members - handle more than 20 groups when finding a group (https://github.com/ansible-collections/community.general/pull/4491, https://github.com/ansible-collections/community.general/issues/4460, https://github.com/ansible-collections/community.general/issues/3729). -- gitlab_group_variable - add missing documentation about GitLab versions that support ``environment_scope`` and ``variable_type`` (https://github.com/ansible-collections/community.general/pull/4038). -- gitlab_group_variable - allow to set same variable name under different environment scopes. Due this change, the return value ``group_variable`` differs from previous version in check mode. It was counting ``updated`` values, because it was accidentally overwriting environment scopes (https://github.com/ansible-collections/community.general/pull/4038). -- gitlab_group_variable - fix idempotent change behaviour for float and integer variables (https://github.com/ansible-collections/community.general/pull/4038). 
-- gitlab_hook - avoid errors during idempotency check when an attribute does not exist (https://github.com/ansible-collections/community.general/pull/4668). -- gitlab_hook - handle more than 20 hooks when finding a hook (https://github.com/ansible-collections/community.general/pull/4491). -- gitlab_project - handle more than 20 namespaces when finding a namespace (https://github.com/ansible-collections/community.general/pull/4491). -- gitlab_project_members - handle more than 20 projects and users when finding a project resp. user (https://github.com/ansible-collections/community.general/pull/4491). -- gitlab_project_variable - ``value`` is not necessary when deleting variables (https://github.com/ansible-collections/community.general/pull/4150). -- gitlab_project_variable - add missing documentation about GitLab versions that support ``environment_scope`` and ``variable_type`` (https://github.com/ansible-collections/community.general/issues/4038). -- gitlab_project_variable - allow to set same variable name under different environment scopes. Due this change, the return value ``project_variable`` differs from previous version in check mode. It was counting ``updated`` values, because it was accidentally overwriting environment scopes (https://github.com/ansible-collections/community.general/issues/4038). -- gitlab_project_variable - fix idempotent change behaviour for float and integer variables (https://github.com/ansible-collections/community.general/issues/4038). -- gitlab_runner - make ``project`` and ``owned`` mutually exclusive (https://github.com/ansible-collections/community.general/pull/4136). -- gitlab_runner - use correct API endpoint to create and retrieve project level runners when using ``project`` (https://github.com/ansible-collections/community.general/pull/3965). -- gitlab_user - handle more than 20 users and SSH keys when finding a user resp. SSH key (https://github.com/ansible-collections/community.general/pull/4491). 
-- homebrew_cask - fix force install operation (https://github.com/ansible-collections/community.general/issues/3703). -- icinga2 inventory plugin - handle 404 error when filter produces no results (https://github.com/ansible-collections/community.general/issues/3875, https://github.com/ansible-collections/community.general/pull/3906). -- imc_rest - fixes the module failure due to the usage of ``itertools.izip_longest`` which is not available in Python 3 (https://github.com/ansible-collections/community.general/issues/4206). -- ini_file - when removing nothing do not report changed (https://github.com/ansible-collections/community.general/issues/4154). -- interfaces_file - fixed the check for existing option in interface (https://github.com/ansible-collections/community.general/issues/3841). -- jail connection plugin - replace deprecated ``distutils.spawn.find_executable`` with Ansible's ``get_bin_path`` to find the executable (https://github.com/ansible-collections/community.general/pull/3934). -- jira - fixed bug where module returns error related to dictionary key ``body`` (https://github.com/ansible-collections/community.general/issues/3419). -- keycloak - fix parameters types for ``defaultDefaultClientScopes`` and ``defaultOptionalClientScopes`` from list of dictionaries to list of strings (https://github.com/ansible-collections/community.general/pull/4526). -- keycloak_* - the documented ``validate_certs`` parameter was not taken into account when calling the ``open_url`` function in some cases, thus enforcing certificate validation even when ``validate_certs`` was set to ``false``. (https://github.com/ansible-collections/community.general/pull/4382) -- keycloak_user_federation - creating a user federation while specifying an ID (that does not exist yet) no longer fail with a 404 Not Found (https://github.com/ansible-collections/community.general/pull/4212). 
-- keycloak_user_federation - mappers auto-created by keycloak are matched and merged by their name and no longer create duplicated entries (https://github.com/ansible-collections/community.general/pull/4212). -- ldap_search - allow it to be used even in check mode (https://github.com/ansible-collections/community.general/issues/3619). -- linode inventory plugin - fix configuration handling relating to inventory filtering (https://github.com/ansible-collections/community.general/pull/4336). -- listen_ports_facts - local port regex was not handling well IPv6 only binding. Fixes the regex for ``ss`` (https://github.com/ansible-collections/community.general/pull/4092). -- lvol - allows logical volumes to be created with certain size arguments prefixed with ``+`` to preserve behavior of older versions of this module (https://github.com/ansible-collections/community.general/issues/3665). -- lxd connection plugin - replace deprecated ``distutils.spawn.find_executable`` with Ansible's ``get_bin_path`` to find the ``lxc`` executable (https://github.com/ansible-collections/community.general/pull/3934). -- lxd inventory plugin - do not crash if OS and release metadata are not present - (https://github.com/ansible-collections/community.general/pull/4351). -- mail callback plugin - fix crash on Python 3 (https://github.com/ansible-collections/community.general/issues/4025, https://github.com/ansible-collections/community.general/pull/4026). -- mail callback plugin - fix encoding of the name of sender and recipient (https://github.com/ansible-collections/community.general/issues/4060, https://github.com/ansible-collections/community.general/pull/4061). -- mksysb - fixed bug for parameter ``backup_dmapi_fs`` was passing the wrong CLI argument (https://github.com/ansible-collections/community.general/pull/3295). -- nmcli - fix returning "changed" when no mask set for IPv4 or IPv6 addresses on task rerun (https://github.com/ansible-collections/community.general/issues/3768). 
-- nmcli - fix returning "changed" when routes parameters set, also suggest new routes4 and routes6 format (https://github.com/ansible-collections/community.general/issues/4131). -- nmcli - fixed falsely reported changed status when ``mtu`` is omitted with ``dummy`` connections (https://github.com/ansible-collections/community.general/issues/3612, https://github.com/ansible-collections/community.general/pull/3625). -- nmcli - pass ``flags``, ``ingress``, ``egress`` params to ``nmcli`` (https://github.com/ansible-collections/community.general/issues/1086). -- nrdp callback plugin - fix error ``string arguments without an encoding`` (https://github.com/ansible-collections/community.general/issues/3903). -- onepassword - search all valid configuration locations and use the first found (https://github.com/ansible-collections/community.general/pull/4640). -- opennebula inventory plugin - complete the implementation of ``constructable`` for opennebula inventory plugin. Now ``keyed_groups``, ``compose``, ``groups`` actually work (https://github.com/ansible-collections/community.general/issues/4497). -- opentelemetry - fix generating a trace with a task containing ``no_log: true`` (https://github.com/ansible-collections/community.general/pull/4043). -- opentelemetry callback plugin - fix task message attribute that is reported failed regardless of the task result (https://github.com/ansible-collections/community.general/pull/4624). -- opentelemetry callback plugin - fix warning for the include_tasks (https://github.com/ansible-collections/community.general/pull/4623). -- opentelemetry_plugin - honour ``ignore_errors`` when a task has failed instead of reporting an error (https://github.com/ansible-collections/community.general/pull/3837). -- pacman - Use ``--groups`` instead of ``--group`` (https://github.com/ansible-collections/community.general/pull/4312). 
-- pacman - fix URL based package installation (https://github.com/ansible-collections/community.general/pull/4286, https://github.com/ansible-collections/community.general/issues/4285). -- pacman - fix ``upgrade=yes`` (https://github.com/ansible-collections/community.general/pull/4275, https://github.com/ansible-collections/community.general/issues/4274). -- pacman - fixed bug where ``absent`` state did not work for locally installed packages (https://github.com/ansible-collections/community.general/pull/4464). -- pacman - make sure that ``packages`` is always returned when ``name`` or ``upgrade`` is specified, also if nothing is done (https://github.com/ansible-collections/community.general/pull/4329). -- pacman - when the ``update_cache`` option is combined with another option such as ``upgrade``, report ``changed`` based on the actions performed by the latter option. This was the behavior in community.general 4.4.0 and before. In community.general 4.5.0, a task combining these options would always report ``changed`` (https://github.com/ansible-collections/community.general/pull/4318). -- passwordstore lookup plugin - fix error detection for non-English locales (https://github.com/ansible-collections/community.general/pull/4219). -- passwordstore lookup plugin - prevent returning path names as passwords by accident (https://github.com/ansible-collections/community.general/issues/4185, https://github.com/ansible-collections/community.general/pull/4192). -- passwordstore lookup plugin - replace deprecated ``distutils.util.strtobool`` with Ansible's ``convert_bool.boolean`` to interpret values for the ``create``, ``returnall``, ``overwrite``, 'backup``, and ``nosymbols`` options (https://github.com/ansible-collections/community.general/pull/3934). -- pipx - passes the correct command line option ``--include-apps`` (https://github.com/ansible-collections/community.general/issues/3791). 
-- pritunl - fixed bug where pritunl plugin api add unneeded data in ``auth_string`` parameter (https://github.com/ansible-collections/community.general/issues/4527). -- proxmox - fixed ``onboot`` parameter causing module failures when undefined (https://github.com/ansible-collections/community.general/issues/3844). -- proxmox inventory plugin - always convert strings that follow the ``key=value[,key=value[...]]`` form into dictionaries (https://github.com/ansible-collections/community.general/pull/4349). -- proxmox inventory plugin - fix error when parsing container with LXC configs (https://github.com/ansible-collections/community.general/issues/4472, https://github.com/ansible-collections/community.general/pull/4472). -- proxmox inventory plugin - fixed the ``description`` field being ignored if it contained a comma (https://github.com/ansible-collections/community.general/issues/4348). -- proxmox inventory plugin - fixed the ``tags_parsed`` field when Proxmox returns a single space for the ``tags`` entry (https://github.com/ansible-collections/community.general/pull/4378). -- proxmox_kvm - fix a bug when getting a state of VM without name will fail (https://github.com/ansible-collections/community.general/pull/4508). -- proxmox_kvm - fix error in check when creating or cloning (https://github.com/ansible-collections/community.general/pull/4306). -- proxmox_kvm - fix error when checking whether Proxmox VM exists (https://github.com/ansible-collections/community.general/pull/4287). -- python_requirements_info - fails if version operator used without version (https://github.com/ansible-collections/community.general/pull/3785). -- python_requirements_info - store ``mismatched`` return values per package as documented in the module (https://github.com/ansible-collections/community.general/pull/4078). 
-- redfish_command - the iLO4 Redfish implementation only supports the ``image_url`` parameter in the underlying API calls to ``VirtualMediaInsert`` and ``VirtualMediaEject``. Any values set (or the defaults) for ``write_protected`` or ``inserted`` will be ignored (https://github.com/ansible-collections/community.general/pull/4596). -- say callback plugin - replace deprecated ``distutils.spawn.find_executable`` with Ansible's ``get_bin_path`` to find the ``say`` resp. ``espeak`` executables (https://github.com/ansible-collections/community.general/pull/3934). -- scaleway_user_data - fix double-quote added where no double-quote is needed to user data in scaleway's server (``Content-type`` -> ``Content-Type``) (https://github.com/ansible-collections/community.general/pull/3940). -- slack - add ``charset`` to HTTP headers to avoid Slack API warning (https://github.com/ansible-collections/community.general/issues/3932). -- terraform - fix command options being ignored during planned/plan in function ``build_plan`` such as ``lock`` or ``lock_timeout`` (https://github.com/ansible-collections/community.general/issues/3707, https://github.com/ansible-collections/community.general/pull/3726). -- terraform - fix list initialization to support both Python 2 and Python 3 (https://github.com/ansible-collections/community.general/issues/4531). -- vdo - fix options error (https://github.com/ansible-collections/community.general/pull/4163). -- xattr - fix exception caused by ``_run_xattr()`` raising a ``ValueError`` due to a mishandling of base64-encoded value (https://github.com/ansible-collections/community.general/issues/3673). -- xbps - fix error message that is reported when installing packages fails (https://github.com/ansible-collections/community.general/pull/4438). -- yarn - fix incorrect handling of ``yarn list`` and ``yarn global list`` output that could result in fatal error (https://github.com/ansible-collections/community.general/pull/4050). 
-- yarn - fix incorrectly reported status when installing a package globally (https://github.com/ansible-collections/community.general/issues/4045, https://github.com/ansible-collections/community.general/pull/4050). -- yarn - fix missing ``~`` expansion in yarn global install folder which resulted in incorrect task status (https://github.com/ansible-collections/community.general/issues/4045, https://github.com/ansible-collections/community.general/pull/4048). -- yum_versionlock - fix matching of existing entries with names passed to the module. Match yum and dnf lock format (https://github.com/ansible-collections/community.general/pull/4183). -- zone connection plugin - replace deprecated ``distutils.spawn.find_executable`` with Ansible's ``get_bin_path`` to find the executable (https://github.com/ansible-collections/community.general/pull/3934). -- zypper - fix undefined variable when running in check mode (https://github.com/ansible-collections/community.general/pull/4667). -- zypper - fixed bug that caused zypper to always report [ok] and do nothing on ``state=present`` when all packages in ``name`` had a version specification (https://github.com/ansible-collections/community.general/issues/4371, https://github.com/ansible-collections/community.general/pull/4421). - -Known Issues ------------- - -- pacman - ``update_cache`` cannot differentiate between up to date and outdated package lists and will report ``changed`` in both situations (https://github.com/ansible-collections/community.general/pull/4318). -- pacman - binaries specified in the ``executable`` parameter must support ``--print-format`` in order to be used by this module. In particular, AUR helper ``yay`` is known not to currently support it (https://github.com/ansible-collections/community.general/pull/4312). 
- -New Plugins ------------ - -Filter -~~~~~~ - -- counter - Counts hashable elements in a sequence +This file is a placeholder; a version-specific ``CHANGELOG-vX.rst`` will be generated during releases from fragments +under ``changelogs/fragments``. On release branches once a release has been created, consult the branch's version-specific +file for changes that have occurred in that branch. diff --git a/CHANGELOG.rst.license b/CHANGELOG.rst.license new file mode 100644 index 0000000000..edff8c7685 --- /dev/null +++ b/CHANGELOG.rst.license @@ -0,0 +1,3 @@ +GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +SPDX-License-Identifier: GPL-3.0-or-later +SPDX-FileCopyrightText: Ansible Project diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 6ea1547f96..94c5299069 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -1,3 +1,9 @@ + + # Contributing We follow [Ansible Code of Conduct](https://docs.ansible.com/ansible/latest/community/code_of_conduct.html) in all our contributions and interactions within this repository. @@ -25,7 +31,9 @@ Also, consider taking up a valuable, reviewed, but abandoned pull request which * Try committing your changes with an informative but short commit message. * Do not squash your commits and force-push to your branch if not needed. Reviews of your pull request are much easier with individual commits to comprehend the pull request history. All commits of your pull request branch will be squashed into one commit by GitHub upon merge. * Do not add merge commits to your PR. The bot will complain and you will have to rebase ([instructions for rebasing](https://docs.ansible.com/ansible/latest/dev_guide/developing_rebasing.html)) to remove them before your PR can be merged. To avoid that git automatically does merges during pulls, you can configure it to do rebases instead by running `git config pull.rebase true` inside the repository checkout. 
-* Make sure your PR includes a [changelog fragment](https://docs.ansible.com/ansible/devel/community/development_process.html#changelogs-how-to). (You must not include a fragment for new modules or new plugins, except for test and filter plugins. Also you shouldn't include one for docs-only changes. If you're not sure, simply don't include one, we'll tell you whether one is needed or not :) ) +* Make sure your PR includes a [changelog fragment](https://docs.ansible.com/ansible/devel/community/collection_development_process.html#creating-a-changelog-fragment). + * You must not include a fragment for new modules or new plugins. Also you shouldn't include one for docs-only changes. (If you're not sure, simply don't include one, we'll tell you whether one is needed or not :) ) + * Please always include a link to the pull request itself, and if the PR is about an issue, also a link to the issue. Also make sure the fragment ends with a period, and begins with a lower-case letter after `-`. (Again, if you don't do this, we'll add suggestions to fix it, so don't worry too much :) ) * Avoid reformatting unrelated parts of the codebase in your PR. These types of changes will likely be requested for reversion, create additional work for reviewers, and may cause approval to be delayed. You can also read [our Quick-start development guide](https://github.com/ansible/community-docs/blob/main/create_pr_quick_start_guide.rst). @@ -36,7 +44,49 @@ If you want to test a PR locally, refer to [our testing guide](https://github.co If you find any inconsistencies or places in this document which can be improved, feel free to raise an issue or pull request to fix it. -## Run sanity, unit or integration tests locally +## Run sanity or unit tests locally (with antsibull-nox) + +The easiest way to run sanity and unit tests locally is to use [antsibull-nox](https://ansible.readthedocs.io/projects/antsibull-nox/). 
+(If you have [nox](https://nox.thea.codes/en/stable/) installed, it will automatically install antsibull-nox in a virtual environment for you.) + +### Sanity tests + +The following commands show how to run ansible-test sanity tests: + +```.bash +# Run basic sanity tests for all files in the collection: +nox -Re ansible-test-sanity-devel + +# Run basic sanity tests for the given files and directories: +nox -Re ansible-test-sanity-devel -- plugins/modules/system/pids.py tests/integration/targets/pids/ + +# Run all other sanity tests for all files in the collection: +nox -R +``` + +If you replace `-Re` with `-e`, or leave the `-R` away, then the virtual environments will be re-created. The `-R` re-uses them (if they already exist). + +### Unit tests + +The following commands show how to run unit tests: + +```.bash +# Run all unit tests: +nox -Re ansible-test-units-devel + +# Run all unit tests for one Python version (a lot faster): +nox -Re ansible-test-units-devel -- --python 3.13 + +# Run a specific unit test (for the nmcli module) for one Python version: +nox -Re ansible-test-units-devel -- --python 3.13 tests/unit/plugins/modules/net_tools/test_nmcli.py +``` + +If you replace `-Re` with `-e`, then the virtual environments will be re-created. The `-R` re-uses them (if they already exist). + +## Run basic sanity, unit or integration tests locally (with ansible-test) + +Instead of using antsibull-nox, you can also run sanity and unit tests with ansible-test directly. +This also allows you to run integration tests. You have to check out the repository into a specific path structure to be able to run `ansible-test`. The path to the git checkout must end with `.../ansible_collections/community/general`. Please see [our testing guide](https://github.com/ansible/community-docs/blob/main/test_pr_locally_guide.rst) for instructions on how to check out the repository into a correct path structure. 
The short version of these instructions is: @@ -48,16 +98,27 @@ cd ~/dev/ansible_collections/community/general Then you can run `ansible-test` (which is a part of [ansible-core](https://pypi.org/project/ansible-core/)) inside the checkout. The following example commands expect that you have installed Docker or Podman. Note that Podman has only been supported by more recent ansible-core releases. If you are using Docker, the following will work with Ansible 2.9+. -The following commands show how to run sanity tests: +### Basic sanity tests + +The following commands show how to run basic sanity tests: ```.bash -# Run sanity tests for all files in the collection: +# Run basic sanity tests for all files in the collection: ansible-test sanity --docker -v -# Run sanity tests for the given files and directories: +# Run basic sanity tests for the given files and directories: ansible-test sanity --docker -v plugins/modules/system/pids.py tests/integration/targets/pids/ ``` +### Unit tests + +Note that for running unit tests, you need to install required collections in the same folder structure that `community.general` is checked out in. +Right now, you need to install [`community.internal_test_tools`](https://github.com/ansible-collections/community.internal_test_tools). +If you want to use the latest version from GitHub, you can run: +``` +git clone https://github.com/ansible-collections/community.internal_test_tools.git ~/dev/ansible_collections/community/internal_test_tools +``` + The following commands show how to run unit tests: ```.bash @@ -71,13 +132,42 @@ ansible-test units --docker -v --python 3.8 ansible-test units --docker -v --python 3.8 tests/unit/plugins/modules/net_tools/test_nmcli.py ``` +### Integration tests + +Note that for running integration tests, you need to install required collections in the same folder structure that `community.general` is checked out in. 
+Right now, depending on the test, you need to install [`ansible.posix`](https://github.com/ansible-collections/ansible.posix), [`community.crypto`](https://github.com/ansible-collections/community.crypto), and [`community.docker`](https://github.com/ansible-collections/community.docker). +If you want to use the latest versions from GitHub, you can run: +``` +mkdir -p ~/dev/ansible_collections/ansible +git clone https://github.com/ansible-collections/ansible.posix.git ~/dev/ansible_collections/ansible/posix +git clone https://github.com/ansible-collections/community.crypto.git ~/dev/ansible_collections/community/crypto +git clone https://github.com/ansible-collections/community.docker.git ~/dev/ansible_collections/community/docker +``` + The following commands show how to run integration tests: -```.bash -# Run integration tests for the interfaces_files module in a Docker container using the -# fedora35 operating system image (the supported images depend on your ansible-core version): -ansible-test integration --docker fedora35 -v interfaces_file +#### In Docker +Integration tests on Docker have the following parameters: +- `image_name` (required): The name of the Docker image. To get the list of supported Docker images, run + `ansible-test integration --help` and look for _target docker images_. +- `test_name` (optional): The name of the integration test. + For modules, this equals the short name of the module; for example, `pacman` in case of `community.general.pacman`. + For plugins, the plugin type is added before the plugin's short name, for example `callback_yaml` for the `community.general.yaml` callback. 
+```.bash +# Test all plugins/modules on fedora40 +ansible-test integration -v --docker fedora40 + +# Template +ansible-test integration -v --docker image_name test_name + +# Example community.general.ini_file module on fedora40 Docker image: +ansible-test integration -v --docker fedora40 ini_file +``` + +#### Without isolation + +```.bash # Run integration tests for the flattened lookup **without any isolation**: ansible-test integration -v lookup_flattened ``` @@ -106,38 +196,12 @@ Creating new modules and plugins requires a bit more work than other Pull Reques - Make sure that new plugins and modules have tests (unit tests, integration tests, or both); it is preferable to have some tests which run in CI. -4. For modules and action plugins, make sure to create your module/plugin in the correct subdirectory, and add a redirect entry - in `meta/runtime.yml`. For example, for the `aerospike_migrations` module located in - `plugins/modules/database/aerospike/aerospike_migrations.py`, you need to create the following entry: - ```.yaml - aerospike_migrations: - redirect: community.general.database.aerospike.aerospike_migrations - ``` - Here, the relative path `database/aerospike/` is inserted into the module's FQCN (Fully Qualified Collection Name) after the - collection's name and before the module's name. This must not be done for other plugin types but modules and action plugins! - - - Action plugins need to be accompanied by a module, even if the module file only contains documentation - (`DOCUMENTATION`, `EXAMPLES` and `RETURN`). The module must have the same name and directory path in `plugins/modules/` - than the action plugin has in `plugins/action/`. +4. Action plugins need to be accompanied by a module, even if the module file only contains documentation + (`DOCUMENTATION`, `EXAMPLES` and `RETURN`). The module must have the same name and directory path in `plugins/modules/` + as the action plugin has in `plugins/action/`. 5.
Make sure to add a BOTMETA entry for your new module/plugin in `.github/BOTMETA.yml`. Search for other plugins/modules in the same directory to see how entries could look. You should list all authors either as `maintainers` or under `ignore`. People listed as `maintainers` will be pinged for new issues and PRs that modify the module/plugin or its tests. When you add a new plugin/module, we expect that you perform maintainer duty for at least some time after contributing it. - -## pre-commit - -To help ensure high-quality contributions this repository includes a [pre-commit](https://pre-commit.com) configuration which -corrects and tests against common issues that would otherwise cause CI to fail. To begin using these pre-commit hooks see -the [Installation](#installation) section below. - -This is optional and not required to contribute to this repository. - -### Installation - -Follow the [instructions](https://pre-commit.com/#install) provided with pre-commit and run `pre-commit install` under the repository base. If for any reason you would like to disable the pre-commit hooks run `pre-commit uninstall`. - -This is optional to run it locally. - -You can trigger it locally with `pre-commit run --all-files` or even to run only for a given file `pre-commit run --files YOUR_FILE`. diff --git a/LICENSES/BSD-2-Clause.txt b/LICENSES/BSD-2-Clause.txt new file mode 100644 index 0000000000..6810e04e32 --- /dev/null +++ b/LICENSES/BSD-2-Clause.txt @@ -0,0 +1,8 @@ +Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: + +1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. + +2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 
+ +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + diff --git a/LICENSES/GPL-3.0-or-later.txt b/LICENSES/GPL-3.0-or-later.txt new file mode 120000 index 0000000000..012065c853 --- /dev/null +++ b/LICENSES/GPL-3.0-or-later.txt @@ -0,0 +1 @@ +../COPYING \ No newline at end of file diff --git a/LICENSES/MIT.txt b/LICENSES/MIT.txt new file mode 100644 index 0000000000..2071b23b0e --- /dev/null +++ b/LICENSES/MIT.txt @@ -0,0 +1,9 @@ +MIT License + +Copyright (c) + +Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/LICENSES/PSF-2.0.txt b/LICENSES/PSF-2.0.txt new file mode 100644 index 0000000000..35acd7fb5f --- /dev/null +++ b/LICENSES/PSF-2.0.txt @@ -0,0 +1,48 @@ +PYTHON SOFTWARE FOUNDATION LICENSE VERSION 2 +-------------------------------------------- + +1. This LICENSE AGREEMENT is between the Python Software Foundation +("PSF"), and the Individual or Organization ("Licensee") accessing and +otherwise using this software ("Python") in source or binary form and +its associated documentation. + +2. Subject to the terms and conditions of this License Agreement, PSF hereby +grants Licensee a nonexclusive, royalty-free, world-wide license to reproduce, +analyze, test, perform and/or display publicly, prepare derivative works, +distribute, and otherwise use Python alone or in any derivative version, +provided, however, that PSF's License Agreement and PSF's notice of copyright, +i.e., "Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, +2011, 2012, 2013, 2014, 2015, 2016, 2017, 2018, 2019, 2020, 2021 Python Software Foundation; +All Rights Reserved" are retained in Python alone or in any derivative version +prepared by Licensee. + +3. In the event Licensee prepares a derivative work that is based on +or incorporates Python or any part thereof, and wants to make +the derivative work available to others as provided herein, then +Licensee hereby agrees to include in any such work a brief summary of +the changes made to Python. + +4. PSF is making Python available to Licensee on an "AS IS" +basis. PSF MAKES NO REPRESENTATIONS OR WARRANTIES, EXPRESS OR +IMPLIED. 
BY WAY OF EXAMPLE, BUT NOT LIMITATION, PSF MAKES NO AND +DISCLAIMS ANY REPRESENTATION OR WARRANTY OF MERCHANTABILITY OR FITNESS +FOR ANY PARTICULAR PURPOSE OR THAT THE USE OF PYTHON WILL NOT +INFRINGE ANY THIRD PARTY RIGHTS. + +5. PSF SHALL NOT BE LIABLE TO LICENSEE OR ANY OTHER USERS OF PYTHON +FOR ANY INCIDENTAL, SPECIAL, OR CONSEQUENTIAL DAMAGES OR LOSS AS +A RESULT OF MODIFYING, DISTRIBUTING, OR OTHERWISE USING PYTHON, +OR ANY DERIVATIVE THEREOF, EVEN IF ADVISED OF THE POSSIBILITY THEREOF. + +6. This License Agreement will automatically terminate upon a material +breach of its terms and conditions. + +7. Nothing in this License Agreement shall be deemed to create any +relationship of agency, partnership, or joint venture between PSF and +Licensee. This License Agreement does not grant permission to use PSF +trademarks or trade name in a trademark sense to endorse or promote +products or services of Licensee, or any third party. + +8. By copying, installing or otherwise using Python, Licensee +agrees to be bound by the terms and conditions of this License +Agreement. 
diff --git a/README.md b/README.md index 325456e994..726d9cb872 100644 --- a/README.md +++ b/README.md @@ -1,7 +1,17 @@ + + # Community General Collection -[![Build Status](https://dev.azure.com/ansible/community.general/_apis/build/status/CI?branchName=stable-5)](https://dev.azure.com/ansible/community.general/_build?definitionId=31) +[![Documentation](https://img.shields.io/badge/docs-brightgreen.svg)](https://docs.ansible.com/ansible/devel/collections/community/general/) +[![Build Status](https://dev.azure.com/ansible/community.general/_apis/build/status/CI?branchName=main)](https://dev.azure.com/ansible/community.general/_build?definitionId=31) +[![EOL CI](https://github.com/ansible-collections/community.general/actions/workflows/ansible-test.yml/badge.svg?branch=main)](https://github.com/ansible-collections/community.general/actions) +[![Nox CI](https://github.com/ansible-collections/community.general/actions/workflows/nox.yml/badge.svg?branch=main)](https://github.com/ansible-collections/community.general/actions) [![Codecov](https://img.shields.io/codecov/c/github/ansible-collections/community.general)](https://codecov.io/gh/ansible-collections/community.general) +[![REUSE status](https://api.reuse.software/badge/github.com/ansible-collections/community.general)](https://api.reuse.software/info/github.com/ansible-collections/community.general) This repository contains the `community.general` Ansible Collection. The collection is a part of the Ansible package and includes many modules and plugins supported by Ansible community which are not part of more specialized community collections. 
@@ -15,11 +25,21 @@ We follow [Ansible Code of Conduct](https://docs.ansible.com/ansible/latest/comm If you encounter abusive behavior violating the [Ansible Code of Conduct](https://docs.ansible.com/ansible/latest/community/code_of_conduct.html), please refer to the [policy violations](https://docs.ansible.com/ansible/latest/community/code_of_conduct.html#policy-violations) section of the Code of Conduct for information on how to raise a complaint. +## Communication + +* Join the Ansible forum: + * [Get Help](https://forum.ansible.com/c/help/6): get help or help others. This is for questions about modules or plugins in the collection. Please add appropriate tags if you start new discussions. + * [Tag `community-general`](https://forum.ansible.com/tag/community-general): discuss the *collection itself*, instead of specific modules or plugins. + * [Social Spaces](https://forum.ansible.com/c/chat/4): gather and interact with fellow enthusiasts. + * [News & Announcements](https://forum.ansible.com/c/news/5): track project-wide announcements including social events. + +* The Ansible [Bullhorn newsletter](https://docs.ansible.com/ansible/devel/community/communication.html#the-bullhorn): used to announce releases and important changes. + +For more information about communication, see the [Ansible communication guide](https://docs.ansible.com/ansible/devel/community/communication.html). + ## Tested with Ansible -Tested with the current ansible-core 2.11, ansible-core 2.12, ansible-core 2.13 releases and the current development version of ansible-core. Ansible-core versions before 2.11.0 are not supported. This includes all ansible-base 2.10 and Ansible 2.9 releases. - -Parts of this collection will not work with ansible-core 2.11 on Python 3.12+. +Tested with the current ansible-core 2.17, ansible-core 2.18, ansible-core 2.19, ansible-core 2.20 releases and the current development version of ansible-core. Ansible-core versions before 2.17.0 are not supported. 
This includes all ansible-base 2.10 and Ansible 2.9 releases. ## External requirements @@ -27,13 +47,13 @@ Some modules and plugins require external libraries. Please check the requiremen ## Included content -Please check the included content on the [Ansible Galaxy page for this collection](https://galaxy.ansible.com/community/general) or the [documentation on the Ansible docs site](https://docs.ansible.com/ansible/latest/collections/community/general/). +Please check the included content on the [Ansible Galaxy page for this collection](https://galaxy.ansible.com/ui/repo/published/community/general/) or the [documentation on the Ansible docs site](https://docs.ansible.com/ansible/latest/collections/community/general/). ## Using this collection This collection is shipped with the Ansible package. So if you have it installed, no more action is required. -If you have a minimal installation (only Ansible Core installed) or you want to use the latest version of the collection along with the whole Ansible package, you need to install the collection from [Ansible Galaxy](https://galaxy.ansible.com/community/general) manually with the `ansible-galaxy` command-line tool: +If you have a minimal installation (only Ansible Core installed) or you want to use the latest version of the collection along with the whole Ansible package, you need to install the collection from [Ansible Galaxy](https://galaxy.ansible.com/ui/repo/published/community/general/) manually with the `ansible-galaxy` command-line tool: ansible-galaxy collection install community.general @@ -50,7 +70,7 @@ Note that if you install the collection manually, it will not be upgraded automa ansible-galaxy collection install community.general --upgrade ``` -You can also install a specific version of the collection, for example, if you need to downgrade when something is broken in the latest version (please report an issue in this repository). 
Use the following syntax where `X.Y.Z` can be any [available version](https://galaxy.ansible.com/community/general): +You can also install a specific version of the collection, for example, if you need to downgrade when something is broken in the latest version (please report an issue in this repository). Use the following syntax where `X.Y.Z` can be any [available version](https://galaxy.ansible.com/ui/repo/published/community/general/): ```bash ansible-galaxy collection install community.general:==X.Y.Z @@ -92,25 +112,13 @@ It is necessary for maintainers of this collection to be subscribed to: They also should be subscribed to Ansible's [The Bullhorn newsletter](https://docs.ansible.com/ansible/devel/community/communication.html#the-bullhorn). -## Communication - -We announce important development changes and releases through Ansible's [The Bullhorn newsletter](https://eepurl.com/gZmiEP). If you are a collection developer, be sure you are subscribed. - -Join us in the `#ansible` (general use questions and support), `#ansible-community` (community and collection development questions), and other [IRC channels](https://docs.ansible.com/ansible/devel/community/communication.html#irc-channels) on [Libera.chat](https://libera.chat). - -We take part in the global quarterly [Ansible Contributor Summit](https://github.com/ansible/community/wiki/Contributor-Summit) virtually or in-person. Track [The Bullhorn newsletter](https://eepurl.com/gZmiEP) and join us. - -For more information about communities, meetings and agendas see [Community Wiki](https://github.com/ansible/community/wiki/Community). - -For more information about communication, refer to Ansible's the [Communication guide](https://docs.ansible.com/ansible/devel/community/communication.html). - ## Publishing New Version See the [Releasing guidelines](https://github.com/ansible/community-docs/blob/main/releasing_collections.rst) to learn how to release this collection. 
## Release notes -See the [changelog](https://github.com/ansible-collections/community.general/blob/stable-5/CHANGELOG.rst). +See the [changelog](https://github.com/ansible-collections/community.general/blob/main/CHANGELOG.md). ## Roadmap @@ -127,6 +135,10 @@ See [this issue](https://github.com/ansible-collections/community.general/issues ## Licensing -GNU General Public License v3.0 or later. +This collection is primarily licensed and distributed as a whole under the GNU General Public License v3.0 or later. -See [COPYING](https://www.gnu.org/licenses/gpl-3.0.txt) to see the full text. +See [LICENSES/GPL-3.0-or-later.txt](https://github.com/ansible-collections/community.general/blob/main/COPYING) for the full text. + +Parts of the collection are licensed under the [BSD 2-Clause license](https://github.com/ansible-collections/community.general/blob/main/LICENSES/BSD-2-Clause.txt), the [MIT license](https://github.com/ansible-collections/community.general/blob/main/LICENSES/MIT.txt), and the [PSF 2.0 license](https://github.com/ansible-collections/community.general/blob/main/LICENSES/PSF-2.0.txt). + +All files have a machine readable `SPDX-License-Identifier:` comment denoting its respective license(s) or an equivalent entry in an accompanying `.license` file. Only changelog fragments (which will not be part of a release) are covered by a blanket statement in `REUSE.toml`. This conforms to the [REUSE specification](https://reuse.software/spec/).
diff --git a/REUSE.toml b/REUSE.toml new file mode 100644 index 0000000000..ff95bb8217 --- /dev/null +++ b/REUSE.toml @@ -0,0 +1,11 @@ +# Copyright (c) Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +version = 1 + +[[annotations]] +path = "changelogs/fragments/**" +precedence = "aggregate" +SPDX-FileCopyrightText = "Ansible Project" +SPDX-License-Identifier = "GPL-3.0-or-later" diff --git a/antsibull-nox.toml b/antsibull-nox.toml new file mode 100644 index 0000000000..735d572599 --- /dev/null +++ b/antsibull-nox.toml @@ -0,0 +1,99 @@ +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later +# SPDX-FileCopyrightText: 2025 Felix Fontein + +[collection_sources] +"ansible.posix" = "git+https://github.com/ansible-collections/ansible.posix.git,main" +"community.crypto" = "git+https://github.com/ansible-collections/community.crypto.git,main" +"community.docker" = "git+https://github.com/ansible-collections/community.docker.git,main" +"community.internal_test_tools" = "git+https://github.com/ansible-collections/community.internal_test_tools.git,main" + +[collection_sources_per_ansible.'2.16'] +# community.crypto's main branch needs ansible-core >= 2.17 +"community.crypto" = "git+https://github.com/ansible-collections/community.crypto.git,stable-2" + +[vcs] +vcs = "git" +development_branch = "main" +stable_branches = [ "stable-*" ] + +[sessions] + +[sessions.lint] +run_isort = false +run_black = false +run_flake8 = false +run_pylint = false +run_yamllint = true +yamllint_config = ".yamllint" +# yamllint_config_plugins = ".yamllint-docs" +# yamllint_config_plugins_examples = ".yamllint-examples" +run_mypy = false + +[sessions.docs_check] +validate_collection_refs="all" +codeblocks_restrict_types = [ + "ansible-output", + "console", + "ini", + 
"json", + "python", + "shell", + "yaml", + "yaml+jinja", + "text", +] +codeblocks_restrict_type_exact_case = true +codeblocks_allow_without_type = false +codeblocks_allow_literal_blocks = false + +[sessions.license_check] + +[sessions.extra_checks] +run_no_unwanted_files = true +no_unwanted_files_module_extensions = [".py"] +no_unwanted_files_yaml_extensions = [".yml"] +run_action_groups = true +run_no_trailing_whitespace = true +no_trailing_whitespace_skip_paths = [ + "tests/integration/targets/iso_extract/files/test.iso", + "tests/integration/targets/java_cert/files/testpkcs.p12", + "tests/integration/targets/one_host/files/testhost/tmp/opennebula-fixtures.json.gz", + "tests/integration/targets/one_template/files/testhost/tmp/opennebula-fixtures.json.gz", + "tests/integration/targets/setup_flatpak_remote/files/repo.tar.xz", +] +no_trailing_whitespace_skip_directories = [ + "tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/", + "tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/input/", +] + +[[sessions.extra_checks.action_groups_config]] +name = "consul" +pattern = "^consul_.*$" +exclusions = [ + "consul_acl_bootstrap", + "consul_kv", +] +doc_fragment = "community.general.consul.actiongroup_consul" + +[[sessions.extra_checks.action_groups_config]] +name = "keycloak" +pattern = "^keycloak_.*$" +exclusions = [ + "keycloak_realm_info", +] +doc_fragment = "community.general.keycloak.actiongroup_keycloak" + +[[sessions.extra_checks.action_groups_config]] +name = "scaleway" +pattern = "^scaleway_.*$" +doc_fragment = "community.general.scaleway.actiongroup_scaleway" + +[sessions.build_import_check] +run_galaxy_importer = true + +[sessions.ansible_test_sanity] +include_devel = true + +[sessions.ansible_test_units] +include_devel = true diff --git a/changelogs/.gitignore b/changelogs/.gitignore index 6be6b5331d..3d7ad8262c 100644 --- a/changelogs/.gitignore +++ b/changelogs/.gitignore @@ -1 +1,5 @@ +# Copyright (c) 
Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + /.plugin-cache.yaml diff --git a/changelogs/changelog.yaml b/changelogs/changelog.yaml index 5cef13733e..f8129d5d73 100644 --- a/changelogs/changelog.yaml +++ b/changelogs/changelog.yaml @@ -1,793 +1,3 @@ -ancestor: 4.0.0 -releases: - 5.0.0: - changes: - breaking_changes: - - lists_mergeby and groupby_as_dict filter plugins - adjust filter plugin filename. - This change is not visible to end-users, it only affects possible other collections - importing Python paths (https://github.com/ansible-collections/community.general/pull/4625). - - yarn - remove unsupported and unnecessary ``--no-emoji`` flag (https://github.com/ansible-collections/community.general/pull/4662). - bugfixes: - - consul - fixed bug where class ``ConsulService`` was overwriting the field - ``checks``, preventing the addition of checks to a service (https://github.com/ansible-collections/community.general/pull/4590). - - gconftool2 - properly escape values when passing them to ``gconftool-2`` (https://github.com/ansible-collections/community.general/pull/4647). - - gitlab_hook - avoid errors during idempotency check when an attribute does - not exist (https://github.com/ansible-collections/community.general/pull/4668). - - onepassword - search all valid configuration locations and use the first found - (https://github.com/ansible-collections/community.general/pull/4640). - - opentelemetry callback plugin - fix task message attribute that is reported - failed regardless of the task result (https://github.com/ansible-collections/community.general/pull/4624). - - opentelemetry callback plugin - fix warning for the include_tasks (https://github.com/ansible-collections/community.general/pull/4623). 
- - redfish_command - the iLO4 Redfish implementation only supports the ``image_url`` - parameter in the underlying API calls to ``VirtualMediaInsert`` and ``VirtualMediaEject``. - Any values set (or the defaults) for ``write_protected`` or ``inserted`` will - be ignored (https://github.com/ansible-collections/community.general/pull/4596). - - terraform - fix list initialization to support both Python 2 and Python 3 - (https://github.com/ansible-collections/community.general/issues/4531). - - zypper - fix undefined variable when running in check mode (https://github.com/ansible-collections/community.general/pull/4667). - deprecated_features: - - ansible_galaxy_install - deprecated support for ``ansible`` 2.9 and ``ansible-base`` - 2.10 (https://github.com/ansible-collections/community.general/pull/4601). - - dig lookup plugin - the ``DLV`` record type has been decommissioned in 2017 - and support for it will be removed from community.general 6.0.0 (https://github.com/ansible-collections/community.general/pull/4618). - minor_changes: - - ModuleHelper module utils - ``ModuleHelperBase` now delegates the attributes - ``check_mode``, ``get_bin_path``, ``warn``, and ``deprecate`` to the underlying - ``AnsibleModule`` instance (https://github.com/ansible-collections/community.general/pull/4600). - - ModuleHelper module utils - ``ModuleHelperBase`` now has a convenience method - ``do_raise`` (https://github.com/ansible-collections/community.general/pull/4660). - - ipa_dnsrecord - add new argument ``record_values``, mutually exclusive to - ``record_value``, which supports multiple values for one record (https://github.com/ansible-collections/community.general/pull/4578). - - pritunl_user - add ``mac_addresses`` parameter (https://github.com/ansible-collections/community.general/pull/4535). - - rax_files_objects - minor refactoring improving code quality (https://github.com/ansible-collections/community.general/pull/4649). 
- - redfish_* modules - the contents of ``@Message.ExtendedInfo`` will be returned - as a string in the event that ``@Message.ExtendedInfo.Messages`` does not - exist. This is likely more useful than the standard HTTP error (https://github.com/ansible-collections/community.general/pull/4596). - - to_time_unit filter plugins - the time filters has been extended to also allow - ``0`` as input (https://github.com/ansible-collections/community.general/pull/4612). - - vmadm - minor refactoring and improvement on the module (https://github.com/ansible-collections/community.general/pull/4581). - - vmadm - minor refactoring and improvement on the module (https://github.com/ansible-collections/community.general/pull/4648). - - zfs - minor refactoring in the code (https://github.com/ansible-collections/community.general/pull/4650). - release_summary: This is release 5.0.0 of ``community.general``, released on - 2022-05-17. - fragments: - - 4065-onepassword-config.yml - - 4535-pritunl-add-mac_addresses-parameter.yml - - 4578-ipa_dnsrecord-add_multiple_record_support.yml - - 4581-vmadm-improvements.yaml - - 4590-consul-fix-service-checks.yaml - - 4595-fix-VirtualMediaInsert-iLO4.yml - - 4600-mh-delegate.yaml - - 4601-ansible-galaxy-install-deprecate-ansible29-and-210.yaml - - 4612-time_filter_zero.yml - - 4618-dig-dlv.yml - - 4621-terraform-py2-compat.yml - - 4623-opentelemetry_bug_fix_include_tasks.yml - - 4624-opentelemetry_bug_fix_hardcoded_value.yml - - 4625-fix-filter-filenames.yml - - 4647-gconftool2-command-arg.yaml - - 4648-vmadm-improvements-2.yaml - - 4649-rax-files-objects-improvements.yaml - - 4650-zfs-improvements.yaml - - 4651-zypper-checkmode-fix.yaml - - 4660-mh-added-do-raise.yaml - - 4662-yarn-emoji.yml - - 4668-gitlab_hook-use-None-for-non-existent-attr.yml - - 5.0.0.yml - release_date: '2022-05-17' - 5.0.0-a1: - changes: - breaking_changes: - - Parts of this collection do not work with ansible-core 2.11 on Python 3.12+. 
- Please either upgrade to ansible-core 2.12+, or use Python 3.11 or earlier - (https://github.com/ansible-collections/community.general/pull/3988). - - The symbolic links used to implement flatmapping for all modules were removed - and replaced by ``meta/runtime.yml`` redirects. This effectively breaks compatibility - with Ansible 2.9 for all modules (without using their "long" names, which - is discouraged and which can change without previous notice since they are - considered an implementation detail) (https://github.com/ansible-collections/community.general/pull/4548). - - a_module test plugin - remove Ansible 2.9 compatibility code (https://github.com/ansible-collections/community.general/pull/4548). - - archive - remove Ansible 2.9 compatibility code (https://github.com/ansible-collections/community.general/pull/4548). - - git_config - remove Ansible 2.9 and early ansible-base 2.10 compatibility - code (https://github.com/ansible-collections/community.general/pull/4548). - - java_keystore - remove Ansible 2.9 compatibility code (https://github.com/ansible-collections/community.general/pull/4548). - - lists_mergeby filter plugin - remove Ansible 2.9 compatibility code (https://github.com/ansible-collections/community.general/pull/4548). - - maven_artifact - remove Ansible 2.9 compatibility code (https://github.com/ansible-collections/community.general/pull/4548). - - memcached cache plugin - remove Ansible 2.9 compatibility code (https://github.com/ansible-collections/community.general/pull/4548). - - path_join filter plugin shim - remove Ansible 2.9 compatibility code (https://github.com/ansible-collections/community.general/pull/4548). - - redis cache plugin - remove Ansible 2.9 compatibility code (https://github.com/ansible-collections/community.general/pull/4548). 
- bugfixes: - - Various modules and plugins - use vendored version of ``distutils.version`` - instead of the deprecated Python standard library ``distutils`` (https://github.com/ansible-collections/community.general/pull/3936). - - a_module test plugin - fix crash when testing a module name that was tombstoned - (https://github.com/ansible-collections/community.general/pull/3660). - - alternatives - fix output parsing for alternatives groups (https://github.com/ansible-collections/community.general/pull/3976). - - cargo - fix detection of outdated packages when ``state=latest`` (https://github.com/ansible-collections/community.general/pull/4052). - - cargo - fix incorrectly reported changed status for packages with a name containing - a hyphen (https://github.com/ansible-collections/community.general/issues/4044, - https://github.com/ansible-collections/community.general/pull/4052). - - counter_enabled callback plugin - fix output to correctly display host and - task counters in serial mode (https://github.com/ansible-collections/community.general/pull/3709). - - dconf - skip processes that disappeared while we inspected them (https://github.com/ansible-collections/community.general/issues/4151). - - dnsmadeeasy - fix failure on deleting DNS entries when API response does not - contain monitor value (https://github.com/ansible-collections/community.general/issues/3620). - - dsv lookup plugin - raise an Ansible error if the wrong ``python-dsv-sdk`` - version is installed (https://github.com/ansible-collections/community.general/pull/4422). - - filesize - add support for busybox dd implementation, that is used by default - on Alpine linux (https://github.com/ansible-collections/community.general/pull/4288, - https://github.com/ansible-collections/community.general/issues/4259). - - git_branch - remove deprecated and unnecessary branch ``unprotect`` method - (https://github.com/ansible-collections/community.general/pull/4496). 
- - github_repo - ``private`` and ``description`` attributes should not be set - to default values when the repo already exists (https://github.com/ansible-collections/community.general/pull/2386). - - 'gitlab_group - improve searching for projects inside group on deletion (https://github.com/ansible-collections/community.general/pull/4491). - - ' - - 'gitlab_group_members - handle more than 20 groups when finding a group (https://github.com/ansible-collections/community.general/pull/4491, - https://github.com/ansible-collections/community.general/issues/4460, https://github.com/ansible-collections/community.general/issues/3729). - - ' - - gitlab_group_variable - add missing documentation about GitLab versions that - support ``environment_scope`` and ``variable_type`` (https://github.com/ansible-collections/community.general/pull/4038). - - 'gitlab_group_variable - allow to set same variable name under different environment - scopes. Due this change, the return value ``group_variable`` differs from - previous version in check mode. It was counting ``updated`` values, because - it was accidentally overwriting environment scopes (https://github.com/ansible-collections/community.general/pull/4038). - - ' - - gitlab_group_variable - fix idempotent change behaviour for float and integer - variables (https://github.com/ansible-collections/community.general/pull/4038). - - 'gitlab_hook - handle more than 20 hooks when finding a hook (https://github.com/ansible-collections/community.general/pull/4491). - - ' - - 'gitlab_project - handle more than 20 namespaces when finding a namespace - (https://github.com/ansible-collections/community.general/pull/4491). - - ' - - 'gitlab_project_members - handle more than 20 projects and users when finding - a project resp. user (https://github.com/ansible-collections/community.general/pull/4491). 
- - ' - - gitlab_project_variable - ``value`` is not necessary when deleting variables - (https://github.com/ansible-collections/community.general/pull/4150). - - gitlab_project_variable - add missing documentation about GitLab versions - that support ``environment_scope`` and ``variable_type`` (https://github.com/ansible-collections/community.general/issues/4038). - - 'gitlab_project_variable - allow to set same variable name under different - environment scopes. Due to this change, the return value ``project_variable`` - differs from previous version in check mode. It was counting ``updated`` values, - because it was accidentally overwriting environment scopes (https://github.com/ansible-collections/community.general/issues/4038). - - ' - - gitlab_project_variable - fix idempotent change behaviour for float and integer - variables (https://github.com/ansible-collections/community.general/issues/4038). - - gitlab_runner - make ``project`` and ``owned`` mutually exclusive (https://github.com/ansible-collections/community.general/pull/4136). - - gitlab_runner - use correct API endpoint to create and retrieve project level - runners when using ``project`` (https://github.com/ansible-collections/community.general/pull/3965). - - 'gitlab_user - handle more than 20 users and SSH keys when finding a user - resp. SSH key (https://github.com/ansible-collections/community.general/pull/4491). - - ' - - homebrew_cask - fix force install operation (https://github.com/ansible-collections/community.general/issues/3703). - - icinga2 inventory plugin - handle 404 error when filter produces no results - (https://github.com/ansible-collections/community.general/issues/3875, https://github.com/ansible-collections/community.general/pull/3906). - - imc_rest - fixes the module failure due to the usage of ``itertools.izip_longest`` - which is not available in Python 3 (https://github.com/ansible-collections/community.general/issues/4206). 
- - ini_file - when removing nothing do not report changed (https://github.com/ansible-collections/community.general/issues/4154). - - interfaces_file - fixed the check for existing option in interface (https://github.com/ansible-collections/community.general/issues/3841). - - jail connection plugin - replace deprecated ``distutils.spawn.find_executable`` - with Ansible's ``get_bin_path`` to find the executable (https://github.com/ansible-collections/community.general/pull/3934). - - jira - fixed bug where module returns error related to dictionary key ``body`` - (https://github.com/ansible-collections/community.general/issues/3419). - - keycloak - fix parameters types for ``defaultDefaultClientScopes`` and ``defaultOptionalClientScopes`` - from list of dictionaries to list of strings (https://github.com/ansible-collections/community.general/pull/4526). - - keycloak_* - the documented ``validate_certs`` parameter was not taken into - account when calling the ``open_url`` function in some cases, thus enforcing - certificate validation even when ``validate_certs`` was set to ``false``. - (https://github.com/ansible-collections/community.general/pull/4382) - - keycloak_user_federation - creating a user federation while specifying an - ID (that does not exist yet) no longer fail with a 404 Not Found (https://github.com/ansible-collections/community.general/pull/4212). - - keycloak_user_federation - mappers auto-created by keycloak are matched and - merged by their name and no longer create duplicated entries (https://github.com/ansible-collections/community.general/pull/4212). - - ldap_search - allow it to be used even in check mode (https://github.com/ansible-collections/community.general/issues/3619). - - linode inventory plugin - fix configuration handling relating to inventory - filtering (https://github.com/ansible-collections/community.general/pull/4336). - - listen_ports_facts - local port regex was not handling well IPv6 only binding. 
- Fixes the regex for ``ss`` (https://github.com/ansible-collections/community.general/pull/4092). - - lvol - allows logical volumes to be created with certain size arguments prefixed - with ``+`` to preserve behavior of older versions of this module (https://github.com/ansible-collections/community.general/issues/3665). - - lxd connection plugin - replace deprecated ``distutils.spawn.find_executable`` - with Ansible's ``get_bin_path`` to find the ``lxc`` executable (https://github.com/ansible-collections/community.general/pull/3934). - - 'lxd inventory plugin - do not crash if OS and release metadata are not present - - (https://github.com/ansible-collections/community.general/pull/4351). - - ' - - mail callback plugin - fix crash on Python 3 (https://github.com/ansible-collections/community.general/issues/4025, - https://github.com/ansible-collections/community.general/pull/4026). - - mail callback plugin - fix encoding of the name of sender and recipient (https://github.com/ansible-collections/community.general/issues/4060, - https://github.com/ansible-collections/community.general/pull/4061). - - mksysb - fixed bug for parameter ``backup_dmapi_fs`` was passing the wrong - CLI argument (https://github.com/ansible-collections/community.general/pull/3295). - - nmcli - fix returning "changed" when no mask set for IPv4 or IPv6 addresses - on task rerun (https://github.com/ansible-collections/community.general/issues/3768). - - nmcli - fix returning "changed" when routes parameters set, also suggest new - routes4 and routes6 format (https://github.com/ansible-collections/community.general/issues/4131). - - nmcli - fixed falsely reported changed status when ``mtu`` is omitted with - ``dummy`` connections (https://github.com/ansible-collections/community.general/issues/3612, - https://github.com/ansible-collections/community.general/pull/3625). 
- - nmcli - pass ``flags``, ``ingress``, ``egress`` params to ``nmcli`` (https://github.com/ansible-collections/community.general/issues/1086). - - nrdp callback plugin - fix error ``string arguments without an encoding`` - (https://github.com/ansible-collections/community.general/issues/3903). - - opennebula inventory plugin - complete the implementation of ``constructable`` - for opennebula inventory plugin. Now ``keyed_groups``, ``compose``, ``groups`` - actually work (https://github.com/ansible-collections/community.general/issues/4497). - - 'opentelemetry - fix generating a trace with a task containing ``no_log: true`` - (https://github.com/ansible-collections/community.general/pull/4043).' - - opentelemetry_plugin - honour ``ignore_errors`` when a task has failed instead - of reporting an error (https://github.com/ansible-collections/community.general/pull/3837). - - pacman - Use ``--groups`` instead of ``--group`` (https://github.com/ansible-collections/community.general/pull/4312). - - pacman - fix URL based package installation (https://github.com/ansible-collections/community.general/pull/4286, - https://github.com/ansible-collections/community.general/issues/4285). - - pacman - fix ``upgrade=yes`` (https://github.com/ansible-collections/community.general/pull/4275, - https://github.com/ansible-collections/community.general/issues/4274). - - pacman - fixed bug where ``absent`` state did not work for locally installed - packages (https://github.com/ansible-collections/community.general/pull/4464). - - pacman - make sure that ``packages`` is always returned when ``name`` or ``upgrade`` - is specified, also if nothing is done (https://github.com/ansible-collections/community.general/pull/4329). - - pacman - when the ``update_cache`` option is combined with another option - such as ``upgrade``, report ``changed`` based on the actions performed by - the latter option. This was the behavior in community.general 4.4.0 and before. 
- In community.general 4.5.0, a task combining these options would always report - ``changed`` (https://github.com/ansible-collections/community.general/pull/4318). - - passwordstore lookup plugin - fix error detection for non-English locales - (https://github.com/ansible-collections/community.general/pull/4219). - - passwordstore lookup plugin - prevent returning path names as passwords by - accident (https://github.com/ansible-collections/community.general/issues/4185, - https://github.com/ansible-collections/community.general/pull/4192). - - passwordstore lookup plugin - replace deprecated ``distutils.util.strtobool`` - with Ansible's ``convert_bool.boolean`` to interpret values for the ``create``, - ``returnall``, ``overwrite``, ``backup``, and ``nosymbols`` options (https://github.com/ansible-collections/community.general/pull/3934). - - pipx - passes the correct command line option ``--include-apps`` (https://github.com/ansible-collections/community.general/issues/3791). - - pritunl - fixed bug where pritunl plugin api add unneeded data in ``auth_string`` - parameter (https://github.com/ansible-collections/community.general/issues/4527). - - proxmox - fixed ``onboot`` parameter causing module failures when undefined - (https://github.com/ansible-collections/community.general/issues/3844). - - proxmox inventory plugin - always convert strings that follow the ``key=value[,key=value[...]]`` - form into dictionaries (https://github.com/ansible-collections/community.general/pull/4349). - - proxmox inventory plugin - fix error when parsing container with LXC configs - (https://github.com/ansible-collections/community.general/issues/4472, https://github.com/ansible-collections/community.general/pull/4472). - - proxmox inventory plugin - fixed the ``description`` field being ignored if - it contained a comma (https://github.com/ansible-collections/community.general/issues/4348). 
- - proxmox inventory plugin - fixed the ``tags_parsed`` field when Proxmox returns - a single space for the ``tags`` entry (https://github.com/ansible-collections/community.general/pull/4378). - - proxmox_kvm - fix a bug when getting a state of VM without name will fail - (https://github.com/ansible-collections/community.general/pull/4508). - - proxmox_kvm - fix error in check when creating or cloning (https://github.com/ansible-collections/community.general/pull/4306). - - proxmox_kvm - fix error when checking whether Proxmox VM exists (https://github.com/ansible-collections/community.general/pull/4287). - - python_requirements_info - fails if version operator used without version - (https://github.com/ansible-collections/community.general/pull/3785). - - python_requirements_info - store ``mismatched`` return values per package - as documented in the module (https://github.com/ansible-collections/community.general/pull/4078). - - say callback plugin - replace deprecated ``distutils.spawn.find_executable`` - with Ansible's ``get_bin_path`` to find the ``say`` resp. ``espeak`` executables - (https://github.com/ansible-collections/community.general/pull/3934). - - scaleway_user_data - fix double-quote added where no double-quote is needed - to user data in scaleway's server (``Content-type`` -> ``Content-Type``) (https://github.com/ansible-collections/community.general/pull/3940). - - slack - add ``charset`` to HTTP headers to avoid Slack API warning (https://github.com/ansible-collections/community.general/issues/3932). - - terraform - fix command options being ignored during planned/plan in function - ``build_plan`` such as ``lock`` or ``lock_timeout`` (https://github.com/ansible-collections/community.general/issues/3707, - https://github.com/ansible-collections/community.general/pull/3726). - - vdo - fix options error (https://github.com/ansible-collections/community.general/pull/4163). 
- - xattr - fix exception caused by ``_run_xattr()`` raising a ``ValueError`` - due to a mishandling of base64-encoded value (https://github.com/ansible-collections/community.general/issues/3673). - - xbps - fix error message that is reported when installing packages fails (https://github.com/ansible-collections/community.general/pull/4438). - - yarn - fix incorrect handling of ``yarn list`` and ``yarn global list`` output - that could result in fatal error (https://github.com/ansible-collections/community.general/pull/4050). - - yarn - fix incorrectly reported status when installing a package globally - (https://github.com/ansible-collections/community.general/issues/4045, https://github.com/ansible-collections/community.general/pull/4050). - - yarn - fix missing ``~`` expansion in yarn global install folder which resulted - in incorrect task status (https://github.com/ansible-collections/community.general/issues/4045, - https://github.com/ansible-collections/community.general/pull/4048). - - yum_versionlock - fix matching of existing entries with names passed to the - module. Match yum and dnf lock format (https://github.com/ansible-collections/community.general/pull/4183). - - zone connection plugin - replace deprecated ``distutils.spawn.find_executable`` - with Ansible's ``get_bin_path`` to find the executable (https://github.com/ansible-collections/community.general/pull/3934). - - zypper - fixed bug that caused zypper to always report [ok] and do nothing - on ``state=present`` when all packages in ``name`` had a version specification - (https://github.com/ansible-collections/community.general/issues/4371, https://github.com/ansible-collections/community.general/pull/4421). - deprecated_features: - - gem - the default of the ``norc`` option has been deprecated and will change - to ``true`` in community.general 6.0.0. Explicitly specify a value to avoid - a deprecation warning (https://github.com/ansible-collections/community.general/pull/4517). 
- - mail callback plugin - not specifying ``sender`` is deprecated and will be - disallowed in community.general 6.0.0 (https://github.com/ansible-collections/community.general/pull/4140). - - module_helper module utils - deprecated the attribute ``ModuleHelper.VarDict`` - (https://github.com/ansible-collections/community.general/pull/3801). - - nmcli - deprecate default hairpin mode for a bridge. This so we can change - it to ``false`` in community.general 7.0.0, as this is also the default in - ``nmcli`` (https://github.com/ansible-collections/community.general/pull/4334). - - 'pacman - from community.general 5.0.0 on, the ``changed`` status of ``update_cache`` - will no longer be ignored if ``name`` or ``upgrade`` is specified. To keep - the old behavior, add something like ``register: result`` and ``changed_when: - result.packages | length > 0`` to your task (https://github.com/ansible-collections/community.general/pull/4329).' - - proxmox inventory plugin - the current default ``true`` of the ``want_proxmox_nodes_ansible_host`` - option has been deprecated. The default will change to ``false`` in community.general - 6.0.0. To keep the current behavior, explicitly set ``want_proxmox_nodes_ansible_host`` - to ``true`` in your inventory configuration. We suggest to already switch - to the new behavior by explicitly setting it to ``false``, and by using ``compose:`` - to set ``ansible_host`` to the correct value. See the examples in the plugin - documentation for details (https://github.com/ansible-collections/community.general/pull/4466). - - vmadm - deprecated module parameter ``debug`` that was not used anywhere (https://github.com/ansible-collections/community.general/pull/4580). - known_issues: - - pacman - ``update_cache`` cannot differentiate between up to date and outdated - package lists and will report ``changed`` in both situations (https://github.com/ansible-collections/community.general/pull/4318). 
- - pacman - binaries specified in the ``executable`` parameter must support ``--print-format`` - in order to be used by this module. In particular, AUR helper ``yay`` is known - not to currently support it (https://github.com/ansible-collections/community.general/pull/4312). - major_changes: - - The community.general collection no longer supports Ansible 2.9 and ansible-base - 2.10. While we take no active measures to prevent usage, we will remove a - lot of compatibility code and other compatibility measures that will effectively - prevent using most content from this collection with Ansible 2.9, and some - content of this collection with ansible-base 2.10. Both Ansible 2.9 and ansible-base - 2.10 will very soon be End of Life and if you are still using them, you should - consider upgrading to ansible-core 2.11 or later as soon as possible (https://github.com/ansible-collections/community.general/pull/4548). - minor_changes: - - Avoid internal ansible-core module_utils in favor of equivalent public API - available since at least Ansible 2.9. This fixes some instances added since - the last time this was fixed (https://github.com/ansible-collections/community.general/pull/4232). - - Remove vendored copy of ``distutils.version`` in favor of vendored copy included - with ansible-core 2.12+. For ansible-core 2.11, uses ``distutils.version`` - for Python < 3.12. There is no support for ansible-core 2.11 with Python 3.12+ - (https://github.com/ansible-collections/community.general/pull/3988). - - aix_filesystem - calling ``run_command`` with arguments as ``list`` instead - of ``str`` (https://github.com/ansible-collections/community.general/pull/3833). - - aix_lvg - calling ``run_command`` with arguments as ``list`` instead of ``str`` - (https://github.com/ansible-collections/community.general/pull/3834). 
- - alternatives - add ``state`` parameter, which provides control over whether - the alternative should be set as the active selection for its alternatives - group (https://github.com/ansible-collections/community.general/issues/4543, - https://github.com/ansible-collections/community.general/pull/4557). - - ansible_galaxy_install - added option ``no_deps`` to the module (https://github.com/ansible-collections/community.general/issues/4174). - - atomic_container - minor refactoring (https://github.com/ansible-collections/community.general/pull/4567). - - clc_alert_policy - minor refactoring (https://github.com/ansible-collections/community.general/pull/4556). - - clc_group - minor refactoring (https://github.com/ansible-collections/community.general/pull/4556). - - clc_loadbalancer - minor refactoring (https://github.com/ansible-collections/community.general/pull/4556). - - clc_server - minor refactoring (https://github.com/ansible-collections/community.general/pull/4556). - - cmd_runner module util - reusable command runner with consistent argument - formatting and sensible defaults (https://github.com/ansible-collections/community.general/pull/4476). - - cobbler inventory plugin - add ``include_profiles`` option (https://github.com/ansible-collections/community.general/pull/4068). - - datadog_monitor - support new datadog event monitor of type `event-v2 alert` - (https://github.com/ansible-collections/community.general/pull/4457) - - filesystem - add support for resizing btrfs (https://github.com/ansible-collections/community.general/issues/4465). - - gitlab - add more token authentication support with the new options ``api_oauth_token`` - and ``api_job_token`` (https://github.com/ansible-collections/community.general/issues/705). - - gitlab - clean up modules and utils (https://github.com/ansible-collections/community.general/pull/3694). 
- - gitlab_group, gitlab_project - add new option ``avatar_path`` (https://github.com/ansible-collections/community.general/pull/3792). - - gitlab_group_variable - new ``variables`` parameter (https://github.com/ansible-collections/community.general/pull/4038 - and https://github.com/ansible-collections/community.general/issues/4074). - - gitlab_project - add new option ``default_branch`` to gitlab_project (if ``readme - = true``) (https://github.com/ansible-collections/community.general/pull/3792). - - gitlab_project_variable - new ``variables`` parameter (https://github.com/ansible-collections/community.general/issues/4038). - - hponcfg - revamped module using ModuleHelper (https://github.com/ansible-collections/community.general/pull/3840). - - icinga2 inventory plugin - added the ``display_name`` field to variables (https://github.com/ansible-collections/community.general/issues/3875, - https://github.com/ansible-collections/community.general/pull/3906). - - icinga2 inventory plugin - implemented constructed interface (https://github.com/ansible-collections/community.general/pull/4088). - - icinga2 inventory plugin - inventory object names are changeable using ``inventory_attr`` - in your config file to the host object name, address, or display_name fields - (https://github.com/ansible-collections/community.general/issues/3875, https://github.com/ansible-collections/community.general/pull/3906). - - ip_netns - calling ``run_command`` with arguments as ``list`` instead of ``str`` - (https://github.com/ansible-collections/community.general/pull/3822). - - ipa_dnszone - ``dynamicupdate`` is now a boolean parameter, instead of a string - parameter accepting ``"true"`` and ``"false"``. Also the module is now idempotent - with respect to ``dynamicupdate`` (https://github.com/ansible-collections/community.general/pull/3374). - - ipa_dnszone - add DNS zone synchronization support (https://github.com/ansible-collections/community.general/pull/3374). 
- - ipa_service - add ``skip_host_check`` parameter. (https://github.com/ansible-collections/community.general/pull/4417). - - ipmi_boot - add support for user-specified IPMI encryption key (https://github.com/ansible-collections/community.general/issues/3698). - - ipmi_power - add ``machine`` option to ensure the power state via the remote - target address (https://github.com/ansible-collections/community.general/pull/3968). - - ipmi_power - add support for user-specified IPMI encryption key (https://github.com/ansible-collections/community.general/issues/3698). - - iso_extract - calling ``run_command`` with arguments as ``list`` instead of - ``str`` (https://github.com/ansible-collections/community.general/pull/3805). - - java_cert - calling ``run_command`` with arguments as ``list`` instead of - ``str`` (https://github.com/ansible-collections/community.general/pull/3835). - - jira - add support for Bearer token auth (https://github.com/ansible-collections/community.general/pull/3838). - - jira - when creating a comment, ``fields`` now is used for additional data - (https://github.com/ansible-collections/community.general/pull/4304). - - keycloak_* modules - added connection timeout parameter when calling server - (https://github.com/ansible-collections/community.general/pull/4168). - - keycloak_client - add ``always_display_in_console`` parameter (https://github.com/ansible-collections/community.general/issues/4390). - - keycloak_client - add ``default_client_scopes`` and ``optional_client_scopes`` - parameters. (https://github.com/ansible-collections/community.general/pull/4385). - - keycloak_user_federation - add sssd user federation support (https://github.com/ansible-collections/community.general/issues/3767). - - ldap_entry - add support for recursive deletion (https://github.com/ansible-collections/community.general/issues/3613). 
- - linode inventory plugin - add support for caching inventory results (https://github.com/ansible-collections/community.general/pull/4179). - - linode inventory plugin - allow templating of ``access_token`` variable in - Linode inventory plugin (https://github.com/ansible-collections/community.general/pull/4040). - - listen_ports_facts - add support for ``ss`` command besides ``netstat`` (https://github.com/ansible-collections/community.general/pull/3708). - - lists_mergeby filter plugin - add parameters ``list_merge`` and ``recursive``. - These are only supported when used with ansible-base 2.10 or ansible-core, - but not with Ansible 2.9 (https://github.com/ansible-collections/community.general/pull/4058). - - logentries - calling ``run_command`` with arguments as ``list`` instead of - ``str`` (https://github.com/ansible-collections/community.general/pull/3807). - - logstash_plugin - calling ``run_command`` with arguments as ``list`` instead - of ``str`` (https://github.com/ansible-collections/community.general/pull/3808). - - lxc_container - added ``wait_for_container`` parameter. If ``true`` the module - will wait until the running task reports success as the status (https://github.com/ansible-collections/community.general/pull/4039). - - lxc_container - calling ``run_command`` with arguments as ``list`` instead - of ``str`` (https://github.com/ansible-collections/community.general/pull/3851). - - lxd connection plugin - make sure that ``ansible_lxd_host``, ``ansible_executable``, - and ``ansible_lxd_executable`` work (https://github.com/ansible-collections/community.general/pull/3798). - - lxd inventory plugin - support virtual machines (https://github.com/ansible-collections/community.general/pull/3519). - - lxd_container - adds ``project`` option to allow selecting project for LXD - instance (https://github.com/ansible-collections/community.general/pull/4479). 
- - lxd_container - adds ``type`` option which also allows to operate on virtual - machines and not just containers (https://github.com/ansible-collections/community.general/pull/3661). - - lxd_profile - adds ``project`` option to allow selecting project for LXD profile - (https://github.com/ansible-collections/community.general/pull/4479). - - mail callback plugin - add ``Message-ID`` and ``Date`` headers (https://github.com/ansible-collections/community.general/issues/4055, - https://github.com/ansible-collections/community.general/pull/4056). - - mail callback plugin - properly use Ansible's option handling to split lists - (https://github.com/ansible-collections/community.general/pull/4140). - - mattermost - add the possibility to send attachments instead of text messages - (https://github.com/ansible-collections/community.general/pull/3946). - - mksysb - revamped the module using ``ModuleHelper`` (https://github.com/ansible-collections/community.general/pull/3295). - - module_helper module utils - added decorators ``check_mode_skip`` and ``check_mode_skip_returns`` - for skipping methods when ``check_mode=True`` (https://github.com/ansible-collections/community.general/pull/3849). - - monit - calling ``run_command`` with arguments as ``list`` instead of ``str`` - (https://github.com/ansible-collections/community.general/pull/3821). - - nmap inventory plugin - add ``sudo`` option in plugin in order to execute - ``sudo nmap`` so that ``nmap`` runs with elevated privileges (https://github.com/ansible-collections/community.general/pull/4506). - - nmcli - add ``wireguard`` connection type (https://github.com/ansible-collections/community.general/pull/3985). - - nmcli - add missing connection aliases ``802-3-ethernet`` and ``802-11-wireless`` - (https://github.com/ansible-collections/community.general/pull/4108). 
- - nmcli - add multiple addresses support for ``ip4`` parameter (https://github.com/ansible-collections/community.general/issues/1088, - https://github.com/ansible-collections/community.general/pull/3738). - - nmcli - add multiple addresses support for ``ip6`` parameter (https://github.com/ansible-collections/community.general/issues/1088). - - nmcli - add support for ``eui64`` and ``ipv6privacy`` parameters (https://github.com/ansible-collections/community.general/issues/3357). - - nmcli - adds ``routes6`` and ``route_metric6`` parameters for supporting IPv6 - routes (https://github.com/ansible-collections/community.general/issues/4059). - - nmcli - remove nmcli modify dependency on ``type`` parameter (https://github.com/ansible-collections/community.general/issues/2858). - - nomad_job - minor refactoring (https://github.com/ansible-collections/community.general/pull/4567). - - nomad_job_info - minor refactoring (https://github.com/ansible-collections/community.general/pull/4567). - - npm - add ability to use ``production`` flag when ``ci`` is set (https://github.com/ansible-collections/community.general/pull/4299). - - open_iscsi - extended module to allow rescanning of established session for - one or all targets (https://github.com/ansible-collections/community.general/issues/3763). - - opennebula - add the release action for VMs in the ``HOLD`` state (https://github.com/ansible-collections/community.general/pull/4036). - - opentelemetry_plugin - enrich service when using the ``docker_login`` (https://github.com/ansible-collections/community.general/pull/4104). - - opentelemetry_plugin - enrich service when using the ``jenkins``, ``hetzner`` - or ``jira`` modules (https://github.com/ansible-collections/community.general/pull/4105). - - packet_device - minor refactoring (https://github.com/ansible-collections/community.general/pull/4567). - - packet_sshkey - minor refactoring (https://github.com/ansible-collections/community.general/pull/4567). 
- - packet_volume - minor refactoring (https://github.com/ansible-collections/community.general/pull/4567). - - pacman - add ``remove_nosave`` parameter to avoid saving modified configuration - files as ``.pacsave`` files. (https://github.com/ansible-collections/community.general/pull/4316, - https://github.com/ansible-collections/community.general/issues/4315). - - pacman - add ``stdout`` and ``stderr`` as return values (https://github.com/ansible-collections/community.general/pull/3758). - - pacman - now implements proper change detection for ``update_cache=true``. - Adds ``cache_updated`` return value to when ``update_cache=true`` to report - this result independently of the module's overall changed return value (https://github.com/ansible-collections/community.general/pull/4337). - - pacman - the module has been rewritten and is now much faster when using ``state=latest``. - Operations are now done all packages at once instead of package per package - and the configured output format of ``pacman`` no longer affect the module's - operation. (https://github.com/ansible-collections/community.general/pull/3907, - https://github.com/ansible-collections/community.general/issues/3783, https://github.com/ansible-collections/community.general/issues/4079) - - passwordstore lookup plugin - add configurable ``lock`` and ``locktimeout`` - options to avoid race conditions in itself and in the ``pass`` utility it - calls. By default, the plugin now locks on write operations (https://github.com/ansible-collections/community.general/pull/4194). - - pipx - added options ``editable`` and ``pip_args`` (https://github.com/ansible-collections/community.general/issues/4300). - - profitbricks - minor refactoring (https://github.com/ansible-collections/community.general/pull/4567). - - proxmox - add ``clone`` parameter (https://github.com/ansible-collections/community.general/pull/3930). 
- - proxmox - minor refactoring (https://github.com/ansible-collections/community.general/pull/4567). - - proxmox inventory plugin - add support for client-side jinja filters (https://github.com/ansible-collections/community.general/issues/3553). - - proxmox inventory plugin - add support for templating the ``url``, ``user``, - and ``password`` options (https://github.com/ansible-collections/community.general/pull/4418). - - proxmox inventory plugin - add token authentication as an alternative to username/password - (https://github.com/ansible-collections/community.general/pull/4540). - - proxmox inventory plugin - parse LXC configs returned by the proxmox API (https://github.com/ansible-collections/community.general/pull/4472). - - proxmox modules - move ``HAS_PROXMOXER`` check into ``module_utils`` (https://github.com/ansible-collections/community.general/pull/4030). - - proxmox modules - move common code into ``module_utils`` (https://github.com/ansible-collections/community.general/pull/4029). - - proxmox_kvm - added EFI disk support when creating VM with OVMF UEFI BIOS - with new ``efidisk0`` option (https://github.com/ansible-collections/community.general/pull/4106, - https://github.com/ansible-collections/community.general/issues/1638). - - proxmox_kvm - add ``win11`` to ``ostype`` parameter for Windows 11 and Windows - Server 2022 support (https://github.com/ansible-collections/community.general/issues/4023, - https://github.com/ansible-collections/community.general/pull/4191). - - proxmox_snap - add restore snapshot option (https://github.com/ansible-collections/community.general/pull/4377). - - proxmox_snap - fixed timeout value to correctly reflect time in seconds. The - timeout was off by one second (https://github.com/ansible-collections/community.general/pull/4377). - - puppet - remove deprecation for ``show_diff`` parameter. 
Its alias ``show-diff`` - is still deprecated and will be removed in community.general 7.0.0 (https://github.com/ansible-collections/community.general/pull/3980). - - python_requirements_info - returns python version broken down into its components, - and some minor refactoring (https://github.com/ansible-collections/community.general/pull/3797). - - redfish_command - add ``GetHostInterfaces`` command to enable reporting Redfish - Host Interface information (https://github.com/ansible-collections/community.general/issues/3693). - - redfish_command - add ``IndicatorLedOn``, ``IndicatorLedOff``, and ``IndicatorLedBlink`` - commands to the Systems category for controling system LEDs (https://github.com/ansible-collections/community.general/issues/4084). - - redfish_command - add ``SetHostInterface`` command to enable configuring the - Redfish Host Interface (https://github.com/ansible-collections/community.general/issues/3632). - - redis - add authentication parameters ``login_user``, ``tls``, ``validate_certs``, - and ``ca_certs`` (https://github.com/ansible-collections/community.general/pull/4207). - - scaleway inventory plugin - add profile parameter ``scw_profile`` (https://github.com/ansible-collections/community.general/pull/4049). - - scaleway_compute - add possibility to use project identifier (new ``project`` - option) instead of deprecated organization identifier (https://github.com/ansible-collections/community.general/pull/3951). - - scaleway_volume - all volumes are systematically created on par1 (https://github.com/ansible-collections/community.general/pull/3964). - - seport - minor refactoring (https://github.com/ansible-collections/community.general/pull/4471). - - smartos_image_info - minor refactoring (https://github.com/ansible-collections/community.general/pull/4567). - - snap - add option ``options`` permitting to set options using the ``snap set`` - command (https://github.com/ansible-collections/community.general/pull/3943). 
- - sudoers - add support for ``runas`` parameter (https://github.com/ansible-collections/community.general/issues/4379). - - svc - calling ``run_command`` with arguments as ``list`` instead of ``str`` - (https://github.com/ansible-collections/community.general/pull/3829). - - syslog_json - add option to skip logging of ``gather_facts`` playbook tasks; - use v2 callback API (https://github.com/ansible-collections/community.general/pull/4223). - - terraform - adds ``terraform_upgrade`` parameter which allows ``terraform - init`` to satisfy new provider constraints in an existing Terraform project - (https://github.com/ansible-collections/community.general/issues/4333). - - udm_group - minor refactoring (https://github.com/ansible-collections/community.general/pull/4556). - - udm_share - minor refactoring (https://github.com/ansible-collections/community.general/pull/4556). - - vmadm - minor refactoring (https://github.com/ansible-collections/community.general/pull/4567). - - webfaction_app - minor refactoring (https://github.com/ansible-collections/community.general/pull/4567). - - webfaction_db - minor refactoring (https://github.com/ansible-collections/community.general/pull/4567). - - xattr - calling ``run_command`` with arguments as ``list`` instead of ``str`` - (https://github.com/ansible-collections/community.general/pull/3806). - - xfconf - added missing value types ``char``, ``uchar``, ``int64`` and ``uint64`` - (https://github.com/ansible-collections/community.general/pull/4534). - - xfconf - minor refactor on the base class for the module (https://github.com/ansible-collections/community.general/pull/3919). - - zypper - add support for ``--clean-deps`` option to remove packages that depend - on a package being removed (https://github.com/ansible-collections/community.general/pull/4195). - release_summary: Alpha release for community.general 5.0.0. 
- removed_features: - - ali_instance_info - removed the options ``availability_zone``, ``instance_ids``, - and ``instance_names``. Use filter item ``zone_id`` instead of ``availability_zone``, - filter item ``instance_ids`` instead of ``instance_ids``, and filter item - ``instance_name`` instead of ``instance_names`` (https://github.com/ansible-collections/community.general/pull/4516). - - apt_rpm - removed the deprecated alias ``update-cache`` of ``update_cache`` - (https://github.com/ansible-collections/community.general/pull/4516). - - compose - removed various deprecated aliases. Use the version with ``_`` instead - of ``-`` instead (https://github.com/ansible-collections/community.general/pull/4516). - - dnsimple - remove support for dnsimple < 2.0.0 (https://github.com/ansible-collections/community.general/pull/4516). - - github_deploy_key - removed the deprecated alias ``2fa_token`` of ``otp`` - (https://github.com/ansible-collections/community.general/pull/4516). - - homebrew, homebrew_cask - removed the deprecated alias ``update-brew`` of - ``update_brew`` (https://github.com/ansible-collections/community.general/pull/4516). - - linode - removed the ``backupsenabled`` option. Use ``backupweeklyday`` or - ``backupwindow`` to enable backups (https://github.com/ansible-collections/community.general/pull/4516). - - opkg - removed the deprecated alias ``update-cache`` of ``update_cache`` (https://github.com/ansible-collections/community.general/pull/4516). - - pacman - if ``update_cache=true`` is used with ``name`` or ``upgrade``, the - changed state will now also indicate if only the cache was updated. To keep - the old behavior - only indicate ``changed`` when a package was installed/upgraded - -, use ``changed_when`` as indicated in the module examples (https://github.com/ansible-collections/community.general/pull/4516). 
- - pacman - removed the deprecated alias ``update-cache`` of ``update_cache`` - (https://github.com/ansible-collections/community.general/pull/4516). - - proxmox, proxmox_kvm, proxmox_snap - no longer allow to specify a VM name - that matches multiple VMs. If this happens, the modules now fail (https://github.com/ansible-collections/community.general/pull/4516). - - serverless - removed the ``functions`` option. It was not used by the module - (https://github.com/ansible-collections/community.general/pull/4516). - - slackpkg - removed the deprecated alias ``update-cache`` of ``update_cache`` - (https://github.com/ansible-collections/community.general/pull/4516). - - urpmi - removed the deprecated alias ``no-recommends`` of ``no_recommends`` - (https://github.com/ansible-collections/community.general/pull/4516). - - urpmi - removed the deprecated alias ``update-cache`` of ``update_cache`` - (https://github.com/ansible-collections/community.general/pull/4516). - - xbps - removed the deprecated alias ``update-cache`` of ``update_cache`` (https://github.com/ansible-collections/community.general/pull/4516). - - xfconf - the ``get`` state has been removed. Use the ``xfconf_info`` module - instead (https://github.com/ansible-collections/community.general/pull/4516). 
- fragments: - - 1088-add_multiple_ipv6_address_support.yml - - 1088-nmcli_add_multiple_addresses_support.yml - - 2386-github_repo-fix-idempotency-issues.yml - - 3295-mksysb-revamp.yaml - - 3357-nmcli-eui64-and-ipv6privacy.yml - - 3374-add-ipa-ptr-sync-support.yml - - 3519-inventory-support-lxd-4.yml - - 3625-nmcli_false_changed_mtu_fix.yml - - 3632-add-redfish-host-interface-config-support.yml - - 3660-a_module-tombstone.yml - - 3661-lxd_container-add-vm-support.yml - - 3667-ldap_search.yml - - 3675-xattr-handle-base64-values.yml - - 3681-lvol-fix-create.yml - - 3693-add-redfish-host-interface-info-support.yml - - 3694-gitlab-cleanup.yml - - 3702-ipmi-encryption-key.yml - - 3703-force-install-homebrew-cask.yml - - 3708-listen_ports_facts-add-ss-support.yml - - 3709-support-batch-mode.yml - - 3726-terraform-missing-parameters-planned-fix.yml - - 3758-pacman-add-stdout-stderr.yml - - 3765-extend-open_iscsi-with-rescan.yml - - 3768-nmcli_fix_changed_when_no_mask_set.yml - - 3780-add-keycloak-sssd-user-federation.yml - - 3785-python_requirements_info-versionless-op.yaml - - 3792-improve_gitlab_group_and_project.yml - - 3797-python_requirements_info-improvements.yaml - - 3798-fix-lxd-connection-option-vars-support.yml - - 3800-pipx-include-apps.yaml - - 3801-mh-deprecate-vardict-attr.yaml - - 3805-iso_extract-run_command-list.yaml - - 3806-xattr-run_command-list.yaml - - 3807-logentries-run_command-list.yaml - - 3808-logstash_plugin-run_command-list.yaml - - 3821-monit-run-list.yaml - - 3822-ip_netns-run-list.yaml - - 3829-svc-run-list.yaml - - 3833-aix_filesystem-run-list.yaml - - 3834-aix-lvg-run-list.yaml - - 3835-java-cert-run-list.yaml - - 3837-opentelemetry_plugin-honour_ignore_errors.yaml - - 3838-jira-token.yaml - - 3840-hponcfg-mh-revamp.yaml - - 3849-mh-check-mode-decos.yaml - - 3851-lxc-container-run-list.yaml - - 3862-interfaces-file-fix-dup-option.yaml - - 3867-jira-fix-body.yaml - - 3874-proxmox-fix-onboot-param.yml - - 3875-icinga2-inv-fix.yml - - 
3896-nmcli_vlan_missing_options.yaml - - 3907-pacman-speedup.yml - - 3909-nrdp_fix_string_args_without_encoding.yaml - - 3916-fix-vdo-options-type.yml - - 3919-xfconf-baseclass.yaml - - 3921-add-counter-filter-plugin.yml - - 3930-proxmox-add-clone.yaml - - 3933-slack-charset-header.yaml - - 3934-distutils.yml - - 3935-use-gitlab-instance-runner-to-create-runner.yml - - 3936-distutils.version.yml - - 3940_fix_contenttype_scaleway_user_data.yml - - 3943-add-option-options-to-snap-module.yml - - 3946-mattermost_attachments.yml - - 3951-scaleway_compute_add_project_id.yml - - 3964-scaleway_volume_add_region.yml - - 3968-ipmi_power-add-machine-option.yaml - - 3976-fix-alternatives-parsing.yml - - 3980-puppet-show_diff.yml - - 3985-nmcli-add-wireguard-connection-type.yml - - 3988-distutils-vendor-removed.yml - - 4026-fix-mail-callback.yml - - 4029-proxmox-refactor.yml - - 4030-proxmox-has-proxmoxer.yml - - 4036-onevm-add-release-action.yaml - - 4038-fix-and-rework-gitlb-project-variable.yml - - 4039-cluster-container-wait.yml - - 4040-linode-token-templating.yaml - - 4043-fix-no-log-opentelemetry.yml - - 4048-expand-tilde-in-yarn-global-install-folder.yaml - - 4049-profile-for-scaleway-inventory.yml - - 4050-properly-parse-json-lines-output-from-yarn.yaml - - 4052-fix-detection-of-installed-cargo-packages-with-hyphens.yaml - - 4056-add-missing-mail-headers.yml - - 4058-lists_mergeby-add-parameters.yml - - 4061-fix-mail-recipient-encoding.yml - - 4062-nmcli-ipv6-routes-support.yml - - 4068-add-include_file-option.yml - - 4078-python_requirements_info.yaml - - 4084-add-redfish-system-indicator-led.yml - - 4086-rework_of_gitlab_proyect_variable_over_gitlab_group_variable.yml - - 4088-add-constructed-interface-for-icinga2-inventory.yml - - 4092-fix_local_ports_regex_listen_ports_facts.yaml - - 4104-opentelemetry_plugin-enrich_docker_login.yaml - - 4105-opentelemetry_plugin-enrich_jira_hetzner_jenkins_services.yaml - - 4106-proxmox-efidisk0-support.yaml - - 
4108-nmcli-support-modifcation-without-type-param.yml - - 4131-nmcli_fix_reports_changed_for_routes4_parameter.yml - - 4136-gitlab_runner-make-project-owned-mutually-exclusive.yml - - 4140-mail-callback-options.yml - - 4150-gitlab-project-variable-absent-fix.yml - - 4151-dconf-catch-psutil-nosuchprocess.yaml - - 4154-ini_file_changed.yml - - 4168-add-keycloak-url-timeout.yml - - 4179-linode-inventory-cache.yaml - - 4183-fix-yum_versionlock.yaml - - 4191-proxmox-add-win11.yml - - 4192-improve-passwordstore-consistency.yml - - 4192-zypper-add-clean-deps.yml - - 4194-configurable-passwordstore-locking.yml - - 4206-imc-rest-module.yaml - - 4207-add-redis-tls-support.yml - - 4212-fixes-for-keycloak-user-federation.yml - - 4219-passwordstore-locale-fix.yml - - 4223-syslog-json-skip-syslog-option.yml - - 4232-text-converter-import.yml - - 4240-ansible_galaxy_install-no_deps.yml - - 4275-pacman-sysupgrade.yml - - 4286-pacman-url-pkgs.yml - - 4287-fix-proxmox-vm-chek.yml - - 4288-fix-4259-support-busybox-dd.yml - - 4299-npm-add-production-with-ci-flag.yml - - 4303-pipx-editable.yml - - 4304-jira-fields-in-comment.yml - - 4306-proxmox-fix-error-on-vm-clone.yml - - 4312-pacman-groups.yml - - 4316-pacman-remove-nosave.yml - - 4318-pacman-restore-old-changed-behavior.yml - - 4320-nmcli-hairpin.yml - - 4330-pacman-packages-update_cache.yml - - 4336-linode-inventory-filtering.yaml - - 4337-pacman-update_cache.yml - - 4349-proxmox-inventory-dict-facts.yml - - 4351-inventory-lxd-handling_metadata_wo_os_and_release.yml - - 4352-proxmox-inventory-filters.yml - - 4355-ldap-recursive-delete.yml - - 4377-allow-proxmox-snapshot-restoring.yml - - 4378-proxmox-inventory-tags.yml - - 4380-sudoers-runas-parameter.yml - - 4382-keycloak-add-missing-validate_certs-parameters.yml - - 4385-keycloak-client-default-optional-scopes.yml - - 4386-proxmox-support-templating-in-inventory-file.yml - - 4417-ipa_service-add-skip_host_check.yml - - 4421-zypper_package_version_handling_fix.yml - - 
4422-warn-user-if-incorrect-SDK-version-is-installed.yaml - - 4429-keycloak-client-add-always-display-in-console.yml - - 4438-fix-error-message.yaml - - 4455-terraform-provider-upgrade.yml - - 4457-support-datadog-monitors-type-event-v2.yaml - - 4459-only-get-monitor-if-it-is-not-null-api-response.yaml - - 4464-pacman-fix-local-remove.yaml - - 4465-btrfs-resize.yml - - 4466-proxmox-ansible_host-deprecation.yml - - 4471-seport-refactor.yaml - - 4476-cmd_runner.yml - - 4479-add-project-support-for-lxd_container-and-lxd_profile.yml - - 4491-specify_all_in_list_calls.yaml - - 4492-proxmox_kvm_fix_vm_without_name.yaml - - 4496-remove-deprecated-method-in-gitlab-branch-module.yml - - 4506-sudo-in-nmap-inv-plugin.yaml - - 4516-deprecation-removals.yml - - 4517-gem-deprecate-norc.yml - - 4524-update-opennebula-inventory-plugin-to-match-documentation.yaml - - 4526-keycloak-realm-types.yaml - - 4530-fix-unauthorized-pritunl-request.yaml - - 4534-xfconf-added-value-types.yaml - - 4540-proxmox-inventory-token-auth.yml - - 4548-remove-2.9-2.10-compatibility.yml - - 4555-proxmox-lxc-key.yml - - 4556-remove-default-none-1.yml - - 4557-alternatives-add-state-parameter.yml - - 4567-remove-default-none-2.yml - - 4580-vmadm-deprecate-param-debug.yaml - - 5.0.0-a1.yml - - 705-gitlab-auth-support.yml - plugins: - filter: - - description: Counts hashable elements in a sequence - name: counter - namespace: null - release_date: '2022-04-29' - 5.0.1: - changes: - bugfixes: - - consul - fixed bug introduced in PR 4590 (https://github.com/ansible-collections/community.general/issues/4680). - - filesystem - handle ``fatresize --info`` output lines without ``:`` (https://github.com/ansible-collections/community.general/pull/4700). - - filesystem - improve error messages when output cannot be parsed by including - newlines in escaped form (https://github.com/ansible-collections/community.general/pull/4700). 
- - keycloak_realm - fix default groups and roles (https://github.com/ansible-collections/community.general/issues/4241). - - redis* modules - fix call to ``module.fail_json`` when failing because of - missing Python libraries (https://github.com/ansible-collections/community.general/pull/4733). - - xcc_redfish_command - for compatibility due to Redfish spec changes the virtualMedia - resource location changed from Manager to System (https://github.com/ansible-collections/community.general/pull/4682). - - zfs - fix wrong quoting of properties (https://github.com/ansible-collections/community.general/issues/4707, - https://github.com/ansible-collections/community.general/pull/4726). - minor_changes: - - cpanm - using ``do_raise()`` to raise exceptions in ``ModuleHelper`` derived - modules (https://github.com/ansible-collections/community.general/pull/4674). - - mksysb - using ``do_raise()`` to raise exceptions in ``ModuleHelper`` derived - modules (https://github.com/ansible-collections/community.general/pull/4674). - - pipx - using ``do_raise()`` to raise exceptions in ``ModuleHelper`` derived - modules (https://github.com/ansible-collections/community.general/pull/4674). - - snap - using ``do_raise()`` to raise exceptions in ``ModuleHelper`` derived - modules (https://github.com/ansible-collections/community.general/pull/4674). - - xfconf - using ``do_raise()`` to raise exceptions in ``ModuleHelper`` derived - modules (https://github.com/ansible-collections/community.general/pull/4674). - release_summary: Regular bugfix release for inclusion in Ansible 6.0.0. 
- fragments: - - 4674-use-mh-raise.yaml - - 4682-compatibility-virtualmedia-resource-location.yaml - - 4700-code-changes.yml - - 4712-consul-bugfix.yaml - - 4719-fix-keycloak-realm.yaml - - 4726-zfs.yml - - 4733-redis-fail.yml - - 5.0.1.yml - release_date: '2022-05-30' +--- +ancestor: 11.0.0 +releases: {} diff --git a/changelogs/changelog.yaml.license b/changelogs/changelog.yaml.license new file mode 100644 index 0000000000..edff8c7685 --- /dev/null +++ b/changelogs/changelog.yaml.license @@ -0,0 +1,3 @@ +GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +SPDX-License-Identifier: GPL-3.0-or-later +SPDX-FileCopyrightText: Ansible Project diff --git a/changelogs/config.yaml b/changelogs/config.yaml index fd0b422a5b..578b8c3765 100644 --- a/changelogs/config.yaml +++ b/changelogs/config.yaml @@ -1,29 +1,43 @@ +--- +# Copyright (c) Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + changelog_filename_template: ../CHANGELOG.rst changelog_filename_version_depth: 0 changes_file: changelog.yaml changes_format: combined +ignore_other_fragment_extensions: true keep_fragments: false mention_ancestor: true -flatmap: true new_plugins_after_name: removed_features notesdir: fragments +output_formats: + - md + - rst prelude_section_name: release_summary prelude_section_title: Release Summary sections: -- - major_changes - - Major Changes -- - minor_changes - - Minor Changes -- - breaking_changes - - Breaking Changes / Porting Guide -- - deprecated_features - - Deprecated Features -- - removed_features - - Removed Features (previously deprecated) -- - security_fixes - - Security Fixes -- - bugfixes - - Bugfixes -- - known_issues - - Known Issues + - - major_changes + - Major Changes + - - minor_changes + - Minor Changes + - - breaking_changes + - Breaking Changes / Porting Guide + - - 
deprecated_features + - Deprecated Features + - - removed_features + - Removed Features (previously deprecated) + - - security_fixes + - Security Fixes + - - bugfixes + - Bugfixes + - - known_issues + - Known Issues title: Community General +trivial_section_name: trivial +use_fqcn: true +add_plugin_period: true +changelog_nice_yaml: true +changelog_sort: version +vcs: auto diff --git a/changelogs/fragments/10227-pacemaker-cluster-and-resource-enhancement.yml b/changelogs/fragments/10227-pacemaker-cluster-and-resource-enhancement.yml new file mode 100644 index 0000000000..d1cfee7816 --- /dev/null +++ b/changelogs/fragments/10227-pacemaker-cluster-and-resource-enhancement.yml @@ -0,0 +1,7 @@ +deprecated_features: + - pacemaker_cluster - the parameter ``state`` will become a required parameter in community.general 12.0.0 (https://github.com/ansible-collections/community.general/pull/10227). + +minor_changes: + - pacemaker_cluster - add ``state=maintenance`` for managing pacemaker maintenance mode (https://github.com/ansible-collections/community.general/issues/10200, https://github.com/ansible-collections/community.general/pull/10227). + - pacemaker_cluster - rename ``node`` to ``name`` and add ``node`` alias (https://github.com/ansible-collections/community.general/pull/10227). + - pacemaker_resource - enhance module by removing duplicative code (https://github.com/ansible-collections/community.general/pull/10227). 
diff --git a/changelogs/fragments/10231-keycloak-add-client-credentials-authentication.yml b/changelogs/fragments/10231-keycloak-add-client-credentials-authentication.yml new file mode 100644 index 0000000000..eec12e8669 --- /dev/null +++ b/changelogs/fragments/10231-keycloak-add-client-credentials-authentication.yml @@ -0,0 +1,2 @@ +minor_changes: + - keycloak - add support for ``grant_type=client_credentials`` to all keycloak modules, so that specifying ``auth_client_id`` and ``auth_client_secret`` is sufficient for authentication (https://github.com/ansible-collections/community.general/pull/10231). diff --git a/changelogs/fragments/10267-add-cloudflare-ptr-record-support.yml b/changelogs/fragments/10267-add-cloudflare-ptr-record-support.yml new file mode 100644 index 0000000000..29d71ca393 --- /dev/null +++ b/changelogs/fragments/10267-add-cloudflare-ptr-record-support.yml @@ -0,0 +1,2 @@ +minor_changes: + - cloudflare_dns - adds support for PTR records (https://github.com/ansible-collections/community.general/pull/10267). diff --git a/changelogs/fragments/10269-cloudflare-dns-refactor.yml b/changelogs/fragments/10269-cloudflare-dns-refactor.yml new file mode 100644 index 0000000000..9f91040d63 --- /dev/null +++ b/changelogs/fragments/10269-cloudflare-dns-refactor.yml @@ -0,0 +1,2 @@ +minor_changes: + - cloudflare_dns - simplify validations and refactor some code, no functional changes (https://github.com/ansible-collections/community.general/pull/10269). diff --git a/changelogs/fragments/10271--disable_lookups.yml b/changelogs/fragments/10271--disable_lookups.yml new file mode 100644 index 0000000000..d28e2ac833 --- /dev/null +++ b/changelogs/fragments/10271--disable_lookups.yml @@ -0,0 +1,3 @@ +bugfixes: + - "icinga2 inventory plugin - avoid using deprecated option when templating options (https://github.com/ansible-collections/community.general/pull/10271)." 
+ - "linode inventory plugin - avoid using deprecated option when templating options (https://github.com/ansible-collections/community.general/pull/10271)." diff --git a/changelogs/fragments/10285-fstr-plugins.yml b/changelogs/fragments/10285-fstr-plugins.yml new file mode 100644 index 0000000000..6fff590fee --- /dev/null +++ b/changelogs/fragments/10285-fstr-plugins.yml @@ -0,0 +1,7 @@ +minor_changes: + - dense callback plugin - use f-strings instead of concatenation (https://github.com/ansible-collections/community.general/pull/10285). + - mail callback plugin - use f-strings instead of concatenation (https://github.com/ansible-collections/community.general/pull/10285). + - wsl connection plugin - use f-strings instead of concatenation (https://github.com/ansible-collections/community.general/pull/10285). + - jc filter plugin - use f-strings instead of concatenation (https://github.com/ansible-collections/community.general/pull/10285). + - iocage inventory plugin - use f-strings instead of concatenation (https://github.com/ansible-collections/community.general/pull/10285). + - xen_orchestra inventory plugin - use f-strings instead of concatenation (https://github.com/ansible-collections/community.general/pull/10285). diff --git a/changelogs/fragments/10299-github_app_access_token-lookup.yml b/changelogs/fragments/10299-github_app_access_token-lookup.yml new file mode 100644 index 0000000000..59233e2a05 --- /dev/null +++ b/changelogs/fragments/10299-github_app_access_token-lookup.yml @@ -0,0 +1,2 @@ +minor_changes: + - github_app_access_token lookup plugin - support both ``jwt`` and ``pyjwt`` to avoid conflict with other modules requirements (https://github.com/ansible-collections/community.general/issues/10299). 
diff --git a/changelogs/fragments/10311-xfconf-refactor.yml b/changelogs/fragments/10311-xfconf-refactor.yml new file mode 100644 index 0000000000..9d71bd17d8 --- /dev/null +++ b/changelogs/fragments/10311-xfconf-refactor.yml @@ -0,0 +1,2 @@ +minor_changes: + - xfconf - minor adjustments to the code (https://github.com/ansible-collections/community.general/pull/10311). diff --git a/changelogs/fragments/10323-nmcli-improvements.yml b/changelogs/fragments/10323-nmcli-improvements.yml new file mode 100644 index 0000000000..53436ea7d6 --- /dev/null +++ b/changelogs/fragments/10323-nmcli-improvements.yml @@ -0,0 +1,2 @@ +minor_changes: + - nmcli - simplify validations and refactor some code, no functional changes (https://github.com/ansible-collections/community.general/pull/10323). diff --git a/changelogs/fragments/10328-redundant-brackets.yml b/changelogs/fragments/10328-redundant-brackets.yml new file mode 100644 index 0000000000..f8f74a336c --- /dev/null +++ b/changelogs/fragments/10328-redundant-brackets.yml @@ -0,0 +1,32 @@ +minor_changes: + - logstash callback plugin - remove redundant brackets in conditionals, no functional changes (https://github.com/ansible-collections/community.general/pull/10328). + - keycloak module utils - remove redundant brackets in conditionals, no functional changes (https://github.com/ansible-collections/community.general/pull/10328). + - python_runner module utils - remove redundant brackets in conditionals, no functional changes (https://github.com/ansible-collections/community.general/pull/10328). + - cloudflare_dns - remove redundant brackets in conditionals, no functional changes (https://github.com/ansible-collections/community.general/pull/10328). + - crypttab - remove redundant brackets in conditionals, no functional changes (https://github.com/ansible-collections/community.general/pull/10328). 
+ - datadog_monitor - remove redundant brackets in conditionals, no functional changes (https://github.com/ansible-collections/community.general/pull/10328). + - gitlab_deploy_key - remove redundant brackets in conditionals, no functional changes (https://github.com/ansible-collections/community.general/pull/10328). + - gitlab_group_access_token - remove redundant brackets in conditionals, no functional changes (https://github.com/ansible-collections/community.general/pull/10328). + - gitlab_hook - remove redundant brackets in conditionals, no functional changes (https://github.com/ansible-collections/community.general/pull/10328). + - gitlab_project_access_token - remove redundant brackets in conditionals, no functional changes (https://github.com/ansible-collections/community.general/pull/10328). + - gitlab_runner - remove redundant brackets in conditionals, no functional changes (https://github.com/ansible-collections/community.general/pull/10328). + - ipa_group - remove redundant brackets in conditionals, no functional changes (https://github.com/ansible-collections/community.general/pull/10328). + - jenkins_build - remove redundant brackets in conditionals, no functional changes (https://github.com/ansible-collections/community.general/pull/10328). + - jenkins_build_info - remove redundant brackets in conditionals, no functional changes (https://github.com/ansible-collections/community.general/pull/10328). + - nmcli - remove redundant brackets in conditionals, no functional changes (https://github.com/ansible-collections/community.general/pull/10328). + - oneandone_firewall_policy - remove redundant brackets in conditionals, no functional changes (https://github.com/ansible-collections/community.general/pull/10328). + - oneandone_load_balancer - remove redundant brackets in conditionals, no functional changes (https://github.com/ansible-collections/community.general/pull/10328). 
+ - oneandone_monitoring_policy - remove redundant brackets in conditionals, no functional changes (https://github.com/ansible-collections/community.general/pull/10328). + - onepassword_info - remove redundant brackets in conditionals, no functional changes (https://github.com/ansible-collections/community.general/pull/10328). + - osx_defaults - remove redundant brackets in conditionals, no functional changes (https://github.com/ansible-collections/community.general/pull/10328). + - ovh_ip_loadbalancing_backend - remove redundant brackets in conditionals, no functional changes (https://github.com/ansible-collections/community.general/pull/10328). + - packet_device - remove redundant brackets in conditionals, no functional changes (https://github.com/ansible-collections/community.general/pull/10328). + - pagerduty - remove redundant brackets in conditionals, no functional changes (https://github.com/ansible-collections/community.general/pull/10328). + - pingdom - remove redundant brackets in conditionals, no functional changes (https://github.com/ansible-collections/community.general/pull/10328). + - rhevm - remove redundant brackets in conditionals, no functional changes (https://github.com/ansible-collections/community.general/pull/10328). + - rocketchat - remove redundant brackets in conditionals, no functional changes (https://github.com/ansible-collections/community.general/pull/10328). + - sensu_silence - remove redundant brackets in conditionals, no functional changes (https://github.com/ansible-collections/community.general/pull/10328). + - sl_vm - remove redundant brackets in conditionals, no functional changes (https://github.com/ansible-collections/community.general/pull/10328). + - urpmi - remove redundant brackets in conditionals, no functional changes (https://github.com/ansible-collections/community.general/pull/10328). 
+ - xattr - remove redundant brackets in conditionals, no functional changes (https://github.com/ansible-collections/community.general/pull/10328). + - xml - remove redundant brackets in conditionals, no functional changes (https://github.com/ansible-collections/community.general/pull/10328). diff --git a/changelogs/fragments/10329-catapult-deprecation.yml b/changelogs/fragments/10329-catapult-deprecation.yml new file mode 100644 index 0000000000..5e5209edda --- /dev/null +++ b/changelogs/fragments/10329-catapult-deprecation.yml @@ -0,0 +1,2 @@ +deprecated_features: + - catapult - module is deprecated and will be removed in community.general 13.0.0 (https://github.com/ansible-collections/community.general/issues/10318, https://github.com/ansible-collections/community.general/pull/10329). diff --git a/changelogs/fragments/10339-github_app_access_token.yml b/changelogs/fragments/10339-github_app_access_token.yml new file mode 100644 index 0000000000..00cd71f559 --- /dev/null +++ b/changelogs/fragments/10339-github_app_access_token.yml @@ -0,0 +1,2 @@ +bugfixes: + - github_release - support multiple types of GitHub tokens; no longer failing when ``ghs_`` token type is provided (https://github.com/ansible-collections/community.general/issues/10338, https://github.com/ansible-collections/community.general/pull/10339). \ No newline at end of file diff --git a/changelogs/fragments/10346-jenkins-plugins-fixes.yml b/changelogs/fragments/10346-jenkins-plugins-fixes.yml new file mode 100644 index 0000000000..382fe7aa53 --- /dev/null +++ b/changelogs/fragments/10346-jenkins-plugins-fixes.yml @@ -0,0 +1,6 @@ +bugfixes: + - "jenkins_plugin - install latest compatible version instead of latest (https://github.com/ansible-collections/community.general/issues/854, https://github.com/ansible-collections/community.general/pull/10346)." 
+ - "jenkins_plugin - separate Jenkins and external URL credentials (https://github.com/ansible-collections/community.general/issues/4419, https://github.com/ansible-collections/community.general/pull/10346)." + +minor_changes: + - "jenkins_plugin - install dependencies for specific version (https://github.com/ansible-collections/community.general/issues/4995, https://github.com/ansible-collections/community.general/pull/10346)." diff --git a/changelogs/fragments/10349-incus_connection-error-handling.yml b/changelogs/fragments/10349-incus_connection-error-handling.yml new file mode 100644 index 0000000000..b35da354d2 --- /dev/null +++ b/changelogs/fragments/10349-incus_connection-error-handling.yml @@ -0,0 +1,2 @@ +bugfixes: + - incus connection plugin - fix error handling to return more useful Ansible errors to the user (https://github.com/ansible-collections/community.general/issues/10344, https://github.com/ansible-collections/community.general/pull/10349). diff --git a/changelogs/fragments/10359-dependent.yml b/changelogs/fragments/10359-dependent.yml new file mode 100644 index 0000000000..e48a6142e8 --- /dev/null +++ b/changelogs/fragments/10359-dependent.yml @@ -0,0 +1,2 @@ +bugfixes: + - "dependent lookup plugin - avoid deprecated ansible-core 2.19 functionality (https://github.com/ansible-collections/community.general/pull/10359)." 
diff --git a/changelogs/fragments/10413-pacemaker-resource-cleanup.yml b/changelogs/fragments/10413-pacemaker-resource-cleanup.yml new file mode 100644 index 0000000000..f4157559cc --- /dev/null +++ b/changelogs/fragments/10413-pacemaker-resource-cleanup.yml @@ -0,0 +1,3 @@ +minor_changes: + - pacemaker_resource - add ``state=cleanup`` for cleaning up pacemaker resources (https://github.com/ansible-collections/community.general/pull/10413) + - pacemaker_resource - the parameter ``name`` is no longer a required parameter in community.general 11.3.0 (https://github.com/ansible-collections/community.general/pull/10413) diff --git a/changelogs/fragments/10415-keycloak-realm-brute-force-attributes.yml b/changelogs/fragments/10415-keycloak-realm-brute-force-attributes.yml new file mode 100644 index 0000000000..22433b584e --- /dev/null +++ b/changelogs/fragments/10415-keycloak-realm-brute-force-attributes.yml @@ -0,0 +1,2 @@ +minor_changes: + - keycloak_realm - add support for ``brute_force_strategy`` and ``max_temporary_lockouts`` (https://github.com/ansible-collections/community.general/issues/10412, https://github.com/ansible-collections/community.general/pull/10415). diff --git a/changelogs/fragments/10417-sysrc-refactor.yml b/changelogs/fragments/10417-sysrc-refactor.yml new file mode 100644 index 0000000000..b1b5db632b --- /dev/null +++ b/changelogs/fragments/10417-sysrc-refactor.yml @@ -0,0 +1,4 @@ +minor_changes: + - sysrc - adjustments to the code (https://github.com/ansible-collections/community.general/pull/10417). +bugfixes: + - sysrc - fixes parsing with multi-line variables (https://github.com/ansible-collections/community.general/issues/10394, https://github.com/ansible-collections/community.general/pull/10417). 
\ No newline at end of file diff --git a/changelogs/fragments/10422-tasks_only-result_format.yml b/changelogs/fragments/10422-tasks_only-result_format.yml new file mode 100644 index 0000000000..13e5e749bf --- /dev/null +++ b/changelogs/fragments/10422-tasks_only-result_format.yml @@ -0,0 +1,2 @@ +minor_changes: + - "tasks_only callback plugin - add ``result_format`` and ``pretty_results`` options similarly to the default callback (https://github.com/ansible-collections/community.general/pull/10422)." diff --git a/changelogs/fragments/10423-apache_module-condition.yml b/changelogs/fragments/10423-apache_module-condition.yml new file mode 100644 index 0000000000..9a30d06b4e --- /dev/null +++ b/changelogs/fragments/10423-apache_module-condition.yml @@ -0,0 +1,2 @@ +bugfixes: + - apache2_module - check the ``cgi`` module restrictions only during activation (https://github.com/ansible-collections/community.general/pull/10423). diff --git a/changelogs/fragments/10424-scaleway-update-zones.yml b/changelogs/fragments/10424-scaleway-update-zones.yml new file mode 100644 index 0000000000..ffa508cd3a --- /dev/null +++ b/changelogs/fragments/10424-scaleway-update-zones.yml @@ -0,0 +1,2 @@ +minor_changes: + - scaleway_* modules, scaleway inventory plugin - update available zones and API URLs (https://github.com/ansible-collections/community.general/issues/10383, https://github.com/ansible-collections/community.general/pull/10424). \ No newline at end of file diff --git a/changelogs/fragments/10434-cpanm-deprecate-compat-mode.yml b/changelogs/fragments/10434-cpanm-deprecate-compat-mode.yml new file mode 100644 index 0000000000..84b6ecf471 --- /dev/null +++ b/changelogs/fragments/10434-cpanm-deprecate-compat-mode.yml @@ -0,0 +1,2 @@ +deprecated_features: + - cpanm - deprecate ``mode=compatibility``, ``mode=new`` should be used instead (https://github.com/ansible-collections/community.general/pull/10434). 
diff --git a/changelogs/fragments/10435-github-repo-deprecate-force-defaults.yml b/changelogs/fragments/10435-github-repo-deprecate-force-defaults.yml new file mode 100644 index 0000000000..cccb3a4c5f --- /dev/null +++ b/changelogs/fragments/10435-github-repo-deprecate-force-defaults.yml @@ -0,0 +1,2 @@ +deprecated_features: + - github_repo - deprecate ``force_defaults=true`` (https://github.com/ansible-collections/community.general/pull/10435). diff --git a/changelogs/fragments/10442-apk-fix-empty-names.yml b/changelogs/fragments/10442-apk-fix-empty-names.yml new file mode 100644 index 0000000000..24d68b52df --- /dev/null +++ b/changelogs/fragments/10442-apk-fix-empty-names.yml @@ -0,0 +1,3 @@ +bugfixes: + - apk - handle empty name strings properly + (https://github.com/ansible-collections/community.general/issues/10441, https://github.com/ansible-collections/community.general/pull/10442). \ No newline at end of file diff --git a/changelogs/fragments/10445-cronvar-reject-empty-values.yml b/changelogs/fragments/10445-cronvar-reject-empty-values.yml new file mode 100644 index 0000000000..1bf39619cc --- /dev/null +++ b/changelogs/fragments/10445-cronvar-reject-empty-values.yml @@ -0,0 +1,2 @@ +bugfixes: + - "cronvar - handle empty strings on ``value`` properly (https://github.com/ansible-collections/community.general/issues/10439, https://github.com/ansible-collections/community.general/pull/10445)." diff --git a/changelogs/fragments/10455-capabilities-improve-error-detection.yml b/changelogs/fragments/10455-capabilities-improve-error-detection.yml new file mode 100644 index 0000000000..40337a424b --- /dev/null +++ b/changelogs/fragments/10455-capabilities-improve-error-detection.yml @@ -0,0 +1,2 @@ +bugfixes: + - capabilities - using invalid path (symlink/directory/...) returned unrelated and incoherent error messages (https://github.com/ansible-collections/community.general/issues/5649, https://github.com/ansible-collections/community.general/pull/10455). 
\ No newline at end of file diff --git a/changelogs/fragments/10458-listen_port_facts-prevent-type-error.yml b/changelogs/fragments/10458-listen_port_facts-prevent-type-error.yml new file mode 100644 index 0000000000..70af0932b3 --- /dev/null +++ b/changelogs/fragments/10458-listen_port_facts-prevent-type-error.yml @@ -0,0 +1,2 @@ +bugfixes: + - "listen_port_facts - avoid crash when required commands are missing (https://github.com/ansible-collections/community.general/issues/10457, https://github.com/ansible-collections/community.general/pull/10458)." \ No newline at end of file diff --git a/changelogs/fragments/10459-deprecations.yml b/changelogs/fragments/10459-deprecations.yml new file mode 100644 index 0000000000..4b3f317454 --- /dev/null +++ b/changelogs/fragments/10459-deprecations.yml @@ -0,0 +1,6 @@ +bugfixes: + - "apache2_module - avoid ansible-core 2.19 deprecation (https://github.com/ansible-collections/community.general/pull/10459)." + - "htpasswd - avoid ansible-core 2.19 deprecation (https://github.com/ansible-collections/community.general/pull/10459)." + - "syspatch - avoid ansible-core 2.19 deprecation (https://github.com/ansible-collections/community.general/pull/10459)." + - "sysupgrade - avoid ansible-core 2.19 deprecation (https://github.com/ansible-collections/community.general/pull/10459)." + - "zypper_repository - avoid ansible-core 2.19 deprecation (https://github.com/ansible-collections/community.general/pull/10459)." diff --git a/changelogs/fragments/10461-cronvar-non-existent-dir-crash-fix.yml b/changelogs/fragments/10461-cronvar-non-existent-dir-crash-fix.yml new file mode 100644 index 0000000000..c4b77299f5 --- /dev/null +++ b/changelogs/fragments/10461-cronvar-non-existent-dir-crash-fix.yml @@ -0,0 +1,2 @@ +bugfixes: + - "cronvar - fix crash on missing ``cron_file`` parent directories (https://github.com/ansible-collections/community.general/issues/10460, https://github.com/ansible-collections/community.general/pull/10461)." 
diff --git a/changelogs/fragments/10483-sensu-subscription-quotes.yml b/changelogs/fragments/10483-sensu-subscription-quotes.yml new file mode 100644 index 0000000000..355099684c --- /dev/null +++ b/changelogs/fragments/10483-sensu-subscription-quotes.yml @@ -0,0 +1,2 @@ +minor_changes: + - sensu_subscription - normalize quotes in the module output (https://github.com/ansible-collections/community.general/pull/10483). diff --git a/changelogs/fragments/10490-rocketchat.yml b/changelogs/fragments/10490-rocketchat.yml new file mode 100644 index 0000000000..73657ba67c --- /dev/null +++ b/changelogs/fragments/10490-rocketchat.yml @@ -0,0 +1,3 @@ +deprecated_features: + - "rocketchat - the default value for ``is_pre740``, currently ``true``, is deprecated and will change to ``false`` in community.general 13.0.0 + (https://github.com/ansible-collections/community.general/pull/10490)." diff --git a/changelogs/fragments/10491-irc.yml b/changelogs/fragments/10491-irc.yml new file mode 100644 index 0000000000..74867e71a7 --- /dev/null +++ b/changelogs/fragments/10491-irc.yml @@ -0,0 +1,2 @@ +bugfixes: + - "irc - pass hostname to ``wrap_socket()`` if ``use_tls=true`` and ``validate_certs=true`` (https://github.com/ansible-collections/community.general/issues/10472, https://github.com/ansible-collections/community.general/pull/10491)." diff --git a/changelogs/fragments/10493-nagios-services.yml b/changelogs/fragments/10493-nagios-services.yml new file mode 100644 index 0000000000..3a04556c68 --- /dev/null +++ b/changelogs/fragments/10493-nagios-services.yml @@ -0,0 +1,2 @@ +minor_changes: + - nagios - make parameter ``services`` a ``list`` instead of a ``str`` (https://github.com/ansible-collections/community.general/pull/10493). 
diff --git a/changelogs/fragments/10494-rfdn-1.yml b/changelogs/fragments/10494-rfdn-1.yml new file mode 100644 index 0000000000..09a0c442b0 --- /dev/null +++ b/changelogs/fragments/10494-rfdn-1.yml @@ -0,0 +1,27 @@ +minor_changes: + - aerospike_migrations - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10494). + - airbrake_deployment - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10494). + - bigpanda - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10494). + - bootc_manage - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10494). + - bower - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10494). + - btrfs_subvolume - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10494). + - bundler - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10494). + - campfire - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10494). + - cargo - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10494). + - catapult - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10494). + - cisco_webex - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10494). + - consul_kv - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10494). 
+ - consul_policy - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10494). + - copr - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10494). + - datadog_downtime - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10494). + - datadog_monitor - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10494). + - dconf - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10494). + - dimensiondata_network - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10494). + - dimensiondata_vlan - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10494). + - dnf_config_manager - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10494). + - dnsmadeeasy - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10494). + - dpkg_divert - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10494). + - easy_install - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10494). + - elasticsearch_plugin - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10494). + - facter - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10494). + - filesystem - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10494). 
diff --git a/changelogs/fragments/10505-rfdn-2.yml b/changelogs/fragments/10505-rfdn-2.yml new file mode 100644 index 0000000000..89aeab9356 --- /dev/null +++ b/changelogs/fragments/10505-rfdn-2.yml @@ -0,0 +1,39 @@ +minor_changes: + - gem - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10505). + - git_config_info - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10505). + - github_deploy_key - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10505). + - github_repo - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10505). + - github_webhook - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10505). + - github_webhook_info - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10505). + - gitlab_branch - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10505). + - gitlab_group_access_token - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10505). + - gitlab_group_variable - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10505). + - gitlab_hook - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10505). + - gitlab_instance_variable - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10505). + - gitlab_issue - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10505). 
+ - gitlab_label - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10505). + - gitlab_merge_request - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10505). + - gitlab_milestone - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10505). + - gitlab_project - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10505). + - gitlab_project_access_token - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10505). + - gitlab_project_variable - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10505). + - grove - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10505). + - hg - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10505). + - homebrew - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10505). + - homebrew_cask - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10505). + - homebrew_tap - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10505). + - honeybadger_deployment - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10505). + - htpasswd - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10505). + - icinga2_host - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10505). 
+ - influxdb_user - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10505). + - ini_file - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10505). + - ipa_dnsrecord - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10505). + - ipa_dnszone - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10505). + - ipa_service - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10505). + - ipbase_info - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10505). + - ipwcli_dns - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10505). + - irc - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10505). + - jabber - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10505). + - jenkins_credential - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10505). + - jenkins_job - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10505). + - jenkins_script - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10505). 
diff --git a/changelogs/fragments/10507-rfdn-3.yml b/changelogs/fragments/10507-rfdn-3.yml new file mode 100644 index 0000000000..fae9d118bc --- /dev/null +++ b/changelogs/fragments/10507-rfdn-3.yml @@ -0,0 +1,35 @@ +minor_changes: + - keycloak_authz_authorization_scope - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10507). + - keycloak_authz_permission - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10507). + - keycloak_role - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10507). + - keycloak_userprofile - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10507). + - keyring - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10507). + - kibana_plugin - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10507). + - layman - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10507). + - ldap_attrs - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10507). + - ldap_inc - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10507). + - librato_annotation - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10507). + - lldp - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10507). + - logentries - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10507). 
+ - lxca_cmms - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10507). + - lxca_nodes - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10507). + - macports - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10507). + - mail - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10507). + - manageiq_alerts - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10507). + - manageiq_group - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10507). + - manageiq_policies - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10507). + - manageiq_policies_info - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10507). + - manageiq_tags - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10507). + - manageiq_tenant - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10507). + - matrix - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10507). + - mattermost - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10507). + - maven_artifact - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10507). + - memset_dns_reload - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10507). 
+ - memset_zone - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10507). + - memset_zone_record - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10507). + - mqtt - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10507). + - mssql_db - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10507). + - mssql_script - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10507). + - netcup_dns - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10507). + - newrelic_deployment - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10507). + - nsupdate - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10507). diff --git a/changelogs/fragments/10512-rfdn-4.yml b/changelogs/fragments/10512-rfdn-4.yml new file mode 100644 index 0000000000..6d8f9e7d77 --- /dev/null +++ b/changelogs/fragments/10512-rfdn-4.yml @@ -0,0 +1,42 @@ +minor_changes: + - oci_vcn - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10512). + - one_image_info - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10512). + - one_template - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10512). + - one_vnet - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10512). 
+ - onepassword_info - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10512). + - oneview_fc_network_info - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10512). + - opendj_backendprop - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10512). + - ovh_monthly_billing - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10512). + - pagerduty - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10512). + - pagerduty_change - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10512). + - pagerduty_user - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10512). + - pam_limits - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10512). + - pear - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10512). + - pkgng - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10512). + - pnpm - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10512). + - portage - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10512). + - pritunl_org - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10512). + - pritunl_org_info - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10512). 
+ - pritunl_user - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10512). + - pritunl_user_info - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10512). + - pubnub_blocks - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10512). + - pushbullet - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10512). + - pushover - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10512). + - redis_data - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10512). + - redis_data_incr - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10512). + - riak - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10512). + - rocketchat - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10512). + - rollbar_deployment - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10512). + - say - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10512). + - scaleway_database_backup - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10512). + - sendgrid - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10512). + - sensu_silence - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10512). 
+ - sorcery - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10512). + - ssh_config - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10512). + - statusio_maintenance - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10512). + - svr4pkg - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10512). + - swdepot - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10512). + - syslogger - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10512). + - sysrc - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10512). + - systemd_creds_decrypt - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10512). + - systemd_creds_encrypt - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10512). diff --git a/changelogs/fragments/10513-rfdn-5.yml b/changelogs/fragments/10513-rfdn-5.yml new file mode 100644 index 0000000000..d930d7345c --- /dev/null +++ b/changelogs/fragments/10513-rfdn-5.yml @@ -0,0 +1,18 @@ +minor_changes: + - taiga_issue - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10513). + - twilio - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10513). + - utm_aaa_group - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10513). 
+ - utm_ca_host_key_cert - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10513). + - utm_dns_host - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10513). + - utm_network_interface_address - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10513). + - utm_proxy_auth_profile - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10513). + - utm_proxy_exception - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10513). + - utm_proxy_frontend - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10513). + - utm_proxy_location - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10513). + - vertica_configuration - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10513). + - vertica_info - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10513). + - vertica_role - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10513). + - xbps - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10513). + - yarn - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10513). + - zypper - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10513). + - zypper_repository - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10513). 
diff --git a/changelogs/fragments/10514-deprecate-bearychat.yml b/changelogs/fragments/10514-deprecate-bearychat.yml new file mode 100644 index 0000000000..202210ac8c --- /dev/null +++ b/changelogs/fragments/10514-deprecate-bearychat.yml @@ -0,0 +1,2 @@ +deprecated_features: + - bearychat - module is deprecated and will be removed in community.general 12.0.0 (https://github.com/ansible-collections/community.general/issues/10514). diff --git a/changelogs/fragments/10520-arg-runcommand-list.yml b/changelogs/fragments/10520-arg-runcommand-list.yml new file mode 100644 index 0000000000..4479b3a694 --- /dev/null +++ b/changelogs/fragments/10520-arg-runcommand-list.yml @@ -0,0 +1,2 @@ +minor_changes: + - apk - using safer mechanism to run external command (https://github.com/ansible-collections/community.general/issues/10479, https://github.com/ansible-collections/community.general/pull/10520). diff --git a/changelogs/fragments/10523-bzr-cmd-list.yml b/changelogs/fragments/10523-bzr-cmd-list.yml new file mode 100644 index 0000000000..fb6c8a6c47 --- /dev/null +++ b/changelogs/fragments/10523-bzr-cmd-list.yml @@ -0,0 +1,2 @@ +minor_changes: + - bzr - using safer mechanism to run external command (https://github.com/ansible-collections/community.general/pull/10523). diff --git a/changelogs/fragments/10524-capabilities-cmd-list.yml b/changelogs/fragments/10524-capabilities-cmd-list.yml new file mode 100644 index 0000000000..e6af832b5c --- /dev/null +++ b/changelogs/fragments/10524-capabilities-cmd-list.yml @@ -0,0 +1,2 @@ +minor_changes: + - capabilities - using safer mechanism to run external command (https://github.com/ansible-collections/community.general/pull/10524). 
diff --git a/changelogs/fragments/10525-composer-cmd-list.yml b/changelogs/fragments/10525-composer-cmd-list.yml new file mode 100644 index 0000000000..a2aebc8a6d --- /dev/null +++ b/changelogs/fragments/10525-composer-cmd-list.yml @@ -0,0 +1,2 @@ +minor_changes: + - composer - using safer mechanism to run external command (https://github.com/ansible-collections/community.general/pull/10525). diff --git a/changelogs/fragments/10526-easy-install-cmd-list.yml b/changelogs/fragments/10526-easy-install-cmd-list.yml new file mode 100644 index 0000000000..6fa6717adc --- /dev/null +++ b/changelogs/fragments/10526-easy-install-cmd-list.yml @@ -0,0 +1,2 @@ +minor_changes: + - easy_install - using safer mechanism to run external command (https://github.com/ansible-collections/community.general/pull/10526). diff --git a/changelogs/fragments/10527-keycloak-idp-well-known-url-support.yml b/changelogs/fragments/10527-keycloak-idp-well-known-url-support.yml new file mode 100644 index 0000000000..cc2ae7efa0 --- /dev/null +++ b/changelogs/fragments/10527-keycloak-idp-well-known-url-support.yml @@ -0,0 +1,2 @@ +minor_changes: + - keycloak_identity_provider - add support for ``fromUrl`` to automatically fetch OIDC endpoints from the well-known discovery URL, simplifying identity provider configuration (https://github.com/ansible-collections/community.general/pull/10527). \ No newline at end of file diff --git a/changelogs/fragments/10531-wsl-paramiko.yml b/changelogs/fragments/10531-wsl-paramiko.yml new file mode 100644 index 0000000000..08257d6c78 --- /dev/null +++ b/changelogs/fragments/10531-wsl-paramiko.yml @@ -0,0 +1,3 @@ +bugfixes: + - "wsl connection plugin - avoid deprecated ansible-core paramiko import helper, import paramiko directly instead + (https://github.com/ansible-collections/community.general/issues/10515, https://github.com/ansible-collections/community.general/pull/10531)." 
diff --git a/changelogs/fragments/10532-apk.yml b/changelogs/fragments/10532-apk.yml new file mode 100644 index 0000000000..84c5d985e8 --- /dev/null +++ b/changelogs/fragments/10532-apk.yml @@ -0,0 +1,2 @@ +bugfixes: + - "apk - fix check for empty/whitespace-only package names (https://github.com/ansible-collections/community.general/pull/10532)." diff --git a/changelogs/fragments/10536-imgadm-cmd-list.yml b/changelogs/fragments/10536-imgadm-cmd-list.yml new file mode 100644 index 0000000000..0f22c774d8 --- /dev/null +++ b/changelogs/fragments/10536-imgadm-cmd-list.yml @@ -0,0 +1,2 @@ +minor_changes: + - imgadm - using safer mechanism to run external command (https://github.com/ansible-collections/community.general/pull/10536). diff --git a/changelogs/fragments/10538-keycloak-realm-add-support-client-options.yml b/changelogs/fragments/10538-keycloak-realm-add-support-client-options.yml new file mode 100644 index 0000000000..66333b01a8 --- /dev/null +++ b/changelogs/fragments/10538-keycloak-realm-add-support-client-options.yml @@ -0,0 +1,2 @@ +minor_changes: + - keycloak_realm - add support for client-related options and Oauth2 device (https://github.com/ansible-collections/community.general/pull/10538). \ No newline at end of file diff --git a/changelogs/fragments/10539-json_query.yml b/changelogs/fragments/10539-json_query.yml new file mode 100644 index 0000000000..7e84b7ecb0 --- /dev/null +++ b/changelogs/fragments/10539-json_query.yml @@ -0,0 +1,2 @@ +bugfixes: + - "json_query filter plugin - make compatible with lazy evaluation list and dictionary types of ansible-core 2.19 (https://github.com/ansible-collections/community.general/pull/10539)." 
diff --git a/changelogs/fragments/10566-merge_variables.yml b/changelogs/fragments/10566-merge_variables.yml new file mode 100644 index 0000000000..c0de6dd845 --- /dev/null +++ b/changelogs/fragments/10566-merge_variables.yml @@ -0,0 +1,2 @@ +bugfixes: + - "merge_variables lookup plugin - avoid deprecated functionality from ansible-core 2.19 (https://github.com/ansible-collections/community.general/pull/10566)." diff --git a/changelogs/fragments/10573-logstash-plugin-cmd-list.yml b/changelogs/fragments/10573-logstash-plugin-cmd-list.yml new file mode 100644 index 0000000000..441c1c49a3 --- /dev/null +++ b/changelogs/fragments/10573-logstash-plugin-cmd-list.yml @@ -0,0 +1,2 @@ +minor_changes: + - logstash_plugin - using safer mechanism to run external command (https://github.com/ansible-collections/community.general/issues/10479, https://github.com/ansible-collections/community.general/pull/10573). diff --git a/changelogs/fragments/10574-django-runner.yml b/changelogs/fragments/10574-django-runner.yml new file mode 100644 index 0000000000..a0bf6ec6d4 --- /dev/null +++ b/changelogs/fragments/10574-django-runner.yml @@ -0,0 +1,2 @@ +minor_changes: + - django module utils - remove deprecated parameter ``_DjangoRunner`` call (https://github.com/ansible-collections/community.general/pull/10574). diff --git a/changelogs/fragments/10599-open-iscsi-cmd-list.yml b/changelogs/fragments/10599-open-iscsi-cmd-list.yml new file mode 100644 index 0000000000..f8ef659ee9 --- /dev/null +++ b/changelogs/fragments/10599-open-iscsi-cmd-list.yml @@ -0,0 +1,2 @@ +minor_changes: + - open_iscsi - using safer mechanism to run external command (https://github.com/ansible-collections/community.general/pull/10599). 
diff --git a/changelogs/fragments/10601-pear-cmd-list.yml b/changelogs/fragments/10601-pear-cmd-list.yml new file mode 100644 index 0000000000..d5ab2d3d0e --- /dev/null +++ b/changelogs/fragments/10601-pear-cmd-list.yml @@ -0,0 +1,2 @@ +minor_changes: + - pear - using safer mechanism to run external command (https://github.com/ansible-collections/community.general/pull/10601). diff --git a/changelogs/fragments/10602-portage-cmd-list.yml b/changelogs/fragments/10602-portage-cmd-list.yml new file mode 100644 index 0000000000..36b6711e00 --- /dev/null +++ b/changelogs/fragments/10602-portage-cmd-list.yml @@ -0,0 +1,2 @@ +minor_changes: + - portage - using safer mechanism to run external command (https://github.com/ansible-collections/community.general/pull/10602). diff --git a/changelogs/fragments/10603-riak-cmd-list.yml b/changelogs/fragments/10603-riak-cmd-list.yml new file mode 100644 index 0000000000..1a29a07c7f --- /dev/null +++ b/changelogs/fragments/10603-riak-cmd-list.yml @@ -0,0 +1,2 @@ +minor_changes: + - riak - using safer mechanism to run external command (https://github.com/ansible-collections/community.general/pull/10603). diff --git a/changelogs/fragments/10604-solaris-zone-cmd-list.yml b/changelogs/fragments/10604-solaris-zone-cmd-list.yml new file mode 100644 index 0000000000..2fe52cbf31 --- /dev/null +++ b/changelogs/fragments/10604-solaris-zone-cmd-list.yml @@ -0,0 +1,2 @@ +minor_changes: + - solaris_zone - using safer mechanism to run external command (https://github.com/ansible-collections/community.general/pull/10604). diff --git a/changelogs/fragments/10605-swupd-cmd-list.yml b/changelogs/fragments/10605-swupd-cmd-list.yml new file mode 100644 index 0000000000..23669d7974 --- /dev/null +++ b/changelogs/fragments/10605-swupd-cmd-list.yml @@ -0,0 +1,2 @@ +minor_changes: + - swupd - using safer mechanism to run external command (https://github.com/ansible-collections/community.general/pull/10605). 
diff --git a/changelogs/fragments/10606-urpmi-cmd-list.yml b/changelogs/fragments/10606-urpmi-cmd-list.yml new file mode 100644 index 0000000000..a7a2e54a1e --- /dev/null +++ b/changelogs/fragments/10606-urpmi-cmd-list.yml @@ -0,0 +1,2 @@ +minor_changes: + - urpmi - using safer mechanism to run external command (https://github.com/ansible-collections/community.general/pull/10606). diff --git a/changelogs/fragments/10608-xbps-cmd-list.yml b/changelogs/fragments/10608-xbps-cmd-list.yml new file mode 100644 index 0000000000..ff951a4520 --- /dev/null +++ b/changelogs/fragments/10608-xbps-cmd-list.yml @@ -0,0 +1,2 @@ +minor_changes: + - xbps - using safer mechanism to run external command (https://github.com/ansible-collections/community.general/pull/10608). diff --git a/changelogs/fragments/10609-xfs-quota-cmd-list.yml b/changelogs/fragments/10609-xfs-quota-cmd-list.yml new file mode 100644 index 0000000000..74e170ef09 --- /dev/null +++ b/changelogs/fragments/10609-xfs-quota-cmd-list.yml @@ -0,0 +1,2 @@ +minor_changes: + - xfs_quota - using safer mechanism to run external command (https://github.com/ansible-collections/community.general/pull/10609). diff --git a/changelogs/fragments/10612-timezone-cmd-list.yml b/changelogs/fragments/10612-timezone-cmd-list.yml new file mode 100644 index 0000000000..601375fbc5 --- /dev/null +++ b/changelogs/fragments/10612-timezone-cmd-list.yml @@ -0,0 +1,2 @@ +minor_changes: + - timezone - using safer mechanism to run external command (https://github.com/ansible-collections/community.general/pull/10612). diff --git a/changelogs/fragments/10642-parted-cmd-list.yml b/changelogs/fragments/10642-parted-cmd-list.yml new file mode 100644 index 0000000000..29025512dd --- /dev/null +++ b/changelogs/fragments/10642-parted-cmd-list.yml @@ -0,0 +1,2 @@ +minor_changes: + - parted - using safer mechanism to run external command (https://github.com/ansible-collections/community.general/pull/10642). 
diff --git a/changelogs/fragments/10644-oneview-os.yml b/changelogs/fragments/10644-oneview-os.yml new file mode 100644 index 0000000000..f2789cf5fc --- /dev/null +++ b/changelogs/fragments/10644-oneview-os.yml @@ -0,0 +1,2 @@ +breaking_changes: + - oneview module utils - remove import of standard library ``os`` (https://github.com/ansible-collections/community.general/pull/10644). diff --git a/changelogs/fragments/10646-scaleway_container_cpu_limit.yml b/changelogs/fragments/10646-scaleway_container_cpu_limit.yml new file mode 100644 index 0000000000..f23a1bb96d --- /dev/null +++ b/changelogs/fragments/10646-scaleway_container_cpu_limit.yml @@ -0,0 +1,2 @@ +minor_changes: + - scaleway_container - add a ``cpu_limit`` argument (https://github.com/ansible-collections/community.general/pull/10646). diff --git a/changelogs/fragments/10647-scaleway-module-defaults.yml b/changelogs/fragments/10647-scaleway-module-defaults.yml new file mode 100644 index 0000000000..7fca7a171a --- /dev/null +++ b/changelogs/fragments/10647-scaleway-module-defaults.yml @@ -0,0 +1,2 @@ +minor_changes: + - scaleway modules - add a ``scaleway`` group to use ``module_defaults`` (https://github.com/ansible-collections/community.general/pull/10647). diff --git a/changelogs/fragments/10652-oracle-deprecation.yml b/changelogs/fragments/10652-oracle-deprecation.yml new file mode 100644 index 0000000000..3842e994f8 --- /dev/null +++ b/changelogs/fragments/10652-oracle-deprecation.yml @@ -0,0 +1,4 @@ +deprecated_features: + - oci_utils module utils - utils is deprecated and will be removed in community.general 13.0.0 (https://github.com/ansible-collections/community.general/issues/10318, https://github.com/ansible-collections/community.general/pull/10652). + - oci_vcn - module is deprecated and will be removed in community.general 13.0.0 (https://github.com/ansible-collections/community.general/issues/10318, https://github.com/ansible-collections/community.general/pull/10652). 
+ - oracle* doc fragments - fragments are deprecated and will be removed in community.general 13.0.0 (https://github.com/ansible-collections/community.general/issues/10318, https://github.com/ansible-collections/community.general/pull/10652). diff --git a/changelogs/fragments/10661-support-gpg-auto-impor-keys-in-zypper.yml b/changelogs/fragments/10661-support-gpg-auto-impor-keys-in-zypper.yml new file mode 100644 index 0000000000..333121902f --- /dev/null +++ b/changelogs/fragments/10661-support-gpg-auto-impor-keys-in-zypper.yml @@ -0,0 +1,2 @@ +minor_changes: + - zypper - support the ``--gpg-auto-import-keys`` option in zypper (https://github.com/ansible-collections/community.general/issues/10660, https://github.com/ansible-collections/community.general/pull/10661). diff --git a/changelogs/fragments/10663-pacemaker-resource-fix-resource-type.yml b/changelogs/fragments/10663-pacemaker-resource-fix-resource-type.yml new file mode 100644 index 0000000000..270488d248 --- /dev/null +++ b/changelogs/fragments/10663-pacemaker-resource-fix-resource-type.yml @@ -0,0 +1,2 @@ +bugfixes: + - "pacemaker_resource - fix ``resource_type`` parameter formatting (https://github.com/ansible-collections/community.general/issues/10426, https://github.com/ansible-collections/community.general/pull/10663)." diff --git a/changelogs/fragments/10665-pacemaker-resource-clone.yml b/changelogs/fragments/10665-pacemaker-resource-clone.yml new file mode 100644 index 0000000000..c24420c598 --- /dev/null +++ b/changelogs/fragments/10665-pacemaker-resource-clone.yml @@ -0,0 +1,2 @@ +minor_changes: + - pacemaker_resource - add ``state=cloned`` for cloning pacemaker resources or groups (https://github.com/ansible-collections/community.general/issues/10322, https://github.com/ansible-collections/community.general/pull/10665). 
diff --git a/changelogs/fragments/10679-gitlab-access-token-add-planner-role.yml b/changelogs/fragments/10679-gitlab-access-token-add-planner-role.yml new file mode 100644 index 0000000000..65aeae2a86 --- /dev/null +++ b/changelogs/fragments/10679-gitlab-access-token-add-planner-role.yml @@ -0,0 +1,3 @@ +minor_changes: + - gitlab_group_access_token - add ``planner`` access level (https://github.com/ansible-collections/community.general/pull/10679). + - gitlab_project_access_token - add ``planner`` access level (https://github.com/ansible-collections/community.general/pull/10679). diff --git a/changelogs/fragments/10684-django-improvements.yml b/changelogs/fragments/10684-django-improvements.yml new file mode 100644 index 0000000000..a8ca1cfbe9 --- /dev/null +++ b/changelogs/fragments/10684-django-improvements.yml @@ -0,0 +1,4 @@ +minor_changes: + - django module utils - simplify/consolidate the common settings for the command line (https://github.com/ansible-collections/community.general/pull/10684). + - django_check - simplify/consolidate the common settings for the command line (https://github.com/ansible-collections/community.general/pull/10684). + - django_createcachetable - simplify/consolidate the common settings for the command line (https://github.com/ansible-collections/community.general/pull/10684). diff --git a/changelogs/fragments/10687-deprecations.yml b/changelogs/fragments/10687-deprecations.yml new file mode 100644 index 0000000000..62974ab6a0 --- /dev/null +++ b/changelogs/fragments/10687-deprecations.yml @@ -0,0 +1,2 @@ +bugfixes: + - "Avoid deprecated functionality in ansible-core 2.20 (https://github.com/ansible-collections/community.general/pull/10687)." 
diff --git a/changelogs/fragments/10688-pids.yml b/changelogs/fragments/10688-pids.yml new file mode 100644 index 0000000000..1ed97a6fed --- /dev/null +++ b/changelogs/fragments/10688-pids.yml @@ -0,0 +1,2 @@ +bugfixes: + - "pids - prevent error when an empty string is provided for ``name`` (https://github.com/ansible-collections/community.general/issues/10672, https://github.com/ansible-collections/community.general/pull/10688)." diff --git a/changelogs/fragments/10689-gem-prevent-soundness-issue.yml b/changelogs/fragments/10689-gem-prevent-soundness-issue.yml new file mode 100644 index 0000000000..a55dba1ea1 --- /dev/null +++ b/changelogs/fragments/10689-gem-prevent-soundness-issue.yml @@ -0,0 +1,2 @@ +bugfixes: + - "gem - fix soundness issue when uninstalling default gems on Ubuntu (https://github.com/ansible-collections/community.general/issues/10451, https://github.com/ansible-collections/community.general/pull/10689)." \ No newline at end of file diff --git a/changelogs/fragments/10700-django-check-databases.yml b/changelogs/fragments/10700-django-check-databases.yml new file mode 100644 index 0000000000..cfb8897f6a --- /dev/null +++ b/changelogs/fragments/10700-django-check-databases.yml @@ -0,0 +1,2 @@ +minor_changes: + - django_check - rename parameter ``database`` to ``databases``, add alias for compatibility (https://github.com/ansible-collections/community.general/pull/10700). diff --git a/changelogs/fragments/10705-openbsd-pkg-remove-unused.yml b/changelogs/fragments/10705-openbsd-pkg-remove-unused.yml new file mode 100644 index 0000000000..2ceb1352b4 --- /dev/null +++ b/changelogs/fragments/10705-openbsd-pkg-remove-unused.yml @@ -0,0 +1,2 @@ +minor_changes: + - openbsd_pkg - add ``autoremove`` parameter to remove unused dependencies (https://github.com/ansible-collections/community.general/pull/10705). 
diff --git a/changelogs/fragments/10707-pacemaker-maintenance-mode-regex.yml b/changelogs/fragments/10707-pacemaker-maintenance-mode-regex.yml new file mode 100644 index 0000000000..ba5e08edd3 --- /dev/null +++ b/changelogs/fragments/10707-pacemaker-maintenance-mode-regex.yml @@ -0,0 +1,2 @@ +bugfixes: + - "pacemaker - use regex for matching ``maintenance-mode`` output to determine cluster maintenance status (https://github.com/ansible-collections/community.general/issues/10426, https://github.com/ansible-collections/community.general/pull/10707)." diff --git a/changelogs/fragments/10711-pytohn-idioms-1.yml b/changelogs/fragments/10711-pytohn-idioms-1.yml new file mode 100644 index 0000000000..18ae9db37b --- /dev/null +++ b/changelogs/fragments/10711-pytohn-idioms-1.yml @@ -0,0 +1,6 @@ +minor_changes: + - gitlab_label - minor refactor to improve readability (https://github.com/ansible-collections/community.general/pull/10711). + - gitlab_milestone - minor refactor to improve readability (https://github.com/ansible-collections/community.general/pull/10711). + - ipa_host - minor refactor to improve readability (https://github.com/ansible-collections/community.general/pull/10711). + - lvg_rename - minor refactor to improve readability (https://github.com/ansible-collections/community.general/pull/10711). + - terraform - minor refactor to improve readability (https://github.com/ansible-collections/community.general/pull/10711). diff --git a/changelogs/fragments/10712-python-idioms-2.yml b/changelogs/fragments/10712-python-idioms-2.yml new file mode 100644 index 0000000000..8d49f1f86f --- /dev/null +++ b/changelogs/fragments/10712-python-idioms-2.yml @@ -0,0 +1,7 @@ +minor_changes: + - iocage inventory plugin - minor refactor to improve readability (https://github.com/ansible-collections/community.general/pull/10712). + - manageiq - minor refactor to improve readability (https://github.com/ansible-collections/community.general/pull/10712). 
+ - android_sdk - minor refactor to improve readability (https://github.com/ansible-collections/community.general/pull/10712). + - elasticsearch_plugin - minor refactor to improve readability (https://github.com/ansible-collections/community.general/pull/10712). + - manageiq_alert_profiles - minor refactor to improve readability (https://github.com/ansible-collections/community.general/pull/10712). + - one_vm - minor refactor to improve readability (https://github.com/ansible-collections/community.general/pull/10712). diff --git a/changelogs/fragments/10727-python-idioms-3.yml b/changelogs/fragments/10727-python-idioms-3.yml new file mode 100644 index 0000000000..9b92b8bbef --- /dev/null +++ b/changelogs/fragments/10727-python-idioms-3.yml @@ -0,0 +1,10 @@ +minor_changes: + - filesize - minor refactor to simplify string formatting (https://github.com/ansible-collections/community.general/pull/10727). + - iptables_state - minor refactor to simplify string formatting (https://github.com/ansible-collections/community.general/pull/10727). + - manageiq_group - minor refactor to simplify string formatting (https://github.com/ansible-collections/community.general/pull/10727). + - manageiq_tenant - minor refactor to simplify string formatting (https://github.com/ansible-collections/community.general/pull/10727). + - mssql_db - minor refactor to simplify string formatting (https://github.com/ansible-collections/community.general/pull/10727). + - openbsd_pkg - minor refactor to simplify string formatting (https://github.com/ansible-collections/community.general/pull/10727). + - ufw - minor refactor to simplify string formatting (https://github.com/ansible-collections/community.general/pull/10727). + - xenserver_facts - minor refactor to simplify string formatting (https://github.com/ansible-collections/community.general/pull/10727). + - zfs_facts - minor refactor to simplify string formatting (https://github.com/ansible-collections/community.general/pull/10727). 
diff --git a/changelogs/fragments/10741-pacemaker-cluster-cleanup-deprecate.yml b/changelogs/fragments/10741-pacemaker-cluster-cleanup-deprecate.yml new file mode 100644 index 0000000000..4bb018a9c7 --- /dev/null +++ b/changelogs/fragments/10741-pacemaker-cluster-cleanup-deprecate.yml @@ -0,0 +1,2 @@ +deprecated_features: + - pacemaker_cluster - the state ``cleanup`` will be removed from community.general 14.0.0 (https://github.com/ansible-collections/community.general/pull/10741). diff --git a/changelogs/fragments/10743-monit-handle-unknown-status.yml b/changelogs/fragments/10743-monit-handle-unknown-status.yml new file mode 100644 index 0000000000..1c9fbb1101 --- /dev/null +++ b/changelogs/fragments/10743-monit-handle-unknown-status.yml @@ -0,0 +1,2 @@ +bugfixes: + - monit - fix crash caused by an unknown status value returned from the monit service (https://github.com/ansible-collections/community.general/issues/10742, https://github.com/ansible-collections/community.general/pull/10743). diff --git a/changelogs/fragments/10751-kdeconfig-support-kwriteconfig6.yml b/changelogs/fragments/10751-kdeconfig-support-kwriteconfig6.yml new file mode 100644 index 0000000000..716ffa35f1 --- /dev/null +++ b/changelogs/fragments/10751-kdeconfig-support-kwriteconfig6.yml @@ -0,0 +1,3 @@ +bugfixes: + - kdeconfig - ``kwriteconfig`` executable could not be discovered automatically on systems with only ``kwriteconfig6`` installed. + ``kwriteconfig6`` can now be discovered by Ansible (https://github.com/ansible-collections/community.general/issues/10746, https://github.com/ansible-collections/community.general/pull/10751). 
\ No newline at end of file diff --git a/changelogs/fragments/10752-selective-hardcoded-loop-var.yml b/changelogs/fragments/10752-selective-hardcoded-loop-var.yml new file mode 100644 index 0000000000..cfc6bdd9e9 --- /dev/null +++ b/changelogs/fragments/10752-selective-hardcoded-loop-var.yml @@ -0,0 +1,2 @@ +bugfixes: + - selective callback plugin - specify ``ansible_loop_var`` instead of the explicit value ``item`` when printing task result (https://github.com/ansible-collections/community.general/pull/10752). diff --git a/changelogs/fragments/10769-xenserver-rf.yml b/changelogs/fragments/10769-xenserver-rf.yml new file mode 100644 index 0000000000..2c31edf886 --- /dev/null +++ b/changelogs/fragments/10769-xenserver-rf.yml @@ -0,0 +1,2 @@ +minor_changes: + - xenserver module utils - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10769). diff --git a/changelogs/fragments/10785-gitlab-token-add-missing-scopes.yml b/changelogs/fragments/10785-gitlab-token-add-missing-scopes.yml new file mode 100644 index 0000000000..a38d98a444 --- /dev/null +++ b/changelogs/fragments/10785-gitlab-token-add-missing-scopes.yml @@ -0,0 +1,3 @@ +minor_changes: + - gitlab_group_access_token - add missing scopes (https://github.com/ansible-collections/community.general/pull/10785). + - gitlab_project_access_token - add missing scopes (https://github.com/ansible-collections/community.general/pull/10785). diff --git a/changelogs/fragments/10787-gitlab-variable-support-masked-and-hidden-variables.yml b/changelogs/fragments/10787-gitlab-variable-support-masked-and-hidden-variables.yml new file mode 100644 index 0000000000..bbf5b6d9a5 --- /dev/null +++ b/changelogs/fragments/10787-gitlab-variable-support-masked-and-hidden-variables.yml @@ -0,0 +1,3 @@ +minor_changes: + - gitlab_group_variable - support masked-and-hidden variables (https://github.com/ansible-collections/community.general/pull/10787). 
+ - gitlab_project_variable - support masked-and-hidden variables (https://github.com/ansible-collections/community.general/pull/10787). diff --git a/changelogs/fragments/10795-gitlab_protected_branch-add-allow_force_push-code_owner_approval_required.yml b/changelogs/fragments/10795-gitlab_protected_branch-add-allow_force_push-code_owner_approval_required.yml new file mode 100644 index 0000000000..ed4d4d78e8 --- /dev/null +++ b/changelogs/fragments/10795-gitlab_protected_branch-add-allow_force_push-code_owner_approval_required.yml @@ -0,0 +1,3 @@ +minor_changes: + - gitlab_protected_branch - add ``allow_force_push``, ``code_owner_approval_required`` (https://github.com/ansible-collections/community.general/pull/10795, https://github.com/ansible-collections/community.general/issues/6432, https://github.com/ansible-collections/community.general/issues/10289, https://github.com/ansible-collections/community.general/issues/10765). + - gitlab_protected_branch - update protected branches if possible instead of recreating them (https://github.com/ansible-collections/community.general/pull/10795). diff --git a/changelogs/fragments/10796-rocketchat-force-content-type.yml b/changelogs/fragments/10796-rocketchat-force-content-type.yml new file mode 100644 index 0000000000..96ca116e62 --- /dev/null +++ b/changelogs/fragments/10796-rocketchat-force-content-type.yml @@ -0,0 +1,2 @@ +bugfixes: + - rocketchat - fix message delivery in Rocket Chat >= 7.5.3 by forcing ``Content-Type`` header to ``application/json`` instead of the default ``application/x-www-form-urlencoded`` (https://github.com/ansible-collections/community.general/issues/10796, https://github.com/ansible-collections/community.general/pull/10796). 
diff --git a/changelogs/fragments/10805-homebrew-support-old-names.yml b/changelogs/fragments/10805-homebrew-support-old-names.yml new file mode 100644 index 0000000000..43d5a1c8bf --- /dev/null +++ b/changelogs/fragments/10805-homebrew-support-old-names.yml @@ -0,0 +1,2 @@ +bugfixes: + - homebrew - do not fail when cask or formula name has changed in homebrew repo (https://github.com/ansible-collections/community.general/issues/10804, https://github.com/ansible-collections/community.general/pull/10805). \ No newline at end of file diff --git a/changelogs/fragments/10810-github_app_access_token-jwt.yml b/changelogs/fragments/10810-github_app_access_token-jwt.yml new file mode 100644 index 0000000000..804ab9fbaa --- /dev/null +++ b/changelogs/fragments/10810-github_app_access_token-jwt.yml @@ -0,0 +1,2 @@ +bugfixes: + - "github_app_access_token lookup plugin - fix compatibility imports for using jwt (https://github.com/ansible-collections/community.general/issues/10807, https://github.com/ansible-collections/community.general/pull/10810)." diff --git a/changelogs/fragments/10812-gitlab-variable-add-description.yml b/changelogs/fragments/10812-gitlab-variable-add-description.yml new file mode 100644 index 0000000000..1de0405aff --- /dev/null +++ b/changelogs/fragments/10812-gitlab-variable-add-description.yml @@ -0,0 +1,4 @@ +minor_changes: + - gitlab_group_variable - add ``description`` option (https://github.com/ansible-collections/community.general/pull/10812). + - gitlab_instance_variable - add ``description`` option (https://github.com/ansible-collections/community.general/pull/10812). + - gitlab_project_variable - add ``description`` option (https://github.com/ansible-collections/community.general/pull/10812, https://github.com/ansible-collections/community.general/issues/8584, https://github.com/ansible-collections/community.general/issues/10809). 
diff --git a/changelogs/fragments/10823-parted-fail-json-command.yml b/changelogs/fragments/10823-parted-fail-json-command.yml new file mode 100644 index 0000000000..8a52be589e --- /dev/null +++ b/changelogs/fragments/10823-parted-fail-json-command.yml @@ -0,0 +1,2 @@ +bugfixes: + - parted - variable is a list, not text (https://github.com/ansible-collections/community.general/pull/10823, https://github.com/ansible-collections/community.general/issues/10817). diff --git a/changelogs/fragments/10829-fix-keycloak-role-changed-status.yml b/changelogs/fragments/10829-fix-keycloak-role-changed-status.yml new file mode 100644 index 0000000000..8fd05ec182 --- /dev/null +++ b/changelogs/fragments/10829-fix-keycloak-role-changed-status.yml @@ -0,0 +1,2 @@ +bugfixes: + - keycloak_role - fixes an issue where the module incorrectly returns ``changed=true`` when using the alias ``clientId`` in composite roles (https://github.com/ansible-collections/community.general/pull/10829). \ No newline at end of file diff --git a/changelogs/fragments/10840-fix-keycloak-subgroup-search-realm.yml b/changelogs/fragments/10840-fix-keycloak-subgroup-search-realm.yml new file mode 100644 index 0000000000..3b7818ee3e --- /dev/null +++ b/changelogs/fragments/10840-fix-keycloak-subgroup-search-realm.yml @@ -0,0 +1,2 @@ +bugfixes: + - keycloak_group - fixes an issue where module ignores realm when searching subgroups by name (https://github.com/ansible-collections/community.general/pull/10840). 
\ No newline at end of file diff --git a/changelogs/fragments/10842-keycloak-client-scope-support.yml b/changelogs/fragments/10842-keycloak-client-scope-support.yml new file mode 100644 index 0000000000..80266fa43b --- /dev/null +++ b/changelogs/fragments/10842-keycloak-client-scope-support.yml @@ -0,0 +1,2 @@ +minor_changes: + - keycloak_client - add idempotent support for ``default_client_scopes`` and ``optional_client_scopes``, and ensure consistent change detection between check mode and live run (https://github.com/ansible-collections/community.general/issues/5495, https://github.com/ansible-collections/community.general/pull/10842). \ No newline at end of file diff --git a/changelogs/fragments/10852-yaml.yml b/changelogs/fragments/10852-yaml.yml new file mode 100644 index 0000000000..1319b94ab5 --- /dev/null +++ b/changelogs/fragments/10852-yaml.yml @@ -0,0 +1,2 @@ +bugfixes: + - "yaml cache plugin - make compatible with ansible-core 2.19 (https://github.com/ansible-collections/community.general/issues/10849, https://github.com/ansible-collections/community.general/pull/10852)." diff --git a/changelogs/fragments/10857-github_deploy_key-err.yml b/changelogs/fragments/10857-github_deploy_key-err.yml new file mode 100644 index 0000000000..58bac31c5e --- /dev/null +++ b/changelogs/fragments/10857-github_deploy_key-err.yml @@ -0,0 +1,2 @@ +bugfixes: + - "github_deploy_key - fix bug during error handling if no body was present in the result (https://github.com/ansible-collections/community.general/issues/10853, https://github.com/ansible-collections/community.general/pull/10857)." 
diff --git a/changelogs/fragments/10873-six.yml b/changelogs/fragments/10873-six.yml new file mode 100644 index 0000000000..d9ea201520 --- /dev/null +++ b/changelogs/fragments/10873-six.yml @@ -0,0 +1,2 @@ +bugfixes: + - "Avoid usage of deprecated ``ansible.module_utils.six`` in all code that does not have to support Python 2 (https://github.com/ansible-collections/community.general/pull/10873)." diff --git a/changelogs/fragments/10874-pipx-180.yml b/changelogs/fragments/10874-pipx-180.yml new file mode 100644 index 0000000000..dd776827e8 --- /dev/null +++ b/changelogs/fragments/10874-pipx-180.yml @@ -0,0 +1,2 @@ +minor_changes: + - pipx module_utils - use ``PIPX_USE_EMOJI`` to disable emojis in the output of ``pipx`` 1.8.0 (https://github.com/ansible-collections/community.general/pull/10874). diff --git a/changelogs/fragments/10880-github_app_access_token-lookup.yml b/changelogs/fragments/10880-github_app_access_token-lookup.yml new file mode 100644 index 0000000000..b3c9503d59 --- /dev/null +++ b/changelogs/fragments/10880-github_app_access_token-lookup.yml @@ -0,0 +1,2 @@ +minor_changes: + - "github_app_access_token lookup plugin - add support for GitHub Enterprise Server (https://github.com/ansible-collections/community.general/issues/10879, https://github.com/ansible-collections/community.general/pull/10880)." diff --git a/changelogs/fragments/10888-six.yml b/changelogs/fragments/10888-six.yml new file mode 100644 index 0000000000..b1f09accb3 --- /dev/null +++ b/changelogs/fragments/10888-six.yml @@ -0,0 +1,2 @@ +bugfixes: + - "Remove all usage of ``ansible.module_utils.six`` (https://github.com/ansible-collections/community.general/pull/10888)." 
diff --git a/changelogs/fragments/10891-dict-refactor.yml b/changelogs/fragments/10891-dict-refactor.yml new file mode 100644 index 0000000000..63d5e585ff --- /dev/null +++ b/changelogs/fragments/10891-dict-refactor.yml @@ -0,0 +1,6 @@ +minor_changes: + - dependent lookup plugin - refactor dict initialization, no impact to users (https://github.com/ansible-collections/community.general/pull/10891). + - scaleway module_utils - improve code readability, no impact to users (https://github.com/ansible-collections/community.general/pull/10891). + - pacemaker_cluster.py - refactor dict initialization, no impact to users (https://github.com/ansible-collections/community.general/pull/10891). + - pacemaker_resource.py - refactor dict initialization, no impact to users (https://github.com/ansible-collections/community.general/pull/10891). + - pacemaker_stonith.py - refactor dict initialization, no impact to users (https://github.com/ansible-collections/community.general/pull/10891). diff --git a/changelogs/fragments/10892-remove-py2.yml b/changelogs/fragments/10892-remove-py2.yml new file mode 100644 index 0000000000..69904d4777 --- /dev/null +++ b/changelogs/fragments/10892-remove-py2.yml @@ -0,0 +1,7 @@ +minor_changes: + - known_hosts module_utils - drop Python 2 support when parsing output of ``urlparse`` (https://github.com/ansible-collections/community.general/pull/10892). + - aix_inittab - drop Python 2 support for function ``zip`` (https://github.com/ansible-collections/community.general/pull/10892). + - copr - drop support for Python 2 interpreter (https://github.com/ansible-collections/community.general/pull/10892). + - dconf - drop support for Python 2 interpreter (https://github.com/ansible-collections/community.general/pull/10892). + - irc - drop Python 2 support for SSL context creation (https://github.com/ansible-collections/community.general/pull/10892). 
+ - mail - drop Python 2 support for Message-ID domain setting (https://github.com/ansible-collections/community.general/pull/10892). diff --git a/changelogs/fragments/10899-use-f-strings.yml b/changelogs/fragments/10899-use-f-strings.yml new file mode 100644 index 0000000000..9752e5ebf2 --- /dev/null +++ b/changelogs/fragments/10899-use-f-strings.yml @@ -0,0 +1,14 @@ +minor_changes: + - wsl connection plugin - use f-strings for string templating (https://github.com/ansible-collections/community.general/pull/10899). + - accumulate filter plugin - use f-strings for string templating (https://github.com/ansible-collections/community.general/pull/10899). + - counter filter plugin - use f-strings for string templating (https://github.com/ansible-collections/community.general/pull/10899). + - crc32 filter plugin - use f-strings for string templating (https://github.com/ansible-collections/community.general/pull/10899). + - groupby_as_dict filter plugin - use f-strings for string templating (https://github.com/ansible-collections/community.general/pull/10899). + - hashids filter plugin - use f-strings for string templating (https://github.com/ansible-collections/community.general/pull/10899). + - json_query filter plugin - use f-strings for string templating (https://github.com/ansible-collections/community.general/pull/10899). + - lists filter plugin - use f-strings for string templating (https://github.com/ansible-collections/community.general/pull/10899). + - random_mac filter plugin - use f-strings for string templating (https://github.com/ansible-collections/community.general/pull/10899). + - time filter plugin - use f-strings for string templating (https://github.com/ansible-collections/community.general/pull/10899). + - unicode_normalize filter plugin - use f-strings for string templating (https://github.com/ansible-collections/community.general/pull/10899). 
+ - passwordstore lookup plugin - use f-strings for string templating (https://github.com/ansible-collections/community.general/pull/10899). + - ansible_type plugin_utils plugin - use f-strings for string templating (https://github.com/ansible-collections/community.general/pull/10899). diff --git a/changelogs/fragments/10903-2to3.yml b/changelogs/fragments/10903-2to3.yml new file mode 100644 index 0000000000..af0b744456 --- /dev/null +++ b/changelogs/fragments/10903-2to3.yml @@ -0,0 +1,8 @@ +minor_changes: + - pickle cache plugin - use Python 3 idioms (https://github.com/ansible-collections/community.general/pull/10903). + - counter_enabled callback plugin - use Python 3 idioms (https://github.com/ansible-collections/community.general/pull/10903). + - wsl connection plugin - use Python 3 idioms (https://github.com/ansible-collections/community.general/pull/10903). + - cobbler inventory plugin - use Python 3 idioms (https://github.com/ansible-collections/community.general/pull/10903). + - linode inventory plugin - use Python 3 idioms (https://github.com/ansible-collections/community.general/pull/10903). + - utm_utils module_utils plugin - use Python 3 idioms (https://github.com/ansible-collections/community.general/pull/10903). + - vexata module_utils plugin - use Python 3 idioms (https://github.com/ansible-collections/community.general/pull/10903). diff --git a/changelogs/fragments/10904-2to3-mods.yml b/changelogs/fragments/10904-2to3-mods.yml new file mode 100644 index 0000000000..12ca58b250 --- /dev/null +++ b/changelogs/fragments/10904-2to3-mods.yml @@ -0,0 +1,30 @@ +minor_changes: + - bitbucket_access_key - use Python 3 idioms (https://github.com/ansible-collections/community.general/pull/10904). + - bitbucket_pipeline_known_host - use Python 3 idioms (https://github.com/ansible-collections/community.general/pull/10904). + - bitbucket_pipeline_variable - use Python 3 idioms (https://github.com/ansible-collections/community.general/pull/10904). 
+ - bzr - use Python 3 idioms (https://github.com/ansible-collections/community.general/pull/10904). + - capabilities - use Python 3 idioms (https://github.com/ansible-collections/community.general/pull/10904). + - gitlab_milestone - use Python 3 idioms (https://github.com/ansible-collections/community.general/pull/10904). + - haproxy - use Python 3 idioms (https://github.com/ansible-collections/community.general/pull/10904). + - homebrew - use Python 3 idioms (https://github.com/ansible-collections/community.general/pull/10904). + - homebrew_cask - use Python 3 idioms (https://github.com/ansible-collections/community.general/pull/10904). + - hwc_network_vpc - use Python 3 idioms (https://github.com/ansible-collections/community.general/pull/10904). + - hwc_smn_topic - use Python 3 idioms (https://github.com/ansible-collections/community.general/pull/10904). + - idrac_redfish_config - use Python 3 idioms (https://github.com/ansible-collections/community.general/pull/10904). + - idrac_redfish_info - use Python 3 idioms (https://github.com/ansible-collections/community.general/pull/10904). + - influxdb_retention_policy - use Python 3 idioms (https://github.com/ansible-collections/community.general/pull/10904). + - ini_file - use Python 3 idioms (https://github.com/ansible-collections/community.general/pull/10904). + - interfaces_file - use Python 3 idioms (https://github.com/ansible-collections/community.general/pull/10904). + - launchd - use Python 3 idioms (https://github.com/ansible-collections/community.general/pull/10904). + - logentries - use Python 3 idioms (https://github.com/ansible-collections/community.general/pull/10904). + - packet_sshkey - use Python 3 idioms (https://github.com/ansible-collections/community.general/pull/10904). + - pamd - use Python 3 idioms (https://github.com/ansible-collections/community.general/pull/10904). + - taiga_issue - use Python 3 idioms (https://github.com/ansible-collections/community.general/pull/10904). 
+ - vdo - use Python 3 idioms (https://github.com/ansible-collections/community.general/pull/10904). + - vertica_role - use Python 3 idioms (https://github.com/ansible-collections/community.general/pull/10904). + - vertica_schema - use Python 3 idioms (https://github.com/ansible-collections/community.general/pull/10904). + - vertica_user - use Python 3 idioms (https://github.com/ansible-collections/community.general/pull/10904). + - vexata_eg - use Python 3 idioms (https://github.com/ansible-collections/community.general/pull/10904). + - vexata_volume - use Python 3 idioms (https://github.com/ansible-collections/community.general/pull/10904). + - xcc_redfish_command - use Python 3 idioms (https://github.com/ansible-collections/community.general/pull/10904). + - zypper - use Python 3 idioms (https://github.com/ansible-collections/community.general/pull/10904). diff --git a/changelogs/fragments/10905-java-keystore-simplify.yml b/changelogs/fragments/10905-java-keystore-simplify.yml new file mode 100644 index 0000000000..7b2a0de53a --- /dev/null +++ b/changelogs/fragments/10905-java-keystore-simplify.yml @@ -0,0 +1,2 @@ +minor_changes: + - java_keystore - remove redundant function (https://github.com/ansible-collections/community.general/pull/10905). diff --git a/changelogs/fragments/10906-linode-modutils.yml b/changelogs/fragments/10906-linode-modutils.yml new file mode 100644 index 0000000000..ced88a7474 --- /dev/null +++ b/changelogs/fragments/10906-linode-modutils.yml @@ -0,0 +1,2 @@ +minor_changes: + - linode module utils - remove redundant code for ancient versions of Ansible (https://github.com/ansible-collections/community.general/pull/10906). 
diff --git a/changelogs/fragments/10907-2to3-mu.yml b/changelogs/fragments/10907-2to3-mu.yml new file mode 100644 index 0000000000..af19593cf0 --- /dev/null +++ b/changelogs/fragments/10907-2to3-mu.yml @@ -0,0 +1,9 @@ +minor_changes: + - csv module_utils plugin - use Python 3 idioms (https://github.com/ansible-collections/community.general/pull/10907). + - gitlab module_utils plugin - use Python 3 idioms (https://github.com/ansible-collections/community.general/pull/10907). + - homebrew module_utils plugin - use Python 3 idioms (https://github.com/ansible-collections/community.general/pull/10907). + - ilo_redfish_utils module_utils plugin - use Python 3 idioms (https://github.com/ansible-collections/community.general/pull/10907). + - redfish_utils module_utils plugin - use Python 3 idioms (https://github.com/ansible-collections/community.general/pull/10907). + - saslprep module_utils plugin - use Python 3 idioms (https://github.com/ansible-collections/community.general/pull/10907). + - utm_utils module_utils plugin - use Python 3 idioms (https://github.com/ansible-collections/community.general/pull/10907). + - vexata module_utils plugin - use Python 3 idioms (https://github.com/ansible-collections/community.general/pull/10907). diff --git a/changelogs/fragments/10908-archive-lzma.yml b/changelogs/fragments/10908-archive-lzma.yml new file mode 100644 index 0000000000..bcce681bed --- /dev/null +++ b/changelogs/fragments/10908-archive-lzma.yml @@ -0,0 +1,2 @@ +minor_changes: + - archive - remove conditional code for older Python versions (https://github.com/ansible-collections/community.general/pull/10908). 
diff --git a/changelogs/fragments/10909-launchd-plistlib.yml b/changelogs/fragments/10909-launchd-plistlib.yml new file mode 100644 index 0000000000..fc798c9ddb --- /dev/null +++ b/changelogs/fragments/10909-launchd-plistlib.yml @@ -0,0 +1,2 @@ +minor_changes: + - launchd - remove conditional code supporting Python versions prior to 3.4 (https://github.com/ansible-collections/community.general/pull/10909). diff --git a/changelogs/fragments/10918-gitlab-runner-fix-check-mode.yml b/changelogs/fragments/10918-gitlab-runner-fix-check-mode.yml new file mode 100644 index 0000000000..214487938b --- /dev/null +++ b/changelogs/fragments/10918-gitlab-runner-fix-check-mode.yml @@ -0,0 +1,2 @@ +bugfixes: + - gitlab_runner - fix exception in check mode when a new runner is created (https://github.com/ansible-collections/community.general/issues/8854). diff --git a/changelogs/fragments/10933-keycloak-add-client-auth-for-clientsecret-modules.yml b/changelogs/fragments/10933-keycloak-add-client-auth-for-clientsecret-modules.yml new file mode 100644 index 0000000000..df70186ff5 --- /dev/null +++ b/changelogs/fragments/10933-keycloak-add-client-auth-for-clientsecret-modules.yml @@ -0,0 +1,2 @@ +bugfixes: + - keycloak_clientsecret, keycloak_clientsecret_info - make ``client_auth`` work (https://github.com/ansible-collections/community.general/issues/10932, https://github.com/ansible-collections/community.general/pull/10933). \ No newline at end of file diff --git a/changelogs/fragments/10934-cloudflare-dns-caa-bug.yml b/changelogs/fragments/10934-cloudflare-dns-caa-bug.yml new file mode 100644 index 0000000000..e40acc30f4 --- /dev/null +++ b/changelogs/fragments/10934-cloudflare-dns-caa-bug.yml @@ -0,0 +1,2 @@ +bugfixes: + - cloudflare_dns - roll back changes to CAA record validation (https://github.com/ansible-collections/community.general/issues/10934, https://github.com/ansible-collections/community.general/pull/10956). 
diff --git a/changelogs/fragments/10934-cloudflare-dns-srv-bug.yml b/changelogs/fragments/10934-cloudflare-dns-srv-bug.yml new file mode 100644 index 0000000000..eb2b06d2f1 --- /dev/null +++ b/changelogs/fragments/10934-cloudflare-dns-srv-bug.yml @@ -0,0 +1,2 @@ +bugfixes: + - cloudflare_dns - roll back changes to SRV record validation (https://github.com/ansible-collections/community.general/issues/10934, https://github.com/ansible-collections/community.general/pull/10937). diff --git a/changelogs/fragments/10935-ipa-host-add-parameters.yml b/changelogs/fragments/10935-ipa-host-add-parameters.yml new file mode 100644 index 0000000000..7b832914ec --- /dev/null +++ b/changelogs/fragments/10935-ipa-host-add-parameters.yml @@ -0,0 +1,2 @@ +minor_changes: + - ipa_host - add ``userclass`` and ``locality`` parameters (https://github.com/ansible-collections/community.general/pull/10935). \ No newline at end of file diff --git a/changelogs/fragments/10939-use-f-strings-redfish-utils.yml b/changelogs/fragments/10939-use-f-strings-redfish-utils.yml new file mode 100644 index 0000000000..d896edb752 --- /dev/null +++ b/changelogs/fragments/10939-use-f-strings-redfish-utils.yml @@ -0,0 +1,2 @@ +minor_changes: + - redfish_utils module utils plugin - use f-strings for string templating (https://github.com/ansible-collections/community.general/pull/10939). diff --git a/changelogs/fragments/10940-use-f-strings-xenserver.yml b/changelogs/fragments/10940-use-f-strings-xenserver.yml new file mode 100644 index 0000000000..114ac46486 --- /dev/null +++ b/changelogs/fragments/10940-use-f-strings-xenserver.yml @@ -0,0 +1,2 @@ +minor_changes: + - xenserver module utils plugin - use f-strings for string templating (https://github.com/ansible-collections/community.general/pull/10940). 
diff --git a/changelogs/fragments/10941-use-f-strings-keycloak.yml b/changelogs/fragments/10941-use-f-strings-keycloak.yml new file mode 100644 index 0000000000..504a6dd611 --- /dev/null +++ b/changelogs/fragments/10941-use-f-strings-keycloak.yml @@ -0,0 +1,2 @@ +minor_changes: + - keycloak module utils plugin - use f-strings for string templating (https://github.com/ansible-collections/community.general/pull/10941). diff --git a/changelogs/fragments/10942-mod-fstr-a.yml b/changelogs/fragments/10942-mod-fstr-a.yml new file mode 100644 index 0000000000..fcfdba3e42 --- /dev/null +++ b/changelogs/fragments/10942-mod-fstr-a.yml @@ -0,0 +1,22 @@ +minor_changes: + - hwc_utils module_utils plugin - adjust f-strings for string templating (https://github.com/ansible-collections/community.general/pull/10942). + - manageiq module_utils plugin - adjust f-strings for string templating (https://github.com/ansible-collections/community.general/pull/10942). + - aerospike_migrations - use f-strings for string templating (https://github.com/ansible-collections/community.general/pull/10942). + - airbrake_deployment - use f-strings for string templating (https://github.com/ansible-collections/community.general/pull/10942). + - aix_devices - use f-strings for string templating (https://github.com/ansible-collections/community.general/pull/10942). + - aix_filesystem - use f-strings for string templating (https://github.com/ansible-collections/community.general/pull/10942). + - aix_inittab - use f-strings for string templating (https://github.com/ansible-collections/community.general/pull/10942). + - aix_lvg - use f-strings for string templating (https://github.com/ansible-collections/community.general/pull/10942). + - aix_lvol - use f-strings for string templating (https://github.com/ansible-collections/community.general/pull/10942). + - alerta_customer - use f-strings for string templating (https://github.com/ansible-collections/community.general/pull/10942). 
+ - ali_instance - use f-strings for string templating (https://github.com/ansible-collections/community.general/pull/10942). + - alternatives - use f-strings for string templating (https://github.com/ansible-collections/community.general/pull/10942). + - android_sdk - use f-strings for string templating (https://github.com/ansible-collections/community.general/pull/10942). + - ansible_galaxy_install - use f-strings for string templating (https://github.com/ansible-collections/community.general/pull/10942). + - apache2_mod_proxy - use f-strings for string templating (https://github.com/ansible-collections/community.general/pull/10942). + - apache2_module - use f-strings for string templating (https://github.com/ansible-collections/community.general/pull/10942). + - apk - use f-strings for string templating (https://github.com/ansible-collections/community.general/pull/10942). + - apt_repo - use f-strings for string templating (https://github.com/ansible-collections/community.general/pull/10942). + - apt_rpm - use f-strings for string templating (https://github.com/ansible-collections/community.general/pull/10942). + - archive - use f-strings for string templating (https://github.com/ansible-collections/community.general/pull/10942). + - awall - use f-strings for string templating (https://github.com/ansible-collections/community.general/pull/10942). diff --git a/changelogs/fragments/9499-typetalk-deprecation.yml b/changelogs/fragments/9499-typetalk-deprecation.yml new file mode 100644 index 0000000000..8323bbe959 --- /dev/null +++ b/changelogs/fragments/9499-typetalk-deprecation.yml @@ -0,0 +1,2 @@ +deprecated_features: + - typetalk - module is deprecated and will be removed in community.general 13.0.0 (https://github.com/ansible-collections/community.general/pull/9499). 
diff --git a/changelogs/fragments/ansible-core-2.16.yml b/changelogs/fragments/ansible-core-2.16.yml new file mode 100644 index 0000000000..1132d20e3e --- /dev/null +++ b/changelogs/fragments/ansible-core-2.16.yml @@ -0,0 +1,2 @@ +removed_features: + - "Ansible-core 2.16 is no longer supported. This also means that the collection now requires Python 3.7+ (https://github.com/ansible-collections/community.general/pull/10884)." diff --git a/changelogs/fragments/become-pipelining.yml b/changelogs/fragments/become-pipelining.yml new file mode 100644 index 0000000000..201d85f71c --- /dev/null +++ b/changelogs/fragments/become-pipelining.yml @@ -0,0 +1,3 @@ +bugfixes: + - "doas become plugin - disable pipelining on ansible-core 2.19+. The plugin does not work with pipelining, and since ansible-core 2.19 become plugins can indicate that they do not work with pipelining (https://github.com/ansible-collections/community.general/issues/9977, https://github.com/ansible-collections/community.general/pull/10537)." + - "machinectl become plugin - disable pipelining on ansible-core 2.19+. The plugin does not work with pipelining, and since ansible-core 2.19 become plugins can indicate that they do not work with pipelining (https://github.com/ansible-collections/community.general/pull/10537)." diff --git a/changelogs/fragments/deprecations.yml b/changelogs/fragments/deprecations.yml new file mode 100644 index 0000000000..424b2d439b --- /dev/null +++ b/changelogs/fragments/deprecations.yml @@ -0,0 +1,16 @@ +removed_features: + - "yaml callback plugin - the deprecated plugin has been removed. Use the default callback with ``result_format=yaml`` instead (https://github.com/ansible-collections/community.general/pull/10883)." + - "purestorage doc fragment - the modules using this doc fragment have been removed from community.general 3.0.0 (https://github.com/ansible-collections/community.general/pull/10883)." 
+ - "pure module utils - the modules using this module utils have been removed from community.general 3.0.0 (https://github.com/ansible-collections/community.general/pull/10883)." + - "bearychat - the module has been removed as the chat service is no longer available (https://github.com/ansible-collections/community.general/pull/10883)." + - "facter - the module has been replaced by ``community.general.facter_facts`` (https://github.com/ansible-collections/community.general/pull/10883)." + - "pacemaker_cluster - the option ``state`` is now required (https://github.com/ansible-collections/community.general/pull/10883)." + - >- + opkg - the value ``""`` for the option ``force`` is no longer allowed. Omit ``force`` instead (https://github.com/ansible-collections/community.general/pull/10883). + - "cmd_runner_fmt module utils - the parameter ``ctx_ignore_none`` to argument formatters has been removed (https://github.com/ansible-collections/community.general/pull/10883)." + - "cmd_runner module utils - the parameter ``ignore_value_none`` to ``CmdRunner.__call__()`` has been removed (https://github.com/ansible-collections/community.general/pull/10883)." + - >- + mh.deco module utils - the parameters ``on_success`` and ``on_failure`` of ``cause()`` have been removed; use ``when="success"`` and ``when="failure"`` instead (https://github.com/ansible-collections/community.general/pull/10883). +breaking_changes: + - "slack - the default of ``prepend_hash`` changed from ``auto`` to ``never`` (https://github.com/ansible-collections/community.general/pull/10883)." + - "mh.base module utils - ``debug`` will now always be delegated to the underlying ``AnsibleModule`` object (https://github.com/ansible-collections/community.general/pull/10883)." 
diff --git a/changelogs/fragments/hiera.yml b/changelogs/fragments/hiera.yml new file mode 100644 index 0000000000..70c75f059e --- /dev/null +++ b/changelogs/fragments/hiera.yml @@ -0,0 +1,4 @@ +deprecated_features: + - "hiera lookup plugin - retrieving data with Hiera has been deprecated a long time ago; because of that this plugin will be removed from community.general 13.0.0. + If you disagree with this deprecation, please create an issue in the community.general repository + (https://github.com/ansible-collections/community.general/issues/4462, https://github.com/ansible-collections/community.general/pull/10779)." diff --git a/changelogs/fragments/keycloak-realm-webauthn-policies.yml b/changelogs/fragments/keycloak-realm-webauthn-policies.yml new file mode 100644 index 0000000000..91b1f67b3a --- /dev/null +++ b/changelogs/fragments/keycloak-realm-webauthn-policies.yml @@ -0,0 +1,2 @@ +minor_changes: + - keycloak_realm - add support for WebAuthn policy configuration options, including both regular and passwordless WebAuthn policies (https://github.com/ansible-collections/community.general/pull/10791). diff --git a/changelogs/fragments/logstash.yml b/changelogs/fragments/logstash.yml new file mode 100644 index 0000000000..1c7ec89b7d --- /dev/null +++ b/changelogs/fragments/logstash.yml @@ -0,0 +1,3 @@ +--- +bugfixes: + - logstash callback plugin - remove reference to Python 2 library (https://github.com/ansible-collections/community.general/pull/10345). diff --git a/changelogs/fragments/lvm_pv.yml b/changelogs/fragments/lvm_pv.yml new file mode 100644 index 0000000000..d0198d7ffb --- /dev/null +++ b/changelogs/fragments/lvm_pv.yml @@ -0,0 +1,3 @@ +--- +bugfixes: + - lvm_pv - properly detect SCSI or NVMe devices to rescan (https://github.com/ansible-collections/community.general/issues/10444, https://github.com/ansible-collections/community.general/pull/10596). 
diff --git a/changelogs/fragments/random_string_seed.yml b/changelogs/fragments/random_string_seed.yml new file mode 100644 index 0000000000..a90b7d93b5 --- /dev/null +++ b/changelogs/fragments/random_string_seed.yml @@ -0,0 +1,3 @@ +--- +minor_changes: + - random_string lookup plugin - allow to specify seed while generating random string (https://github.com/ansible-collections/community.general/issues/5362, https://github.com/ansible-collections/community.general/pull/10710). diff --git a/changelogs/fragments/replace-random-with-secrets.yml b/changelogs/fragments/replace-random-with-secrets.yml new file mode 100644 index 0000000000..b82e59e7e9 --- /dev/null +++ b/changelogs/fragments/replace-random-with-secrets.yml @@ -0,0 +1,4 @@ +bugfixes: + - random_string lookup plugin - replace ``random.SystemRandom()`` with ``secrets.SystemRandom()`` when + generating strings. This has no practical effect, as both are the same + (https://github.com/ansible-collections/community.general/pull/10893). 
diff --git a/commit-rights.md b/commit-rights.md index 43836350c5..196565eca7 100644 --- a/commit-rights.md +++ b/commit-rights.md @@ -1,3 +1,9 @@ + + Committers Guidelines for community.general =========================================== diff --git a/docs/docsite/config.yml b/docs/docsite/config.yml new file mode 100644 index 0000000000..1d6cf8554a --- /dev/null +++ b/docs/docsite/config.yml @@ -0,0 +1,7 @@ +--- +# Copyright (c) Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +changelog: + write_changelog: true diff --git a/docs/docsite/extra-docs.yml b/docs/docsite/extra-docs.yml index 83f533ec08..4594ab4c2d 100644 --- a/docs/docsite/extra-docs.yml +++ b/docs/docsite/extra-docs.yml @@ -1,6 +1,24 @@ --- +# Copyright (c) Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + sections: - title: Guides toctree: - filter_guide - test_guide + - title: Technology Guides + toctree: + - guide_alicloud + - guide_iocage + - guide_online + - guide_packet + - guide_scaleway + - title: Developer Guides + toctree: + - guide_deps + - guide_vardict + - guide_cmdrunner + - guide_modulehelper + - guide_uthelper diff --git a/docs/docsite/helper/lists_mergeby/default-common.yml b/docs/docsite/helper/lists_mergeby/default-common.yml deleted file mode 100644 index 69227fbe44..0000000000 --- a/docs/docsite/helper/lists_mergeby/default-common.yml +++ /dev/null @@ -1,13 +0,0 @@ -list1: - - name: foo - extra: true - - name: bar - extra: false - - name: meh - extra: true - -list2: - - name: foo - path: /foo - - name: baz - path: /baz diff --git a/docs/docsite/helper/lists_mergeby/default-recursive-true.yml b/docs/docsite/helper/lists_mergeby/default-recursive-true.yml deleted file mode 100644 index 7d8a7cf640..0000000000 --- 
a/docs/docsite/helper/lists_mergeby/default-recursive-true.yml +++ /dev/null @@ -1,19 +0,0 @@ -list1: - - name: myname01 - param01: - x: default_value - y: default_value - list: - - default_value - - name: myname02 - param01: [1, 1, 2, 3] - -list2: - - name: myname01 - param01: - y: patch_value - z: patch_value - list: - - patch_value - - name: myname02 - param01: [3, 4, 4, {key: value}] diff --git a/docs/docsite/helper/lists_mergeby/example-001.yml b/docs/docsite/helper/lists_mergeby/example-001.yml deleted file mode 100644 index d1cbb4b3b4..0000000000 --- a/docs/docsite/helper/lists_mergeby/example-001.yml +++ /dev/null @@ -1,10 +0,0 @@ ---- -- name: 1. Merge two lists by common attribute 'name' - include_vars: - dir: example-001_vars -- debug: - var: list3 - when: debug|d(false)|bool -- template: - src: list3.out.j2 - dest: example-001.out diff --git a/docs/docsite/helper/lists_mergeby/example-001_vars/default-common.yml b/docs/docsite/helper/lists_mergeby/example-001_vars/default-common.yml deleted file mode 120000 index 7ea8984a8d..0000000000 --- a/docs/docsite/helper/lists_mergeby/example-001_vars/default-common.yml +++ /dev/null @@ -1 +0,0 @@ -../default-common.yml \ No newline at end of file diff --git a/docs/docsite/helper/lists_mergeby/example-001_vars/list3.yml b/docs/docsite/helper/lists_mergeby/example-001_vars/list3.yml deleted file mode 100644 index 4ecfb0a6c6..0000000000 --- a/docs/docsite/helper/lists_mergeby/example-001_vars/list3.yml +++ /dev/null @@ -1,2 +0,0 @@ -list3: "{{ list1| - community.general.lists_mergeby(list2, 'name') }}" diff --git a/docs/docsite/helper/lists_mergeby/example-002.yml b/docs/docsite/helper/lists_mergeby/example-002.yml deleted file mode 100644 index d21441a8df..0000000000 --- a/docs/docsite/helper/lists_mergeby/example-002.yml +++ /dev/null @@ -1,10 +0,0 @@ ---- -- name: 2. 
Merge two lists by common attribute 'name' - include_vars: - dir: example-002_vars -- debug: - var: list3 - when: debug|d(false)|bool -- template: - src: list3.out.j2 - dest: example-002.out diff --git a/docs/docsite/helper/lists_mergeby/example-002_vars/default-common.yml b/docs/docsite/helper/lists_mergeby/example-002_vars/default-common.yml deleted file mode 120000 index 7ea8984a8d..0000000000 --- a/docs/docsite/helper/lists_mergeby/example-002_vars/default-common.yml +++ /dev/null @@ -1 +0,0 @@ -../default-common.yml \ No newline at end of file diff --git a/docs/docsite/helper/lists_mergeby/example-002_vars/list3.yml b/docs/docsite/helper/lists_mergeby/example-002_vars/list3.yml deleted file mode 100644 index 9eb6775f44..0000000000 --- a/docs/docsite/helper/lists_mergeby/example-002_vars/list3.yml +++ /dev/null @@ -1,2 +0,0 @@ -list3: "{{ [list1, list2]| - community.general.lists_mergeby('name') }}" diff --git a/docs/docsite/helper/lists_mergeby/example-003.yml b/docs/docsite/helper/lists_mergeby/example-003.yml deleted file mode 100644 index 7692278609..0000000000 --- a/docs/docsite/helper/lists_mergeby/example-003.yml +++ /dev/null @@ -1,10 +0,0 @@ ---- -- name: 3. 
Merge recursive by 'name', replace lists (default) - include_vars: - dir: example-003_vars -- debug: - var: list3 - when: debug|d(false)|bool -- template: - src: list3.out.j2 - dest: example-003.out diff --git a/docs/docsite/helper/lists_mergeby/example-003_vars/default-recursive-true.yml b/docs/docsite/helper/lists_mergeby/example-003_vars/default-recursive-true.yml deleted file mode 120000 index 299736f8ad..0000000000 --- a/docs/docsite/helper/lists_mergeby/example-003_vars/default-recursive-true.yml +++ /dev/null @@ -1 +0,0 @@ -../default-recursive-true.yml \ No newline at end of file diff --git a/docs/docsite/helper/lists_mergeby/example-003_vars/list3.yml b/docs/docsite/helper/lists_mergeby/example-003_vars/list3.yml deleted file mode 100644 index 6d6bf8a478..0000000000 --- a/docs/docsite/helper/lists_mergeby/example-003_vars/list3.yml +++ /dev/null @@ -1,3 +0,0 @@ -list3: "{{ [list1, list2]| - community.general.lists_mergeby('name', - recursive=true) }}" diff --git a/docs/docsite/helper/lists_mergeby/example-004.yml b/docs/docsite/helper/lists_mergeby/example-004.yml deleted file mode 100644 index 8a473a7328..0000000000 --- a/docs/docsite/helper/lists_mergeby/example-004.yml +++ /dev/null @@ -1,10 +0,0 @@ ---- -- name: 4. 
Merge recursive by 'name', keep lists - include_vars: - dir: example-004_vars -- debug: - var: list3 - when: debug|d(false)|bool -- template: - src: list3.out.j2 - dest: example-004.out diff --git a/docs/docsite/helper/lists_mergeby/example-004_vars/default-recursive-true.yml b/docs/docsite/helper/lists_mergeby/example-004_vars/default-recursive-true.yml deleted file mode 120000 index 299736f8ad..0000000000 --- a/docs/docsite/helper/lists_mergeby/example-004_vars/default-recursive-true.yml +++ /dev/null @@ -1 +0,0 @@ -../default-recursive-true.yml \ No newline at end of file diff --git a/docs/docsite/helper/lists_mergeby/example-004_vars/list3.yml b/docs/docsite/helper/lists_mergeby/example-004_vars/list3.yml deleted file mode 100644 index a525ae4f69..0000000000 --- a/docs/docsite/helper/lists_mergeby/example-004_vars/list3.yml +++ /dev/null @@ -1,4 +0,0 @@ -list3: "{{ [list1, list2]| - community.general.lists_mergeby('name', - recursive=true, - list_merge='keep') }}" diff --git a/docs/docsite/helper/lists_mergeby/example-005.yml b/docs/docsite/helper/lists_mergeby/example-005.yml deleted file mode 100644 index 8bdf92c359..0000000000 --- a/docs/docsite/helper/lists_mergeby/example-005.yml +++ /dev/null @@ -1,10 +0,0 @@ ---- -- name: 5. 
Merge recursive by 'name', append lists - include_vars: - dir: example-005_vars -- debug: - var: list3 - when: debug|d(false)|bool -- template: - src: list3.out.j2 - dest: example-005.out diff --git a/docs/docsite/helper/lists_mergeby/example-005_vars/default-recursive-true.yml b/docs/docsite/helper/lists_mergeby/example-005_vars/default-recursive-true.yml deleted file mode 120000 index 299736f8ad..0000000000 --- a/docs/docsite/helper/lists_mergeby/example-005_vars/default-recursive-true.yml +++ /dev/null @@ -1 +0,0 @@ -../default-recursive-true.yml \ No newline at end of file diff --git a/docs/docsite/helper/lists_mergeby/example-005_vars/list3.yml b/docs/docsite/helper/lists_mergeby/example-005_vars/list3.yml deleted file mode 100644 index 650686104b..0000000000 --- a/docs/docsite/helper/lists_mergeby/example-005_vars/list3.yml +++ /dev/null @@ -1,4 +0,0 @@ -list3: "{{ [list1, list2]| - community.general.lists_mergeby('name', - recursive=true, - list_merge='append') }}" diff --git a/docs/docsite/helper/lists_mergeby/example-006.yml b/docs/docsite/helper/lists_mergeby/example-006.yml deleted file mode 100644 index 9dcb9b684d..0000000000 --- a/docs/docsite/helper/lists_mergeby/example-006.yml +++ /dev/null @@ -1,10 +0,0 @@ ---- -- name: 6. 
Merge recursive by 'name', prepend lists - include_vars: - dir: example-006_vars -- debug: - var: list3 - when: debug|d(false)|bool -- template: - src: list3.out.j2 - dest: example-006.out diff --git a/docs/docsite/helper/lists_mergeby/example-006_vars/default-recursive-true.yml b/docs/docsite/helper/lists_mergeby/example-006_vars/default-recursive-true.yml deleted file mode 120000 index 299736f8ad..0000000000 --- a/docs/docsite/helper/lists_mergeby/example-006_vars/default-recursive-true.yml +++ /dev/null @@ -1 +0,0 @@ -../default-recursive-true.yml \ No newline at end of file diff --git a/docs/docsite/helper/lists_mergeby/example-006_vars/list3.yml b/docs/docsite/helper/lists_mergeby/example-006_vars/list3.yml deleted file mode 100644 index d880dfa9f0..0000000000 --- a/docs/docsite/helper/lists_mergeby/example-006_vars/list3.yml +++ /dev/null @@ -1,4 +0,0 @@ -list3: "{{ [list1, list2]| - community.general.lists_mergeby('name', - recursive=true, - list_merge='prepend') }}" diff --git a/docs/docsite/helper/lists_mergeby/example-007.yml b/docs/docsite/helper/lists_mergeby/example-007.yml deleted file mode 100644 index e1a6f2c7e3..0000000000 --- a/docs/docsite/helper/lists_mergeby/example-007.yml +++ /dev/null @@ -1,10 +0,0 @@ ---- -- name: 7. 
Merge recursive by 'name', append lists 'remove present' - include_vars: - dir: example-007_vars -- debug: - var: list3 - when: debug|d(false)|bool -- template: - src: list3.out.j2 - dest: example-007.out diff --git a/docs/docsite/helper/lists_mergeby/example-007_vars/default-recursive-true.yml b/docs/docsite/helper/lists_mergeby/example-007_vars/default-recursive-true.yml deleted file mode 120000 index 299736f8ad..0000000000 --- a/docs/docsite/helper/lists_mergeby/example-007_vars/default-recursive-true.yml +++ /dev/null @@ -1 +0,0 @@ -../default-recursive-true.yml \ No newline at end of file diff --git a/docs/docsite/helper/lists_mergeby/example-007_vars/list3.yml b/docs/docsite/helper/lists_mergeby/example-007_vars/list3.yml deleted file mode 100644 index af71d6dfd5..0000000000 --- a/docs/docsite/helper/lists_mergeby/example-007_vars/list3.yml +++ /dev/null @@ -1,4 +0,0 @@ -list3: "{{ [list1, list2]| - community.general.lists_mergeby('name', - recursive=true, - list_merge='append_rp') }}" diff --git a/docs/docsite/helper/lists_mergeby/example-008.yml b/docs/docsite/helper/lists_mergeby/example-008.yml deleted file mode 100644 index 18a598864a..0000000000 --- a/docs/docsite/helper/lists_mergeby/example-008.yml +++ /dev/null @@ -1,10 +0,0 @@ ---- -- name: 8. 
Merge recursive by 'name', prepend lists 'remove present' - include_vars: - dir: example-008_vars -- debug: - var: list3 - when: debug|d(false)|bool -- template: - src: list3.out.j2 - dest: example-008.out diff --git a/docs/docsite/helper/lists_mergeby/example-008_vars/default-recursive-true.yml b/docs/docsite/helper/lists_mergeby/example-008_vars/default-recursive-true.yml deleted file mode 120000 index 299736f8ad..0000000000 --- a/docs/docsite/helper/lists_mergeby/example-008_vars/default-recursive-true.yml +++ /dev/null @@ -1 +0,0 @@ -../default-recursive-true.yml \ No newline at end of file diff --git a/docs/docsite/helper/lists_mergeby/example-008_vars/list3.yml b/docs/docsite/helper/lists_mergeby/example-008_vars/list3.yml deleted file mode 100644 index 8a20578507..0000000000 --- a/docs/docsite/helper/lists_mergeby/example-008_vars/list3.yml +++ /dev/null @@ -1,4 +0,0 @@ -list3: "{{ [list1, list2]| - community.general.lists_mergeby('name', - recursive=true, - list_merge='prepend_rp') }}" diff --git a/docs/docsite/helper/lists_mergeby/examples.yml b/docs/docsite/helper/lists_mergeby/examples.yml deleted file mode 100644 index 1e798cb8dc..0000000000 --- a/docs/docsite/helper/lists_mergeby/examples.yml +++ /dev/null @@ -1,50 +0,0 @@ ---- -examples: - - label: 'In the example below the lists are merged by the attribute ``name``:' - file: example-001_vars/list3.yml - lang: 'yaml+jinja' - - label: 'This produces:' - file: example-001.out - lang: 'yaml' - - label: 'It is possible to use a list of lists as an input of the filter:' - file: example-002_vars/list3.yml - lang: 'yaml+jinja' - - label: 'This produces the same result as in the previous example:' - file: example-002.out - lang: 'yaml' - - label: 'Example ``list_merge=replace`` (default):' - file: example-003_vars/list3.yml - lang: 'yaml+jinja' - - label: 'This produces:' - file: example-003.out - lang: 'yaml' - - label: 'Example ``list_merge=keep``:' - file: example-004_vars/list3.yml - lang: 'yaml+jinja' - 
- label: 'This produces:' - file: example-004.out - lang: 'yaml' - - label: 'Example ``list_merge=append``:' - file: example-005_vars/list3.yml - lang: 'yaml+jinja' - - label: 'This produces:' - file: example-005.out - lang: 'yaml' - - label: 'Example ``list_merge=prepend``:' - file: example-006_vars/list3.yml - lang: 'yaml+jinja' - - label: 'This produces:' - file: example-006.out - lang: 'yaml' - - label: 'Example ``list_merge=append_rp``:' - file: example-007_vars/list3.yml - lang: 'yaml+jinja' - - label: 'This produces:' - file: example-007.out - lang: 'yaml' - - label: 'Example ``list_merge=prepend_rp``:' - file: example-008_vars/list3.yml - lang: 'yaml+jinja' - - label: 'This produces:' - file: example-008.out - lang: 'yaml' diff --git a/docs/docsite/helper/lists_mergeby/examples_all.rst.j2 b/docs/docsite/helper/lists_mergeby/examples_all.rst.j2 deleted file mode 100644 index 014ff2d112..0000000000 --- a/docs/docsite/helper/lists_mergeby/examples_all.rst.j2 +++ /dev/null @@ -1,8 +0,0 @@ -{% for i in examples %} -{{ i.label }} - -.. code-block:: {{ i.lang }} - - {{ lookup('file', i.file)|indent(2) }} - -{% endfor %} diff --git a/docs/docsite/helper/lists_mergeby/filter_guide_abstract_informations_merging_lists_of_dictionaries.rst.j2 b/docs/docsite/helper/lists_mergeby/filter_guide_abstract_informations_merging_lists_of_dictionaries.rst.j2 deleted file mode 100644 index 23cb6de07c..0000000000 --- a/docs/docsite/helper/lists_mergeby/filter_guide_abstract_informations_merging_lists_of_dictionaries.rst.j2 +++ /dev/null @@ -1,57 +0,0 @@ -Merging lists of dictionaries -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -If you have two or more lists of dictionaries and want to combine them into a list of merged dictionaries, where the dictionaries are merged by an attribute, you can use the ``lists_mergeby`` filter. - -.. note:: The output of the examples in this section use the YAML callback plugin. 
Quoting: "Ansible output that can be quite a bit easier to read than the default JSON formatting." See :ref:`the documentation for the community.general.yaml callback plugin `. - -Let us use the lists below in the following examples: - -.. code-block:: yaml - - {{ lookup('file', 'default-common.yml')|indent(2) }} - -{% for i in examples[0:2] %} -{{ i.label }} - -.. code-block:: {{ i.lang }} - - {{ lookup('file', i.file)|indent(2) }} - -{% endfor %} - -.. versionadded:: 2.0.0 - -{% for i in examples[2:4] %} -{{ i.label }} - -.. code-block:: {{ i.lang }} - - {{ lookup('file', i.file)|indent(2) }} - -{% endfor %} - -The filter also accepts two optional parameters: ``recursive`` and ``list_merge``. These parameters are only supported when used with ansible-base 2.10 or ansible-core, but not with Ansible 2.9. This is available since community.general 4.4.0. - -**recursive** - Is a boolean, default to ``False``. Should the ``community.general.lists_mergeby`` recursively merge nested hashes. Note: It does not depend on the value of the ``hash_behaviour`` setting in ``ansible.cfg``. - -**list_merge** - Is a string, its possible values are ``replace`` (default), ``keep``, ``append``, ``prepend``, ``append_rp`` or ``prepend_rp``. It modifies the behaviour of ``community.general.lists_mergeby`` when the hashes to merge contain arrays/lists. - -The examples below set ``recursive=true`` and display the differences among all six options of ``list_merge``. Functionality of the parameters is exactly the same as in the filter ``combine``. See :ref:`Combining hashes/dictionaries ` to learn details about these options. - -Let us use the lists below in the following examples - -.. code-block:: yaml - - {{ lookup('file', 'default-recursive-true.yml')|indent(2) }} - -{% for i in examples[4:16] %} -{{ i.label }} - -.. 
code-block:: {{ i.lang }} - - {{ lookup('file', i.file)|indent(2) }} - -{% endfor %} diff --git a/docs/docsite/helper/lists_mergeby/list3.out.j2 b/docs/docsite/helper/lists_mergeby/list3.out.j2 deleted file mode 100644 index 764ce3bd1d..0000000000 --- a/docs/docsite/helper/lists_mergeby/list3.out.j2 +++ /dev/null @@ -1,2 +0,0 @@ -list3: -{{ list3|to_nice_yaml(indent=0) }} diff --git a/docs/docsite/helper/lists_mergeby/playbook.yml b/docs/docsite/helper/lists_mergeby/playbook.yml deleted file mode 100644 index ae82932275..0000000000 --- a/docs/docsite/helper/lists_mergeby/playbook.yml +++ /dev/null @@ -1,59 +0,0 @@ ---- - -# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -# 1) Run all examples and create example-XXX.out -# shell> ansible-playbook playbook.yml -e examples=true -# -# 2) Optionally, for testing, create examples_all.rst -# shell> ansible-playbook playbook.yml -e examples_all=true -# -# 3) Create docs REST files -# shell> ansible-playbook playbook.yml -e merging_lists_of_dictionaries=true -# -# Notes: -# * Use YAML callback, e.g. set ANSIBLE_STDOUT_CALLBACK=community.general.yaml -# * Use sphinx-view to render and review the REST files -# shell> sphinx-view /examples_all.rst -# * Proofread and copy completed docs *.rst files into the directory rst. -# * Then delete the *.rst and *.out files from this directory. Do not -# add *.rst and *.out in this directory to the version control. 
-# -# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -# community.general/docs/docsite/helper/lists_mergeby/playbook.yml - -- hosts: localhost - gather_facts: false - tasks: - - - block: - - import_tasks: example-001.yml - tags: t001 - - import_tasks: example-002.yml - tags: t002 - - import_tasks: example-003.yml - tags: t003 - - import_tasks: example-004.yml - tags: t004 - - import_tasks: example-005.yml - tags: t005 - - import_tasks: example-006.yml - tags: t006 - - import_tasks: example-007.yml - tags: t007 - - import_tasks: example-008.yml - tags: t008 - when: examples|d(false)|bool - - - block: - - include_vars: examples.yml - - template: - src: examples_all.rst.j2 - dest: examples_all.rst - when: examples_all|d(false)|bool - - - block: - - include_vars: examples.yml - - template: - src: filter_guide_abstract_informations_merging_lists_of_dictionaries.rst.j2 - dest: filter_guide_abstract_informations_merging_lists_of_dictionaries.rst - when: merging_lists_of_dictionaries|d(false)|bool diff --git a/docs/docsite/links.yml b/docs/docsite/links.yml index b5a1720974..fe41d1d2fd 100644 --- a/docs/docsite/links.yml +++ b/docs/docsite/links.yml @@ -1,10 +1,16 @@ --- +# Copyright (c) Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + edit_on_github: repository: ansible-collections/community.general branch: main path_prefix: '' extra_links: + - description: Ask for help + url: https://forum.ansible.com/c/help/6/none - description: Submit a bug report url: https://github.com/ansible-collections/community.general/issues/new?assignees=&labels=&template=bug_report.yml - description: Request a feature @@ -18,6 +24,10 @@ communication: - topic: General usage and support questions network: Libera channel: '#ansible' - mailing_lists: - - topic: Ansible Project List - url: https://groups.google.com/g/ansible-project + 
forums: + - topic: "Ansible Forum: General usage and support questions" + # The following URL directly points to the "Get Help" section + url: https://forum.ansible.com/c/help/6/none + - topic: "Ansible Forum: Discussions about the collection itself, not for specific modules or plugins" + # The following URL directly points to the "community-general" tag + url: https://forum.ansible.com/tag/community-general diff --git a/docs/docsite/rst/filter_guide-abstract_informations-lists_of_dictionaries-keep_keys.rst b/docs/docsite/rst/filter_guide-abstract_informations-lists_of_dictionaries-keep_keys.rst new file mode 100644 index 0000000000..3549d29ba7 --- /dev/null +++ b/docs/docsite/rst/filter_guide-abstract_informations-lists_of_dictionaries-keep_keys.rst @@ -0,0 +1,151 @@ +.. + Copyright (c) Ansible Project + GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) + SPDX-License-Identifier: GPL-3.0-or-later + +keep_keys +""""""""" + +Use the filter :ansplugin:`community.general.keep_keys#filter` if you have a list of dictionaries and want to keep certain keys only. + +.. note:: The output of the examples in this section use the YAML callback plugin. Quoting: "Ansible output that can be quite a bit easier to read than the default JSON formatting." See :ansplugin:`the documentation for the community.general.yaml callback plugin `. + + +Let us use the below list in the following examples: + +.. code-block:: yaml + + input: + - k0_x0: A0 + k1_x1: B0 + k2_x2: [C0] + k3_x3: foo + - k0_x0: A1 + k1_x1: B1 + k2_x2: [C1] + k3_x3: bar + + +* By default, match keys that equal any of the items in the target. + +.. code-block:: yaml+jinja + :emphasize-lines: 1 + + target: ['k0_x0', 'k1_x1'] + result: "{{ input | community.general.keep_keys(target=target) }}" + + +gives + +.. code-block:: yaml + :emphasize-lines: 1- + + result: + - {k0_x0: A0, k1_x1: B0} + - {k0_x0: A1, k1_x1: B1} + + +.. 
versionadded:: 9.1.0 + +* The results of the below examples 1-5 are all the same: + +.. code-block:: yaml + :emphasize-lines: 1- + + result: + - {k0_x0: A0, k1_x1: B0} + - {k0_x0: A1, k1_x1: B1} + + +1. Match keys that equal any of the items in the target. + +.. code-block:: yaml+jinja + :emphasize-lines: 1,2 + + mp: equal + target: ['k0_x0', 'k1_x1'] + result: "{{ input | community.general.keep_keys(target=target, matching_parameter=mp) }}" + +2. Match keys that start with any of the items in the target. + +.. code-block:: yaml+jinja + :emphasize-lines: 1,2 + + mp: starts_with + target: ['k0', 'k1'] + result: "{{ input | community.general.keep_keys(target=target, matching_parameter=mp) }}" + +3. Match keys that end with any of the items in target. + +.. code-block:: yaml+jinja + :emphasize-lines: 1,2 + + mp: ends_with + target: ['x0', 'x1'] + result: "{{ input | community.general.keep_keys(target=target, matching_parameter=mp) }}" + +4. Match keys by the regex. + +.. code-block:: yaml+jinja + :emphasize-lines: 1,2 + + mp: regex + target: ['^.*[01]_x.*$'] + result: "{{ input | community.general.keep_keys(target=target, matching_parameter=mp) }}" + +5. Match keys by the regex. + +.. code-block:: yaml+jinja + :emphasize-lines: 1,2 + + mp: regex + target: ^.*[01]_x.*$ + result: "{{ input | community.general.keep_keys(target=target, matching_parameter=mp) }}" + + +* The results of the below examples 6-9 are all the same: + +.. code-block:: yaml + :emphasize-lines: 1- + + result: + - {k0_x0: A0} + - {k0_x0: A1} + + +6. Match keys that equal the target. + +.. code-block:: yaml+jinja + :emphasize-lines: 1,2 + + mp: equal + target: k0_x0 + result: "{{ input | community.general.keep_keys(target=target, matching_parameter=mp) }}" + +7. Match keys that start with the target. + +.. code-block:: yaml+jinja + :emphasize-lines: 1,2 + + mp: starts_with + target: k0 + result: "{{ input | community.general.keep_keys(target=target, matching_parameter=mp) }}" + +8. 
Match keys that end with the target. + +.. code-block:: yaml+jinja + :emphasize-lines: 1,2 + + mp: ends_with + target: x0 + result: "{{ input | community.general.keep_keys(target=target, matching_parameter=mp) }}" + +9. Match keys by the regex. + +.. code-block:: yaml+jinja + :emphasize-lines: 1,2 + + mp: regex + target: ^.*0_x.*$ + result: "{{ input | community.general.keep_keys(target=target, matching_parameter=mp) }}" + diff --git a/docs/docsite/rst/filter_guide-abstract_informations-lists_of_dictionaries-remove_keys.rst b/docs/docsite/rst/filter_guide-abstract_informations-lists_of_dictionaries-remove_keys.rst new file mode 100644 index 0000000000..4ac87ab79c --- /dev/null +++ b/docs/docsite/rst/filter_guide-abstract_informations-lists_of_dictionaries-remove_keys.rst @@ -0,0 +1,159 @@ +.. + Copyright (c) Ansible Project + GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) + SPDX-License-Identifier: GPL-3.0-or-later + +remove_keys +""""""""""" + +Use the filter :ansplugin:`community.general.remove_keys#filter` if you have a list of dictionaries and want to remove certain keys. + +.. note:: The output of the examples in this section use the YAML callback plugin. Quoting: "Ansible output that can be quite a bit easier to read than the default JSON formatting." See :ansplugin:`the documentation for the community.general.yaml callback plugin `. + + +Let us use the below list in the following examples: + +.. code-block:: yaml + + input: + - k0_x0: A0 + k1_x1: B0 + k2_x2: [C0] + k3_x3: foo + - k0_x0: A1 + k1_x1: B1 + k2_x2: [C1] + k3_x3: bar + + +* By default, match keys that equal any of the items in the target. + +.. code-block:: yaml+jinja + :emphasize-lines: 1 + + target: ['k0_x0', 'k1_x1'] + result: "{{ input | community.general.remove_keys(target=target) }}" + + +gives + +.. code-block:: yaml + :emphasize-lines: 1- + + result: + - k2_x2: [C0] + k3_x3: foo + - k2_x2: [C1] + k3_x3: bar + + +..
versionadded:: 9.1.0 + +* The results of the below examples 1-5 are all the same: + +.. code-block:: yaml + :emphasize-lines: 1- + + result: + - k2_x2: [C0] + k3_x3: foo + - k2_x2: [C1] + k3_x3: bar + + +1. Match keys that equal any of the items in the target. + +.. code-block:: yaml+jinja + :emphasize-lines: 1,2 + + mp: equal + target: ['k0_x0', 'k1_x1'] + result: "{{ input | community.general.remove_keys(target=target, matching_parameter=mp) }}" + +2. Match keys that start with any of the items in the target. + +.. code-block:: yaml+jinja + :emphasize-lines: 1,2 + + mp: starts_with + target: ['k0', 'k1'] + result: "{{ input | community.general.remove_keys(target=target, matching_parameter=mp) }}" + +3. Match keys that end with any of the items in target. + +.. code-block:: yaml+jinja + :emphasize-lines: 1,2 + + mp: ends_with + target: ['x0', 'x1'] + result: "{{ input | community.general.remove_keys(target=target, matching_parameter=mp) }}" + +4. Match keys by the regex. + +.. code-block:: yaml+jinja + :emphasize-lines: 1,2 + + mp: regex + target: ['^.*[01]_x.*$'] + result: "{{ input | community.general.remove_keys(target=target, matching_parameter=mp) }}" + +5. Match keys by the regex. + +.. code-block:: yaml+jinja + :emphasize-lines: 1,2 + + mp: regex + target: ^.*[01]_x.*$ + result: "{{ input | community.general.remove_keys(target=target, matching_parameter=mp) }}" + + +* The results of the below examples 6-9 are all the same: + +.. code-block:: yaml + :emphasize-lines: 1- + + result: + - k1_x1: B0 + k2_x2: [C0] + k3_x3: foo + - k1_x1: B1 + k2_x2: [C1] + k3_x3: bar + + +6. Match keys that equal the target. + +.. code-block:: yaml+jinja + :emphasize-lines: 1,2 + + mp: equal + target: k0_x0 + result: "{{ input | community.general.remove_keys(target=target, matching_parameter=mp) }}" + +7. Match keys that start with the target. + +.. 
code-block:: yaml+jinja + :emphasize-lines: 1,2 + + mp: starts_with + target: k0 + result: "{{ input | community.general.remove_keys(target=target, matching_parameter=mp) }}" + +8. Match keys that end with the target. + +.. code-block:: yaml+jinja + :emphasize-lines: 1,2 + + mp: ends_with + target: x0 + result: "{{ input | community.general.remove_keys(target=target, matching_parameter=mp) }}" + +9. Match keys by the regex. + +.. code-block:: yaml+jinja + :emphasize-lines: 1,2 + + mp: regex + target: ^.*0_x.*$ + result: "{{ input | community.general.remove_keys(target=target, matching_parameter=mp) }}" + diff --git a/docs/docsite/rst/filter_guide-abstract_informations-lists_of_dictionaries-replace_keys.rst b/docs/docsite/rst/filter_guide-abstract_informations-lists_of_dictionaries-replace_keys.rst new file mode 100644 index 0000000000..d0eb202bfe --- /dev/null +++ b/docs/docsite/rst/filter_guide-abstract_informations-lists_of_dictionaries-replace_keys.rst @@ -0,0 +1,175 @@ +.. + Copyright (c) Ansible Project + GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) + SPDX-License-Identifier: GPL-3.0-or-later + +replace_keys +"""""""""""" + +Use the filter :ansplugin:`community.general.replace_keys#filter` if you have a list of dictionaries and want to replace certain keys. + +.. note:: The output of the examples in this section use the YAML callback plugin. Quoting: "Ansible output that can be quite a bit easier to read than the default JSON formatting." See :ansplugin:`the documentation for the community.general.yaml callback plugin `. + + +Let us use the below list in the following examples: + +.. code-block:: yaml + + input: + - k0_x0: A0 + k1_x1: B0 + k2_x2: [C0] + k3_x3: foo + - k0_x0: A1 + k1_x1: B1 + k2_x2: [C1] + k3_x3: bar + + +* By default, match keys that equal any of the attributes before. + +.. 
code-block:: yaml+jinja + :emphasize-lines: 1-3 + + target: + - {after: a0, before: k0_x0} + - {after: a1, before: k1_x1} + + result: "{{ input | community.general.replace_keys(target=target) }}" + + +gives + +.. code-block:: yaml + :emphasize-lines: 1- + + result: + - a0: A0 + a1: B0 + k2_x2: [C0] + k3_x3: foo + - a0: A1 + a1: B1 + k2_x2: [C1] + k3_x3: bar + + +.. versionadded:: 9.1.0 + +* The results of the below examples 1-3 are all the same: + +.. code-block:: yaml + :emphasize-lines: 1- + + result: + - a0: A0 + a1: B0 + k2_x2: [C0] + k3_x3: foo + - a0: A1 + a1: B1 + k2_x2: [C1] + k3_x3: bar + + +1. Replace keys that starts with any of the attributes before. + +.. code-block:: yaml+jinja + :emphasize-lines: 1-4 + + mp: starts_with + target: + - {after: a0, before: k0} + - {after: a1, before: k1} + + result: "{{ input | community.general.replace_keys(target=target, matching_parameter=mp) }}" + +2. Replace keys that ends with any of the attributes before. + +.. code-block:: yaml+jinja + :emphasize-lines: 1-4 + + mp: ends_with + target: + - {after: a0, before: x0} + - {after: a1, before: x1} + + result: "{{ input | community.general.replace_keys(target=target, matching_parameter=mp) }}" + +3. Replace keys that match any regex of the attributes before. + +.. code-block:: yaml+jinja + :emphasize-lines: 1-4 + + mp: regex + target: + - {after: a0, before: ^.*0_x.*$} + - {after: a1, before: ^.*1_x.*$} + + result: "{{ input | community.general.replace_keys(target=target, matching_parameter=mp) }}" + + +* The results of the below examples 4-5 are the same: + +.. code-block:: yaml + :emphasize-lines: 1- + + result: + - {X: foo} + - {X: bar} + + +4. If more keys match the same attribute before the last one will be used. + +.. code-block:: yaml+jinja + :emphasize-lines: 1-3 + + mp: regex + target: + - {after: X, before: ^.*_x.*$} + + result: "{{ input | community.general.replace_keys(target=target, matching_parameter=mp) }}" + +5. 
If there are items with equal attribute before the first one will be used. + +.. code-block:: yaml+jinja + :emphasize-lines: 1-3 + + mp: regex + target: + - {after: X, before: ^.*_x.*$} + - {after: Y, before: ^.*_x.*$} + + result: "{{ input | community.general.replace_keys(target=target, matching_parameter=mp) }}" + + +6. If there are more matches for a key the first one will be used. + +.. code-block:: yaml + :emphasize-lines: 1- + + input: + - {aaa1: A, bbb1: B, ccc1: C} + - {aaa2: D, bbb2: E, ccc2: F} + + +.. code-block:: yaml+jinja + :emphasize-lines: 1-4 + + mp: starts_with + target: + - {after: X, before: a} + - {after: Y, before: aa} + + result: "{{ input | community.general.replace_keys(target=target, matching_parameter=mp) }}" + +gives + +.. code-block:: yaml + :emphasize-lines: 1- + + result: + - {X: A, bbb1: B, ccc1: C} + - {X: D, bbb2: E, ccc2: F} + + diff --git a/docs/docsite/rst/filter_guide-abstract_informations-lists_of_dictionaries.rst b/docs/docsite/rst/filter_guide-abstract_informations-lists_of_dictionaries.rst new file mode 100644 index 0000000000..64a82536d8 --- /dev/null +++ b/docs/docsite/rst/filter_guide-abstract_informations-lists_of_dictionaries.rst @@ -0,0 +1,18 @@ +.. + Copyright (c) Ansible Project + GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) + SPDX-License-Identifier: GPL-3.0-or-later + +.. _ansible_collections.community.general.docsite.filter_guide.filter_guide_abstract_informations.lists_of_dicts: + +Lists of dictionaries +^^^^^^^^^^^^^^^^^^^^^ + +Filters to manage keys in a list of dictionaries: + +.. 
toctree:: + :maxdepth: 1 + + filter_guide-abstract_informations-lists_of_dictionaries-keep_keys + filter_guide-abstract_informations-lists_of_dictionaries-remove_keys + filter_guide-abstract_informations-lists_of_dictionaries-replace_keys diff --git a/docs/docsite/rst/filter_guide.rst b/docs/docsite/rst/filter_guide.rst index bab223d344..da8a90af3c 100644 --- a/docs/docsite/rst/filter_guide.rst +++ b/docs/docsite/rst/filter_guide.rst @@ -1,10 +1,14 @@ +.. + Copyright (c) Ansible Project + GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) + SPDX-License-Identifier: GPL-3.0-or-later .. _ansible_collections.community.general.docsite.filter_guide: community.general Filter Guide ============================== -The :ref:`community.general collection ` offers several useful filter plugins. +The :anscollection:`community.general collection ` offers several useful filter plugins. .. toctree:: :maxdepth: 2 diff --git a/docs/docsite/rst/filter_guide_abstract_informations.rst b/docs/docsite/rst/filter_guide_abstract_informations.rst index 04fb49bdb0..818c09f02c 100644 --- a/docs/docsite/rst/filter_guide_abstract_informations.rst +++ b/docs/docsite/rst/filter_guide_abstract_informations.rst @@ -1,3 +1,8 @@ +.. 
+ Copyright (c) Ansible Project + GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) + SPDX-License-Identifier: GPL-3.0-or-later + Abstract transformations ------------------------ @@ -6,5 +11,7 @@ Abstract transformations filter_guide_abstract_informations_dictionaries filter_guide_abstract_informations_grouping + filter_guide-abstract_informations-lists_of_dictionaries filter_guide_abstract_informations_merging_lists_of_dictionaries + filter_guide_abstract_informations_lists_helper filter_guide_abstract_informations_counting_elements_in_sequence diff --git a/docs/docsite/rst/filter_guide_abstract_informations_counting_elements_in_sequence.rst b/docs/docsite/rst/filter_guide_abstract_informations_counting_elements_in_sequence.rst index c4282abab1..98e8eb1c4d 100644 --- a/docs/docsite/rst/filter_guide_abstract_informations_counting_elements_in_sequence.rst +++ b/docs/docsite/rst/filter_guide_abstract_informations_counting_elements_in_sequence.rst @@ -1,7 +1,12 @@ +.. + Copyright (c) Ansible Project + GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) + SPDX-License-Identifier: GPL-3.0-or-later + Counting elements in a sequence ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -The ``community.general.counter`` filter plugin allows you to count (hashable) elements in a sequence. Elements are returned as dictionary keys and their counts are stored as dictionary values. +The :ansplugin:`community.general.counter filter plugin ` allows you to count (hashable) elements in a sequence. Elements are returned as dictionary keys and their counts are stored as dictionary values. .. 
code-block:: yaml+jinja diff --git a/docs/docsite/rst/filter_guide_abstract_informations_dictionaries.rst b/docs/docsite/rst/filter_guide_abstract_informations_dictionaries.rst index 944eda2ba4..e5b5bb7e36 100644 --- a/docs/docsite/rst/filter_guide_abstract_informations_dictionaries.rst +++ b/docs/docsite/rst/filter_guide_abstract_informations_dictionaries.rst @@ -1,7 +1,12 @@ +.. + Copyright (c) Ansible Project + GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) + SPDX-License-Identifier: GPL-3.0-or-later + Dictionaries ^^^^^^^^^^^^ -You can use the ``dict_kv`` filter to create a single-entry dictionary with ``value | community.general.dict_kv(key)``: +You can use the :ansplugin:`community.general.dict_kv filter ` to create a single-entry dictionary with ``value | community.general.dict_kv(key)``: .. code-block:: yaml+jinja @@ -21,8 +26,8 @@ You can use the ``dict_kv`` filter to create a single-entry dictionary with ``va type: host database: all myservers: - - server1 - - server2 + - server1 + - server2 This produces: @@ -53,7 +58,7 @@ This produces: .. versionadded:: 2.0.0 -If you need to convert a list of key-value pairs to a dictionary, you can use the ``dict`` function. Unfortunately, this function cannot be used with ``map``. For this, the ``community.general.dict`` filter can be used: +If you need to convert a list of key-value pairs to a dictionary, you can use the ``dict`` function. Unfortunately, this function cannot be used with ``map``. For this, the :ansplugin:`community.general.dict filter ` can be used: .. code-block:: yaml+jinja diff --git a/docs/docsite/rst/filter_guide_abstract_informations_grouping.rst b/docs/docsite/rst/filter_guide_abstract_informations_grouping.rst index 8a46c10ebf..cb15989659 100644 --- a/docs/docsite/rst/filter_guide_abstract_informations_grouping.rst +++ b/docs/docsite/rst/filter_guide_abstract_informations_grouping.rst @@ -1,7 +1,12 @@ +.. 
+ Copyright (c) Ansible Project + GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) + SPDX-License-Identifier: GPL-3.0-or-later + Grouping ^^^^^^^^ -If you have a list of dictionaries, the Jinja2 ``groupby`` filter allows to group the list by an attribute. This results in a list of ``(grouper, list)`` namedtuples, where ``list`` contains all dictionaries where the selected attribute equals ``grouper``. If you know that for every ``grouper``, there will be a most one entry in that list, you can use the ``community.general.groupby_as_dict`` filter to convert the original list into a dictionary which maps ``grouper`` to the corresponding dictionary. +If you have a list of dictionaries, the Jinja2 ``groupby`` filter allows to group the list by an attribute. This results in a list of ``(grouper, list)`` namedtuples, where ``list`` contains all dictionaries where the selected attribute equals ``grouper``. If you know that for every ``grouper``, there will be at most one entry in that list, you can use the :ansplugin:`community.general.groupby_as_dict filter ` to convert the original list into a dictionary which maps ``grouper`` to the corresponding dictionary. One example is ``ansible_facts.mounts``, which is a list of dictionaries where each has one ``device`` element to indicate the device which is mounted. Therefore, ``ansible_facts.mounts | community.general.groupby_as_dict('device')`` is a dictionary mapping a device to the mount information: diff --git a/docs/docsite/rst/filter_guide_abstract_informations_lists_helper.rst b/docs/docsite/rst/filter_guide_abstract_informations_lists_helper.rst new file mode 100644 index 0000000000..505320c79c --- /dev/null +++ b/docs/docsite/rst/filter_guide_abstract_informations_lists_helper.rst @@ -0,0 +1,81 @@ +.. 
+ Copyright (c) Ansible Project + GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) + SPDX-License-Identifier: GPL-3.0-or-later + +Union, intersection and difference of lists +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +Starting with Ansible Core 2.16, the builtin filters :ansplugin:`ansible.builtin.union#filter`, :ansplugin:`ansible.builtin.intersect#filter`, :ansplugin:`ansible.builtin.difference#filter` and :ansplugin:`ansible.builtin.symmetric_difference#filter` began to behave differently and no longer preserve the item order. Items in the resulting lists are returned in arbitrary order and the order can vary between subsequent runs. + +The Ansible community.general collection provides the following additional list filters: + +- :ansplugin:`community.general.lists_union#filter` +- :ansplugin:`community.general.lists_intersect#filter` +- :ansplugin:`community.general.lists_difference#filter` +- :ansplugin:`community.general.lists_symmetric_difference#filter` + +These filters preserve the item order, eliminate duplicates and are an extended version of the builtin ones, because they can operate on more than two lists. + +.. note:: Stick to the builtin filters when item order is not important or when you do not need the n-ary operating mode. The builtin filters are faster, because they rely mostly on sets as their underlying datastructure. + +Let us use the lists below in the following examples: + +.. code-block:: yaml + + A: [9, 5, 7, 1, 9, 4, 10, 5, 9, 7] + B: [4, 1, 2, 8, 3, 1, 7] + C: [10, 2, 1, 9, 1] + +The union of ``A`` and ``B`` can be written as: + +.. code-block:: yaml+jinja + + result: "{{ A | community.general.lists_union(B) }}" + +This statement produces: + +.. code-block:: yaml + + result: [9, 5, 7, 1, 4, 10, 2, 8, 3] + +If you want to calculate the intersection of ``A``, ``B`` and ``C``, you can use the following statement: + +.. 
code-block:: yaml+jinja + + result: "{{ A | community.general.lists_intersect(B, C) }}" + +Alternatively, you can use a list of lists as an input of the filter + +.. code-block:: yaml+jinja + + result: "{{ [A, B] | community.general.lists_intersect(C) }}" + +or + +.. code-block:: yaml+jinja + + result: "{{ [A, B, C] | community.general.lists_intersect(flatten=true) }}" + +All three statements are equivalent and give: + +.. code-block:: yaml + + result: [1] + +.. note:: Be aware that in most cases, filter calls without any argument require ``flatten=true``, otherwise the input is returned as result. The reason for this is, that the input is considered as a variable argument and is wrapped by an additional outer list. ``flatten=true`` ensures that this list is removed before the input is processed by the filter logic. + +The filters :ansplugin:`community.general.lists_difference#filter` or :ansplugin:`community.general.lists_symmetric_difference#filter` can be used in the same way as the filters in the examples above. They calculate the difference or the symmetric difference between two or more lists and preserve the item order. + +For example, the symmetric difference of ``A``, ``B`` and ``C`` may be written as: + +.. code-block:: yaml+jinja + + result: "{{ A | community.general.lists_symmetric_difference(B, C) }}" + +This gives: + +.. code-block:: yaml + + result: [5, 8, 3, 1] + diff --git a/docs/docsite/rst/filter_guide_abstract_informations_merging_lists_of_dictionaries.rst b/docs/docsite/rst/filter_guide_abstract_informations_merging_lists_of_dictionaries.rst index de60869059..cafe04e5c4 100644 --- a/docs/docsite/rst/filter_guide_abstract_informations_merging_lists_of_dictionaries.rst +++ b/docs/docsite/rst/filter_guide_abstract_informations_merging_lists_of_dictionaries.rst @@ -1,33 +1,35 @@ +.. 
+ Copyright (c) Ansible Project + GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) + SPDX-License-Identifier: GPL-3.0-or-later + Merging lists of dictionaries ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -If you have two or more lists of dictionaries and want to combine them into a list of merged dictionaries, where the dictionaries are merged by an attribute, you can use the ``lists_mergeby`` filter. +If you have two or more lists of dictionaries and want to combine them into a list of merged dictionaries, where the dictionaries are merged by an attribute, you can use the :ansplugin:`community.general.lists_mergeby ` filter. -.. note:: The output of the examples in this section use the YAML callback plugin. Quoting: "Ansible output that can be quite a bit easier to read than the default JSON formatting." See :ref:`the documentation for the community.general.yaml callback plugin `. +.. note:: The output of the examples in this section use the YAML callback plugin. Quoting: "Ansible output that can be quite a bit easier to read than the default JSON formatting." See the documentation for the :ansplugin:`community.general.yaml callback plugin `. Let us use the lists below in the following examples: .. code-block:: yaml list1: - - name: foo - extra: true - - name: bar - extra: false - - name: meh - extra: true + - {name: foo, extra: true} + - {name: bar, extra: false} + - {name: meh, extra: true} list2: - - name: foo - path: /foo - - name: baz - path: /baz + - {name: foo, path: /foo} + - {name: baz, path: /baz} +Two lists +""""""""" In the example below the lists are merged by the attribute ``name``: .. code-block:: yaml+jinja - list3: "{{ list1| + list3: "{{ list1 | community.general.lists_mergeby(list2, 'name') }}" This produces: @@ -35,24 +37,21 @@ This produces: .. 
code-block:: yaml list3: - - extra: false - name: bar - - name: baz - path: /baz - - extra: true - name: foo - path: /foo - - extra: true - name: meh + - {name: bar, extra: false} + - {name: baz, path: /baz} + - {name: foo, extra: true, path: /foo} + - {name: meh, extra: true} .. versionadded:: 2.0.0 +List of two lists +""""""""""""""""" It is possible to use a list of lists as an input of the filter: .. code-block:: yaml+jinja - list3: "{{ [list1, list2]| + list3: "{{ [list1, list2] | community.general.lists_mergeby('name') }}" This produces the same result as in the previous example: @@ -60,26 +59,40 @@ This produces the same result as in the previous example: .. code-block:: yaml list3: - - extra: false - name: bar - - name: baz - path: /baz - - extra: true - name: foo - path: /foo - - extra: true - name: meh + - {name: bar, extra: false} + - {name: baz, path: /baz} + - {name: foo, extra: true, path: /foo} + - {name: meh, extra: true} + +Single list +""""""""""" +It is possible to merge single list: + +.. code-block:: yaml+jinja + + list3: "{{ [list1 + list2, []] | + community.general.lists_mergeby('name') }}" + +This produces the same result as in the previous example: + +.. code-block:: yaml + + list3: + - {name: bar, extra: false} + - {name: baz, path: /baz} + - {name: foo, extra: true, path: /foo} + - {name: meh, extra: true} -The filter also accepts two optional parameters: ``recursive`` and ``list_merge``. These parameters are only supported when used with ansible-base 2.10 or ansible-core, but not with Ansible 2.9. This is available since community.general 4.4.0. +The filter also accepts two optional parameters: :ansopt:`community.general.lists_mergeby#filter:recursive` and :ansopt:`community.general.lists_mergeby#filter:list_merge`. This is available since community.general 4.4.0. **recursive** - Is a boolean, default to ``False``. Should the ``community.general.lists_mergeby`` recursively merge nested hashes. 
Note: It does not depend on the value of the ``hash_behaviour`` setting in ``ansible.cfg``. + Is a boolean, default to ``false``. Should the :ansplugin:`community.general.lists_mergeby#filter` filter recursively merge nested hashes. Note: It does not depend on the value of the ``hash_behaviour`` setting in ``ansible.cfg``. **list_merge** - Is a string, its possible values are ``replace`` (default), ``keep``, ``append``, ``prepend``, ``append_rp`` or ``prepend_rp``. It modifies the behaviour of ``community.general.lists_mergeby`` when the hashes to merge contain arrays/lists. + Is a string, its possible values are :ansval:`replace` (default), :ansval:`keep`, :ansval:`append`, :ansval:`prepend`, :ansval:`append_rp` or :ansval:`prepend_rp`. It modifies the behaviour of :ansplugin:`community.general.lists_mergeby#filter` when the hashes to merge contain arrays/lists. -The examples below set ``recursive=true`` and display the differences among all six options of ``list_merge``. Functionality of the parameters is exactly the same as in the filter ``combine``. See :ref:`Combining hashes/dictionaries ` to learn details about these options. +The examples below set :ansopt:`community.general.lists_mergeby#filter:recursive=true` and display the differences among all six options of :ansopt:`community.general.lists_mergeby#filter:list_merge`. Functionality of the parameters is exactly the same as in the filter :ansplugin:`ansible.builtin.combine#filter`. See :ref:`Combining hashes/dictionaries ` to learn details about these options. 
Let us use the lists below in the following examples @@ -90,8 +103,7 @@ Let us use the lists below in the following examples param01: x: default_value y: default_value - list: - - default_value + list: [default_value] - name: myname02 param01: [1, 1, 2, 3] @@ -100,16 +112,17 @@ Let us use the lists below in the following examples param01: y: patch_value z: patch_value - list: - - patch_value + list: [patch_value] - name: myname02 - param01: [3, 4, 4, {key: value}] + param01: [3, 4, 4] -Example ``list_merge=replace`` (default): +list_merge=replace (default) +"""""""""""""""""""""""""""" +Example :ansopt:`community.general.lists_mergeby#filter:list_merge=replace` (default): .. code-block:: yaml+jinja - list3: "{{ [list1, list2]| + list3: "{{ [list1, list2] | community.general.lists_mergeby('name', recursive=true) }}" @@ -118,25 +131,22 @@ This produces: .. code-block:: yaml list3: - - name: myname01 - param01: - list: - - patch_value - x: default_value - y: patch_value - z: patch_value - - name: myname02 - param01: - - 3 - - 4 - - 4 - - key: value + - name: myname01 + param01: + x: default_value + y: patch_value + list: [patch_value] + z: patch_value + - name: myname02 + param01: [3, 4, 4] -Example ``list_merge=keep``: +list_merge=keep +""""""""""""""" +Example :ansopt:`community.general.lists_mergeby#filter:list_merge=keep`: .. code-block:: yaml+jinja - list3: "{{ [list1, list2]| + list3: "{{ [list1, list2] | community.general.lists_mergeby('name', recursive=true, list_merge='keep') }}" @@ -146,25 +156,22 @@ This produces: .. 
code-block:: yaml list3: - - name: myname01 - param01: - list: - - default_value - x: default_value - y: patch_value - z: patch_value - - name: myname02 - param01: - - 1 - - 1 - - 2 - - 3 + - name: myname01 + param01: + x: default_value + y: patch_value + list: [default_value] + z: patch_value + - name: myname02 + param01: [1, 1, 2, 3] -Example ``list_merge=append``: +list_merge=append +""""""""""""""""" +Example :ansopt:`community.general.lists_mergeby#filter:list_merge=append`: .. code-block:: yaml+jinja - list3: "{{ [list1, list2]| + list3: "{{ [list1, list2] | community.general.lists_mergeby('name', recursive=true, list_merge='append') }}" @@ -174,30 +181,22 @@ This produces: .. code-block:: yaml list3: - - name: myname01 - param01: - list: - - default_value - - patch_value - x: default_value - y: patch_value - z: patch_value - - name: myname02 - param01: - - 1 - - 1 - - 2 - - 3 - - 3 - - 4 - - 4 - - key: value + - name: myname01 + param01: + x: default_value + y: patch_value + list: [default_value, patch_value] + z: patch_value + - name: myname02 + param01: [1, 1, 2, 3, 3, 4, 4] -Example ``list_merge=prepend``: +list_merge=prepend +"""""""""""""""""" +Example :ansopt:`community.general.lists_mergeby#filter:list_merge=prepend`: .. code-block:: yaml+jinja - list3: "{{ [list1, list2]| + list3: "{{ [list1, list2] | community.general.lists_mergeby('name', recursive=true, list_merge='prepend') }}" @@ -207,30 +206,22 @@ This produces: .. 
code-block:: yaml list3: - - name: myname01 - param01: - list: - - patch_value - - default_value - x: default_value - y: patch_value - z: patch_value - - name: myname02 - param01: - - 3 - - 4 - - 4 - - key: value - - 1 - - 1 - - 2 - - 3 + - name: myname01 + param01: + x: default_value + y: patch_value + list: [patch_value, default_value] + z: patch_value + - name: myname02 + param01: [3, 4, 4, 1, 1, 2, 3] -Example ``list_merge=append_rp``: +list_merge=append_rp +"""""""""""""""""""" +Example :ansopt:`community.general.lists_mergeby#filter:list_merge=append_rp`: .. code-block:: yaml+jinja - list3: "{{ [list1, list2]| + list3: "{{ [list1, list2] | community.general.lists_mergeby('name', recursive=true, list_merge='append_rp') }}" @@ -240,29 +231,22 @@ This produces: .. code-block:: yaml list3: - - name: myname01 - param01: - list: - - default_value - - patch_value - x: default_value - y: patch_value - z: patch_value - - name: myname02 - param01: - - 1 - - 1 - - 2 - - 3 - - 4 - - 4 - - key: value + - name: myname01 + param01: + x: default_value + y: patch_value + list: [default_value, patch_value] + z: patch_value + - name: myname02 + param01: [1, 1, 2, 3, 4, 4] -Example ``list_merge=prepend_rp``: +list_merge=prepend_rp +""""""""""""""""""""" +Example :ansopt:`community.general.lists_mergeby#filter:list_merge=prepend_rp`: .. code-block:: yaml+jinja - list3: "{{ [list1, list2]| + list3: "{{ [list1, list2] | community.general.lists_mergeby('name', recursive=true, list_merge='prepend_rp') }}" @@ -272,21 +256,12 @@ This produces: .. 
code-block:: yaml list3: - - name: myname01 - param01: - list: - - patch_value - - default_value - x: default_value - y: patch_value - z: patch_value - - name: myname02 - param01: - - 3 - - 4 - - 4 - - key: value - - 1 - - 1 - - 2 + - name: myname01 + param01: + x: default_value + y: patch_value + list: [patch_value, default_value] + z: patch_value + - name: myname02 + param01: [3, 4, 4, 1, 1, 2] diff --git a/docs/docsite/rst/filter_guide_conversions.rst b/docs/docsite/rst/filter_guide_conversions.rst index 3214736dcb..ca0401762c 100644 --- a/docs/docsite/rst/filter_guide_conversions.rst +++ b/docs/docsite/rst/filter_guide_conversions.rst @@ -1,10 +1,15 @@ +.. + Copyright (c) Ansible Project + GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) + SPDX-License-Identifier: GPL-3.0-or-later + Conversions ----------- Parsing CSV files ^^^^^^^^^^^^^^^^^ -Ansible offers the :ref:`community.general.read_csv module ` to read CSV files. Sometimes you need to convert strings to CSV files instead. For this, the ``from_csv`` filter exists. +Ansible offers the :ansplugin:`community.general.read_csv module ` to read CSV files. Sometimes you need to convert strings to CSV files instead. For this, the :ansplugin:`community.general.from_csv filter ` exists. .. code-block:: yaml+jinja @@ -37,7 +42,7 @@ This produces: ] } -The ``from_csv`` filter has several keyword arguments to control its behavior: +The :ansplugin:`community.general.from_csv filter ` has several keyword arguments to control its behavior: :dialect: Dialect of the CSV file. Default is ``excel``. Other possible choices are ``excel-tab`` and ``unix``. If one of ``delimiter``, ``skipinitialspace`` or ``strict`` is specified, ``dialect`` is ignored. :fieldnames: A set of column names to use. If not provided, the first line of the CSV is assumed to contain the column names. 
@@ -50,7 +55,7 @@ The ``from_csv`` filter has several keyword arguments to control its behavior: Converting to JSON ^^^^^^^^^^^^^^^^^^ -`JC `_ is a CLI tool and Python library which allows to interpret output of various CLI programs as JSON. It is also available as a filter in community.general. This filter needs the `jc Python library `_ installed on the controller. +`JC `_ is a CLI tool and Python library which allows to interpret output of various CLI programs as JSON. It is also available as a filter in community.general, called :ansplugin:`community.general.jc#filter`. This filter needs the `jc Python library `_ installed on the controller. .. code-block:: yaml+jinja diff --git a/docs/docsite/rst/filter_guide_creating_identifiers.rst b/docs/docsite/rst/filter_guide_creating_identifiers.rst index 4e29f72fcb..6e0c730c60 100644 --- a/docs/docsite/rst/filter_guide_creating_identifiers.rst +++ b/docs/docsite/rst/filter_guide_creating_identifiers.rst @@ -1,3 +1,8 @@ +.. + Copyright (c) Ansible Project + GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) + SPDX-License-Identifier: GPL-3.0-or-later + Creating identifiers -------------------- @@ -6,7 +11,7 @@ The following filters allow to create identifiers. Hashids ^^^^^^^ -`Hashids `_ allow to convert sequences of integers to short unique string identifiers. This filter needs the `hashids Python library `_ installed on the controller. +`Hashids `_ allow to convert sequences of integers to short unique string identifiers. The :ansplugin:`community.general.hashids_encode#filter` and :ansplugin:`community.general.hashids_decode#filter` filters need the `hashids Python library `_ installed on the controller. .. 
code-block:: yaml+jinja @@ -47,7 +52,7 @@ The hashids filters accept keyword arguments to allow fine-tuning the hashids ge Random MACs ^^^^^^^^^^^ -You can use the ``random_mac`` filter to complete a partial `MAC address `_ to a random 6-byte MAC address. +You can use the :ansplugin:`community.general.random_mac filter ` to complete a partial `MAC address `_ to a random 6-byte MAC address. .. code-block:: yaml+jinja diff --git a/docs/docsite/rst/filter_guide_paths.rst b/docs/docsite/rst/filter_guide_paths.rst index b853909b23..41185832f2 100644 --- a/docs/docsite/rst/filter_guide_paths.rst +++ b/docs/docsite/rst/filter_guide_paths.rst @@ -1,14 +1,9 @@ +.. + Copyright (c) Ansible Project + GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) + SPDX-License-Identifier: GPL-3.0-or-later + Paths ----- -The ``path_join`` filter has been added in ansible-base 2.10. If you want to use this filter, but also need to support Ansible 2.9, you can use ``community.general``'s ``path_join`` shim, ``community.general.path_join``. This filter redirects to ``path_join`` for ansible-base 2.10 and ansible-core 2.11 or newer, and re-implements the filter for Ansible 2.9. - -.. code-block:: yaml+jinja - - # ansible-base 2.10 or newer: - path: {{ ('/etc', path, 'subdir', file) | path_join }} - - # Also works with Ansible 2.9: - path: {{ ('/etc', path, 'subdir', file) | community.general.path_join }} - -.. versionadded:: 3.0.0 +The :ansplugin:`ansible.builtin.path_join filter ` has been added in ansible-base 2.10. Community.general 3.0.0 and newer contains an alias ``community.general.path_join`` for this filter that could be used on Ansible 2.9 as well. Since community.general no longer supports Ansible 2.9, this is now a simple redirect to :ansplugin:`ansible.builtin.path_join filter `. 
diff --git a/docs/docsite/rst/filter_guide_selecting_json_data.rst b/docs/docsite/rst/filter_guide_selecting_json_data.rst index c3e52c87fa..bdf2624f3c 100644 --- a/docs/docsite/rst/filter_guide_selecting_json_data.rst +++ b/docs/docsite/rst/filter_guide_selecting_json_data.rst @@ -1,9 +1,14 @@ +.. + Copyright (c) Ansible Project + GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) + SPDX-License-Identifier: GPL-3.0-or-later + .. _ansible_collections.community.general.docsite.json_query_filter: Selecting JSON data: JSON queries --------------------------------- -To select a single element or a data subset from a complex data structure in JSON format (for example, Ansible facts), use the ``json_query`` filter. The ``json_query`` filter lets you query a complex JSON structure and iterate over it using a loop structure. +To select a single element or a data subset from a complex data structure in JSON format (for example, Ansible facts), use the :ansplugin:`community.general.json_query filter `. The :ansplugin:`community.general.json_query#filter` filter lets you query a complex JSON structure and iterate over it using a loop structure. .. note:: You must manually install the **jmespath** dependency on the Ansible controller before using this filter. This filter is built upon **jmespath**, and you can use the same syntax. For examples, see `jmespath examples `_. @@ -12,50 +17,50 @@ Consider this data structure: .. 
code-block:: yaml+jinja { - "domain_definition": { - "domain": { - "cluster": [ - { - "name": "cluster1" - }, - { - "name": "cluster2" - } - ], - "server": [ - { - "name": "server11", - "cluster": "cluster1", - "port": "8080" - }, - { - "name": "server12", - "cluster": "cluster1", - "port": "8090" - }, - { - "name": "server21", - "cluster": "cluster2", - "port": "9080" - }, - { - "name": "server22", - "cluster": "cluster2", - "port": "9090" - } - ], - "library": [ - { - "name": "lib1", - "target": "cluster1" - }, - { - "name": "lib2", - "target": "cluster2" - } - ] + "domain_definition": { + "domain": { + "cluster": [ + { + "name": "cluster1" + }, + { + "name": "cluster2" } + ], + "server": [ + { + "name": "server11", + "cluster": "cluster1", + "port": "8080" + }, + { + "name": "server12", + "cluster": "cluster1", + "port": "8090" + }, + { + "name": "server21", + "cluster": "cluster2", + "port": "9080" + }, + { + "name": "server22", + "cluster": "cluster2", + "port": "9090" + } + ], + "library": [ + { + "name": "lib1", + "target": "cluster1" + }, + { + "name": "lib2", + "target": "cluster2" + } + ] } + } } To extract all clusters from this structure, you can use the following query: @@ -119,7 +124,7 @@ To get a hash map with all ports and names of a cluster: var: item loop: "{{ domain_definition | community.general.json_query(server_name_cluster1_query) }}" vars: - server_name_cluster1_query: "domain.server[?cluster=='cluster2'].{name: name, port: port}" + server_name_cluster1_query: "domain.server[?cluster=='cluster1'].{name: name, port: port}" To extract ports from all clusters with name starting with 'server1': @@ -141,4 +146,4 @@ To extract ports from all clusters with name containing 'server1': vars: server_name_query: "domain.server[?contains(name,'server1')].port" -.. note:: while using ``starts_with`` and ``contains``, you have to use `` to_json | from_json `` filter for correct parsing of data structure. +.. 
note:: while using ``starts_with`` and ``contains``, you have to use ``to_json | from_json`` filter for correct parsing of data structure. diff --git a/docs/docsite/rst/filter_guide_working_with_times.rst b/docs/docsite/rst/filter_guide_working_with_times.rst index f218c9972e..032d44bb57 100644 --- a/docs/docsite/rst/filter_guide_working_with_times.rst +++ b/docs/docsite/rst/filter_guide_working_with_times.rst @@ -1,9 +1,14 @@ +.. + Copyright (c) Ansible Project + GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) + SPDX-License-Identifier: GPL-3.0-or-later + Working with times ------------------ -The ``to_time_unit`` filter allows to convert times from a human-readable string to a unit. For example, ``'4h 30min 12second' | community.general.to_time_unit('hour')`` gives the number of hours that correspond to 4 hours, 30 minutes and 12 seconds. +The :ansplugin:`community.general.to_time_unit filter ` allows to convert times from a human-readable string to a unit. For example, ``'4h 30min 12second' | community.general.to_time_unit('hour')`` gives the number of hours that correspond to 4 hours, 30 minutes and 12 seconds. -There are shorthands to directly convert to various units, like ``to_hours``, ``to_minutes``, ``to_seconds``, and so on. The following table lists all units that can be used: +There are shorthands to directly convert to various units, like :ansplugin:`community.general.to_hours#filter`, :ansplugin:`community.general.to_minutes#filter`, :ansplugin:`community.general.to_seconds#filter`, and so on. The following table lists all units that can be used: .. 
list-table:: Units :widths: 25 25 25 25 @@ -16,37 +21,37 @@ There are shorthands to directly convert to various units, like ``to_hours``, `` * - Millisecond - 1/1000 second - ``ms``, ``millisecond``, ``milliseconds``, ``msec``, ``msecs``, ``msecond``, ``mseconds`` - - ``to_milliseconds`` + - :ansplugin:`community.general.to_milliseconds#filter` * - Second - 1 second - ``s``, ``sec``, ``secs``, ``second``, ``seconds`` - - ``to_seconds`` + - :ansplugin:`community.general.to_seconds#filter` * - Minute - 60 seconds - ``m``, ``min``, ``mins``, ``minute``, ``minutes`` - - ``to_minutes`` + - :ansplugin:`community.general.to_minutes#filter` * - Hour - 60*60 seconds - ``h``, ``hour``, ``hours`` - - ``to_hours`` + - :ansplugin:`community.general.to_hours#filter` * - Day - 24*60*60 seconds - ``d``, ``day``, ``days`` - - ``to_days`` + - :ansplugin:`community.general.to_days#filter` * - Week - 7*24*60*60 seconds - ``w``, ``week``, ``weeks`` - - ``to_weeks`` + - :ansplugin:`community.general.to_weeks#filter` * - Month - 30*24*60*60 seconds - ``mo``, ``month``, ``months`` - - ``to_months`` + - :ansplugin:`community.general.to_months#filter` * - Year - 365*24*60*60 seconds - ``y``, ``year``, ``years`` - - ``to_years`` + - :ansplugin:`community.general.to_years#filter` -Note that months and years are using a simplified representation: a month is 30 days, and a year is 365 days. If you need different definitions of months or years, you can pass them as keyword arguments. For example, if you want a year to be 365.25 days, and a month to be 30.5 days, you can write ``'11months 4' | community.general.to_years(year=365.25, month=30.5)``. These keyword arguments can be specified to ``to_time_unit`` and to all shorthand filters. +Note that months and years are using a simplified representation: a month is 30 days, and a year is 365 days. If you need different definitions of months or years, you can pass them as keyword arguments. 
For example, if you want a year to be 365.25 days, and a month to be 30.5 days, you can write ``'11months 4' | community.general.to_years(year=365.25, month=30.5)``. These keyword arguments can be specified to :ansplugin:`community.general.to_time_unit#filter` and to all shorthand filters. .. code-block:: yaml+jinja diff --git a/docs/docsite/rst/filter_guide_working_with_unicode.rst b/docs/docsite/rst/filter_guide_working_with_unicode.rst index 25e7ba123d..e75b0f871b 100644 --- a/docs/docsite/rst/filter_guide_working_with_unicode.rst +++ b/docs/docsite/rst/filter_guide_working_with_unicode.rst @@ -1,9 +1,14 @@ +.. + Copyright (c) Ansible Project + GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) + SPDX-License-Identifier: GPL-3.0-or-later + Working with Unicode --------------------- -`Unicode `_ makes it possible to produce two strings which may be visually equivalent, but are comprised of distinctly different characters/character sequences. To address this ``Unicode`` defines `normalization forms `_ which avoid these distinctions by choosing a unique character sequence for a given visual representation. +`Unicode `_ makes it possible to produce two strings which may be visually equivalent, but are comprised of distinctly different characters/character sequences. To address this Unicode defines `normalization forms `_ which avoid these distinctions by choosing a unique character sequence for a given visual representation. -You can use the ``community.general.unicode_normalize`` filter to normalize ``Unicode`` strings within your playbooks. +You can use the :ansplugin:`community.general.unicode_normalize filter ` to normalize Unicode strings within your playbooks. .. code-block:: yaml+jinja @@ -23,7 +28,7 @@ This produces: "msg": true } -The ``community.general.unicode_normalize`` filter accepts a keyword argument to select the ``Unicode`` form used to normalize the input string. 
+The :ansplugin:`community.general.unicode_normalize filter ` accepts a keyword argument :ansopt:`community.general.unicode_normalize#filter:form` to select the Unicode form used to normalize the input string. :form: One of ``'NFC'`` (default), ``'NFD'``, ``'NFKC'``, or ``'NFKD'``. See the `Unicode reference `_ for more information. diff --git a/docs/docsite/rst/filter_guide_working_with_versions.rst b/docs/docsite/rst/filter_guide_working_with_versions.rst index 91cc6aca18..055bbcd217 100644 --- a/docs/docsite/rst/filter_guide_working_with_versions.rst +++ b/docs/docsite/rst/filter_guide_working_with_versions.rst @@ -1,7 +1,12 @@ +.. + Copyright (c) Ansible Project + GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) + SPDX-License-Identifier: GPL-3.0-or-later + Working with versions --------------------- -If you need to sort a list of version numbers, the Jinja ``sort`` filter is problematic. Since it sorts lexicographically, ``2.10`` will come before ``2.9``. To treat version numbers correctly, you can use the ``version_sort`` filter: +If you need to sort a list of version numbers, the Jinja ``sort`` filter is problematic. Since it sorts lexicographically, ``2.10`` will come before ``2.9``. To treat version numbers correctly, you can use the :ansplugin:`community.general.version_sort filter `: .. code-block:: yaml+jinja diff --git a/docs/docsite/rst/guide_alicloud.rst b/docs/docsite/rst/guide_alicloud.rst new file mode 100644 index 0000000000..b5ce2c063c --- /dev/null +++ b/docs/docsite/rst/guide_alicloud.rst @@ -0,0 +1,96 @@ +.. + Copyright (c) Ansible Project + GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) + SPDX-License-Identifier: GPL-3.0-or-later + +.. 
_ansible_collections.community.general.docsite.guide_alicloud: + +Alibaba Cloud Compute Services Guide +==================================== + +Introduction +```````````` + +The community.general collection contains several modules for controlling and managing Alibaba Cloud Compute Services (Alicloud). This guide +explains how to use the Alicloud Ansible modules together. + +All Alicloud modules require ``footmark`` - install it on your control machine with ``pip install footmark``. + +Cloud modules, including Alicloud modules, are usually executed on your local machine (the control machine) with ``connection: local``, rather than on remote machines defined in your hosts. + +Normally, you'll use the following pattern for plays that provision Alicloud resources: + +.. code-block:: yaml + + - hosts: localhost + connection: local + vars: + - ... + tasks: + - ... + +Authentication +`````````````` + +You can specify your Alicloud authentication credentials (access key and secret key) by passing them as +environment variables or by storing them in a vars file. + +To pass authentication credentials as environment variables: + +.. code-block:: console + + export ALICLOUD_ACCESS_KEY='Alicloud123' + export ALICLOUD_SECRET_KEY='AlicloudSecret123' + +To store authentication credentials in a vars file, encrypt them with :ref:`Ansible Vault ` to keep them secure, then list them: + +.. code-block:: yaml + + --- + alicloud_access_key: "--REMOVED--" + alicloud_secret_key: "--REMOVED--" + +Note that if you store your credentials in a vars file, you need to refer to them in each Alicloud module. For example: + +.. code-block:: yaml+jinja + + - community.general.ali_instance: + alicloud_access_key: "{{ alicloud_access_key }}" + alicloud_secret_key: "{{ alicloud_secret_key }}" + image_id: "..." 
+ +Provisioning +```````````` + +Alicloud modules create Alicloud ECS instances (:ansplugin:`community.general.ali_instance#module`) and retrieve information on these (:ansplugin:`community.general.ali_instance_info#module`). + +You can use the ``count`` parameter to control the number of resources you create or terminate. For example, if you want exactly 5 instances tagged ``NewECS``, set the ``count`` of instances to 5 and the ``count_tag`` to ``NewECS``, as shown in the last task of the example playbook below. If there are no instances with the tag ``NewECS``, the task creates 5 new instances. If there are 2 instances with that tag, the task creates 3 more. If there are 8 instances with that tag, the task terminates 3 of those instances. + +If you do not specify a ``count_tag``, the task creates the number of instances you specify in ``count`` with the ``instance_name`` you provide. + +.. code-block:: yaml+jinja + + # alicloud_setup.yml + + - hosts: localhost + connection: local + + tasks: + - name: Create a set of instances + community.general.ali_instance: + instance_type: ecs.n4.small + image_id: "{{ ami_id }}" + instance_name: "My-new-instance" + instance_tags: + Name: NewECS + Version: 0.0.1 + count: 5 + count_tag: + Name: NewECS + allocate_public_ip: true + max_bandwidth_out: 50 + register: create_instance + +In the example playbook above, data about the instances created by this playbook is saved in the variable defined by the ``register`` keyword in the task. + +Each Alicloud module offers a variety of parameter options. Not all options are demonstrated in the above example. See each individual module for further details and examples. diff --git a/docs/docsite/rst/guide_cmdrunner.rst b/docs/docsite/rst/guide_cmdrunner.rst new file mode 100644 index 0000000000..c1514ee340 --- /dev/null +++ b/docs/docsite/rst/guide_cmdrunner.rst @@ -0,0 +1,529 @@ +.. 
+ Copyright (c) Ansible Project + GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) + SPDX-License-Identifier: GPL-3.0-or-later + +.. _ansible_collections.community.general.docsite.guide_cmdrunner: + + +Command Runner guide +==================== + + +Introduction +^^^^^^^^^^^^ + +The ``ansible_collections.community.general.plugins.module_utils.cmd_runner`` module util provides the +``CmdRunner`` class to help execute external commands. The class is a wrapper around +the standard ``AnsibleModule.run_command()`` method, handling command arguments, localization setting, +output processing, check mode, and other features. + +It is even more useful when one command is used in multiple modules, so that you can define all options +in a module util file, and each module uses the same runner with different arguments. + +For the sake of clarity, throughout this guide, unless otherwise specified, we use the term *option* when referring to +Ansible module options, and the term *argument* when referring to the command line arguments for the external command. + + +Quickstart +"""""""""" + +``CmdRunner`` defines a command and a set of coded instructions on how to format +the command-line arguments, in which specific order, for a particular execution. +It relies on ``ansible.module_utils.basic.AnsibleModule.run_command()`` to actually execute the command. +There are other features; see more details throughout this document. + +To use ``CmdRunner`` you must start by creating an object. The example below is a simplified +version of the actual code in :ansplugin:`community.general.ansible_galaxy_install#module`: + +.. 
code-block:: python + + from ansible_collections.community.general.plugins.module_utils.cmd_runner import CmdRunner, cmd_runner_fmt + + runner = CmdRunner( + module, + command="ansible-galaxy", + arg_formats=dict( + type=cmd_runner_fmt.as_func(lambda v: [] if v == 'both' else [v]), + galaxy_cmd=cmd_runner_fmt.as_list(), + upgrade=cmd_runner_fmt.as_bool("--upgrade"), + requirements_file=cmd_runner_fmt.as_opt_val('-r'), + dest=cmd_runner_fmt.as_opt_val('-p'), + force=cmd_runner_fmt.as_bool("--force"), + no_deps=cmd_runner_fmt.as_bool("--no-deps"), + version=cmd_runner_fmt.as_fixed("--version"), + name=cmd_runner_fmt.as_list(), + ) + ) + +This is meant to be done once, then every time you need to execute the command you create a context and pass values as needed: + +.. code-block:: python + + # Run the command with these arguments, when values exist for them + with runner("type galaxy_cmd upgrade force no_deps dest requirements_file name", output_process=process) as ctx: + ctx.run(galaxy_cmd="install", upgrade=upgrade) + + # version is fixed, requires no value + with runner("version") as ctx: + dummy, stdout, dummy = ctx.run() + + # passes arg 'data' to AnsibleModule.run_command() + with runner("type name", data=stdin_data) as ctx: + dummy, stdout, dummy = ctx.run() + + # Another way of expressing it + dummy, stdout, dummy = runner("version").run() + +Note that you can pass values for the arguments when calling ``run()``, otherwise ``CmdRunner`` +uses the module options with the exact same names to provide values for the runner arguments. +If no value is passed and no module option is found for the name specified, then an exception is raised, unless +the argument is using ``cmd_runner_fmt.as_fixed`` as format function like the ``version`` in the example above. +See more about it below. + +In the first example, values of ``type``, ``force``, ``no_deps`` and others +are taken straight from the module, whilst ``galaxy_cmd`` and ``upgrade`` are +passed explicitly. + +.. 
note:: + + It is not possible to automatically retrieve values of suboptions. + +That generates a resulting command line similar to (example taken from the +output of an integration test): + +.. code-block:: python + + [ + "/bin/ansible-galaxy", + "collection", + "install", + "--upgrade", + "-p", + "", + "netbox.netbox", + ] + + +Argument formats +^^^^^^^^^^^^^^^^ + +As seen in the example, ``CmdRunner`` expects a parameter named ``arg_formats`` +defining how to format each CLI named argument. +An "argument format" is nothing but a function to transform the value of a variable +into something formatted for the command line. + + +Argument format function +"""""""""""""""""""""""" + +An ``arg_format`` function is defined in the form similar to: + +.. code-block:: python + + def func(value): + return ["--some-param-name", value] + +The parameter ``value`` can be of any type - although there are convenience +mechanisms to help handling sequence and mapping objects. + +The result is expected to be of the type ``Sequence[str]`` type (most commonly +``list[str]`` or ``tuple[str]``), otherwise it is considered to be a ``str``, +and it is coerced into ``list[str]``. +This resulting sequence of strings is added to the command line when that +argument is actually used. + +For example, if ``func`` returns: + +- ``["nee", 2, "shruberries"]``, the command line adds arguments ``"nee" "2" "shruberries"``. +- ``2 == 2``, the command line adds argument ``True``. +- ``None``, the command line adds argument ``None``. +- ``[]``, the command line adds no command line argument for that particular argument. + + +Convenience format methods +"""""""""""""""""""""""""" + +In the same module as ``CmdRunner`` there is a class ``cmd_runner_fmt`` which +provides a set of convenience methods that return format functions for common cases. +In the first block of code in the `Quickstart`_ section you can see the importing of +that class: + +.. 
code-block:: python + + from ansible_collections.community.general.plugins.module_utils.cmd_runner import CmdRunner, cmd_runner_fmt + +The same example shows how to make use of some of them in the instantiation of the ``CmdRunner`` object. +A description of each one of the convenience methods available and examples of how to use them is found below. +In these descriptions ``value`` refers to the single parameter passed to the formatting function. + +- ``cmd_runner_fmt.as_list()`` + This method does not receive any parameter; the function returns ``value`` as-is. + + - Creation: + ``cmd_runner_fmt.as_list()`` + - Examples: + +----------------------+---------------------+ + | Value | Outcome | + +======================+=====================+ + | ``["foo", "bar"]`` | ``["foo", "bar"]`` | + +----------------------+---------------------+ + | ``"foobar"`` | ``["foobar"]`` | + +----------------------+---------------------+ + +- ``cmd_runner_fmt.as_bool()`` + This method receives two different parameters: ``args_true`` and ``args_false``, the latter being optional. + If the boolean evaluation of ``value`` is ``True``, the format function returns ``args_true``. + If the boolean evaluation is ``False``, then the function returns ``args_false`` if it was provided, or ``[]`` otherwise. 
+ + - Creation (one arg): + ``cmd_runner_fmt.as_bool("--force")`` + - Examples: + +------------+--------------------+ + | Value | Outcome | + +============+====================+ + | ``True`` | ``["--force"]`` | + +------------+--------------------+ + | ``False`` | ``[]`` | + +------------+--------------------+ + - Creation (two args, ``None`` treated as ``False``): + ``cmd_runner_fmt.as_bool("--relax", "--dont-do-it")`` + - Examples: + +------------+----------------------+ + | Value | Outcome | + +============+======================+ + | ``True`` | ``["--relax"]`` | + +------------+----------------------+ + | ``False`` | ``["--dont-do-it"]`` | + +------------+----------------------+ + | | ``["--dont-do-it"]`` | + +------------+----------------------+ + - Creation (two args, ``None`` is ignored): + ``cmd_runner_fmt.as_bool("--relax", "--dont-do-it", ignore_none=True)`` + - Examples: + +------------+----------------------+ + | Value | Outcome | + +============+======================+ + | ``True`` | ``["--relax"]`` | + +------------+----------------------+ + | ``False`` | ``["--dont-do-it"]`` | + +------------+----------------------+ + | | ``[]`` | + +------------+----------------------+ + +- ``cmd_runner_fmt.as_bool_not()`` + This method receives one parameter, which is returned by the function when the boolean evaluation + of ``value`` is ``False``. + + - Creation: + ``cmd_runner_fmt.as_bool_not("--no-deps")`` + - Examples: + +-------------+---------------------+ + | Value | Outcome | + +=============+=====================+ + | ``True`` | ``[]`` | + +-------------+---------------------+ + | ``False`` | ``["--no-deps"]`` | + +-------------+---------------------+ + +- ``cmd_runner_fmt.as_optval()`` + This method receives one parameter ``arg``, the function returns the string concatenation + of ``arg`` and ``value``. 
+ + - Creation: + ``cmd_runner_fmt.as_optval("-i")`` + - Examples: + +---------------+---------------------+ + | Value | Outcome | + +===============+=====================+ + | ``3`` | ``["-i3"]`` | + +---------------+---------------------+ + | ``foobar`` | ``["-ifoobar"]`` | + +---------------+---------------------+ + +- ``cmd_runner_fmt.as_opt_val()`` + This method receives one parameter ``arg``, the function returns ``[arg, value]``. + + - Creation: + ``cmd_runner_fmt.as_opt_val("--name")`` + - Examples: + +--------------+--------------------------+ + | Value | Outcome | + +==============+==========================+ + | ``abc`` | ``["--name", "abc"]`` | + +--------------+--------------------------+ + +- ``cmd_runner_fmt.as_opt_eq_val()`` + This method receives one parameter ``arg``, the function returns the string of the form + ``{arg}={value}``. + + - Creation: + ``cmd_runner_fmt.as_opt_eq_val("--num-cpus")`` + - Examples: + +------------+-------------------------+ + | Value | Outcome | + +============+=========================+ + | ``10`` | ``["--num-cpus=10"]`` | + +------------+-------------------------+ + +- ``cmd_runner_fmt.as_fixed()`` + This method defines one or more fixed arguments that are returned by the generated function + regardless whether ``value`` is passed to it or not. + + This method accepts these arguments in one of three forms: + + * one scalar parameter ``arg``, which will be returned as ``[arg]`` by the function, or + * one sequence parameter, such as a list, ``arg``, which will be returned by the function as ``arg[0]``, or + * multiple parameters ``args``, which will be returned as ``args`` directly by the function. + + See the examples below for each one of those forms. And, stressing that the generated function expects no ``value`` - if one + is provided then it is ignored. 
+ + - Creation (one scalar argument): + * ``cmd_runner_fmt.as_fixed("--version")`` + - Examples: + +---------+--------------------------------------+ + | Value | Outcome | + +=========+======================================+ + | | * ``["--version"]`` | + +---------+--------------------------------------+ + | 57 | * ``["--version"]`` | + +---------+--------------------------------------+ + + - Creation (one sequence argument): + * ``cmd_runner_fmt.as_fixed(["--list", "--json"])`` + - Examples: + +---------+--------------------------------------+ + | Value | Outcome | + +=========+======================================+ + | | * ``["--list", "--json"]`` | + +---------+--------------------------------------+ + | True | * ``["--list", "--json"]`` | + +---------+--------------------------------------+ + + - Creation (multiple arguments): + * ``cmd_runner_fmt.as_fixed("--one", "--two", "--three")`` + - Examples: + +---------+--------------------------------------+ + | Value | Outcome | + +=========+======================================+ + | | * ``["--one", "--two", "--three"]`` | + +---------+--------------------------------------+ + | False | * ``["--one", "--two", "--three"]`` | + +---------+--------------------------------------+ + + - Note: + This is the only special case in which a value can be missing for the formatting function. + The first example here comes from the code in `Quickstart`_. + In that case, the module has code to determine the command's version so that it can assert compatibility. + There is no *value* to be passed for that CLI argument. + +- ``cmd_runner_fmt.as_map()`` + This method receives one parameter ``arg`` which must be a dictionary, and an optional parameter ``default``. + The function returns the evaluation of ``arg[value]``. + If ``value not in arg``, then it returns ``default`` if defined, otherwise ``[]``. 
+ + - Creation: + ``cmd_runner_fmt.as_map(dict(a=1, b=2, c=3), default=42)`` + - Examples: + +---------------------+---------------+ + | Value | Outcome | + +=====================+===============+ + | ``"b"`` | ``["2"]`` | + +---------------------+---------------+ + | ``"yabadabadoo"`` | ``["42"]`` | + +---------------------+---------------+ + + - Note: + If ``default`` is not specified, invalid values return an empty list, meaning they are silently ignored. + +- ``cmd_runner_fmt.as_func()`` + This method receives one parameter ``arg`` which is itself a format function and it must abide by the rules described above. + + - Creation: + ``cmd_runner_fmt.as_func(lambda v: [] if v == 'stable' else ['--channel', '{0}'.format(v)])`` + - Note: + The outcome for that depends entirely on the function provided by the developer. + + +Other features for argument formatting +"""""""""""""""""""""""""""""""""""""" + +Some additional features are available as decorators: + +- ``cmd_runner_fmt.unpack_args()`` + This decorator unpacks the incoming ``value`` as a list of elements. + + For example, in ``ansible_collections.community.general.plugins.module_utils.puppet``, it is used as: + + .. code-block:: python + + @cmd_runner_fmt.unpack_args + def execute_func(execute, manifest): + if execute: + return ["--execute", execute] + else: + return [manifest] + + runner = CmdRunner( + module, + command=_prepare_base_cmd(), + path_prefix=_PUPPET_PATH_PREFIX, + arg_formats=dict( + # ... + _execute=cmd_runner_fmt.as_func(execute_func), + # ... + ), + ) + + Then, in :ansplugin:`community.general.puppet#module` it is put to use with: + + .. code-block:: python + + with runner(args_order) as ctx: + rc, stdout, stderr = ctx.run(_execute=[p['execute'], p['manifest']]) + +- ``cmd_runner_fmt.unpack_kwargs()`` + Conversely, this decorator unpacks the incoming ``value`` as a ``dict``-like object. 
+ +- ``cmd_runner_fmt.stack()`` + This decorator assumes ``value`` is a sequence and concatenates the output + of the wrapped function applied to each element of the sequence. + + For example, in :ansplugin:`community.general.django_check#module`, the argument format for ``database`` + is defined as: + + .. code-block:: python + + arg_formats = dict( + # ... + database=cmd_runner_fmt.stack(cmd_runner_fmt.as_opt_val)("--database"), + # ... + ) + + When receiving a list ``["abc", "def"]``, the output is: + + .. code-block:: python + + ["--database", "abc", "--database", "def"] + + +Command Runner +^^^^^^^^^^^^^^ + +Settings that can be passed to the ``CmdRunner`` constructor are: + +- ``module: AnsibleModule`` + Module instance. Mandatory parameter. +- ``command: str | list[str]`` + Command to be executed. It can be a single string, the executable name, or a list + of strings containing the executable name as the first element and, optionally, fixed parameters. + Those parameters are used in all executions of the runner. + The *executable* pointed by this parameter (whether itself when ``str`` or its first element when ``list``) is + processed using ``AnsibleModule.get_bin_path()`` *unless* it is an absolute path or contains the character ``/``. +- ``arg_formats: dict`` + Mapping of argument names to formatting functions. +- ``default_args_order: str`` + As the name suggests, a default ordering for the arguments. When + this is passed, the context can be created without specifying ``args_order``. Defaults to ``()``. +- ``check_rc: bool`` + When ``True``, if the return code from the command is not zero, the module exits + with an error. Defaults to ``False``. +- ``path_prefix: list[str]`` + If the command being executed is installed in a non-standard directory path, + additional paths might be provided to search for the executable. Defaults to ``None``. +- ``environ_update: dict`` + Pass additional environment variables to be set during the command execution. 
+ Defaults to ``None``. +- ``force_lang: str`` + It is usually important to force the locale to one specific value, so that responses are consistent and, therefore, parseable. + Please note that using this option (which is enabled by default) overwrites the environment variables ``LANGUAGE`` and ``LC_ALL``. + To disable this mechanism, set this parameter to ``None``. + In community.general 9.1.0 a special value ``auto`` was introduced for this parameter, with the effect + that ``CmdRunner`` then tries to determine the best parseable locale for the runtime. + It should become the default value in the future, but for the time being the default value is ``C``. + +When creating a context, the additional settings that can be passed to the call are: + +- ``args_order: str`` + Establishes the order in which the arguments are rendered in the command line. + This parameter is mandatory unless ``default_args_order`` was provided to the runner instance. +- ``output_process: func`` + Function to transform the output of the executable into different values or formats. + See examples in section below. +- ``check_mode_skip: bool`` + Whether to skip the actual execution of the command when the module is in check mode. + Defaults to ``False``. +- ``check_mode_return: any`` + If ``check_mode_skip=True``, then return this value instead. +- valid named arguments to ``AnsibleModule.run_command()`` + Other than ``args``, any valid argument to ``run_command()`` can be passed when setting up the run context. + For example, ``data`` can be used to send information to the command's standard input. + Or ``cwd`` can be used to run the command inside a specific working directory. + +Additionally, any other valid parameters for ``AnsibleModule.run_command()`` may be passed, but unexpected behavior +might occur if redefining options already present in the runner or its context creation. Use with caution. 
+ + +Processing results +^^^^^^^^^^^^^^^^^^ + +As mentioned, ``CmdRunner`` uses ``AnsibleModule.run_command()`` to execute the external command, +and it passes the return value from that method back to the caller. That means that, +by default, the result is going to be a tuple ``(rc, stdout, stderr)``. + +If you need to transform or process that output, you can pass a function to the context, +as the ``output_process`` parameter. It must be a function like: + +.. code-block:: python + + def process(rc, stdout, stderr): + # do some magic + return processed_value # whatever that is + +In that case, the return of ``run()`` is the ``processed_value`` returned by the function. + + +PythonRunner +^^^^^^^^^^^^ + +The ``PythonRunner`` class is a specialized version of ``CmdRunner``, geared towards the execution of +Python scripts. It features two extra and mutually exclusive parameters ``python`` and ``venv`` in its constructor: + +.. code-block:: python + + from ansible_collections.community.general.plugins.module_utils.python_runner import PythonRunner + from ansible_collections.community.general.plugins.module_utils.cmd_runner import cmd_runner_fmt + + runner = PythonRunner( + module, + command=["-m", "django"], + arg_formats=dict(...), + python="python", + venv="/path/to/some/venv", + ) + +The default value for ``python`` is the string ``python``, and for ``venv`` it is ``None``. + +The command line produced by such a command with ``python="python3.12"`` is something like: + +.. code-block:: shell + + /usr/bin/python3.12 -m django ... + +And the command line for ``venv="/work/venv"`` is like: + +.. code-block:: shell + + /work/venv/bin/python -m django ... + +You may provide the value of the ``command`` argument as a string (in that case the string is used as a script name) +or as a list, in which case the elements of the list must be valid arguments for the Python interpreter, as in the example above. +See `Command line and environment <https://docs.python.org/3/using/cmdline.html>`_ for more details. 
+ +If the parameter ``python`` is an absolute path, or contains directory separators, such as ``/``, then it is used +as-is, otherwise the runtime ``PATH`` is searched for that command name. + +Other than that, everything else works as in ``CmdRunner``. + +.. versionadded:: 4.8.0 diff --git a/docs/docsite/rst/guide_deps.rst b/docs/docsite/rst/guide_deps.rst new file mode 100644 index 0000000000..1a44051ee4 --- /dev/null +++ b/docs/docsite/rst/guide_deps.rst @@ -0,0 +1,75 @@ +.. + Copyright (c) Ansible Project + GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) + SPDX-License-Identifier: GPL-3.0-or-later + +.. _ansible_collections.community.general.docsite.guide_deps: + +``deps`` Guide +============== + + +Using ``deps`` +^^^^^^^^^^^^^^ + +The ``ansible_collections.community.general.plugins.module_utils.deps`` module util simplifies +the importing of code as described in :ref:`Importing and using shared code `. +Please notice that ``deps`` is meant to be used specifically with Ansible modules, and not other types of plugins. + +The same example from the Developer Guide would become: + +.. code-block:: python + + from ansible_collections.community.general.plugins.module_utils import deps + + + with deps.declare("foo"): + import foo + +Then in ``main()``, just after the argspec (or anywhere in the code, for that matter), do + +.. code-block:: python + + deps.validate(module) # assuming module is a valid AnsibleModule instance + +By default, ``deps`` will rely on ``ansible.module_utils.basic.missing_required_lib`` to generate +a message about a failing import. That function accepts parameters ``reason`` and ``url``, and +and so does ``deps```: + +.. code-block:: python + + with deps.declare("foo", reason="foo is needed to properly bar", url="https://foo.bar.io"): + import foo + +If you would rather write a custom message instead of using ``missing_required_lib`` then do: + +.. 
code-block:: python + + with deps.declare("foo", msg="Custom msg explaining why foo is needed"): + import foo + +``deps`` allows for multiple dependencies to be declared: + +.. code-block:: python + + with deps.declare("foo"): + import foo + + with deps.declare("bar"): + import bar + + with deps.declare("doe"): + import doe + +By default, ``deps.validate()`` will check on all the declared dependencies, but if so desired, +they can be validated selectively by doing: + +.. code-block:: python + + deps.validate(module, "foo") # only validates the "foo" dependency + + deps.validate(module, "doe:bar") # only validates the "doe" and "bar" dependencies + + deps.validate(module, "-doe:bar") # validates all dependencies except "doe" and "bar" + +.. versionadded:: 6.1.0 diff --git a/docs/docsite/rst/guide_iocage.rst b/docs/docsite/rst/guide_iocage.rst new file mode 100644 index 0000000000..67eb0e8a99 --- /dev/null +++ b/docs/docsite/rst/guide_iocage.rst @@ -0,0 +1,15 @@ +.. + Copyright (c) Ansible Project + GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) + SPDX-License-Identifier: GPL-3.0-or-later + +.. _ansible_collections.community.general.docsite.guide_iocage: + +************ +Iocage Guide +************ + +.. toctree:: + :maxdepth: 1 + + guide_iocage_inventory diff --git a/docs/docsite/rst/guide_iocage_inventory.rst b/docs/docsite/rst/guide_iocage_inventory.rst new file mode 100644 index 0000000000..4a410c35db --- /dev/null +++ b/docs/docsite/rst/guide_iocage_inventory.rst @@ -0,0 +1,31 @@ +.. + Copyright (c) Ansible Project + GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) + SPDX-License-Identifier: GPL-3.0-or-later + +.. 
_ansible_collections.community.general.docsite.guide_iocage.guide_iocage_inventory: + +community.general.iocage inventory plugin +========================================= + +The inventory plugin :ansplugin:`community.general.iocage#inventory` gets the inventory hosts from the iocage jail manager. + +See: + +* `iocage - A FreeBSD Jail Manager `_ +* `man iocage `_ +* `Jails and Containers `_ + +.. note:: + The output of the examples is YAML formatted. See the option :ansopt:`ansible.builtin.default#callback:result_format`. + +.. toctree:: + :caption: Table of Contents + :maxdepth: 1 + + guide_iocage_inventory_basics + guide_iocage_inventory_dhcp + guide_iocage_inventory_hooks + guide_iocage_inventory_properties + guide_iocage_inventory_tags + guide_iocage_inventory_aliases diff --git a/docs/docsite/rst/guide_iocage_inventory_aliases.rst b/docs/docsite/rst/guide_iocage_inventory_aliases.rst new file mode 100644 index 0000000000..431403d733 --- /dev/null +++ b/docs/docsite/rst/guide_iocage_inventory_aliases.rst @@ -0,0 +1,200 @@ +.. + Copyright (c) Ansible Project + GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) + SPDX-License-Identifier: GPL-3.0-or-later + +.. _ansible_collections.community.general.docsite.guide_iocage.guide_iocage_inventory.guide_iocage_inventory_aliases: + +Aliases +------- + +Quoting :ref:`inventory_aliases`: + + The ``inventory_hostname`` is the unique identifier for a host in Ansible, this can be an IP or a hostname, but also just an 'alias' or short name for the host. + +As root at the iocage host, stop and destroy all jails: + +.. 
code-block:: console + + shell> iocage stop ALL + * Stopping srv_1 + + Executing prestop OK + + Stopping services OK + + Tearing down VNET OK + + Removing devfs_ruleset: 1000 OK + + Removing jail process OK + + Executing poststop OK + * Stopping srv_2 + + Executing prestop OK + + Stopping services OK + + Tearing down VNET OK + + Removing devfs_ruleset: 1001 OK + + Removing jail process OK + + Executing poststop OK + * Stopping srv_3 + + Executing prestop OK + + Stopping services OK + + Tearing down VNET OK + + Removing devfs_ruleset: 1002 OK + + Removing jail process OK + + Executing poststop OK + ansible_client is not running! + + shell> iocage destroy -f srv_1 srv_2 srv_3 + Destroying srv_1 + Destroying srv_2 + Destroying srv_3 + +Create three VNET jails with a DHCP interface from the template *ansible_client*. Use the option ``--count``: + +.. code-block:: console + + shell> iocage create --short --template ansible_client --count 3 bpf=1 dhcp=1 vnet=1 + 1c11de2d successfully created! + 9d94cc9e successfully created! + 052b9557 successfully created! + +The names are random. Start the jails: + +.. code-block:: console + + shell> iocage start ALL + No default gateway found for ipv6. + * Starting 052b9557 + + Started OK + + Using devfs_ruleset: 1000 (iocage generated default) + + Configuring VNET OK + + Using IP options: vnet + + Starting services OK + + Executing poststart OK + + DHCP Address: 10.1.0.137/24 + No default gateway found for ipv6. + * Starting 1c11de2d + + Started OK + + Using devfs_ruleset: 1001 (iocage generated default) + + Configuring VNET OK + + Using IP options: vnet + + Starting services OK + + Executing poststart OK + + DHCP Address: 10.1.0.146/24 + No default gateway found for ipv6. 
+ * Starting 9d94cc9e + + Started OK + + Using devfs_ruleset: 1002 (iocage generated default) + + Configuring VNET OK + + Using IP options: vnet + + Starting services OK + + Executing poststart OK + + DHCP Address: 10.1.0.115/24 + Please convert back to a jail before trying to start ansible_client + +List the jails: + +.. code-block:: console + + shell> iocage list -l + +-----+----------+------+-------+------+-----------------+--------------------+-----+----------------+----------+ + | JID | NAME | BOOT | STATE | TYPE | RELEASE | IP4 | IP6 | TEMPLATE | BASEJAIL | + +=====+==========+======+=======+======+=================+====================+=====+================+==========+ + | 207 | 052b9557 | off | up | jail | 14.2-RELEASE-p3 | epair0b|10.1.0.137 | - | ansible_client | no | + +-----+----------+------+-------+------+-----------------+--------------------+-----+----------------+----------+ + | 208 | 1c11de2d | off | up | jail | 14.2-RELEASE-p3 | epair0b|10.1.0.146 | - | ansible_client | no | + +-----+----------+------+-------+------+-----------------+--------------------+-----+----------------+----------+ + | 209 | 9d94cc9e | off | up | jail | 14.2-RELEASE-p3 | epair0b|10.1.0.115 | - | ansible_client | no | + +-----+----------+------+-------+------+-----------------+--------------------+-----+----------------+----------+ + +Set notes. The tag *alias* will be used to create inventory aliases: + +.. code-block:: console + + shell> iocage set notes="vmm=iocage_02 project=foo alias=srv_1" 052b9557 + notes: none -> vmm=iocage_02 project=foo alias=srv_1 + shell> iocage set notes="vmm=iocage_02 project=foo alias=srv_2" 1c11de2d + notes: none -> vmm=iocage_02 project=foo alias=srv_2 + shell> iocage set notes="vmm=iocage_02 project=bar alias=srv_3" 9d94cc9e + notes: none -> vmm=iocage_02 project=bar alias=srv_3 + +Update the inventory configuration. Set the option +:ansopt:`community.general.iocage#inventory:inventory_hostname_tag` to :ansval:`alias`. 
This tag keeps the +value of the alias. The option :ansopt:`community.general.iocage#inventory:get_properties` must be +enabled. For example, ``hosts/02_iocage.yml`` contains: + +.. code-block:: yaml + + plugin: community.general.iocage + host: 10.1.0.73 + user: admin + get_properties: true + inventory_hostname_tag: alias + hooks_results: + - /var/db/dhclient-hook.address.epair0b + compose: + ansible_host: (iocage_hooks.0 == '-') | ternary(iocage_ip4, iocage_hooks.0) + iocage_tags: dict(iocage_properties.notes | split | map('split', '=')) + keyed_groups: + - prefix: vmm + key: iocage_tags.vmm + - prefix: project + key: iocage_tags.project + +Display tags and groups. Create a playbook ``pb-test-groups.yml`` with the following content: + +.. code-block:: yaml+jinja + + - hosts: all + remote_user: admin + + vars: + + ansible_python_interpreter: auto_silent + + tasks: + + - debug: + var: iocage_tags + + - debug: + msg: | + {% for group in groups %} + {{ group }}: {{ groups[group] }} + {% endfor %} + run_once: true + +Run the playbook: + +.. 
code-block:: console + + shell> ansible-playbook -i hosts/02_iocage.yml pb-test-groups.yml + + PLAY [all] ********************************************************************************************************** + + TASK [debug] ******************************************************************************************************** + ok: [srv_1] => + iocage_tags: + alias: srv_1 + project: foo + vmm: iocage_02 + ok: [srv_2] => + iocage_tags: + alias: srv_2 + project: foo + vmm: iocage_02 + ok: [srv_3] => + iocage_tags: + alias: srv_3 + project: bar + vmm: iocage_02 + + TASK [debug] ******************************************************************************************************** + ok: [srv_1] => + msg: |- + all: ['srv_1', 'srv_2', 'srv_3'] + ungrouped: [] + vmm_iocage_02: ['srv_1', 'srv_2', 'srv_3'] + project_foo: ['srv_1', 'srv_2'] + project_bar: ['srv_3'] + + PLAY RECAP ********************************************************************************************************** + srv_1 : ok=2 changed=0 unreachable=0 failed=0 skipped=0 rescued=0 ignored=0 + srv_2 : ok=1 changed=0 unreachable=0 failed=0 skipped=0 rescued=0 ignored=0 + srv_3 : ok=1 changed=0 unreachable=0 failed=0 skipped=0 rescued=0 ignored=0 diff --git a/docs/docsite/rst/guide_iocage_inventory_basics.rst b/docs/docsite/rst/guide_iocage_inventory_basics.rst new file mode 100644 index 0000000000..f198edc4f4 --- /dev/null +++ b/docs/docsite/rst/guide_iocage_inventory_basics.rst @@ -0,0 +1,128 @@ +.. + Copyright (c) Ansible Project + GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) + SPDX-License-Identifier: GPL-3.0-or-later + +.. _ansible_collections.community.general.docsite.guide_iocage.guide_iocage_inventory.guide_iocage_inventory_basics: + +Basics +------ + +As root at the iocage host, create three VNET jails with a DHCP interface from the template +*ansible_client*: + +.. 
code-block:: console + + shell> iocage create --template ansible_client --name srv_1 bpf=1 dhcp=1 vnet=1 + srv_1 successfully created! + shell> iocage create --template ansible_client --name srv_2 bpf=1 dhcp=1 vnet=1 + srv_2 successfully created! + shell> iocage create --template ansible_client --name srv_3 bpf=1 dhcp=1 vnet=1 + srv_3 successfully created! + +See: `Configuring a VNET Jail `_. + +As admin at the controller, list the jails: + +.. code-block:: console + + shell> ssh admin@10.1.0.73 iocage list -l + +------+-------+------+-------+------+-----------------+--------------------+-----+----------------+----------+ + | JID | NAME | BOOT | STATE | TYPE | RELEASE | IP4 | IP6 | TEMPLATE | BASEJAIL | + +======+=======+======+=======+======+=================+====================+=====+================+==========+ + | None | srv_1 | off | down | jail | 14.2-RELEASE-p3 | DHCP (not running) | - | ansible_client | no | + +------+-------+------+-------+------+-----------------+--------------------+-----+----------------+----------+ + | None | srv_2 | off | down | jail | 14.2-RELEASE-p3 | DHCP (not running) | - | ansible_client | no | + +------+-------+------+-------+------+-----------------+--------------------+-----+----------------+----------+ + | None | srv_3 | off | down | jail | 14.2-RELEASE-p3 | DHCP (not running) | - | ansible_client | no | + +------+-------+------+-------+------+-----------------+--------------------+-----+----------------+----------+ + +Create the inventory file ``hosts/02_iocage.yml`` + +.. code-block:: yaml + + plugin: community.general.iocage + host: 10.1.0.73 + user: admin + +Display the inventory: + +.. 
code-block:: console + + shell> ansible-inventory -i hosts/02_iocage.yml --list --yaml + all: + children: + ungrouped: + hosts: + srv_1: + iocage_basejail: 'no' + iocage_boot: 'off' + iocage_ip4: '-' + iocage_ip4_dict: + ip4: [] + msg: DHCP (not running) + iocage_ip6: '-' + iocage_jid: None + iocage_release: 14.2-RELEASE-p3 + iocage_state: down + iocage_template: ansible_client + iocage_type: jail + srv_2: + iocage_basejail: 'no' + iocage_boot: 'off' + iocage_ip4: '-' + iocage_ip4_dict: + ip4: [] + msg: DHCP (not running) + iocage_ip6: '-' + iocage_jid: None + iocage_release: 14.2-RELEASE-p3 + iocage_state: down + iocage_template: ansible_client + iocage_type: jail + srv_3: + iocage_basejail: 'no' + iocage_boot: 'off' + iocage_ip4: '-' + iocage_ip4_dict: + ip4: [] + msg: DHCP (not running) + iocage_ip6: '-' + iocage_jid: None + iocage_release: 14.2-RELEASE-p3 + iocage_state: down + iocage_template: ansible_client + iocage_type: jail + +Optionally, create shared IP jails: + +.. code-block:: console + + shell> iocage create --template ansible_client --name srv_1 ip4_addr="em0|10.1.0.101/24" + srv_1 successfully created! + shell> iocage create --template ansible_client --name srv_2 ip4_addr="em0|10.1.0.102/24" + srv_2 successfully created! + shell> iocage create --template ansible_client --name srv_3 ip4_addr="em0|10.1.0.103/24" + srv_3 successfully created! 
+ shell> iocage list -l + +------+-------+------+-------+------+-----------------+-------------------+-----+----------------+----------+ + | JID | NAME | BOOT | STATE | TYPE | RELEASE | IP4 | IP6 | TEMPLATE | BASEJAIL | + +======+=======+======+=======+======+=================+===================+=====+================+==========+ + | None | srv_1 | off | down | jail | 14.2-RELEASE-p3 | em0|10.1.0.101/24 | - | ansible_client | no | + +------+-------+------+-------+------+-----------------+-------------------+-----+----------------+----------+ + | None | srv_2 | off | down | jail | 14.2-RELEASE-p3 | em0|10.1.0.102/24 | - | ansible_client | no | + +------+-------+------+-------+------+-----------------+-------------------+-----+----------------+----------+ + | None | srv_3 | off | down | jail | 14.2-RELEASE-p3 | em0|10.1.0.103/24 | - | ansible_client | no | + +------+-------+------+-------+------+-----------------+-------------------+-----+----------------+----------+ + +See: `Configuring a Shared IP Jail `_ + +If iocage needs environment variable(s), use the option :ansopt:`community.general.iocage#inventory:env`. For example, + +.. code-block:: yaml + + plugin: community.general.iocage + host: 10.1.0.73 + user: admin + env: + CRYPTOGRAPHY_OPENSSL_NO_LEGACY: 1 diff --git a/docs/docsite/rst/guide_iocage_inventory_dhcp.rst b/docs/docsite/rst/guide_iocage_inventory_dhcp.rst new file mode 100644 index 0000000000..3c37366ca6 --- /dev/null +++ b/docs/docsite/rst/guide_iocage_inventory_dhcp.rst @@ -0,0 +1,175 @@ +.. + Copyright (c) Ansible Project + GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) + SPDX-License-Identifier: GPL-3.0-or-later + +.. _ansible_collections.community.general.docsite.guide_iocage.guide_iocage_inventory.guide_iocage_inventory_dhcp: + +DHCP +---- + +As root at the iocage host, start the jails: + +.. code-block:: console + + shell> iocage start ALL + No default gateway found for ipv6. 
+ * Starting srv_1 + + Started OK + + Using devfs_ruleset: 1000 (iocage generated default) + + Configuring VNET OK + + Using IP options: vnet + + Starting services OK + + Executing poststart OK + + DHCP Address: 10.1.0.183/24 + No default gateway found for ipv6. + * Starting srv_2 + + Started OK + + Using devfs_ruleset: 1001 (iocage generated default) + + Configuring VNET OK + + Using IP options: vnet + + Starting services OK + + Executing poststart OK + + DHCP Address: 10.1.0.204/24 + No default gateway found for ipv6. + * Starting srv_3 + + Started OK + + Using devfs_ruleset: 1002 (iocage generated default) + + Configuring VNET OK + + Using IP options: vnet + + Starting services OK + + Executing poststart OK + + DHCP Address: 10.1.0.169/24 + Please convert back to a jail before trying to start ansible_client + +List the jails: + +.. code-block:: console + + shell> iocage list -l + +-----+-------+------+-------+------+-----------------+--------------------+-----+----------------+----------+ + | JID | NAME | BOOT | STATE | TYPE | RELEASE | IP4 | IP6 | TEMPLATE | BASEJAIL | + +=====+=======+======+=======+======+=================+====================+=====+================+==========+ + | 204 | srv_1 | off | up | jail | 14.2-RELEASE-p3 | epair0b|10.1.0.183 | - | ansible_client | no | + +-----+-------+------+-------+------+-----------------+--------------------+-----+----------------+----------+ + | 205 | srv_2 | off | up | jail | 14.2-RELEASE-p3 | epair0b|10.1.0.204 | - | ansible_client | no | + +-----+-------+------+-------+------+-----------------+--------------------+-----+----------------+----------+ + | 206 | srv_3 | off | up | jail | 14.2-RELEASE-p3 | epair0b|10.1.0.169 | - | ansible_client | no | + +-----+-------+------+-------+------+-----------------+--------------------+-----+----------------+----------+ + +As admin at the controller, list the jails. The IP4 tab says "... address requires root": + +.. 
code-block:: console + + shell> ssh admin@10.1.0.73 iocage list -l + +-----+-------+------+-------+------+-----------------+-----------------------------------------+-----+----------------+----------+ + | JID | NAME | BOOT | STATE | TYPE | RELEASE | IP4 | IP6 | TEMPLATE | BASEJAIL | + +=====+=======+======+=======+======+=================+=========================================+=====+================+==========+ + | 204 | srv_1 | off | up | jail | 14.2-RELEASE-p3 | DHCP (running -- address requires root) | - | ansible_client | no | + +-----+-------+------+-------+------+-----------------+-----------------------------------------+-----+----------------+----------+ + | 205 | srv_2 | off | up | jail | 14.2-RELEASE-p3 | DHCP (running -- address requires root) | - | ansible_client | no | + +-----+-------+------+-------+------+-----------------+-----------------------------------------+-----+----------------+----------+ + | 206 | srv_3 | off | up | jail | 14.2-RELEASE-p3 | DHCP (running -- address requires root) | - | ansible_client | no | + +-----+-------+------+-------+------+-----------------+-----------------------------------------+-----+----------------+----------+ + +Use sudo if enabled: + +.. 
code-block:: console + + shell> ssh admin@10.1.0.73 sudo iocage list -l + +-----+-------+------+-------+------+-----------------+--------------------+-----+----------------+----------+ + | JID | NAME | BOOT | STATE | TYPE | RELEASE | IP4 | IP6 | TEMPLATE | BASEJAIL | + +=====+=======+======+=======+======+=================+====================+=====+================+==========+ + | 204 | srv_1 | off | up | jail | 14.2-RELEASE-p3 | epair0b|10.1.0.183 | - | ansible_client | no | + +-----+-------+------+-------+------+-----------------+--------------------+-----+----------------+----------+ + | 205 | srv_2 | off | up | jail | 14.2-RELEASE-p3 | epair0b|10.1.0.204 | - | ansible_client | no | + +-----+-------+------+-------+------+-----------------+--------------------+-----+----------------+----------+ + | 206 | srv_3 | off | up | jail | 14.2-RELEASE-p3 | epair0b|10.1.0.169 | - | ansible_client | no | + +-----+-------+------+-------+------+-----------------+--------------------+-----+----------------+----------+ + +Create the inventory file ``hosts/02_iocage.yml``. Use the option +:ansopt:`community.general.iocage#inventory:sudo`: + +.. code-block:: yaml + + plugin: community.general.iocage + host: 10.1.0.73 + user: admin + sudo: true + +Display the inventory: + +.. 
code-block:: console + + shell> ansible-inventory -i hosts/02_iocage.yml --list --yaml + all: + children: + ungrouped: + hosts: + srv_1: + iocage_basejail: 'no' + iocage_boot: 'off' + iocage_ip4: 10.1.0.183 + iocage_ip4_dict: + ip4: + - ifc: epair0b + ip: 10.1.0.183 + mask: '-' + msg: '' + iocage_ip6: '-' + iocage_jid: '204' + iocage_release: 14.2-RELEASE-p3 + iocage_state: up + iocage_template: ansible_client + iocage_type: jail + srv_2: + iocage_basejail: 'no' + iocage_boot: 'off' + iocage_ip4: 10.1.0.204 + iocage_ip4_dict: + ip4: + - ifc: epair0b + ip: 10.1.0.204 + mask: '-' + msg: '' + iocage_ip6: '-' + iocage_jid: '205' + iocage_release: 14.2-RELEASE-p3 + iocage_state: up + iocage_template: ansible_client + iocage_type: jail + srv_3: + iocage_basejail: 'no' + iocage_boot: 'off' + iocage_ip4: 10.1.0.169 + iocage_ip4_dict: + ip4: + - ifc: epair0b + ip: 10.1.0.169 + mask: '-' + msg: '' + iocage_ip6: '-' + iocage_jid: '206' + iocage_release: 14.2-RELEASE-p3 + iocage_state: up + iocage_template: ansible_client + iocage_type: jail + +Note: If the option :ansopt:`community.general.iocage#inventory:env` is used and :ansopt:`community.general.iocage#inventory:sudo` is enabled, enable also :ansopt:`community.general.iocage#inventory:sudo_preserve_env`. For example, + +.. code-block:: yaml + + plugin: community.general.iocage + host: 10.1.0.73 + user: admin + env: + CRYPTOGRAPHY_OPENSSL_NO_LEGACY: 1 + sudo: true + sudo_preserve_env: true + +In this case, make sure the sudo tag ``SETENV`` is used: + +.. code-block:: console + + shell> ssh admin@10.1.0.73 sudo cat /usr/local/etc/sudoers | grep admin + admin ALL=(ALL) NOPASSWD:SETENV: ALL diff --git a/docs/docsite/rst/guide_iocage_inventory_hooks.rst b/docs/docsite/rst/guide_iocage_inventory_hooks.rst new file mode 100644 index 0000000000..45364fc798 --- /dev/null +++ b/docs/docsite/rst/guide_iocage_inventory_hooks.rst @@ -0,0 +1,187 @@ +.. 
+ Copyright (c) Ansible Project + GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) + SPDX-License-Identifier: GPL-3.0-or-later + +.. _ansible_collections.community.general.docsite.guide_iocage.guide_iocage_inventory.guide_iocage_inventory_hooks: + +Hooks +----- + +The iocage utility internally opens a console to a jail to get the jail's DHCP address. This +requires root. If you run the command ``iocage list -l`` as unprivileged user, you'll see the +message ``DHCP (running -- address requires root)``. If you are not granted the root privilege, use +``/etc/dhclient-exit-hooks``. For example, in the jail *srv_1*, create the file +``/zroot/iocage/jails/srv_1/root/etc/dhclient-exit-hooks`` + +.. code-block:: shell + + case "$reason" in + "BOUND"|"REBIND"|"REBOOT"|"RENEW") + echo $new_ip_address > /var/db/dhclient-hook.address.$interface + ;; + esac + +where ``/zroot/iocage`` is the activated pool. + +.. code-block:: console + + shell> zfs list | grep /zroot/iocage + zroot/iocage 4.69G 446G 5.08M /zroot/iocage + zroot/iocage/download 927M 446G 384K /zroot/iocage/download + zroot/iocage/download/14.1-RELEASE 465M 446G 465M /zroot/iocage/download/14.1-RELEASE + zroot/iocage/download/14.2-RELEASE 462M 446G 462M /zroot/iocage/download/14.2-RELEASE + zroot/iocage/images 384K 446G 384K /zroot/iocage/images + zroot/iocage/jails 189M 446G 480K /zroot/iocage/jails + zroot/iocage/jails/srv_1 62.9M 446G 464K /zroot/iocage/jails/srv_1 + zroot/iocage/jails/srv_1/root 62.4M 446G 3.53G /zroot/iocage/jails/srv_1/root + zroot/iocage/jails/srv_2 62.8M 446G 464K /zroot/iocage/jails/srv_2 + zroot/iocage/jails/srv_2/root 62.3M 446G 3.53G /zroot/iocage/jails/srv_2/root + zroot/iocage/jails/srv_3 62.8M 446G 464K /zroot/iocage/jails/srv_3 + zroot/iocage/jails/srv_3/root 62.3M 446G 3.53G /zroot/iocage/jails/srv_3/root + zroot/iocage/log 688K 446G 688K /zroot/iocage/log + zroot/iocage/releases 2.93G 446G 384K /zroot/iocage/releases 
+ zroot/iocage/releases/14.2-RELEASE 2.93G 446G 384K /zroot/iocage/releases/14.2-RELEASE + zroot/iocage/releases/14.2-RELEASE/root 2.93G 446G 2.88G /zroot/iocage/releases/14.2-RELEASE/root + zroot/iocage/templates 682M 446G 416K /zroot/iocage/templates + zroot/iocage/templates/ansible_client 681M 446G 432K /zroot/iocage/templates/ansible_client + zroot/iocage/templates/ansible_client/root 681M 446G 3.53G /zroot/iocage/templates/ansible_client/root + +See: `man dhclient-script `_ + +Create the inventory configuration. Use the option :ansopt:`community.general.iocage#inventory:hooks_results` instead of :ansopt:`community.general.iocage#inventory:sudo`: + +.. code-block:: console + + shell> cat hosts/02_iocage.yml + +.. code-block:: yaml + + plugin: community.general.iocage + host: 10.1.0.73 + user: admin + hooks_results: + - /var/db/dhclient-hook.address.epair0b + +.. note:: + + The option :ansopt:`community.general.iocage#inventory:hooks_results` expects the poolname to be mounted to ``/poolname``. For example, if you + activate the pool iocage, this plugin expects to find the :ansopt:`community.general.iocage#inventory:hooks_results` items in the path + /iocage/iocage/jails//root. If you mount the poolname to a different path, the easiest + remedy is to create a symlink. + +As admin at the controller, display the inventory: + +.. 
code-block:: console + + shell> ansible-inventory -i hosts/02_iocage.yml --list --yaml + all: + children: + ungrouped: + hosts: + srv_1: + iocage_basejail: 'no' + iocage_boot: 'off' + iocage_hooks: + - 10.1.0.183 + iocage_ip4: '-' + iocage_ip4_dict: + ip4: [] + msg: DHCP (running -- address requires root) + iocage_ip6: '-' + iocage_jid: '204' + iocage_release: 14.2-RELEASE-p3 + iocage_state: up + iocage_template: ansible_client + iocage_type: jail + srv_2: + iocage_basejail: 'no' + iocage_boot: 'off' + iocage_hooks: + - 10.1.0.204 + iocage_ip4: '-' + iocage_ip4_dict: + ip4: [] + msg: DHCP (running -- address requires root) + iocage_ip6: '-' + iocage_jid: '205' + iocage_release: 14.2-RELEASE-p3 + iocage_state: up + iocage_template: ansible_client + iocage_type: jail + srv_3: + iocage_basejail: 'no' + iocage_boot: 'off' + iocage_hooks: + - 10.1.0.169 + iocage_ip4: '-' + iocage_ip4_dict: + ip4: [] + msg: DHCP (running -- address requires root) + iocage_ip6: '-' + iocage_jid: '206' + iocage_release: 14.2-RELEASE-p3 + iocage_state: up + iocage_template: ansible_client + iocage_type: jail + +Compose the variable ``ansible_host``. For example, ``hosts/02_iocage.yml`` could look like: + +.. code-block:: yaml+jinja + + plugin: community.general.iocage + host: 10.1.0.73 + user: admin + hooks_results: + - /var/db/dhclient-hook.address.epair0b + compose: + ansible_host: (iocage_hooks.0 == '-') | ternary(iocage_ip4, iocage_hooks.0) + +Test the jails. Create a playbook ``pb-test-uname.yml``: + +.. code-block:: yaml + + - hosts: all + remote_user: admin + + vars: + + ansible_python_interpreter: auto_silent + + tasks: + + - command: uname -a + register: out + + - debug: + var: out.stdout + +See: :ref:`working_with_bsd` + +Run the playbook: + +.. 
code-block:: console + + shell> ansible-playbook -i hosts/02_iocage.yml pb-test-uname.yml + + PLAY [all] ********************************************************************************************************** + + TASK [command] ****************************************************************************************************** + changed: [srv_3] + changed: [srv_1] + changed: [srv_2] + + TASK [debug] ******************************************************************************************************** + ok: [srv_1] => + out.stdout: FreeBSD srv-1 14.2-RELEASE-p1 FreeBSD 14.2-RELEASE-p1 GENERIC amd64 + ok: [srv_3] => + out.stdout: FreeBSD srv-3 14.2-RELEASE-p1 FreeBSD 14.2-RELEASE-p1 GENERIC amd64 + ok: [srv_2] => + out.stdout: FreeBSD srv-2 14.2-RELEASE-p1 FreeBSD 14.2-RELEASE-p1 GENERIC amd64 + + PLAY RECAP ********************************************************************************************************** + srv_1 : ok=2 changed=1 unreachable=0 failed=0 skipped=0 rescued=0 ignored=0 + srv_2 : ok=2 changed=1 unreachable=0 failed=0 skipped=0 rescued=0 ignored=0 + srv_3 : ok=2 changed=1 unreachable=0 failed=0 skipped=0 rescued=0 ignored=0 + +Note: This playbook and the inventory configuration works also for the *Shared IP Jails*. diff --git a/docs/docsite/rst/guide_iocage_inventory_properties.rst b/docs/docsite/rst/guide_iocage_inventory_properties.rst new file mode 100644 index 0000000000..d044f2e7f2 --- /dev/null +++ b/docs/docsite/rst/guide_iocage_inventory_properties.rst @@ -0,0 +1,201 @@ +.. + Copyright (c) Ansible Project + GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) + SPDX-License-Identifier: GPL-3.0-or-later + +.. _ansible_collections.community.general.docsite.guide_iocage.guide_iocage_inventory.guide_iocage_inventory_properties: + +Properties +---------- + +Optionally, in the inventory file ``hosts/02_iocage.yml``, get the iocage properties. 
Enable +:ansopt:`community.general.iocage#inventory:get_properties`: + +.. code-block:: yaml+jinja + + plugin: community.general.iocage + host: 10.1.0.73 + user: admin + get_properties: true + hooks_results: + - /var/db/dhclient-hook.address.epair0b + compose: + ansible_host: (iocage_hooks.0 == '-') | ternary(iocage_ip4, iocage_hooks.0) + +Display the properties. Create the playbook ``pb-test-properties.yml``: + +.. code-block:: yaml + + - hosts: all + remote_user: admin + + vars: + + ansible_python_interpreter: auto_silent + + tasks: + + - debug: + var: iocage_properties + +Run the playbook. Limit the inventory to *srv_3*: + +.. code-block:: console + + shell> ansible-playbook -i hosts/02_iocage.yml -l srv_3 pb-test-properties.yml + + PLAY [all] ********************************************************************************************************** + + TASK [debug] ******************************************************************************************************** + ok: [srv_3] => + iocage_properties: + CONFIG_VERSION: '33' + allow_chflags: '0' + allow_mlock: '0' + allow_mount: '1' + allow_mount_devfs: '0' + allow_mount_fdescfs: '0' + allow_mount_fusefs: '0' + allow_mount_linprocfs: '0' + allow_mount_linsysfs: '0' + allow_mount_nullfs: '0' + allow_mount_procfs: '0' + allow_mount_tmpfs: '0' + allow_mount_zfs: '0' + allow_nfsd: '0' + allow_quotas: '0' + allow_raw_sockets: '0' + allow_set_hostname: '1' + allow_socket_af: '0' + allow_sysvipc: '0' + allow_tun: '0' + allow_vmm: '0' + assign_localhost: '0' + available: readonly + basejail: '0' + boot: '0' + bpf: '1' + children_max: '0' + cloned_release: 14.2-RELEASE + comment: none + compression: 'on' + compressratio: readonly + coredumpsize: 'off' + count: '1' + cpuset: 'off' + cputime: 'off' + datasize: 'off' + dedup: 'off' + defaultrouter: auto + defaultrouter6: auto + depends: none + devfs_ruleset: '4' + dhcp: '1' + enforce_statfs: '2' + exec_clean: '1' + exec_created: /usr/bin/true + exec_fib: '0' + 
exec_jail_user: root + exec_poststart: /usr/bin/true + exec_poststop: /usr/bin/true + exec_prestart: /usr/bin/true + exec_prestop: /usr/bin/true + exec_start: /bin/sh /etc/rc + exec_stop: /bin/sh /etc/rc.shutdown + exec_system_jail_user: '0' + exec_system_user: root + exec_timeout: '60' + host_domainname: none + host_hostname: srv-3 + host_hostuuid: srv_3 + host_time: '1' + hostid: ea2ba7d1-4fcd-f13f-82e4-8b32c0a03403 + hostid_strict_check: '0' + interfaces: vnet0:bridge0 + ip4: new + ip4_addr: none + ip4_saddrsel: '1' + ip6: new + ip6_addr: none + ip6_saddrsel: '1' + ip_hostname: '0' + jail_zfs: '0' + jail_zfs_dataset: iocage/jails/srv_3/data + jail_zfs_mountpoint: none + last_started: '2025-06-11 04:29:23' + localhost_ip: none + login_flags: -f root + mac_prefix: 02a098 + maxproc: 'off' + memorylocked: 'off' + memoryuse: 'off' + min_dyn_devfs_ruleset: '1000' + mount_devfs: '1' + mount_fdescfs: '1' + mount_linprocfs: '0' + mount_procfs: '0' + mountpoint: readonly + msgqqueued: 'off' + msgqsize: 'off' + nat: '0' + nat_backend: ipfw + nat_forwards: none + nat_interface: none + nat_prefix: '172.16' + nmsgq: 'off' + notes: none + nsem: 'off' + nsemop: 'off' + nshm: 'off' + nthr: 'off' + openfiles: 'off' + origin: readonly + owner: root + pcpu: 'off' + plugin_name: none + plugin_repository: none + priority: '99' + pseudoterminals: 'off' + quota: none + readbps: 'off' + readiops: 'off' + release: 14.2-RELEASE-p3 + reservation: none + resolver: /etc/resolv.conf + rlimits: 'off' + rtsold: '0' + securelevel: '2' + shmsize: 'off' + source_template: ansible_client + stacksize: 'off' + state: up + stop_timeout: '30' + swapuse: 'off' + sync_state: none + sync_target: none + sync_tgt_zpool: none + sysvmsg: new + sysvsem: new + sysvshm: new + template: '0' + type: jail + used: readonly + vmemoryuse: 'off' + vnet: '1' + vnet0_mac: 02a0983da05d 02a0983da05e + vnet0_mtu: auto + vnet1_mac: none + vnet1_mtu: auto + vnet2_mac: none + vnet2_mtu: auto + vnet3_mac: none + vnet3_mtu: auto 
+ vnet_default_interface: auto + vnet_default_mtu: '1500' + vnet_interfaces: none + wallclock: 'off' + writebps: 'off' + writeiops: 'off' + + PLAY RECAP ********************************************************************************************************** + srv_3 : ok=1 changed=0 unreachable=0 failed=0 skipped=0 rescued=0 ignored=0 diff --git a/docs/docsite/rst/guide_iocage_inventory_tags.rst b/docs/docsite/rst/guide_iocage_inventory_tags.rst new file mode 100644 index 0000000000..8adf641073 --- /dev/null +++ b/docs/docsite/rst/guide_iocage_inventory_tags.rst @@ -0,0 +1,117 @@ +.. + Copyright (c) Ansible Project + GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) + SPDX-License-Identifier: GPL-3.0-or-later + +.. _ansible_collections.community.general.docsite.guide_iocage.guide_iocage_inventory.guide_iocage_inventory_tags: + +Tags +---- + +Quoting `man iocage `_ + +.. code-block:: text + + PROPERTIES + ... + notes="any string" + Custom notes for miscellaneous tagging. + Default: none + Source: local + +We will use the format ``notes="tag1=value1 tag2=value2 ..."``. + +.. note:: + + The iocage tags have nothing to do with the :ref:`tags`. + +As root at the iocage host, set notes. For example, + +.. code-block:: console + + shell> iocage set notes="vmm=iocage_02 project=foo" srv_1 + notes: none -> vmm=iocage_02 project=foo + shell> iocage set notes="vmm=iocage_02 project=foo" srv_2 + notes: none -> vmm=iocage_02 project=foo + shell> iocage set notes="vmm=iocage_02 project=bar" srv_3 + notes: none -> vmm=iocage_02 project=bar + +Update the inventory configuration. Compose a dictionary *iocage_tags* and create groups. The option +:ansopt:`community.general.iocage#inventory:get_properties` must be enabled. +For example, ``hosts/02_iocage.yml`` could look like: + +.. 
code-block:: yaml + + plugin: community.general.iocage + host: 10.1.0.73 + user: admin + get_properties: true + hooks_results: + - /var/db/dhclient-hook.address.epair0b + compose: + ansible_host: (iocage_hooks.0 == '-') | ternary(iocage_ip4, iocage_hooks.0) + iocage_tags: dict(iocage_properties.notes | split | map('split', '=')) + keyed_groups: + - prefix: vmm + key: iocage_tags.vmm + - prefix: project + key: iocage_tags.project + +Display tags and groups. Create a playbook ``pb-test-groups.yml``: + +.. code-block:: yaml+jinja + + - hosts: all + remote_user: admin + + vars: + + ansible_python_interpreter: auto_silent + + tasks: + + - debug: + var: iocage_tags + + - debug: + msg: | + {% for group in groups %} + {{ group }}: {{ groups[group] }} + {% endfor %} + run_once: true + +Run the playbook: + +.. code-block:: console + + shell> ansible-playbook -i hosts/02_iocage.yml pb-test-groups.yml + + PLAY [all] ********************************************************************************************************** + + TASK [debug] ******************************************************************************************************** + ok: [srv_1] => + iocage_tags: + project: foo + vmm: iocage_02 + ok: [srv_2] => + iocage_tags: + project: foo + vmm: iocage_02 + ok: [srv_3] => + iocage_tags: + project: bar + vmm: iocage_02 + + TASK [debug] ******************************************************************************************************** + ok: [srv_1] => + msg: |- + all: ['srv_1', 'srv_2', 'srv_3'] + ungrouped: [] + vmm_iocage_02: ['srv_1', 'srv_2', 'srv_3'] + project_foo: ['srv_1', 'srv_2'] + project_bar: ['srv_3'] + + PLAY RECAP ********************************************************************************************************** + srv_1 : ok=2 changed=0 unreachable=0 failed=0 skipped=0 rescued=0 ignored=0 + srv_2 : ok=1 changed=0 unreachable=0 failed=0 skipped=0 rescued=0 ignored=0 + srv_3 : ok=1 changed=0 unreachable=0 failed=0 skipped=0 rescued=0 ignored=0 
diff --git a/docs/docsite/rst/guide_modulehelper.rst b/docs/docsite/rst/guide_modulehelper.rst new file mode 100644 index 0000000000..711cdc7f99 --- /dev/null +++ b/docs/docsite/rst/guide_modulehelper.rst @@ -0,0 +1,559 @@ +.. + Copyright (c) Ansible Project + GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) + SPDX-License-Identifier: GPL-3.0-or-later + +.. _ansible_collections.community.general.docsite.guide_modulehelper: + +Module Helper guide +=================== + + +Introduction +^^^^^^^^^^^^ + +Writing a module for Ansible is largely described in existing documentation. +However, a good part of that is boilerplate code that needs to be repeated every single time. +That is where ``ModuleHelper`` comes to assistance: a lot of that boilerplate code is done. + +.. _ansible_collections.community.general.docsite.guide_modulehelper.quickstart: + +Quickstart +"""""""""" + +See the `example from Ansible documentation `_ +written with ``ModuleHelper``. +But bear in mind that it does not showcase all of MH's features: + +.. code-block:: python + + from ansible_collections.community.general.plugins.module_utils.module_helper import ModuleHelper + + + class MyTest(ModuleHelper): + module = dict( + argument_spec=dict( + name=dict(type='str', required=True), + new=dict(type='bool', required=False, default=False), + ), + supports_check_mode=True, + ) + + def __run__(self): + self.vars.original_message = '' + self.vars.message = '' + if self.check_mode: + return + self.vars.original_message = self.vars.name + self.vars.message = 'goodbye' + self.changed = self.vars['new'] + if self.vars.name == "fail me": + self.do_raise("You requested this to fail") + + + def main(): + MyTest.execute() + + + if __name__ == '__main__': + main() + + +Module Helper +^^^^^^^^^^^^^ + +Introduction +"""""""""""" + +``ModuleHelper`` is a wrapper around the standard ``AnsibleModule``, providing extra features and conveniences. 
+The basic structure of a module using ``ModuleHelper`` is as shown in the +:ref:`ansible_collections.community.general.docsite.guide_modulehelper.quickstart` +section above, but there are more elements that will take part in it. + +.. code-block:: python + + from ansible_collections.community.general.plugins.module_utils.module_helper import ModuleHelper + + + class MyTest(ModuleHelper): + # behavior for module paramaters ONLY, see below for further information + output_params = () + change_params = () + diff_params = () + facts_params = () + + facts_name = None # used if generating facts, from parameters or otherwise + + module = dict( + argument_spec=dict(...), + # ... + ) + +After importing the ``ModuleHelper`` class, you need to declare your own class extending it. + +.. seealso:: + + There is a variation called ``StateModuleHelper``, which builds on top of the features provided by MH. + See :ref:`ansible_collections.community.general.docsite.guide_modulehelper.statemh` below for more details. + +The easiest way of specifying the module is to create the class variable ``module`` with a dictionary +containing the exact arguments that would be passed as parameters to ``AnsibleModule``. +If you prefer to create the ``AnsibleModule`` object yourself, just assign it to the ``module`` class variable. +MH also accepts a parameter ``module`` in its constructor, if that parameter is used used, +then it will override the class variable. The parameter can either be ``dict`` or ``AnsibleModule`` as well. + +Beyond the definition of the module, there are other variables that can be used to control aspects +of MH's behavior. These variables should be set at the very beginning of the class, and their semantics are +explained through this document. + +The main logic of MH happens in the ``ModuleHelper.run()`` method, which looks like: + +.. 
code-block:: python + + @module_fails_on_exception + def run(self): + self.__init_module__() + self.__run__() + self.__quit_module__() + output = self.output + if 'failed' not in output: + output['failed'] = False + self.module.exit_json(changed=self.has_changed(), **output) + +The method ``ModuleHelper.__run__()`` must be implemented by the module and most +modules will be able to perform their actions implementing only that MH method. +However, in some cases, you might want to execute actions before or after the main tasks, in which cases +you should implement ``ModuleHelper.__init_module__()`` and ``ModuleHelper.__quit_module__()`` respectively. + +Note that the output comes from ``self.output``, which is a ``@property`` method. +By default, that property will collect all the variables that are marked for output and return them in a dictionary with their values. +Moreover, the default ``self.output`` will also handle Ansible ``facts`` and *diff mode*. +Also note the changed status comes from ``self.has_changed()``, which is usually calculated from variables that are marked +to track changes in their content. + +.. seealso:: + + More details in sections + :ref:`ansible_collections.community.general.docsite.guide_modulehelper.paramvaroutput` and + :ref:`ansible_collections.community.general.docsite.guide_modulehelper.changes` below. + +.. seealso:: + + See more about the decorator + :ref:`ansible_collections.community.general.docsite.guide_modulehelper.modulefailsdeco` below. + + +Another way to write the example from the +:ref:`ansible_collections.community.general.docsite.guide_modulehelper.quickstart` +would be: + +.. 
code-block:: python + + def __init_module__(self): + self.vars.original_message = '' + self.vars.message = '' + + def __run__(self): + if self.check_mode: + return + self.vars.original_message = self.vars.name + self.vars.message = 'goodbye' + self.changed = self.vars['new'] + + def __quit_module__(self): + if self.vars.name == "fail me": + self.do_raise("You requested this to fail") + +Notice that there are no calls to ``module.exit_json()`` nor ``module.fail_json()``: if the module fails, raise an exception. +You can use the convenience method ``self.do_raise()`` or raise the exception as usual in Python to do that. +If no exception is raised, then the module succeeds. + +.. seealso:: + + See more about exceptions in section + :ref:`ansible_collections.community.general.docsite.guide_modulehelper.exceptions` below. + +Ansible modules must have a ``main()`` function and the usual test for ``'__main__'``. When using MH that should look like: + +.. code-block:: python + + def main(): + MyTest.execute() + + + if __name__ == '__main__': + main() + +The class method ``execute()`` is nothing more than a convenience shorcut for: + +.. code-block:: python + + m = MyTest() + m.run() + +Optionally, an ``AnsibleModule`` may be passed as parameter to ``execute()``. + +.. _ansible_collections.community.general.docsite.guide_modulehelper.paramvaroutput: + +Parameters, variables, and output +""""""""""""""""""""""""""""""""" + +All the parameters automatically become variables in the ``self.vars`` attribute, which is of the ``VarDict`` type. +By using ``self.vars``, you get a central mechanism to access the parameters but also to expose variables as return values of the module. +As described in :ref:`ansible_collections.community.general.docsite.guide_vardict`, variables in ``VarDict`` have metadata associated to them. +One of the attributes in that metadata marks the variable for output, and MH makes use of that to generate the module's return values. + +.. 
note:: + + The ``VarDict`` class was introduced in community.general 7.1.0, as part of ``ModuleHelper`` itself. + However, it has been factored out to become an utility on its own, described in :ref:`ansible_collections.community.general.docsite.guide_vardict`, + and the older implementation was removed in community.general 11.0.0. + + Some code might still refer to the class variables ``use_old_vardict`` and ``mute_vardict_deprecation``, used for the transtition to the new + implementation but from community.general 11.0.0 onwards they are no longer used and can be safely removed from the code. + +Contrary to new variables created in ``VarDict``, module parameters are not set for output by default. +If you want to include some module parameters in the output, list them in the ``output_params`` class variable. + +.. code-block:: python + + class MyTest(ModuleHelper): + output_params = ('state', 'name') + ... + +.. important:: + + The variable names listed in ``output_params`` **must be module parameters**, as in parameters listed in the module's ``argument_spec``. + Names not found in ``argument_spec`` are silently ignored. + +Another neat feature provided by MH by using ``VarDict`` is the automatic tracking of changes when setting the metadata ``change=True``. +Again, to enable this feature for module parameters, you must list them in the ``change_params`` class variable. + +.. code-block:: python + + class MyTest(ModuleHelper): + # example from community.general.xfconf + change_params = ('value', ) + ... + +.. important:: + + The variable names listed in ``change_params`` **must be module parameters**, as in parameters listed in the module's ``argument_spec``. + Names not found in ``argument_spec`` are silently ignored. + +.. seealso:: + + See more about this in + :ref:`ansible_collections.community.general.docsite.guide_modulehelper.changes` below. 
+ +Similarly, if you want to use Ansible's diff mode, you can set the metadata ``diff=True`` and ``diff_params`` for module parameters. +With that, MH will automatically generate the diff output for variables that have changed. + +.. code-block:: python + + class MyTest(ModuleHelper): + diff_params = ('value', ) + + def __run__(self): + # example from community.general.gio_mime + self.vars.set_meta("handler", initial_value=gio_mime_get(self.runner, self.vars.mime_type), diff=True, change=True) + +.. important:: + + The variable names listed in ``diff_params`` **must be module parameters**, as in parameters listed in the module's ``argument_spec``. + Names not found in ``argument_spec`` are silently ignored. + +Moreover, if a module is set to return *facts* instead of return values, then again use the metadata ``fact=True`` and ``fact_params`` for module parameters. +Additionally, you must specify ``facts_name``, as in: + +.. code-block:: python + + class VolumeFacts(ModuleHelper): + facts_name = 'volume_facts' + + def __init_module__(self): + self.vars.set("volume", 123, fact=True) + +That generates an Ansible fact like: + +.. code-block:: yaml+jinja + + - name: Obtain volume facts + some.collection.volume_facts: + # parameters + + - name: Print volume facts + debug: + msg: Volume fact is {{ ansible_facts.volume_facts.volume }} + +.. important:: + + The variable names listed in ``fact_params`` **must be module parameters**, as in parameters listed in the module's ``argument_spec``. + Names not found in ``argument_spec`` are silently ignored. + +.. important:: + + If ``facts_name`` is not set, the module does not generate any facts. + + +.. _ansible_collections.community.general.docsite.guide_modulehelper.changes: + +Handling changes +"""""""""""""""" + +In MH there are many ways to indicate change in the module execution. 
Here they are: + +Tracking changes in variables +----------------------------- + +As explained above, you can enable change tracking in any number of variables in ``self.vars``. +By the end of the module execution, if any of those variables has a value different then the first value assigned to them, +then that will be picked up by MH and signalled as changed at the module output. +See the example below to learn how you can enabled change tracking in variables: + +.. code-block:: python + + # using __init_module__() as example, it works the same in __run__() and __quit_module__() + def __init_module__(self): + # example from community.general.ansible_galaxy_install + self.vars.set("new_roles", {}, change=True) + + # example of "hidden" variable used only to track change in a value from community.general.gconftool2 + self.vars.set('_value', self.vars.previous_value, output=False, change=True) + + # enable change-tracking without assigning value + self.vars.set_meta("new_roles", change=True) + + # if you must forcibly set an initial value to the variable + self.vars.set_meta("new_roles", initial_value=[]) + ... + +If the end value of any variable marked ``change`` is different from its initial value, then MH will return ``changed=True``. + +Indicating changes with ``changed`` +----------------------------------- + +If you want to indicate change directly in the code, then use the ``self.changed`` property in MH. +Beware that this is a ``@property`` method in MH, with both a *getter* and a *setter*. +By default, that hidden field is set to ``False``. + +Effective change +---------------- + +The effective outcome for the module is determined in the ``self.has_changed()`` method, and it consists of the logical *OR* operation +between ``self.changed`` and the change calculated from ``self.vars``. + +.. 
_ansible_collections.community.general.docsite.guide_modulehelper.exceptions: + +Exceptions +"""""""""" + +In MH, instead of calling ``module.fail_json()`` you can just raise an exception. +The output variables are collected the same way they would be for a successful execution. +However, you can set output variables specifically for that exception, if you so choose. + +.. code-block:: python + + from ansible_collections.community.general.plugins.module_utils.module_helper import ModuleHelperException + + def __init_module__(self): + if not complex_validation(): + self.do_raise("Validation failed!") + + # Or passing output variables + awesomeness = calculate_awesomeness() + if awesomeness > 1000: + self.do_raise("Over awesome, I cannot handle it!", update_output={"awesomeness": awesomeness}) + # which is just a convenience shortcut for + raise ModuleHelperException("...", update_output={...}) + +All exceptions derived from ``Exception`` are captured and translated into a ``fail_json()`` call. +However, if you do want to call ``self.module.fail_json()`` yourself it will work, +just keep in mind that there will be no automatic handling of output variables in that case. + +Behind the curtains, all ``do_raise()`` does is to raise a ``ModuleHelperException``. +If you want to create specialized error handling for your code, the best way is to extend that clas and raise it when needed. + +.. _ansible_collections.community.general.docsite.guide_modulehelper.statemh: + +StateModuleHelper +^^^^^^^^^^^^^^^^^ + +Many modules use a parameter ``state`` that effectively controls the exact action performed by the module, such as +``state=present`` or ``state=absent`` for installing or removing packages. +By using ``StateModuleHelper`` you can make your code like the excerpt from the ``gconftool2`` below: + +.. code-block:: python + + from ansible_collections.community.general.plugins.module_utils.module_helper import StateModuleHelper + + class GConftool(StateModuleHelper): + ... 
+ module = dict( + ... + ) + + def __init_module__(self): + self.runner = gconftool2_runner(self.module, check_rc=True) + ... + + self.vars.set('previous_value', self._get(), fact=True) + self.vars.set('value_type', self.vars.value_type) + self.vars.set('_value', self.vars.previous_value, output=False, change=True) + self.vars.set_meta('value', initial_value=self.vars.previous_value) + self.vars.set('playbook_value', self.vars.value, fact=True) + + ... + + def state_absent(self): + with self.runner("state key", output_process=self._make_process(False)) as ctx: + ctx.run() + self.vars.set('run_info', ctx.run_info, verbosity=4) + self.vars.set('new_value', None, fact=True) + self.vars._value = None + + def state_present(self): + with self.runner("direct config_source value_type state key value", output_process=self._make_process(True)) as ctx: + ctx.run() + self.vars.set('run_info', ctx.run_info, verbosity=4) + self.vars.set('new_value', self._get(), fact=True) + self.vars._value = self.vars.new_value + +Note that the method ``__run__()`` is implemented in ``StateModuleHelper``, all you need to implement are the methods ``state_``. +In the example above, :ansplugin:`community.general.gconftool2#module` only has two states, ``present`` and ``absent``, thus, ``state_present()`` and ``state_absent()``. + +If the controlling parameter is not called ``state``, like in :ansplugin:`community.general.jira#module` module, just let SMH know about it: + +.. code-block:: python + + class JIRA(StateModuleHelper): + state_param = 'operation' + + def operation_create(self): + ... + + def operation_search(self): + ... + +Lastly, if the module is called with ``state=somevalue`` and the method ``state_somevalue`` +is not implemented, SMH will resort to call a method called ``__state_fallback__()``. +By default, this method will raise a ``ValueError`` indicating the method was not found. 
+Naturally, you can override that method to write a default implementation, as in :ansplugin:`community.general.locale_gen#module`: + +.. code-block:: python + + def __state_fallback__(self): + if self.vars.state_tracking == self.vars.state: + return + if self.vars.ubuntu_mode: + self.apply_change_ubuntu(self.vars.state, self.vars.name) + else: + self.apply_change(self.vars.state, self.vars.name) + +That module has only the states ``present`` and ``absent`` and the code for both is the one in the fallback method. + +.. note:: + + The name of the fallback method **does not change** if you set a different value of ``state_param``. + + +Other Conveniences +^^^^^^^^^^^^^^^^^^ + +Delegations to AnsibleModule +"""""""""""""""""""""""""""" + +The MH properties and methods below are delegated as-is to the underlying ``AnsibleModule`` instance in ``self.module``: + +- ``check_mode`` +- ``get_bin_path()`` +- ``warn()`` +- ``deprecate()`` + +Additionally, MH will also delegate: + +- ``diff_mode`` to ``self.module._diff`` +- ``verbosity`` to ``self.module._verbosity`` + +Starting in community.general 10.3.0, MH will also delegate the method ``debug`` to ``self.module``. +If any existing module already has a ``debug`` attribute defined, a warning message will be generated, +requesting it to be renamed. Upon the release of community.general 12.0.0, the delegation will be +preemptive and will override any existing method or property in the subclasses. + +Decorators +"""""""""" + +The following decorators should only be used within ``ModuleHelper`` class. + +@cause_changes +-------------- + +This decorator will control whether the outcome of the method will cause the module to signal change in its output. +If the method completes without raising an exception it is considered to have succeeded, otherwise, it will have failed. + +The decorator has a parameter ``when`` that accepts three different values: ``success``, ``failure``, and ``always``. 
+There are also two legacy parameters, ``on_success`` and ``on_failure``, that will be deprecated, so do not use them. +The value of ``changed`` in the module output will be set to ``True``: + +- ``when="success"`` and the method completes without raising an exception. +- ``when="failure"`` and the method raises an exception. +- ``when="always"``, regardless of the method raising an exception or not. + +.. code-block:: python + + from ansible_collections.community.general.plugins.module_utils.module_helper import cause_changes + + # adapted excerpt from the community.general.jira module + class JIRA(StateModuleHelper): + @cause_changes(when="success") + def operation_create(self): + ... + +If ``when`` has a different value or no parameters are specificied, the decorator will have no effect whatsoever. + +.. _ansible_collections.community.general.docsite.guide_modulehelper.modulefailsdeco: + +@module_fails_on_exception +-------------------------- + +In a method using this decorator, if an exception is raised, the text message of that exception will be captured +by the decorator and used to call ``self.module.fail_json()``. +In most of the cases there will be no need to use this decorator, because ``ModuleHelper.run()`` already uses it. + +@check_mode_skip +---------------- + +If the module is running in check mode, this decorator will prevent the method from executing. +The return value in that case is ``None``. + +.. code-block:: python + + from ansible_collections.community.general.plugins.module_utils.module_helper import check_mode_skip + + # adapted excerpt from the community.general.locale_gen module + class LocaleGen(StateModuleHelper): + @check_mode_skip + def __state_fallback__(self): + ... + + +@check_mode_skip_returns +------------------------ + +This decorator is similar to the previous one, but the developer can control the return value for the method when running in check mode. +It is used with one of two parameters. 
One is ``callable`` and the return value in check mode will be ``callable(self, *args, **kwargs)``, +where ``self`` is the ``ModuleHelper`` instance and the union of ``args`` and ``kwargs`` will contain all the parameters passed to the method. + +The other option is to use the parameter ``value``, in which case the method will return ``value`` when in check mode. + + +References +^^^^^^^^^^ + +- `Ansible Developer Guide `_ +- `Creating a module `_ +- `Returning ansible facts `_ +- :ref:`ansible_collections.community.general.docsite.guide_vardict` + + +.. versionadded:: 3.1.0 diff --git a/docs/docsite/rst/guide_online.rst b/docs/docsite/rst/guide_online.rst new file mode 100644 index 0000000000..c233b403e8 --- /dev/null +++ b/docs/docsite/rst/guide_online.rst @@ -0,0 +1,49 @@ +.. + Copyright (c) Ansible Project + GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) + SPDX-License-Identifier: GPL-3.0-or-later + +.. _ansible_collections.community.general.docsite.guide_online: + +**************** +Online.net Guide +**************** + +Introduction +============ + +Online is a French hosting company mainly known for providing bare-metal servers named Dedibox. +Check it out: `https://www.online.net/en `_ + +Dynamic inventory for Online resources +-------------------------------------- + +Ansible has a dynamic inventory plugin that can list your resources. + +1. Create a YAML configuration such as ``online_inventory.yml`` with this content: + + .. code-block:: yaml + + plugin: community.general.online + +2. Set your ``ONLINE_TOKEN`` environment variable with your token. + + You need to open an account and log into it before you can get a token. + You can find your token at the following page: `https://console.online.net/en/api/access `_ + +3. You can test that your inventory is working by running: + + .. code-block:: console + + $ ansible-inventory -v -i online_inventory.yml --list + + +4. 
Now you can run your playbook or any other module with this inventory: + + .. code-block:: ansible-output + + $ ansible all -i online_inventory.yml -m ping + sd-96735 | SUCCESS => { + "changed": false, + "ping": "pong" + } diff --git a/docs/docsite/rst/guide_packet.rst b/docs/docsite/rst/guide_packet.rst new file mode 100644 index 0000000000..95b38dddd0 --- /dev/null +++ b/docs/docsite/rst/guide_packet.rst @@ -0,0 +1,214 @@ +.. + Copyright (c) Ansible Project + GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) + SPDX-License-Identifier: GPL-3.0-or-later + +.. _ansible_collections.community.general.docsite.guide_packet: + +********************************** +Packet.net Guide +********************************** + +Introduction +============ + +`Packet.net `_ is a bare metal infrastructure host that is supported by the community.general collection through six cloud modules. The six modules are: + +- :ansplugin:`community.general.packet_device#module`: manages servers on Packet. You can use this module to create, restart and delete devices. +- :ansplugin:`community.general.packet_ip_subnet#module`: assign IP subnet to a bare metal server +- :ansplugin:`community.general.packet_project#module`: create/delete a project in Packet host +- :ansplugin:`community.general.packet_sshkey#module`: adds a public SSH key from file or value to the Packet infrastructure. Every subsequently-created device will have this public key installed in .ssh/authorized_keys. +- :ansplugin:`community.general.packet_volume#module`: create/delete a volume in Packet host +- :ansplugin:`community.general.packet_volume_attachment#module`: attach/detach a volume to a device in the Packet host + +Note, this guide assumes you are familiar with Ansible and how it works. If you are not, have a look at their :ref:`docs ` before getting started. 
+ +Requirements +============ + +The Packet modules connect to the Packet API using the `packet-python package `_. You can install it with pip: + +.. code-block:: console + + $ pip install packet-python + +In order to check the state of devices created by Ansible on Packet, it is a good idea to install one of the `Packet CLI clients `_. Otherwise you can check them through the `Packet portal `_. + +To use the modules you will need a Packet API token. You can generate an API token through the Packet portal `here `__. The simplest way to authenticate yourself is to set the Packet API token in an environment variable: + +.. code-block:: console + + $ export PACKET_API_TOKEN=Bfse9F24SFtfs423Gsd3ifGsd43sSdfs + +If you are not comfortable exporting your API token, you can pass it as a parameter to the modules. + +On Packet, devices and reserved IP addresses belong to `projects `_. In order to use the packet_device module, you need to specify the UUID of the project in which you want to create or manage devices. You can find a project's UUID in the Packet portal `here `_ (it is just under the project table) or through one of the available `CLIs `_. + + +If you want to use a new SSH key pair in this tutorial, you can generate it to ``./id_rsa`` and ``./id_rsa.pub`` as: + +.. code-block:: console + + $ ssh-keygen -t rsa -f ./id_rsa + +If you want to use an existing key pair, just copy the private and public key over to the playbook directory. + + +Device Creation +=============== + +The following code block is a simple playbook that creates one `Type 0 `_ server (the ``plan`` parameter). You have to supply ``plan`` and ``operating_system``. ``location`` defaults to ``ewr1`` (Parsippany, NJ). You can find all the possible values for the parameters through a `CLI client `_. + +.. 
code-block:: yaml+jinja + + # playbook_create.yml + + - name: Create Ubuntu device + hosts: localhost + tasks: + + - community.general.packet_sshkey: + key_file: ./id_rsa.pub + label: tutorial key + + - community.general.packet_device: + project_id: + hostnames: myserver + operating_system: ubuntu_16_04 + plan: baremetal_0 + facility: sjc1 + +After running ``ansible-playbook playbook_create.yml``, you should have a server provisioned on Packet. You can verify through a CLI or in the `Packet portal `__. + +If you get an error with the message "failed to set machine state present, error: Error 404: Not Found", please verify your project UUID. + + +Updating Devices +================ + +The two parameters used to uniquely identify Packet devices are: "device_ids" and "hostnames". Both parameters accept either a single string (later converted to a one-element list), or a list of strings. + +The ``device_ids`` and ``hostnames`` parameters are mutually exclusive. The following values are all acceptable: + +- device_ids: ``a27b7a83-fc93-435b-a128-47a5b04f2dcf`` + +- hostnames: ``mydev1`` + +- device_ids: ``[a27b7a83-fc93-435b-a128-47a5b04f2dcf, 4887130f-0ccd-49a0-99b0-323c1ceb527b]`` + +- hostnames: ``[mydev1, mydev2]`` + +In addition, hostnames can contain a special ``%d`` formatter along with a ``count`` parameter that lets you easily expand hostnames that follow a simple name and number pattern; in other words, ``hostnames: "mydev%d", count: 2`` will expand to [mydev1, mydev2]. + +If your playbook acts on existing Packet devices, you can only pass the ``hostname`` and ``device_ids`` parameters. The following playbook shows how you can reboot a specific Packet device by setting the ``hostname`` parameter: + +.. 
code-block:: yaml+jinja + + # playbook_reboot.yml + + - name: reboot myserver + hosts: localhost + tasks: + + - community.general.packet_device: + project_id: + hostnames: myserver + state: rebooted + +You can also identify specific Packet devices with the ``device_ids`` parameter. The device's UUID can be found in the `Packet Portal `_ or by using a `CLI `_. The following playbook removes a Packet device using the ``device_ids`` field: + +.. code-block:: yaml+jinja + + # playbook_remove.yml + + - name: remove a device + hosts: localhost + tasks: + + - community.general.packet_device: + project_id: + device_ids: + state: absent + + +More Complex Playbooks +====================== + +In this example, we will create a CoreOS cluster with `user data `_. + + +The CoreOS cluster will use `etcd `_ for discovery of other servers in the cluster. Before provisioning your servers, you will need to generate a discovery token for your cluster: + +.. code-block:: console + + $ curl -w "\n" 'https://discovery.etcd.io/new?size=3' + +The following playbook will create an SSH key, 3 Packet servers, and then wait until SSH is ready (or until 5 minutes passed). Make sure to substitute the discovery token URL in ``user_data``, and the ``project_id`` before running ``ansible-playbook``. Also, feel free to change ``plan`` and ``facility``. + +.. 
code-block:: yaml+jinja + + # playbook_coreos.yml + + - name: Start 3 CoreOS nodes in Packet and wait until SSH is ready + hosts: localhost + tasks: + + - community.general.packet_sshkey: + key_file: ./id_rsa.pub + label: new + + - community.general.packet_device: + hostnames: [coreos-one, coreos-two, coreos-three] + operating_system: coreos_beta + plan: baremetal_0 + facility: ewr1 + project_id: + wait_for_public_IPv: 4 + user_data: | + # cloud-config + coreos: + etcd2: + discovery: https://discovery.etcd.io/ + advertise-client-urls: http://$private_ipv4:2379,http://$private_ipv4:4001 + initial-advertise-peer-urls: http://$private_ipv4:2380 + listen-client-urls: http://0.0.0.0:2379,http://0.0.0.0:4001 + listen-peer-urls: http://$private_ipv4:2380 + fleet: + public-ip: $private_ipv4 + units: + - name: etcd2.service + command: start + - name: fleet.service + command: start + register: newhosts + + - name: wait for ssh + ansible.builtin.wait_for: + delay: 1 + host: "{{ item.public_ipv4 }}" + port: 22 + state: started + timeout: 500 + loop: "{{ newhosts.results[0].devices }}" + + +As with most Ansible modules, the default states of the Packet modules are idempotent, meaning the resources in your project will remain the same after re-runs of a playbook. Thus, we can keep the ``packet_sshkey`` module call in our playbook. If the public key is already in your Packet account, the call will have no effect. + +The second module call provisions 3 Packet Type 0 (specified using the ``plan`` parameter) servers in the project identified by the ``project_id`` parameter. The servers are all provisioned with CoreOS beta (the ``operating_system`` parameter) and are customized with cloud-config user data passed to the ``user_data`` parameter. + +The ``packet_device`` module has a ``wait_for_public_IPv`` that is used to specify the version of the IP address to wait for (valid values are ``4`` or ``6`` for IPv4 or IPv6). 
If specified, Ansible will wait until the GET API call for a device contains an Internet-routeable IP address of the specified version. When referring to an IP address of a created device in subsequent module calls, it is wise to use the ``wait_for_public_IPv`` parameter, or ``state: active`` in the packet_device module call. + +Run the playbook: + +.. code-block:: console + + $ ansible-playbook playbook_coreos.yml + +Once the playbook quits, your new devices should be reachable through SSH. Try to connect to one and check if etcd has started properly: + +.. code-block:: console + + tomk@work $ ssh -i id_rsa core@$one_of_the_servers_ip + core@coreos-one ~ $ etcdctl cluster-health + +If you have any questions or comments let us know! help@packet.net diff --git a/docs/docsite/rst/guide_scaleway.rst b/docs/docsite/rst/guide_scaleway.rst new file mode 100644 index 0000000000..f3b7b24e0e --- /dev/null +++ b/docs/docsite/rst/guide_scaleway.rst @@ -0,0 +1,320 @@ +.. + Copyright (c) Ansible Project + GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) + SPDX-License-Identifier: GPL-3.0-or-later + +.. _ansible_collections.community.general.docsite.guide_scaleway: + +************** +Scaleway Guide +************** + +Introduction +============ + +`Scaleway `_ is a cloud provider supported by the community.general collection through a set of plugins and modules. +Those modules are: + +- :ansplugin:`community.general.scaleway_compute#module`: manages servers on Scaleway. You can use this module to create, restart and delete servers. 
+- :ansplugin:`community.general.scaleway_compute_private_network#module` +- :ansplugin:`community.general.scaleway_container#module` +- :ansplugin:`community.general.scaleway_container_info#module` +- :ansplugin:`community.general.scaleway_container_namespace_info#module` +- :ansplugin:`community.general.scaleway_container_namespace#module` +- :ansplugin:`community.general.scaleway_container_registry_info#module` +- :ansplugin:`community.general.scaleway_container_registry#module` +- :ansplugin:`community.general.scaleway_database_backup#module` +- :ansplugin:`community.general.scaleway_function#module` +- :ansplugin:`community.general.scaleway_function_info#module` +- :ansplugin:`community.general.scaleway_function_namespace_info#module` +- :ansplugin:`community.general.scaleway_function_namespace#module` +- :ansplugin:`community.general.scaleway_image_info#module` +- :ansplugin:`community.general.scaleway_ip#module` +- :ansplugin:`community.general.scaleway_ip_info#module` +- :ansplugin:`community.general.scaleway_lb#module` +- :ansplugin:`community.general.scaleway_organization_info#module` +- :ansplugin:`community.general.scaleway_private_network#module` +- :ansplugin:`community.general.scaleway_security_group#module` +- :ansplugin:`community.general.scaleway_security_group_info#module` +- :ansplugin:`community.general.scaleway_security_group_rule#module` +- :ansplugin:`community.general.scaleway_server_info#module` +- :ansplugin:`community.general.scaleway_snapshot_info#module` +- :ansplugin:`community.general.scaleway_sshkey#module`: adds a public SSH key from a file or value to the Packet infrastructure. Every subsequently-created device will have this public key installed in .ssh/authorized_keys. +- :ansplugin:`community.general.scaleway_user_data#module` +- :ansplugin:`community.general.scaleway_volume#module`: manages volumes on Scaleway. 
+- :ansplugin:`community.general.scaleway_volume_info#module` + +The plugins are: + +- :ansplugin:`community.general.scaleway#inventory`: inventory plugin + + +.. note:: + This guide assumes you are familiar with Ansible and how it works. + If you are not, have a look at :ref:`ansible_documentation` before getting started. + +Requirements +============ + +The Scaleway modules and inventory script connect to the Scaleway API using `Scaleway REST API `_. +To use the modules and inventory script you will need a Scaleway API token. +You can generate an API token through the `Scaleway console's credential page `__. +The simplest way to authenticate yourself is to set the Scaleway API token in an environment variable: + +.. code-block:: console + + $ export SCW_TOKEN=00000000-1111-2222-3333-444444444444 + +If you are not comfortable exporting your API token, you can pass it as a parameter to the modules using the ``api_token`` argument. + +If you want to use a new SSH key pair in this tutorial, you can generate it to ``./id_rsa`` and ``./id_rsa.pub`` as: + +.. code-block:: console + + $ ssh-keygen -t rsa -f ./id_rsa + +If you want to use an existing key pair, just copy the private and public key over to the playbook directory. + +How to add an SSH key? +====================== + +Connection to Scaleway Compute nodes use Secure Shell. +SSH keys are stored at the account level, which means that you can reuse the same SSH key in multiple nodes. +The first step to configure Scaleway compute resources is to have at least one SSH key configured. + +:ansplugin:`community.general.scaleway_sshkey#module` is a module that manages SSH keys on your Scaleway account. +You can add an SSH key to your account by including the following task in a playbook: + +.. code-block:: yaml+jinja + + - name: "Add SSH key" + community.general.scaleway_sshkey: + ssh_pub_key: "ssh-rsa AAAA..." + state: "present" + +The ``ssh_pub_key`` parameter contains your ssh public key as a string. 
Here is an example inside a playbook: + + +.. code-block:: yaml+jinja + + - name: Test SSH key lifecycle on a Scaleway account + hosts: localhost + gather_facts: false + environment: + SCW_API_KEY: "" + + tasks: + + - community.general.scaleway_sshkey: + ssh_pub_key: "ssh-rsa AAAAB...424242 developer@example.com" + state: present + register: result + + - ansible.builtin.assert: + that: + - result is success and result is changed + +How to create a compute instance? +================================= + +Now that we have an SSH key configured, the next step is to spin up a server! +:ansplugin:`community.general.scaleway_compute#module` is a module that can create, update and delete Scaleway compute instances: + +.. code-block:: yaml+jinja + + - name: Create a server + community.general.scaleway_compute: + name: foobar + state: present + image: 00000000-1111-2222-3333-444444444444 + organization: 00000000-1111-2222-3333-444444444444 + region: ams1 + commercial_type: START1-S + +Here are the parameter details for the example shown above: + +- ``name`` is the name of the instance (the one that will show up in your web console). +- ``image`` is the UUID of the system image you would like to use. + A list of all images is available for each availability zone. +- ``organization`` represents the organization that your account is attached to. +- ``region`` represents the Availability Zone which your instance is in (for this example, ``par1`` and ``ams1``). +- ``commercial_type`` represents the name of the commercial offers. + You can check out the Scaleway pricing page to find which instance is right for you. + +Take a look at this short playbook to see a working example using ``scaleway_compute``: + +.. 
code-block:: yaml+jinja + + - name: Test compute instance lifecycle on a Scaleway account + hosts: localhost + gather_facts: false + environment: + SCW_API_KEY: "" + + tasks: + + - name: Create a server + register: server_creation_task + community.general.scaleway_compute: + name: foobar + state: present + image: 00000000-1111-2222-3333-444444444444 + organization: 00000000-1111-2222-3333-444444444444 + region: ams1 + commercial_type: START1-S + wait: true + + - ansible.builtin.debug: + var: server_creation_task + + - ansible.builtin.assert: + that: + - server_creation_task is success + - server_creation_task is changed + + - name: Run it + community.general.scaleway_compute: + name: foobar + state: running + image: 00000000-1111-2222-3333-444444444444 + organization: 00000000-1111-2222-3333-444444444444 + region: ams1 + commercial_type: START1-S + wait: true + tags: + - web_server + register: server_run_task + + - ansible.builtin.debug: + var: server_run_task + + - ansible.builtin.assert: + that: + - server_run_task is success + - server_run_task is changed + +Dynamic Inventory Plugin +======================== + +Ansible ships with :ansplugin:`community.general.scaleway#inventory`. +You can now get a complete inventory of your Scaleway resources through this plugin and filter it on +different parameters (``regions`` and ``tags`` are currently supported). + +Let us create an example! +Suppose that we want to get all hosts that got the tag web_server. +Create a file named ``scaleway_inventory.yml`` with the following content: + +.. code-block:: yaml+jinja + + plugin: community.general.scaleway + regions: + - ams1 + - par1 + tags: + - web_server + +This inventory means that we want all hosts that got the tag ``web_server`` on the zones ``ams1`` and ``par1``. +Once you have configured this file, you can get the information using the following command: + +.. code-block:: console + + $ ansible-inventory --list -i scaleway_inventory.yml + +The output will be: + +.. 
code-block:: json + + { + "_meta": { + "hostvars": { + "dd8e3ae9-0c7c-459e-bc7b-aba8bfa1bb8d": { + "ansible_verbosity": 6, + "arch": "x86_64", + "commercial_type": "START1-S", + "hostname": "foobar", + "ipv4": "192.0.2.1", + "organization": "00000000-1111-2222-3333-444444444444", + "state": "running", + "tags": [ + "web_server" + ] + } + } + }, + "all": { + "children": [ + "ams1", + "par1", + "ungrouped", + "web_server" + ] + }, + "ams1": {}, + "par1": { + "hosts": [ + "dd8e3ae9-0c7c-459e-bc7b-aba8bfa1bb8d" + ] + }, + "ungrouped": {}, + "web_server": { + "hosts": [ + "dd8e3ae9-0c7c-459e-bc7b-aba8bfa1bb8d" + ] + } + } + +As you can see, we get different groups of hosts. +``par1`` and ``ams1`` are groups based on location. +``web_server`` is a group based on a tag. + +In case a filter parameter is not defined, the plugin supposes all values possible are wanted. +This means that for each tag that exists on your Scaleway compute nodes, a group based on each tag will be created. + +Scaleway S3 object storage +========================== + +`Object Storage `_ allows you to store any kind of objects (documents, images, videos, and so on). +As the Scaleway API is S3 compatible, Ansible supports it natively through the amazon.aws modules: :ansplugin:`amazon.aws.s3_bucket#module`, :ansplugin:`amazon.aws.s3_object#module`. + +You can find many examples in the `scaleway_s3 integration tests `_. + +.. 
code-block:: yaml+jinja + + - hosts: myserver + vars: + scaleway_region: nl-ams + s3_url: https://s3.nl-ams.scw.cloud + environment: + # AWS_ACCESS_KEY matches your scaleway organization id available at https://cloud.scaleway.com/#/account + AWS_ACCESS_KEY: 00000000-1111-2222-3333-444444444444 + # AWS_SECRET_KEY matches a secret token that you can retrieve at https://cloud.scaleway.com/#/credentials + AWS_SECRET_KEY: aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee + module_defaults: + group/amazon.aws.aws: + s3_url: '{{ s3_url }}' + region: '{{ scaleway_region }}' + tasks: + # use a fact instead of a variable, otherwise template is evaluate each time variable is used + - ansible.builtin.set_fact: + bucket_name: "{{ 99999999 | random | to_uuid }}" + + # "requester_pays:" is mandatory because Scaleway does not implement related API + # another way is to use amazon.aws.s3_object and "mode: create" ! + - amazon.aws.s3_bucket: + name: '{{ bucket_name }}' + requester_pays: + + - name: Another way to create the bucket + amazon.aws.s3_object: + bucket: '{{ bucket_name }}' + mode: create + encrypt: false + register: bucket_creation_check + + - name: add something in the bucket + amazon.aws.s3_object: + mode: put + bucket: '{{ bucket_name }}' + src: /tmp/test.txt # needs to be created before + object: test.txt + encrypt: false # server side encryption must be disabled diff --git a/docs/docsite/rst/guide_uthelper.rst b/docs/docsite/rst/guide_uthelper.rst new file mode 100644 index 0000000000..c4a4110d70 --- /dev/null +++ b/docs/docsite/rst/guide_uthelper.rst @@ -0,0 +1,394 @@ +.. + Copyright (c) Ansible Project + GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) + SPDX-License-Identifier: GPL-3.0-or-later + +.. _ansible_collections.community.general.docsite.guide_uthelper: + +UTHelper Guide +============== + +Introduction +^^^^^^^^^^^^ + +``UTHelper`` was written to reduce the boilerplate code used in unit tests for modules. 
+It was originally written to handle tests of modules that run external commands using ``AnsibleModule.run_command()``.
+At the time of writing (Feb 2025) that remains the only type of tests you can use
+``UTHelper`` for, but it aims to provide support for other types of interactions.
+
+There are currently many different ways to implement unit tests that validate a module based on the execution of external commands. See some examples:
+
+* `test_apk.py `_ - A very simple one
+* `test_bootc_manage.py `_ -
+  This one has more test cases, but do notice how the code is repeated amongst them.
+* `test_modprobe.py `_ -
+  This one has 15 tests in it, but to achieve that it declares 8 classes repeating quite a lot of code.
+
+As you can see, there is no consistency in the way these tests are executed -
+they all do the same thing eventually, but each one is written in a very distinct way.
+
+``UTHelper`` aims to:
+
+* provide a consistent idiom to define unit tests
+* reduce the code to a bare minimum, and
+* define tests as data instead
+* allow the test cases definition to be expressed not only as a Python data structure but also as YAML content
+
+Quickstart
+""""""""""
+
+To use UTHelper, your test module will need only a bare minimum of code:
+
+.. code-block:: python
+
+    # tests/unit/plugin/modules/test_ansible_module.py
+    from ansible_collections.community.general.plugins.modules import ansible_module
+    from .uthelper import UTHelper, RunCommandMock
+
+
+    UTHelper.from_module(ansible_module, __name__, mocks=[RunCommandMock])
+
+Then, in the test specification file, you have:
+
+..
code-block:: yaml
+
+    # tests/unit/plugin/modules/test_ansible_module.yaml
+    test_cases:
+      - id: test_ansible_module
+        flags:
+          diff: true
+        input:
+          state: present
+          name: Roger the Shrubber
+        output:
+          shrubbery:
+            looks: nice
+            price: not too expensive
+          changed: true
+          diff:
+            before:
+              shrubbery: null
+            after:
+              shrubbery:
+                looks: nice
+                price: not too expensive
+        mocks:
+          run_command:
+            - command: [/testbin/shrubber, --version]
+              rc: 0
+              out: "2.80.0\n"
+              err: ''
+            - command: [/testbin/shrubber, --make-shrubbery]
+              rc: 0
+              out: 'Shrubbery created'
+              err: ''
+
+.. note::
+
+    If you prefer to pick a different YAML file for the test cases, or if you prefer to define them in plain Python,
+    you can use the convenience methods ``UTHelper.from_file()`` and ``UTHelper.from_spec()``, respectively.
+    See more details below.
+
+
+Using ``UTHelper``
+^^^^^^^^^^^^^^^^^^
+
+Test Module
+"""""""""""
+
+``UTHelper`` is **strictly for unit tests**. To use it, you import the ``.uthelper.UTHelper`` class.
+As mentioned in different parts of this guide, there are three different mechanisms to load the test cases.
+
+.. seealso::
+
+    See the UTHelper class reference below for API details on the three different mechanisms.
+
+
+The easiest and most recommended way of using ``UTHelper`` is literally the example shown.
+See a real world example at
+`test_gconftool2.py `_.
+
+The ``from_module()`` method will pick the filename of the test module up (in the example above, ``tests/unit/plugins/modules/test_gconftool2.py``)
+and it will search for ``tests/unit/plugins/modules/test_gconftool2.yaml`` (or ``.yml`` if that is not found).
+In that file it will expect to find the test specification expressed in YAML format, conforming to the structure described below.
+
+If you prefer to read the test specification from a different file path, use ``from_file()`` passing the file handle for the YAML file.
+
+And, if for any reason you prefer or need to pass the data structure rather than dealing with YAML files, use the ``from_spec()`` method.
+A real world example for that can be found at
+`test_snap.py `_.
+
+
+Test Specification
+""""""""""""""""""
+
+The structure of the test specification data is described below.
+
+Top level
+---------
+
+At the top level there are two accepted keys:
+
+- ``anchors: dict``
+  Optional. Placeholder for you to define YAML anchors that can be repeated in the test cases.
+  Its contents are never accessed directly by ``UTHelper``.
+- ``test_cases: list``
+  Mandatory. List of test cases, see below for definition.
+
+Test cases
+----------
+
+You write the test cases with five elements:
+
+- ``id: str``
+  Mandatory. Used to identify the test case.
+
+- ``flags: dict``
+  Optional. Flags controlling the behavior of the test case. All flags are optional. Accepted flags:
+
+  * ``check: bool``: set to ``true`` if the module is to be executed in **check mode**.
+  * ``diff: bool``: set to ``true`` if the module is to be executed in **diff mode**.
+  * ``skip: str``: set the test case to be skipped, providing the message for ``pytest.skip()``.
+  * ``xfail: str``: set the test case to expect failure, providing the message for ``pytest.xfail()``.
+
+- ``input: dict``
+  Optional. Parameters for the Ansible module; it can be empty.
+
+- ``output: dict``
+  Optional. Expected return values from the Ansible module.
+  All RV names used here are expected to be found in the module output, but not all RVs in the output must be here.
+  It can include special RVs such as ``changed`` and ``diff``.
+  It can be empty.
+
+- ``mocks: dict``
+  Optional. Mocked interactions, ``run_command`` being the only one supported for now.
+  Each key in this dictionary refers to one subclass of ``TestCaseMock`` and its
+  structure is dictated by the ``TestCaseMock`` subclass implementation.
+  All keys are expected to be named using snake case, as in ``run_command``.
+ The ``TestCaseMock`` subclass is responsible for defining the name used in the test specification. + The structure for that specification is dependent on the implementing class. + See more details below for the implementation of ``RunCommandMock`` + +Example using YAML +------------------ + +We recommend you use ``UTHelper`` reading the test specifications from a YAML file. +See an example below of how one actually looks like (excerpt from ``test_opkg.yaml``): + +.. code-block:: yaml + + --- + anchors: + environ: &env-def {environ_update: {LANGUAGE: C, LC_ALL: C}, check_rc: false} + test_cases: + - id: install_zlibdev + input: + name: zlib-dev + state: present + output: + msg: installed 1 package(s) + mocks: + run_command: + - command: [/testbin/opkg, --version] + environ: *env-def + rc: 0 + out: '' + err: '' + - command: [/testbin/opkg, list-installed, zlib-dev] + environ: *env-def + rc: 0 + out: '' + err: '' + - command: [/testbin/opkg, install, zlib-dev] + environ: *env-def + rc: 0 + out: | + Installing zlib-dev (1.2.11-6) to root... + Downloading https://downloads.openwrt.org/releases/22.03.0/packages/mips_24kc/base/zlib-dev_1.2.11-6_mips_24kc.ipk + Installing zlib (1.2.11-6) to root... + Downloading https://downloads.openwrt.org/releases/22.03.0/packages/mips_24kc/base/zlib_1.2.11-6_mips_24kc.ipk + Configuring zlib. + Configuring zlib-dev. + err: '' + - command: [/testbin/opkg, list-installed, zlib-dev] + environ: *env-def + rc: 0 + out: | + zlib-dev - 1.2.11-6 + err: '' + - id: install_zlibdev_present + input: + name: zlib-dev + state: present + output: + msg: package(s) already present + mocks: + run_command: + - command: [/testbin/opkg, --version] + environ: *env-def + rc: 0 + out: '' + err: '' + - command: [/testbin/opkg, list-installed, zlib-dev] + environ: *env-def + rc: 0 + out: | + zlib-dev - 1.2.11-6 + err: '' + +TestCaseMocks Specifications +^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +The ``TestCaseMock`` subclass is free to define the expected data structure. 
+ +RunCommandMock Specification +"""""""""""""""""""""""""""" + +``RunCommandMock`` mocks can be specified with the key ``run_command`` and it expects a ``list`` in which elements follow the structure: + +- ``command: Union[list, str]`` + Mandatory. The command that is expected to be executed by the module. It corresponds to the parameter ``args`` of the ``AnsibleModule.run_command()`` call. + It can be either a list or a string, though the list form is generally recommended. +- ``environ: dict`` + Mandatory. All other parameters passed to the ``AnsibleModule.run_command()`` call. + Most commonly used are ``environ_update`` and ``check_rc``. + Must include all parameters the Ansible module uses in the ``AnsibleModule.run_command()`` call, otherwise the test will fail. +- ``rc: int`` + Mandatory. The return code for the command execution. + As per usual in bash scripting, a value of ``0`` means success, whereas any other number is an error code. +- ``out: str`` + Mandatory. The *stdout* result of the command execution, as one single string containing zero or more lines. +- ``err: str`` + Mandatory. The *stderr* result of the command execution, as one single string containing zero or more lines. + + +``UTHelper`` Reference +^^^^^^^^^^^^^^^^^^^^^^ + +.. py:module:: .uthelper + + .. py:class:: UTHelper + + A class to encapsulate unit tests. + + .. py:staticmethod:: from_spec(ansible_module, test_module, test_spec, mocks=None) + + Creates an ``UTHelper`` instance from a given test specification. + + :param ansible_module: The Ansible module to be tested. + :type ansible_module: :py:class:`types.ModuleType` + :param test_module: The test module. + :type test_module: :py:class:`types.ModuleType` + :param test_spec: The test specification. + :type test_spec: dict + :param mocks: List of ``TestCaseMocks`` to be used during testing. Currently only ``RunCommandMock`` exists. + :type mocks: list or None + :return: An ``UTHelper`` instance. 
+ :rtype: UTHelper + + Example usage of ``from_spec()``: + + .. code-block:: python + + import sys + + from ansible_collections.community.general.plugins.modules import ansible_module + from .uthelper import UTHelper, RunCommandMock + + TEST_SPEC = dict( + test_cases=[ + ... + ] + ) + + helper = UTHelper.from_spec(ansible_module, sys.modules[__name__], TEST_SPEC, mocks=[RunCommandMock]) + + .. py:staticmethod:: from_file(ansible_module, test_module, test_spec_filehandle, mocks=None) + + Creates an ``UTHelper`` instance from a test specification file. + + :param ansible_module: The Ansible module to be tested. + :type ansible_module: :py:class:`types.ModuleType` + :param test_module: The test module. + :type test_module: :py:class:`types.ModuleType` + :param test_spec_filehandle: A file handle to an file stream handle providing the test specification in YAML format. + :type test_spec_filehandle: ``file-like object`` + :param mocks: List of ``TestCaseMocks`` to be used during testing. Currently only ``RunCommandMock`` exists. + :type mocks: list or None + :return: An ``UTHelper`` instance. + :rtype: UTHelper + + Example usage of ``from_file()``: + + .. code-block:: python + + import sys + + from ansible_collections.community.general.plugins.modules import ansible_module + from .uthelper import UTHelper, RunCommandMock + + with open("test_spec.yaml", "r") as test_spec_filehandle: + helper = UTHelper.from_file(ansible_module, sys.modules[__name__], test_spec_filehandle, mocks=[RunCommandMock]) + + .. py:staticmethod:: from_module(ansible_module, test_module_name, mocks=None) + + Creates an ``UTHelper`` instance from a given Ansible module and test module. + + :param ansible_module: The Ansible module to be tested. + :type ansible_module: :py:class:`types.ModuleType` + :param test_module_name: The name of the test module. It works if passed ``__name__``. + :type test_module_name: str + :param mocks: List of ``TestCaseMocks`` to be used during testing. 
Currently only ``RunCommandMock`` exists.
+      :type mocks: list or None
+      :return: An ``UTHelper`` instance.
+      :rtype: UTHelper
+
+    Example usage of ``from_module()``:
+
+    .. code-block:: python
+
+        from ansible_collections.community.general.plugins.modules import ansible_module
+        from .uthelper import UTHelper, RunCommandMock
+
+        # Example usage
+        helper = UTHelper.from_module(ansible_module, __name__, mocks=[RunCommandMock])
+
+
+Creating TestCaseMocks
+^^^^^^^^^^^^^^^^^^^^^^
+
+To create a new ``TestCaseMock`` you must extend that class and implement the relevant parts:
+
+.. code-block:: python
+
+    class ShrubberyMock(TestCaseMock):
+        # this name is mandatory, it is the name used in the test specification
+        name = "shrubbery"
+
+        def setup(self, mocker):
+            # perform setup, commonly using mocker to patch some other piece of code
+            ...
+
+        def check(self, test_case, results):
+            # verify the test execution met the expectations of the test case
+            # for example the function was called as many times as it should
+            ...
+
+        def fixtures(self):
+            # returns a dict mapping names to pytest fixtures that should be used for the test case
+            # for example, in RunCommandMock it creates a fixture that patches AnsibleModule.get_bin_path
+            ...
+
+Caveats
+^^^^^^^
+
+Known issues/opportunities for improvement:
+
+* Only one ``UTHelper`` per test module: UTHelper injects a test function with a fixed name into the module's namespace,
+  so placing a second ``UTHelper`` instance is going to overwrite the function created by the first one.
+* Order of elements in a module's namespace is not consistent across executions in Python 3.5, so adding more tests to the test module
+  might make ``UTHelper`` add its function before or after the other test functions.
+  In the community.general collection the CI process uses ``pytest-xdist`` to parallelize and distribute the tests,
+  and it requires the order of the tests to be consistent.
+
+..
versionadded:: 7.5.0 diff --git a/docs/docsite/rst/guide_vardict.rst b/docs/docsite/rst/guide_vardict.rst new file mode 100644 index 0000000000..1beef0c57f --- /dev/null +++ b/docs/docsite/rst/guide_vardict.rst @@ -0,0 +1,176 @@ +.. + Copyright (c) Ansible Project + GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) + SPDX-License-Identifier: GPL-3.0-or-later + +.. _ansible_collections.community.general.docsite.guide_vardict: + +VarDict Guide +============= + +Introduction +^^^^^^^^^^^^ + +The ``ansible_collections.community.general.plugins.module_utils.vardict`` module util provides the +``VarDict`` class to help manage the module variables. That class is a container for module variables, +especially the ones for which the module must keep track of state changes, and the ones that should +be published as return values. + +Each variable has extra behaviors controlled by associated metadata, simplifying the generation of +output values from the module. + +Quickstart +"""""""""" + +The simplest way of using ``VarDict`` is: + +.. code-block:: python + + from ansible_collections.community.general.plugins.module_utils.vardict import VarDict + +Then in ``main()``, or any other function called from there: + +.. code-block:: python + + vars = VarDict() + + # Next 3 statements are equivalent + vars.abc = 123 + vars["abc"] = 123 + vars.set("abc", 123) + + vars.xyz = "bananas" + vars.ghi = False + +And by the time the module is about to exit: + +.. code-block:: python + + results = vars.output() + module.exit_json(**results) + +That makes the return value of the module: + +.. code-block:: json + + { + "abc": 123, + "xyz": "bananas", + "ghi": false + } + +Metadata +"""""""" + +The metadata values associated with each variable are: + +- ``output: bool`` - marks the variable for module output as a module return value. +- ``fact: bool`` - marks the variable for module output as an Ansible fact. 
+- ``verbosity: int`` - sets the minimum level of verbosity for which the variable will be included in the output.
+- ``change: bool`` - controls the detection of changes in the variable value.
+- ``initial_value: any`` - when using ``change`` and you need to forcefully set an initial value for the variable.
+- ``diff: bool`` - used along with ``change``, this generates an Ansible-style diff ``dict``.
+
+See the sections below for more details on how to use the metadata.
+
+
+Using VarDict
+^^^^^^^^^^^^^
+
+Basic Usage
+"""""""""""
+
+As shown above, variables can be accessed using the ``[]`` operator, as in a ``dict`` object,
+and also as an object attribute, such as ``vars.abc``. The form using the ``set()``
+method is special in the sense that you can use it to set metadata values:
+
+.. code-block:: python
+
+    vars.set("abc", 123, output=False)
+    vars.set("abc", 123, output=True, change=True)
+
+Another way to set metadata after the variables have been created is:
+
+.. code-block:: python
+
+    vars.set_meta("abc", output=False)
+    vars.set_meta("abc", output=True, change=True, diff=True)
+
+You can use either the operator or the attribute form to access the value of the variable. Other ways to
+access its value and its metadata are:
+
+.. code-block:: python
+
+    print("abc value = {0}".format(vars.var("abc")["value"]))  # get the value
+    print("abc output? {0}".format(vars.get_meta("abc")["output"]))  # get the metadata like this
+
+The names of methods, such as ``set``, ``get_meta``, ``output`` amongst others, are reserved and
+cannot be used as variable names. If you try to use a reserved name, a ``ValueError`` exception
+is raised with the message "Name is reserved".
+
+Generating output
+"""""""""""""""""
+
+By default, every variable created will be enabled for output with minimum verbosity set to zero; in
+other words, they will always be in the output by default.
+
+You can control that when creating the variable for the first time or later in the code:
+
+..
code-block:: python + + vars.set("internal", x + 4, output=False) + vars.set_meta("internal", output=False) + +You can also set the verbosity of some variable, like: + +.. code-block:: python + + vars.set("abc", x + 4) + vars.set("debug_x", x, verbosity=3) + + results = vars.output(module._verbosity) + module.exit_json(**results) + +If the module was invoked with verbosity lower than 3, then the output will only contain +the variable ``abc``. If running at higher verbosity, as in ``ansible-playbook -vvv``, +then the output will also contain ``debug_x``. + +Generating facts is very similar to regular output, but variables are not marked as facts by default. + +.. code-block:: python + + vars.set("modulefact", x + 4, fact=True) + vars.set("debugfact", x, fact=True, verbosity=3) + + results = vars.output(module._verbosity) + results["ansible_facts"] = {"module_name": vars.facts(module._verbosity)} + module.exit_json(**results) + +Handling change +""""""""""""""" + +You can use ``VarDict`` to determine whether variables have had their values changed. + +.. code-block:: python + + vars.set("abc", 42, change=True) + vars.abc = 90 + + results = vars.output() + results["changed"] = vars.has_changed + module.exit_json(**results) + +If tracking changes in variables, you may want to present the difference between the initial and the final +values of it. For that, you want to use: + +.. code-block:: python + + vars.set("abc", 42, change=True, diff=True) + vars.abc = 90 + + results = vars.output() + results["changed"] = vars.has_changed + results["diff"] = vars.diff() + module.exit_json(**results) + +.. versionadded:: 7.1.0 diff --git a/docs/docsite/rst/test_guide.rst b/docs/docsite/rst/test_guide.rst index 2df0ed04cd..a1f5723df4 100644 --- a/docs/docsite/rst/test_guide.rst +++ b/docs/docsite/rst/test_guide.rst @@ -1,16 +1,21 @@ +.. 
+ Copyright (c) Ansible Project + GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) + SPDX-License-Identifier: GPL-3.0-or-later + .. _ansible_collections.community.general.docsite.test_guide: community.general Test (Plugin) Guide ===================================== -The :ref:`community.general collection ` offers currently one test plugin. +The :anscollection:`community.general collection ` offers currently one test plugin. .. contents:: Topics Feature Tests ------------- -The ``a_module`` test allows to check whether a given string refers to an existing module or action plugin. This can be useful in roles, which can use this to ensure that required modules are present ahead of time. +The :ansplugin:`community.general.a_module test ` allows to check whether a given string refers to an existing module or action plugin. This can be useful in roles, which can use this to ensure that required modules are present ahead of time. .. code-block:: yaml+jinja diff --git a/galaxy.yml b/galaxy.yml index 8f637b27c2..0288625dbb 100644 --- a/galaxy.yml +++ b/galaxy.yml @@ -1,16 +1,21 @@ +--- +# Copyright (c) Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + namespace: community name: general -version: 5.0.1 +version: 12.0.0 readme: README.md authors: - Ansible (https://github.com/ansible) -description: null +description: >- + The community.general collection is a part of the Ansible package and includes many modules and + plugins supported by Ansible community which are not part of more specialized community collections. 
license_file: COPYING -tags: [community] -# NOTE: No dependencies are expected to be added here -# dependencies: +tags: + - community repository: https://github.com/ansible-collections/community.general documentation: https://docs.ansible.com/ansible/latest/collections/community/general/ homepage: https://github.com/ansible-collections/community.general issues: https://github.com/ansible-collections/community.general/issues -#type: flatmap diff --git a/meta/runtime.yml b/meta/runtime.yml index c3a439b6f3..d2be5a89c1 100644 --- a/meta/runtime.yml +++ b/meta/runtime.yml @@ -1,16 +1,134 @@ --- -requires_ansible: '>=2.11.0' +# Copyright (c) Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +requires_ansible: '>=2.17.0' +action_groups: + consul: + - consul_agent_check + - consul_agent_service + - consul_auth_method + - consul_binding_rule + - consul_policy + - consul_role + - consul_session + - consul_token + proxmox: + - metadata: + extend_group: + - community.proxmox.proxmox + keycloak: + - keycloak_authentication + - keycloak_authentication_required_actions + - keycloak_authz_authorization_scope + - keycloak_authz_custom_policy + - keycloak_authz_permission + - keycloak_authz_permission_info + - keycloak_client + - keycloak_client_rolemapping + - keycloak_client_rolescope + - keycloak_clientscope + - keycloak_clientscope_type + - keycloak_clientsecret_info + - keycloak_clientsecret_regenerate + - keycloak_clienttemplate + - keycloak_component + - keycloak_component_info + - keycloak_group + - keycloak_identity_provider + - keycloak_realm + - keycloak_realm_key + - keycloak_realm_keys_metadata_info + - keycloak_realm_rolemapping + - keycloak_role + - keycloak_user + - keycloak_user_federation + - keycloak_user_rolemapping + - keycloak_userprofile + scaleway: + - scaleway_compute + - scaleway_compute_private_network + - scaleway_container + - 
scaleway_container_info + - scaleway_container_namespace + - scaleway_container_namespace_info + - scaleway_container_registry + - scaleway_container_registry_info + - scaleway_database_backup + - scaleway_function + - scaleway_function_info + - scaleway_function_namespace + - scaleway_function_namespace_info + - scaleway_image_info + - scaleway_ip + - scaleway_ip_info + - scaleway_lb + - scaleway_organization_info + - scaleway_private_network + - scaleway_security_group + - scaleway_security_group_info + - scaleway_security_group_rule + - scaleway_server_info + - scaleway_snapshot_info + - scaleway_sshkey + - scaleway_user_data + - scaleway_volume + - scaleway_volume_info + plugin_routing: + callback: + actionable: + tombstone: + removal_version: 2.0.0 + warning_text: Use the 'default' callback plugin with 'display_skipped_hosts + = no' and 'display_ok_hosts = no' options. + full_skip: + tombstone: + removal_version: 2.0.0 + warning_text: Use the 'default' callback plugin with 'display_skipped_hosts + = no' option. + hipchat: + tombstone: + removal_version: 10.0.0 + warning_text: The hipchat service has been discontinued and the self-hosted variant has been End of Life since 2020. + osx_say: + redirect: community.general.say + stderr: + tombstone: + removal_version: 2.0.0 + warning_text: Use the 'default' callback plugin with 'display_failed_stderr + = yes' option. + yaml: + tombstone: + removal_version: 12.0.0 + warning_text: >- + The plugin has been superseded by the option `result_format=yaml` in callback plugin ansible.builtin.default from ansible-core 2.13 onwards. connection: docker: redirect: community.docker.docker oc: redirect: community.okd.oc + proxmox_pct_remote: + redirect: community.proxmox.proxmox_pct_remote + deprecation: + removal_version: 15.0.0 + warning_text: The proxmox content has been moved to community.proxmox. 
lookup: gcp_storage_file: redirect: community.google.gcp_storage_file hashi_vault: redirect: community.hashi_vault.hashi_vault + hiera: + deprecation: + removal_version: 13.0.0 + warning_text: >- + Hiera has been deprecated a long time ago. + If you disagree with this deprecation, please create an issue in the community.general repository. + manifold: + tombstone: + removal_version: 11.0.0 + warning_text: Company was acquired in 2021 and service was ceased afterwards. nios: redirect: infoblox.nios_modules.nios_lookup nios_next_ip: @@ -18,160 +136,72 @@ plugin_routing: nios_next_network: redirect: infoblox.nios_modules.nios_next_network modules: - aerospike_migrations: - redirect: community.general.database.aerospike.aerospike_migrations - airbrake_deployment: - redirect: community.general.monitoring.airbrake_deployment - aix_devices: - redirect: community.general.system.aix_devices - aix_filesystem: - redirect: community.general.system.aix_filesystem - aix_inittab: - redirect: community.general.system.aix_inittab - aix_lvg: - redirect: community.general.system.aix_lvg - aix_lvol: - redirect: community.general.system.aix_lvol - alerta_customer: - redirect: community.general.monitoring.alerta_customer - ali_instance: - redirect: community.general.cloud.alicloud.ali_instance ali_instance_facts: tombstone: removal_version: 3.0.0 warning_text: Use community.general.ali_instance_info instead. 
- ali_instance_info: - redirect: community.general.cloud.alicloud.ali_instance_info - alternatives: - redirect: community.general.system.alternatives - ansible_galaxy_install: - redirect: community.general.packaging.language.ansible_galaxy_install - apache2_mod_proxy: - redirect: community.general.web_infrastructure.apache2_mod_proxy - apache2_module: - redirect: community.general.web_infrastructure.apache2_module - apk: - redirect: community.general.packaging.os.apk - apt_repo: - redirect: community.general.packaging.os.apt_repo - apt_rpm: - redirect: community.general.packaging.os.apt_rpm - archive: - redirect: community.general.files.archive atomic_container: - redirect: community.general.cloud.atomic.atomic_container + deprecation: + removal_version: 13.0.0 + warning_text: Project Atomic was sunset by the end of 2019. atomic_host: - redirect: community.general.cloud.atomic.atomic_host + deprecation: + removal_version: 13.0.0 + warning_text: Project Atomic was sunset by the end of 2019. atomic_image: - redirect: community.general.cloud.atomic.atomic_image - awall: - redirect: community.general.system.awall - beadm: - redirect: community.general.system.beadm + deprecation: + removal_version: 13.0.0 + warning_text: Project Atomic was sunset by the end of 2019. 
bearychat: - redirect: community.general.notification.bearychat - bigpanda: - redirect: community.general.monitoring.bigpanda - bitbucket_access_key: - redirect: community.general.source_control.bitbucket.bitbucket_access_key - bitbucket_pipeline_key_pair: - redirect: community.general.source_control.bitbucket.bitbucket_pipeline_key_pair - bitbucket_pipeline_known_host: - redirect: community.general.source_control.bitbucket.bitbucket_pipeline_known_host - bitbucket_pipeline_variable: - redirect: community.general.source_control.bitbucket.bitbucket_pipeline_variable - bower: - redirect: community.general.packaging.language.bower - bundler: - redirect: community.general.packaging.language.bundler - bzr: - redirect: community.general.source_control.bzr - campfire: - redirect: community.general.notification.campfire - capabilities: - redirect: community.general.system.capabilities - cargo: - redirect: community.general.packaging.language.cargo + tombstone: + removal_version: 12.0.0 + warning_text: Chat service is no longer available. catapult: - redirect: community.general.notification.catapult - circonus_annotation: - redirect: community.general.monitoring.circonus_annotation + deprecation: + removal_version: 13.0.0 + warning_text: DNS fails to resolve the API endpoint used by the module since Oct 2024. See https://github.com/ansible-collections/community.general/issues/10318 for details. cisco_spark: - redirect: community.general.notification.cisco_spark - cisco_webex: - redirect: community.general.notification.cisco_webex - clc_aa_policy: - redirect: community.general.cloud.centurylink.clc_aa_policy + redirect: community.general.cisco_webex clc_alert_policy: - redirect: community.general.cloud.centurylink.clc_alert_policy + tombstone: + removal_version: 11.0.0 + warning_text: CenturyLink Cloud services went EOL in September 2023. 
clc_blueprint_package: - redirect: community.general.cloud.centurylink.clc_blueprint_package + tombstone: + removal_version: 11.0.0 + warning_text: CenturyLink Cloud services went EOL in September 2023. clc_firewall_policy: - redirect: community.general.cloud.centurylink.clc_firewall_policy + tombstone: + removal_version: 11.0.0 + warning_text: CenturyLink Cloud services went EOL in September 2023. clc_group: - redirect: community.general.cloud.centurylink.clc_group + tombstone: + removal_version: 11.0.0 + warning_text: CenturyLink Cloud services went EOL in September 2023. clc_loadbalancer: - redirect: community.general.cloud.centurylink.clc_loadbalancer + tombstone: + removal_version: 11.0.0 + warning_text: CenturyLink Cloud services went EOL in September 2023. clc_modify_server: - redirect: community.general.cloud.centurylink.clc_modify_server + tombstone: + removal_version: 11.0.0 + warning_text: CenturyLink Cloud services went EOL in September 2023. clc_publicip: - redirect: community.general.cloud.centurylink.clc_publicip + tombstone: + removal_version: 11.0.0 + warning_text: CenturyLink Cloud services went EOL in September 2023. clc_server: - redirect: community.general.cloud.centurylink.clc_server + tombstone: + removal_version: 11.0.0 + warning_text: CenturyLink Cloud services went EOL in September 2023. 
clc_server_snapshot: - redirect: community.general.cloud.centurylink.clc_server_snapshot - cloud_init_data_facts: - redirect: community.general.cloud.misc.cloud_init_data_facts - cloudflare_dns: - redirect: community.general.net_tools.cloudflare_dns - cobbler_sync: - redirect: community.general.remote_management.cobbler.cobbler_sync - cobbler_system: - redirect: community.general.remote_management.cobbler.cobbler_system - composer: - redirect: community.general.packaging.language.composer - consul: - redirect: community.general.clustering.consul.consul + tombstone: + removal_version: 11.0.0 + warning_text: CenturyLink Cloud services went EOL in September 2023. consul_acl: - redirect: community.general.clustering.consul.consul_acl - consul_kv: - redirect: community.general.clustering.consul.consul_kv - consul_session: - redirect: community.general.clustering.consul.consul_session - copr: - redirect: community.general.packaging.os.copr - cpanm: - redirect: community.general.packaging.language.cpanm - cronvar: - redirect: community.general.system.cronvar - crypttab: - redirect: community.general.system.crypttab - datadog_downtime: - redirect: community.general.monitoring.datadog.datadog_downtime - datadog_event: - redirect: community.general.monitoring.datadog.datadog_event - datadog_monitor: - redirect: community.general.monitoring.datadog.datadog_monitor - dconf: - redirect: community.general.system.dconf - deploy_helper: - redirect: community.general.web_infrastructure.deploy_helper - dimensiondata_network: - redirect: community.general.cloud.dimensiondata.dimensiondata_network - dimensiondata_vlan: - redirect: community.general.cloud.dimensiondata.dimensiondata_vlan - discord: - redirect: community.general.notification.discord - django_manage: - redirect: community.general.web_infrastructure.django_manage - dnf_versionlock: - redirect: community.general.packaging.os.dnf_versionlock - dnsimple: - redirect: community.general.net_tools.dnsimple - dnsimple_info: - 
redirect: community.general.net_tools.dnsimple_info - dnsmadeeasy: - redirect: community.general.net_tools.dnsmadeeasy + tombstone: + removal_version: 10.0.0 + warning_text: Use community.general.consul_token and/or community.general.consul_policy instead. docker_compose: redirect: community.docker.docker_compose docker_config: @@ -226,36 +256,19 @@ plugin_routing: redirect: community.docker.docker_volume docker_volume_info: redirect: community.docker.docker_volume_info - dpkg_divert: - redirect: community.general.system.dpkg_divert - easy_install: - redirect: community.general.packaging.language.easy_install - ejabberd_user: - redirect: community.general.web_infrastructure.ejabberd_user - elasticsearch_plugin: - redirect: community.general.database.misc.elasticsearch_plugin - emc_vnx_sg_member: - redirect: community.general.storage.emc.emc_vnx_sg_member - etcd3: - redirect: community.general.clustering.etcd3 facter: - redirect: community.general.system.facter - filesize: - redirect: community.general.files.filesize - filesystem: - redirect: community.general.system.filesystem - flatpak: - redirect: community.general.packaging.os.flatpak - flatpak_remote: - redirect: community.general.packaging.os.flatpak_remote + tombstone: + removal_version: 12.0.0 + warning_text: Use community.general.facter_facts instead. flowdock: - redirect: community.general.notification.flowdock + tombstone: + removal_version: 9.0.0 + warning_text: This module relied on HTTPS APIs that do not exist anymore and + there is no clear path to update. foreman: tombstone: removal_version: 2.0.0 warning_text: Use the modules from the theforeman.foreman collection instead. 
- gandi_livedns: - redirect: community.general.net_tools.gandi_livedns gc_storage: redirect: community.google.gc_storage gcdns_record: @@ -290,8 +303,6 @@ plugin_routing: redirect: community.google.gce_snapshot gce_tag: redirect: community.google.gce_tag - gconftool2: - redirect: community.general.system.gconftool2 gcp_backend_service: tombstone: removal_version: 2.0.0 @@ -327,63 +338,13 @@ plugin_routing: removal_version: 2.0.0 warning_text: Use google.cloud.gcp_spanner_database and/or google.cloud.gcp_spanner_instance instead. - gem: - redirect: community.general.packaging.language.gem - git_config: - redirect: community.general.source_control.git_config - github_deploy_key: - redirect: community.general.source_control.github.github_deploy_key github_hooks: tombstone: removal_version: 2.0.0 warning_text: Use community.general.github_webhook and community.general.github_webhook_info instead. - github_issue: - redirect: community.general.source_control.github.github_issue - github_key: - redirect: community.general.source_control.github.github_key - github_release: - redirect: community.general.source_control.github.github_release - github_repo: - redirect: community.general.source_control.github.github_repo - github_webhook: - redirect: community.general.source_control.github.github_webhook - github_webhook_info: - redirect: community.general.source_control.github.github_webhook_info - gitlab_branch: - redirect: community.general.source_control.gitlab.gitlab_branch - gitlab_deploy_key: - redirect: community.general.source_control.gitlab.gitlab_deploy_key - gitlab_group: - redirect: community.general.source_control.gitlab.gitlab_group - gitlab_group_members: - redirect: community.general.source_control.gitlab.gitlab_group_members - gitlab_group_variable: - redirect: community.general.source_control.gitlab.gitlab_group_variable - gitlab_hook: - redirect: community.general.source_control.gitlab.gitlab_hook - gitlab_project: - redirect: 
community.general.source_control.gitlab.gitlab_project - gitlab_project_members: - redirect: community.general.source_control.gitlab.gitlab_project_members - gitlab_project_variable: - redirect: community.general.source_control.gitlab.gitlab_project_variable - gitlab_protected_branch: - redirect: community.general.source_control.gitlab.gitlab_protected_branch - gitlab_runner: - redirect: community.general.source_control.gitlab.gitlab_runner - gitlab_user: - redirect: community.general.source_control.gitlab.gitlab_user - grove: - redirect: community.general.notification.grove - gunicorn: - redirect: community.general.web_infrastructure.gunicorn hana_query: - redirect: community.general.database.saphana.hana_query - haproxy: - redirect: community.general.net_tools.haproxy - heroku_collaborator: - redirect: community.general.cloud.heroku.heroku_collaborator + redirect: community.sap_libs.sap_hdbsql hetzner_failover_ip: redirect: community.hrobot.failover_ip hetzner_failover_ip_info: @@ -392,222 +353,30 @@ plugin_routing: redirect: community.hrobot.firewall hetzner_firewall_info: redirect: community.hrobot.firewall_info - hg: - redirect: community.general.source_control.hg hipchat: - redirect: community.general.notification.hipchat - homebrew: - redirect: community.general.packaging.os.homebrew - homebrew_cask: - redirect: community.general.packaging.os.homebrew_cask - homebrew_tap: - redirect: community.general.packaging.os.homebrew_tap - homectl: - redirect: community.general.system.homectl - honeybadger_deployment: - redirect: community.general.monitoring.honeybadger_deployment - hpilo_boot: - redirect: community.general.remote_management.hpilo.hpilo_boot + tombstone: + removal_version: 11.0.0 + warning_text: The hipchat service has been discontinued and the self-hosted variant has been End of Life since 2020. hpilo_facts: tombstone: removal_version: 3.0.0 warning_text: Use community.general.hpilo_info instead. 
- hpilo_info: - redirect: community.general.remote_management.hpilo.hpilo_info - hponcfg: - redirect: community.general.remote_management.hpilo.hponcfg - htpasswd: - redirect: community.general.web_infrastructure.htpasswd - hwc_ecs_instance: - redirect: community.general.cloud.huawei.hwc_ecs_instance - hwc_evs_disk: - redirect: community.general.cloud.huawei.hwc_evs_disk - hwc_network_vpc: - redirect: community.general.cloud.huawei.hwc_network_vpc - hwc_smn_topic: - redirect: community.general.cloud.huawei.hwc_smn_topic - hwc_vpc_eip: - redirect: community.general.cloud.huawei.hwc_vpc_eip - hwc_vpc_peering_connect: - redirect: community.general.cloud.huawei.hwc_vpc_peering_connect - hwc_vpc_port: - redirect: community.general.cloud.huawei.hwc_vpc_port - hwc_vpc_private_ip: - redirect: community.general.cloud.huawei.hwc_vpc_private_ip - hwc_vpc_route: - redirect: community.general.cloud.huawei.hwc_vpc_route - hwc_vpc_security_group: - redirect: community.general.cloud.huawei.hwc_vpc_security_group - hwc_vpc_security_group_rule: - redirect: community.general.cloud.huawei.hwc_vpc_security_group_rule - hwc_vpc_subnet: - redirect: community.general.cloud.huawei.hwc_vpc_subnet - ibm_sa_domain: - redirect: community.general.storage.ibm.ibm_sa_domain - ibm_sa_host: - redirect: community.general.storage.ibm.ibm_sa_host - ibm_sa_host_ports: - redirect: community.general.storage.ibm.ibm_sa_host_ports - ibm_sa_pool: - redirect: community.general.storage.ibm.ibm_sa_pool - ibm_sa_vol: - redirect: community.general.storage.ibm.ibm_sa_vol - ibm_sa_vol_map: - redirect: community.general.storage.ibm.ibm_sa_vol_map - icinga2_feature: - redirect: community.general.monitoring.icinga2_feature - icinga2_host: - redirect: community.general.monitoring.icinga2_host idrac_firmware: redirect: dellemc.openmanage.idrac_firmware - idrac_redfish_command: - redirect: community.general.remote_management.redfish.idrac_redfish_command - idrac_redfish_config: - redirect: 
community.general.remote_management.redfish.idrac_redfish_config idrac_redfish_facts: tombstone: removal_version: 3.0.0 warning_text: Use community.general.idrac_redfish_info instead. - idrac_redfish_info: - redirect: community.general.remote_management.redfish.idrac_redfish_info idrac_server_config_profile: redirect: dellemc.openmanage.idrac_server_config_profile - ilo_redfish_config: - redirect: community.general.remote_management.redfish.ilo_redfish_config - ilo_redfish_info: - redirect: community.general.remote_management.redfish.ilo_redfish_info - imc_rest: - redirect: community.general.remote_management.imc.imc_rest - imgadm: - redirect: community.general.cloud.smartos.imgadm - infinity: - redirect: community.general.net_tools.infinity.infinity - influxdb_database: - redirect: community.general.database.influxdb.influxdb_database - influxdb_query: - redirect: community.general.database.influxdb.influxdb_query - influxdb_retention_policy: - redirect: community.general.database.influxdb.influxdb_retention_policy - influxdb_user: - redirect: community.general.database.influxdb.influxdb_user - influxdb_write: - redirect: community.general.database.influxdb.influxdb_write - ini_file: - redirect: community.general.files.ini_file - installp: - redirect: community.general.packaging.os.installp - interfaces_file: - redirect: community.general.system.interfaces_file - ip_netns: - redirect: community.general.net_tools.ip_netns - ipa_config: - redirect: community.general.identity.ipa.ipa_config - ipa_dnsrecord: - redirect: community.general.identity.ipa.ipa_dnsrecord - ipa_dnszone: - redirect: community.general.identity.ipa.ipa_dnszone - ipa_group: - redirect: community.general.identity.ipa.ipa_group - ipa_hbacrule: - redirect: community.general.identity.ipa.ipa_hbacrule - ipa_host: - redirect: community.general.identity.ipa.ipa_host - ipa_hostgroup: - redirect: community.general.identity.ipa.ipa_hostgroup - ipa_otpconfig: - redirect: 
community.general.identity.ipa.ipa_otpconfig - ipa_otptoken: - redirect: community.general.identity.ipa.ipa_otptoken - ipa_pwpolicy: - redirect: community.general.identity.ipa.ipa_pwpolicy - ipa_role: - redirect: community.general.identity.ipa.ipa_role - ipa_service: - redirect: community.general.identity.ipa.ipa_service - ipa_subca: - redirect: community.general.identity.ipa.ipa_subca - ipa_sudocmd: - redirect: community.general.identity.ipa.ipa_sudocmd - ipa_sudocmdgroup: - redirect: community.general.identity.ipa.ipa_sudocmdgroup - ipa_sudorule: - redirect: community.general.identity.ipa.ipa_sudorule - ipa_user: - redirect: community.general.identity.ipa.ipa_user - ipa_vault: - redirect: community.general.identity.ipa.ipa_vault - ipify_facts: - redirect: community.general.net_tools.ipify_facts - ipinfoio_facts: - redirect: community.general.net_tools.ipinfoio_facts - ipmi_boot: - redirect: community.general.remote_management.ipmi.ipmi_boot - ipmi_power: - redirect: community.general.remote_management.ipmi.ipmi_power - iptables_state: - redirect: community.general.system.iptables_state - ipwcli_dns: - redirect: community.general.net_tools.ipwcli_dns - irc: - redirect: community.general.notification.irc - iso_create: - redirect: community.general.files.iso_create - iso_extract: - redirect: community.general.files.iso_extract - jabber: - redirect: community.general.notification.jabber - java_cert: - redirect: community.general.system.java_cert - java_keystore: - redirect: community.general.system.java_keystore - jboss: - redirect: community.general.web_infrastructure.jboss - jenkins_build: - redirect: community.general.web_infrastructure.jenkins_build - jenkins_job: - redirect: community.general.web_infrastructure.jenkins_job jenkins_job_facts: tombstone: removal_version: 3.0.0 warning_text: Use community.general.jenkins_job_info instead. 
- jenkins_job_info: - redirect: community.general.web_infrastructure.jenkins_job_info - jenkins_plugin: - redirect: community.general.web_infrastructure.jenkins_plugin - jenkins_script: - redirect: community.general.web_infrastructure.jenkins_script - jira: - redirect: community.general.web_infrastructure.jira katello: tombstone: removal_version: 2.0.0 warning_text: Use the modules from the theforeman.foreman collection instead. - kernel_blacklist: - redirect: community.general.system.kernel_blacklist - keycloak_authentication: - redirect: community.general.identity.keycloak.keycloak_authentication - keycloak_client: - redirect: community.general.identity.keycloak.keycloak_client - keycloak_client_rolemapping: - redirect: community.general.identity.keycloak.keycloak_client_rolemapping - keycloak_clientscope: - redirect: community.general.identity.keycloak.keycloak_clientscope - keycloak_clienttemplate: - redirect: community.general.identity.keycloak.keycloak_clienttemplate - keycloak_group: - redirect: community.general.identity.keycloak.keycloak_group - keycloak_identity_provider: - redirect: community.general.identity.keycloak.keycloak_identity_provider - keycloak_realm: - redirect: community.general.identity.keycloak.keycloak_realm - keycloak_realm_info: - redirect: community.general.identity.keycloak.keycloak_realm_info - keycloak_role: - redirect: community.general.identity.keycloak.keycloak_role - keycloak_user_federation: - redirect: community.general.identity.keycloak.keycloak_user_federation - kibana_plugin: - redirect: community.general.database.misc.kibana_plugin kubevirt_cdi_upload: redirect: community.kubevirt.kubevirt_cdi_upload kubevirt_preset: @@ -620,40 +389,10 @@ plugin_routing: redirect: community.kubevirt.kubevirt_template kubevirt_vm: redirect: community.kubevirt.kubevirt_vm - launchd: - redirect: community.general.system.launchd - layman: - redirect: community.general.packaging.os.layman - lbu: - redirect: community.general.system.lbu 
ldap_attr: tombstone: removal_version: 3.0.0 warning_text: Use community.general.ldap_attrs instead. - ldap_attrs: - redirect: community.general.net_tools.ldap.ldap_attrs - ldap_entry: - redirect: community.general.net_tools.ldap.ldap_entry - ldap_passwd: - redirect: community.general.net_tools.ldap.ldap_passwd - ldap_search: - redirect: community.general.net_tools.ldap.ldap_search - librato_annotation: - redirect: community.general.monitoring.librato_annotation - linode: - redirect: community.general.cloud.linode.linode - linode_v4: - redirect: community.general.cloud.linode.linode_v4 - listen_ports_facts: - redirect: community.general.system.listen_ports_facts - lldp: - redirect: community.general.net_tools.lldp - locale_gen: - redirect: community.general.system.locale_gen - logentries: - redirect: community.general.monitoring.logentries - logentries_msg: - redirect: community.general.notification.logentries_msg logicmonitor: tombstone: removal_version: 1.0.0 @@ -664,86 +403,14 @@ plugin_routing: removal_version: 1.0.0 warning_text: The logicmonitor_facts module is no longer maintained and the API used has been disabled in 2017. 
- logstash_plugin: - redirect: community.general.monitoring.logstash_plugin - lvg: - redirect: community.general.system.lvg - lvol: - redirect: community.general.system.lvol - lxc_container: - redirect: community.general.cloud.lxc.lxc_container - lxca_cmms: - redirect: community.general.remote_management.lxca.lxca_cmms - lxca_nodes: - redirect: community.general.remote_management.lxca.lxca_nodes - lxd_container: - redirect: community.general.cloud.lxd.lxd_container - lxd_profile: - redirect: community.general.cloud.lxd.lxd_profile - lxd_project: - redirect: community.general.cloud.lxd.lxd_project - macports: - redirect: community.general.packaging.os.macports - mail: - redirect: community.general.notification.mail - make: - redirect: community.general.system.make - manageiq_alert_profiles: - redirect: community.general.remote_management.manageiq.manageiq_alert_profiles - manageiq_alerts: - redirect: community.general.remote_management.manageiq.manageiq_alerts - manageiq_group: - redirect: community.general.remote_management.manageiq.manageiq_group - manageiq_policies: - redirect: community.general.remote_management.manageiq.manageiq_policies - manageiq_provider: - redirect: community.general.remote_management.manageiq.manageiq_provider - manageiq_tags: - redirect: community.general.remote_management.manageiq.manageiq_tags - manageiq_tenant: - redirect: community.general.remote_management.manageiq.manageiq_tenant - manageiq_user: - redirect: community.general.remote_management.manageiq.manageiq_user - mas: - redirect: community.general.packaging.os.mas - matrix: - redirect: community.general.notification.matrix - mattermost: - redirect: community.general.notification.mattermost - maven_artifact: - redirect: community.general.packaging.language.maven_artifact - memset_dns_reload: - redirect: community.general.cloud.memset.memset_dns_reload memset_memstore_facts: tombstone: removal_version: 3.0.0 warning_text: Use community.general.memset_memstore_info instead. 
- memset_memstore_info: - redirect: community.general.cloud.memset.memset_memstore_info memset_server_facts: tombstone: removal_version: 3.0.0 warning_text: Use community.general.memset_server_info instead. - memset_server_info: - redirect: community.general.cloud.memset.memset_server_info - memset_zone: - redirect: community.general.cloud.memset.memset_zone - memset_zone_domain: - redirect: community.general.cloud.memset.memset_zone_domain - memset_zone_record: - redirect: community.general.cloud.memset.memset_zone_record - mksysb: - redirect: community.general.system.mksysb - modprobe: - redirect: community.general.system.modprobe - monit: - redirect: community.general.monitoring.monit - mqtt: - redirect: community.general.notification.mqtt - mssql_db: - redirect: community.general.database.mssql.mssql_db - mssql_script: - redirect: community.general.database.mssql.mssql_script na_cdot_aggregate: tombstone: removal_version: 2.0.0 @@ -780,22 +447,10 @@ plugin_routing: tombstone: removal_version: 3.0.0 warning_text: Use netapp.ontap.na_ontap_info instead. - nagios: - redirect: community.general.monitoring.nagios - netcup_dns: - redirect: community.general.net_tools.netcup_dns - newrelic_deployment: - redirect: community.general.monitoring.newrelic_deployment - nexmo: - redirect: community.general.notification.nexmo nginx_status_facts: tombstone: removal_version: 3.0.0 warning_text: Use community.general.nginx_status_info instead. 
- nginx_status_info: - redirect: community.general.web_infrastructure.nginx_status_info - nictagadm: - redirect: community.general.cloud.smartos.nictagadm nios_a_record: redirect: infoblox.nios_modules.nios_a_record nios_aaaa_record: @@ -828,157 +483,61 @@ plugin_routing: redirect: infoblox.nios_modules.nios_txt_record nios_zone: redirect: infoblox.nios_modules.nios_zone - nmcli: - redirect: community.general.net_tools.nmcli - nomad_job: - redirect: community.general.clustering.nomad.nomad_job - nomad_job_info: - redirect: community.general.clustering.nomad.nomad_job_info - nosh: - redirect: community.general.system.nosh - notification.cisco_spark: - redirect: community.general.notification.cisco_webex - npm: - redirect: community.general.packaging.language.npm - nsupdate: - redirect: community.general.net_tools.nsupdate oci_vcn: - redirect: community.general.cloud.oracle.oci_vcn - odbc: - redirect: community.general.database.misc.odbc - office_365_connector_card: - redirect: community.general.notification.office_365_connector_card - ohai: - redirect: community.general.system.ohai - omapi_host: - redirect: community.general.net_tools.omapi_host + deprecation: + removal_version: 13.0.0 + warning_text: Use oracle.oci.oci_network_vcn instead. ome_device_info: redirect: dellemc.openmanage.ome_device_info - one_host: - redirect: community.general.cloud.opennebula.one_host - one_image: - redirect: community.general.cloud.opennebula.one_image one_image_facts: tombstone: removal_version: 3.0.0 warning_text: Use community.general.one_image_info instead. 
- one_image_info: - redirect: community.general.cloud.opennebula.one_image_info - one_service: - redirect: community.general.cloud.opennebula.one_service - one_template: - redirect: community.general.cloud.opennebula.one_template - one_vm: - redirect: community.general.cloud.opennebula.one_vm - oneandone_firewall_policy: - redirect: community.general.cloud.oneandone.oneandone_firewall_policy - oneandone_load_balancer: - redirect: community.general.cloud.oneandone.oneandone_load_balancer - oneandone_monitoring_policy: - redirect: community.general.cloud.oneandone.oneandone_monitoring_policy - oneandone_private_network: - redirect: community.general.cloud.oneandone.oneandone_private_network - oneandone_public_ip: - redirect: community.general.cloud.oneandone.oneandone_public_ip - oneandone_server: - redirect: community.general.cloud.oneandone.oneandone_server onepassword_facts: tombstone: removal_version: 3.0.0 warning_text: Use community.general.onepassword_info instead. - onepassword_info: - redirect: community.general.identity.onepassword_info oneview_datacenter_facts: tombstone: removal_version: 3.0.0 warning_text: Use community.general.oneview_datacenter_info instead. - oneview_datacenter_info: - redirect: community.general.remote_management.oneview.oneview_datacenter_info oneview_enclosure_facts: tombstone: removal_version: 3.0.0 warning_text: Use community.general.oneview_enclosure_info instead. - oneview_enclosure_info: - redirect: community.general.remote_management.oneview.oneview_enclosure_info - oneview_ethernet_network: - redirect: community.general.remote_management.oneview.oneview_ethernet_network oneview_ethernet_network_facts: tombstone: removal_version: 3.0.0 warning_text: Use community.general.oneview_ethernet_network_info instead. 
- oneview_ethernet_network_info: - redirect: community.general.remote_management.oneview.oneview_ethernet_network_info - oneview_fc_network: - redirect: community.general.remote_management.oneview.oneview_fc_network oneview_fc_network_facts: tombstone: removal_version: 3.0.0 warning_text: Use community.general.oneview_fc_network_info instead. - oneview_fc_network_info: - redirect: community.general.remote_management.oneview.oneview_fc_network_info - oneview_fcoe_network: - redirect: community.general.remote_management.oneview.oneview_fcoe_network oneview_fcoe_network_facts: tombstone: removal_version: 3.0.0 warning_text: Use community.general.oneview_fcoe_network_info instead. - oneview_fcoe_network_info: - redirect: community.general.remote_management.oneview.oneview_fcoe_network_info - oneview_logical_interconnect_group: - redirect: community.general.remote_management.oneview.oneview_logical_interconnect_group oneview_logical_interconnect_group_facts: tombstone: removal_version: 3.0.0 warning_text: Use community.general.oneview_logical_interconnect_group_info instead. - oneview_logical_interconnect_group_info: - redirect: community.general.remote_management.oneview.oneview_logical_interconnect_group_info - oneview_network_set: - redirect: community.general.remote_management.oneview.oneview_network_set oneview_network_set_facts: tombstone: removal_version: 3.0.0 warning_text: Use community.general.oneview_network_set_info instead. - oneview_network_set_info: - redirect: community.general.remote_management.oneview.oneview_network_set_info - oneview_san_manager: - redirect: community.general.remote_management.oneview.oneview_san_manager oneview_san_manager_facts: tombstone: removal_version: 3.0.0 warning_text: Use community.general.oneview_san_manager_info instead. 
- oneview_san_manager_info: - redirect: community.general.remote_management.oneview.oneview_san_manager_info online_server_facts: tombstone: removal_version: 3.0.0 warning_text: Use community.general.online_server_info instead. - online_server_info: - redirect: community.general.cloud.online.online_server_info online_user_facts: tombstone: removal_version: 3.0.0 warning_text: Use community.general.online_user_info instead. - online_user_info: - redirect: community.general.cloud.online.online_user_info - open_iscsi: - redirect: community.general.system.open_iscsi - openbsd_pkg: - redirect: community.general.packaging.os.openbsd_pkg - opendj_backendprop: - redirect: community.general.identity.opendj.opendj_backendprop - openwrt_init: - redirect: community.general.system.openwrt_init - opkg: - redirect: community.general.packaging.os.opkg - osx_defaults: - redirect: community.general.system.osx_defaults - ovh_ip_failover: - redirect: community.general.cloud.ovh.ovh_ip_failover - ovh_ip_loadbalancing_backend: - redirect: community.general.cloud.ovh.ovh_ip_loadbalancing_backend - ovh_monthly_billing: - redirect: community.general.cloud.ovh.ovh_monthly_billing ovirt: tombstone: removal_version: 3.0.0 @@ -1079,64 +638,6 @@ plugin_routing: tombstone: removal_version: 3.0.0 warning_text: Use ovirt.ovirt.ovirt_vmpool_info instead. 
- pacemaker_cluster: - redirect: community.general.clustering.pacemaker_cluster - packet_device: - redirect: community.general.cloud.packet.packet_device - packet_ip_subnet: - redirect: community.general.cloud.packet.packet_ip_subnet - packet_project: - redirect: community.general.cloud.packet.packet_project - packet_sshkey: - redirect: community.general.cloud.packet.packet_sshkey - packet_volume: - redirect: community.general.cloud.packet.packet_volume - packet_volume_attachment: - redirect: community.general.cloud.packet.packet_volume_attachment - pacman: - redirect: community.general.packaging.os.pacman - pacman_key: - redirect: community.general.packaging.os.pacman_key - pagerduty: - redirect: community.general.monitoring.pagerduty - pagerduty_alert: - redirect: community.general.monitoring.pagerduty_alert - pagerduty_change: - redirect: community.general.monitoring.pagerduty_change - pagerduty_user: - redirect: community.general.monitoring.pagerduty_user - pam_limits: - redirect: community.general.system.pam_limits - pamd: - redirect: community.general.system.pamd - parted: - redirect: community.general.system.parted - pear: - redirect: community.general.packaging.language.pear - pids: - redirect: community.general.system.pids - pingdom: - redirect: community.general.monitoring.pingdom - pip_package_info: - redirect: community.general.packaging.language.pip_package_info - pipx: - redirect: community.general.packaging.language.pipx - pkg5: - redirect: community.general.packaging.os.pkg5 - pkg5_publisher: - redirect: community.general.packaging.os.pkg5_publisher - pkgin: - redirect: community.general.packaging.os.pkgin - pkgng: - redirect: community.general.packaging.os.pkgng - pkgutil: - redirect: community.general.packaging.os.pkgutil - pmem: - redirect: community.general.storage.pmem.pmem - portage: - redirect: community.general.packaging.os.portage - portinstall: - redirect: community.general.packaging.os.portinstall postgresql_copy: redirect: 
community.postgresql.postgresql_copy postgresql_db: @@ -1181,50 +682,116 @@ plugin_routing: redirect: community.postgresql.postgresql_user postgresql_user_obj_stat_info: redirect: community.postgresql.postgresql_user_obj_stat_info - pritunl_org: - redirect: community.general.net_tools.pritunl.pritunl_org - pritunl_org_info: - redirect: community.general.net_tools.pritunl.pritunl_org_info - pritunl_user: - redirect: community.general.net_tools.pritunl.pritunl_user - pritunl_user_info: - redirect: community.general.net_tools.pritunl.pritunl_user_info profitbricks: - redirect: community.general.cloud.profitbricks.profitbricks + tombstone: + removal_version: 11.0.0 + warning_text: Supporting library is unsupported since 2021. profitbricks_datacenter: - redirect: community.general.cloud.profitbricks.profitbricks_datacenter + tombstone: + removal_version: 11.0.0 + warning_text: Supporting library is unsupported since 2021. profitbricks_nic: - redirect: community.general.cloud.profitbricks.profitbricks_nic + tombstone: + removal_version: 11.0.0 + warning_text: Supporting library is unsupported since 2021. profitbricks_volume: - redirect: community.general.cloud.profitbricks.profitbricks_volume + tombstone: + removal_version: 11.0.0 + warning_text: Supporting library is unsupported since 2021. profitbricks_volume_attachments: - redirect: community.general.cloud.profitbricks.profitbricks_volume_attachments + tombstone: + removal_version: 11.0.0 + warning_text: Supporting library is unsupported since 2021. proxmox: - redirect: community.general.cloud.misc.proxmox + redirect: community.proxmox.proxmox + deprecation: + removal_version: 15.0.0 + warning_text: The proxmox content has been moved to community.proxmox. + proxmox_backup: + redirect: community.proxmox.proxmox_backup + deprecation: + removal_version: 15.0.0 + warning_text: The proxmox content has been moved to community.proxmox. 
+ proxmox_backup_info: + redirect: community.proxmox.proxmox_backup_info + deprecation: + removal_version: 15.0.0 + warning_text: The proxmox content has been moved to community.proxmox. + proxmox_disk: + redirect: community.proxmox.proxmox_disk + deprecation: + removal_version: 15.0.0 + warning_text: The proxmox content has been moved to community.proxmox. proxmox_domain_info: - redirect: community.general.cloud.misc.proxmox_domain_info + redirect: community.proxmox.proxmox_domain_info + deprecation: + removal_version: 15.0.0 + warning_text: The proxmox content has been moved to community.proxmox. proxmox_group_info: - redirect: community.general.cloud.misc.proxmox_group_info + redirect: community.proxmox.proxmox_group_info + deprecation: + removal_version: 15.0.0 + warning_text: The proxmox content has been moved to community.proxmox. proxmox_kvm: - redirect: community.general.cloud.misc.proxmox_kvm + redirect: community.proxmox.proxmox_kvm + deprecation: + removal_version: 15.0.0 + warning_text: The proxmox content has been moved to community.proxmox. proxmox_nic: - redirect: community.general.cloud.misc.proxmox_nic + redirect: community.proxmox.proxmox_nic + deprecation: + removal_version: 15.0.0 + warning_text: The proxmox content has been moved to community.proxmox. + proxmox_node_info: + redirect: community.proxmox.proxmox_node_info + deprecation: + removal_version: 15.0.0 + warning_text: The proxmox content has been moved to community.proxmox. + proxmox_pool: + redirect: community.proxmox.proxmox_pool + deprecation: + removal_version: 15.0.0 + warning_text: The proxmox content has been moved to community.proxmox. + proxmox_pool_member: + redirect: community.proxmox.proxmox_pool_member + deprecation: + removal_version: 15.0.0 + warning_text: The proxmox content has been moved to community.proxmox. 
proxmox_snap: - redirect: community.general.cloud.misc.proxmox_snap + redirect: community.proxmox.proxmox_snap + deprecation: + removal_version: 15.0.0 + warning_text: The proxmox content has been moved to community.proxmox. + proxmox_storage_contents_info: + redirect: community.proxmox.proxmox_storage_contents_info + deprecation: + removal_version: 15.0.0 + warning_text: The proxmox content has been moved to community.proxmox. proxmox_storage_info: - redirect: community.general.cloud.misc.proxmox_storage_info + redirect: community.proxmox.proxmox_storage_info + deprecation: + removal_version: 15.0.0 + warning_text: The proxmox content has been moved to community.proxmox. proxmox_tasks_info: - redirect: community.general.cloud.misc.proxmox_tasks_info + redirect: community.proxmox.proxmox_tasks_info + deprecation: + removal_version: 15.0.0 + warning_text: The proxmox content has been moved to community.proxmox. proxmox_template: - redirect: community.general.cloud.misc.proxmox_template + redirect: community.proxmox.proxmox_template + deprecation: + removal_version: 15.0.0 + warning_text: The proxmox content has been moved to community.proxmox. proxmox_user_info: - redirect: community.general.cloud.misc.proxmox_user_info - pubnub_blocks: - redirect: community.general.cloud.pubnub.pubnub_blocks - pulp_repo: - redirect: community.general.packaging.os.pulp_repo - puppet: - redirect: community.general.system.puppet + redirect: community.proxmox.proxmox_user_info + deprecation: + removal_version: 15.0.0 + warning_text: The proxmox content has been moved to community.proxmox. + proxmox_vm_info: + redirect: community.proxmox.proxmox_vm_info + deprecation: + removal_version: 15.0.0 + warning_text: The proxmox content has been moved to community.proxmox. purefa_facts: tombstone: removal_version: 3.0.0 @@ -1233,210 +800,178 @@ plugin_routing: tombstone: removal_version: 3.0.0 warning_text: Use purestorage.flashblade.purefb_info instead. 
- pushbullet: - redirect: community.general.notification.pushbullet - pushover: - redirect: community.general.notification.pushover python_requirements_facts: tombstone: removal_version: 3.0.0 warning_text: Use community.general.python_requirements_info instead. - python_requirements_info: - redirect: community.general.system.python_requirements_info - rax: - redirect: community.general.cloud.rackspace.rax - rax_cbs: - redirect: community.general.cloud.rackspace.rax_cbs rax_cbs_attachments: - redirect: community.general.cloud.rackspace.rax_cbs_attachments - rax_cdb: - redirect: community.general.cloud.rackspace.rax_cdb + tombstone: + removal_version: 9.0.0 + warning_text: This module relied on the deprecated package pyrax. + rax_cbs: + tombstone: + removal_version: 9.0.0 + warning_text: This module relied on the deprecated package pyrax. rax_cdb_database: - redirect: community.general.cloud.rackspace.rax_cdb_database + tombstone: + removal_version: 9.0.0 + warning_text: This module relied on the deprecated package pyrax. rax_cdb_user: - redirect: community.general.cloud.rackspace.rax_cdb_user - rax_clb: - redirect: community.general.cloud.rackspace.rax_clb + tombstone: + removal_version: 9.0.0 + warning_text: This module relied on the deprecated package pyrax. + rax_cdb: + tombstone: + removal_version: 9.0.0 + warning_text: This module relied on the deprecated package pyrax. rax_clb_nodes: - redirect: community.general.cloud.rackspace.rax_clb_nodes + tombstone: + removal_version: 9.0.0 + warning_text: This module relied on the deprecated package pyrax. rax_clb_ssl: - redirect: community.general.cloud.rackspace.rax_clb_ssl - rax_dns: - redirect: community.general.cloud.rackspace.rax_dns + tombstone: + removal_version: 9.0.0 + warning_text: This module relied on the deprecated package pyrax. + rax_clb: + tombstone: + removal_version: 9.0.0 + warning_text: This module relied on the deprecated package pyrax. 
rax_dns_record: - redirect: community.general.cloud.rackspace.rax_dns_record + tombstone: + removal_version: 9.0.0 + warning_text: This module relied on the deprecated package pyrax. + rax_dns: + tombstone: + removal_version: 9.0.0 + warning_text: This module relied on the deprecated package pyrax. rax_facts: - redirect: community.general.cloud.rackspace.rax_facts - rax_files: - redirect: community.general.cloud.rackspace.rax_files + tombstone: + removal_version: 9.0.0 + warning_text: This module relied on the deprecated package pyrax. rax_files_objects: - redirect: community.general.cloud.rackspace.rax_files_objects + tombstone: + removal_version: 9.0.0 + warning_text: This module relied on the deprecated package pyrax. + rax_files: + tombstone: + removal_version: 9.0.0 + warning_text: This module relied on the deprecated package pyrax. rax_identity: - redirect: community.general.cloud.rackspace.rax_identity + tombstone: + removal_version: 9.0.0 + warning_text: This module relied on the deprecated package pyrax. rax_keypair: - redirect: community.general.cloud.rackspace.rax_keypair + tombstone: + removal_version: 9.0.0 + warning_text: This module relied on the deprecated package pyrax. rax_meta: - redirect: community.general.cloud.rackspace.rax_meta + tombstone: + removal_version: 9.0.0 + warning_text: This module relied on the deprecated package pyrax. rax_mon_alarm: - redirect: community.general.cloud.rackspace.rax_mon_alarm + tombstone: + removal_version: 9.0.0 + warning_text: This module relied on the deprecated package pyrax. + rax: + tombstone: + removal_version: 9.0.0 + warning_text: This module relied on the deprecated package pyrax. rax_mon_check: - redirect: community.general.cloud.rackspace.rax_mon_check + tombstone: + removal_version: 9.0.0 + warning_text: This module relied on the deprecated package pyrax. 
rax_mon_entity: - redirect: community.general.cloud.rackspace.rax_mon_entity - rax_mon_notification: - redirect: community.general.cloud.rackspace.rax_mon_notification + tombstone: + removal_version: 9.0.0 + warning_text: This module relied on the deprecated package pyrax. rax_mon_notification_plan: - redirect: community.general.cloud.rackspace.rax_mon_notification_plan + tombstone: + removal_version: 9.0.0 + warning_text: This module relied on the deprecated package pyrax. + rax_mon_notification: + tombstone: + removal_version: 9.0.0 + warning_text: This module relied on the deprecated package pyrax. rax_network: - redirect: community.general.cloud.rackspace.rax_network + tombstone: + removal_version: 9.0.0 + warning_text: This module relied on the deprecated package pyrax. rax_queue: - redirect: community.general.cloud.rackspace.rax_queue + tombstone: + removal_version: 9.0.0 + warning_text: This module relied on the deprecated package pyrax. rax_scaling_group: - redirect: community.general.cloud.rackspace.rax_scaling_group + tombstone: + removal_version: 9.0.0 + warning_text: This module relied on the deprecated package pyrax. rax_scaling_policy: - redirect: community.general.cloud.rackspace.rax_scaling_policy - read_csv: - redirect: community.general.files.read_csv - redfish_command: - redirect: community.general.remote_management.redfish.redfish_command - redfish_config: - redirect: community.general.remote_management.redfish.redfish_config + tombstone: + removal_version: 9.0.0 + warning_text: This module relied on the deprecated package pyrax. redfish_facts: tombstone: removal_version: 3.0.0 warning_text: Use community.general.redfish_info instead. 
- redfish_info: - redirect: community.general.remote_management.redfish.redfish_info - redhat_subscription: - redirect: community.general.packaging.os.redhat_subscription - redis: - redirect: community.general.database.misc.redis - redis_data: - redirect: community.general.database.misc.redis_data - redis_data_incr: - redirect: community.general.database.misc.redis_data_incr - redis_data_info: - redirect: community.general.database.misc.redis_data_info - redis_info: - redirect: community.general.database.misc.redis_info - rhevm: - redirect: community.general.cloud.misc.rhevm rhn_channel: - redirect: community.general.packaging.os.rhn_channel + tombstone: + removal_version: 10.0.0 + warning_text: RHN is EOL. rhn_register: - redirect: community.general.packaging.os.rhn_register - rhsm_release: - redirect: community.general.packaging.os.rhsm_release - rhsm_repository: - redirect: community.general.packaging.os.rhsm_repository - riak: - redirect: community.general.database.misc.riak - rocketchat: - redirect: community.general.notification.rocketchat - rollbar_deployment: - redirect: community.general.monitoring.rollbar_deployment - rpm_ostree_pkg: - redirect: community.general.packaging.os.rpm_ostree_pkg - rundeck_acl_policy: - redirect: community.general.web_infrastructure.rundeck_acl_policy - rundeck_job_executions_info: - redirect: community.general.web_infrastructure.rundeck_job_executions_info - rundeck_job_run: - redirect: community.general.web_infrastructure.rundeck_job_run - rundeck_project: - redirect: community.general.web_infrastructure.rundeck_project - runit: - redirect: community.general.system.runit - sap_task_list_execute: - redirect: community.general.system.sap_task_list_execute + tombstone: + removal_version: 10.0.0 + warning_text: RHN is EOL. 
sapcar_extract: - redirect: community.general.files.sapcar_extract - say: - redirect: community.general.notification.say - scaleway_compute: - redirect: community.general.cloud.scaleway.scaleway_compute - scaleway_database_backup: - redirect: community.general.cloud.scaleway.scaleway_database_backup + redirect: community.sap_libs.sapcar_extract + sap_task_list_execute: + redirect: community.sap_libs.sap_task_list_execute scaleway_image_facts: tombstone: removal_version: 3.0.0 warning_text: Use community.general.scaleway_image_info instead. - scaleway_image_info: - redirect: community.general.cloud.scaleway.scaleway_image_info - scaleway_ip: - redirect: community.general.cloud.scaleway.scaleway_ip scaleway_ip_facts: tombstone: removal_version: 3.0.0 warning_text: Use community.general.scaleway_ip_info instead. - scaleway_ip_info: - redirect: community.general.cloud.scaleway.scaleway_ip_info - scaleway_lb: - redirect: community.general.cloud.scaleway.scaleway_lb scaleway_organization_facts: tombstone: removal_version: 3.0.0 warning_text: Use community.general.scaleway_organization_info instead. - scaleway_organization_info: - redirect: community.general.cloud.scaleway.scaleway_organization_info - scaleway_private_network: - redirect: community.general.cloud.scaleway.scaleway_private_network - scaleway_security_group: - redirect: community.general.cloud.scaleway.scaleway_security_group scaleway_security_group_facts: tombstone: removal_version: 3.0.0 warning_text: Use community.general.scaleway_security_group_info instead. - scaleway_security_group_info: - redirect: community.general.cloud.scaleway.scaleway_security_group_info - scaleway_security_group_rule: - redirect: community.general.cloud.scaleway.scaleway_security_group_rule scaleway_server_facts: tombstone: removal_version: 3.0.0 warning_text: Use community.general.scaleway_server_info instead. 
- scaleway_server_info: - redirect: community.general.cloud.scaleway.scaleway_server_info scaleway_snapshot_facts: tombstone: removal_version: 3.0.0 warning_text: Use community.general.scaleway_snapshot_info instead. - scaleway_snapshot_info: - redirect: community.general.cloud.scaleway.scaleway_snapshot_info - scaleway_sshkey: - redirect: community.general.cloud.scaleway.scaleway_sshkey - scaleway_user_data: - redirect: community.general.cloud.scaleway.scaleway_user_data - scaleway_volume: - redirect: community.general.cloud.scaleway.scaleway_volume scaleway_volume_facts: tombstone: removal_version: 3.0.0 warning_text: Use community.general.scaleway_volume_info instead. - scaleway_volume_info: - redirect: community.general.cloud.scaleway.scaleway_volume_info - sefcontext: - redirect: community.general.system.sefcontext - selinux_permissive: - redirect: community.general.system.selinux_permissive - selogin: - redirect: community.general.system.selogin - sendgrid: - redirect: community.general.notification.sendgrid sensu_check: - redirect: community.general.monitoring.sensu.sensu_check + deprecation: + removal_version: 13.0.0 + warning_text: Sensu Core and Sensu Enterprise products have been End of Life since 2019/20. sensu_client: - redirect: community.general.monitoring.sensu.sensu_client + deprecation: + removal_version: 13.0.0 + warning_text: Sensu Core and Sensu Enterprise products have been End of Life since 2019/20. sensu_handler: - redirect: community.general.monitoring.sensu.sensu_handler + deprecation: + removal_version: 13.0.0 + warning_text: Sensu Core and Sensu Enterprise products have been End of Life since 2019/20. sensu_silence: - redirect: community.general.monitoring.sensu.sensu_silence + deprecation: + removal_version: 13.0.0 + warning_text: Sensu Core and Sensu Enterprise products have been End of Life since 2019/20. 
sensu_subscription: - redirect: community.general.monitoring.sensu.sensu_subscription - seport: - redirect: community.general.system.seport - serverless: - redirect: community.general.cloud.misc.serverless + deprecation: + removal_version: 13.0.0 + warning_text: Sensu Core and Sensu Enterprise products have been End of Life since 2019/20. sf_account_manager: tombstone: removal_version: 2.0.0 @@ -1457,200 +992,53 @@ plugin_routing: tombstone: removal_version: 2.0.0 warning_text: Use netapp.elementsw.na_elementsw_volume instead. - shutdown: - redirect: community.general.system.shutdown - sl_vm: - redirect: community.general.cloud.softlayer.sl_vm - slack: - redirect: community.general.notification.slack - slackpkg: - redirect: community.general.packaging.os.slackpkg smartos_image_facts: tombstone: removal_version: 3.0.0 warning_text: Use community.general.smartos_image_info instead. - smartos_image_info: - redirect: community.general.cloud.smartos.smartos_image_info - snap: - redirect: community.general.packaging.os.snap - snap_alias: - redirect: community.general.packaging.os.snap_alias - snmp_facts: - redirect: community.general.net_tools.snmp_facts - solaris_zone: - redirect: community.general.system.solaris_zone - sorcery: - redirect: community.general.packaging.os.sorcery - spectrum_device: - redirect: community.general.monitoring.spectrum_device - spectrum_model_attrs: - redirect: community.general.monitoring.spectrum_model_attrs - spotinst_aws_elastigroup: - redirect: community.general.cloud.spotinst.spotinst_aws_elastigroup - ss_3par_cpg: - redirect: community.general.storage.hpe3par.ss_3par_cpg - ssh_config: - redirect: community.general.system.ssh_config stackdriver: - redirect: community.general.monitoring.stackdriver - stacki_host: - redirect: community.general.remote_management.stacki.stacki_host - statsd: - redirect: community.general.monitoring.statsd - statusio_maintenance: - redirect: community.general.monitoring.statusio_maintenance - sudoers: - 
redirect: community.general.system.sudoers - supervisorctl: - redirect: community.general.web_infrastructure.supervisorctl - svc: - redirect: community.general.system.svc - svr4pkg: - redirect: community.general.packaging.os.svr4pkg - swdepot: - redirect: community.general.packaging.os.swdepot - swupd: - redirect: community.general.packaging.os.swupd - syslogger: - redirect: community.general.notification.syslogger - syspatch: - redirect: community.general.system.syspatch - sysrc: - redirect: community.general.system.sysrc - sysupgrade: - redirect: community.general.system.sysupgrade - taiga_issue: - redirect: community.general.web_infrastructure.taiga_issue - telegram: - redirect: community.general.notification.telegram - terraform: - redirect: community.general.cloud.misc.terraform - timezone: - redirect: community.general.system.timezone - twilio: - redirect: community.general.notification.twilio + tombstone: + removal_version: 9.0.0 + warning_text: This module relied on HTTPS APIs that do not exist anymore, + and any new development in the direction of providing an alternative should + happen in the context of the google.cloud collection. 
typetalk: - redirect: community.general.notification.typetalk - udm_dns_record: - redirect: community.general.cloud.univention.udm_dns_record - udm_dns_zone: - redirect: community.general.cloud.univention.udm_dns_zone - udm_group: - redirect: community.general.cloud.univention.udm_group - udm_share: - redirect: community.general.cloud.univention.udm_share - udm_user: - redirect: community.general.cloud.univention.udm_user - ufw: - redirect: community.general.system.ufw - uptimerobot: - redirect: community.general.monitoring.uptimerobot - urpmi: - redirect: community.general.packaging.os.urpmi - utm_aaa_group: - redirect: community.general.web_infrastructure.sophos_utm.utm_aaa_group - utm_aaa_group_info: - redirect: community.general.web_infrastructure.sophos_utm.utm_aaa_group_info - utm_ca_host_key_cert: - redirect: community.general.web_infrastructure.sophos_utm.utm_ca_host_key_cert - utm_ca_host_key_cert_info: - redirect: community.general.web_infrastructure.sophos_utm.utm_ca_host_key_cert_info - utm_dns_host: - redirect: community.general.web_infrastructure.sophos_utm.utm_dns_host - utm_network_interface_address: - redirect: community.general.web_infrastructure.sophos_utm.utm_network_interface_address - utm_network_interface_address_info: - redirect: community.general.web_infrastructure.sophos_utm.utm_network_interface_address_info - utm_proxy_auth_profile: - redirect: community.general.web_infrastructure.sophos_utm.utm_proxy_auth_profile - utm_proxy_exception: - redirect: community.general.web_infrastructure.sophos_utm.utm_proxy_exception - utm_proxy_frontend: - redirect: community.general.web_infrastructure.sophos_utm.utm_proxy_frontend - utm_proxy_frontend_info: - redirect: community.general.web_infrastructure.sophos_utm.utm_proxy_frontend_info - utm_proxy_location: - redirect: community.general.web_infrastructure.sophos_utm.utm_proxy_location - utm_proxy_location_info: - redirect: community.general.web_infrastructure.sophos_utm.utm_proxy_location_info - vdo: 
- redirect: community.general.system.vdo - vertica_configuration: - redirect: community.general.database.vertica.vertica_configuration + deprecation: + removal_version: 13.0.0 + warning_text: The typetalk service will be discontinued on Dec 2025. vertica_facts: tombstone: removal_version: 3.0.0 warning_text: Use community.general.vertica_info instead. - vertica_info: - redirect: community.general.database.vertica.vertica_info - vertica_role: - redirect: community.general.database.vertica.vertica_role - vertica_schema: - redirect: community.general.database.vertica.vertica_schema - vertica_user: - redirect: community.general.database.vertica.vertica_user - vexata_eg: - redirect: community.general.storage.vexata.vexata_eg - vexata_volume: - redirect: community.general.storage.vexata.vexata_volume - vmadm: - redirect: community.general.cloud.smartos.vmadm - wakeonlan: - redirect: community.general.remote_management.wakeonlan webfaction_app: - redirect: community.general.cloud.webfaction.webfaction_app + tombstone: + removal_version: 9.0.0 + warning_text: This module relied on HTTPS APIs that do not exist anymore and + there is no clear path to update. webfaction_db: - redirect: community.general.cloud.webfaction.webfaction_db + tombstone: + removal_version: 9.0.0 + warning_text: This module relied on HTTPS APIs that do not exist anymore and + there is no clear path to update. webfaction_domain: - redirect: community.general.cloud.webfaction.webfaction_domain + tombstone: + removal_version: 9.0.0 + warning_text: This module relied on HTTPS APIs that do not exist anymore and + there is no clear path to update. webfaction_mailbox: - redirect: community.general.cloud.webfaction.webfaction_mailbox + tombstone: + removal_version: 9.0.0 + warning_text: This module relied on HTTPS APIs that do not exist anymore and + there is no clear path to update. 
webfaction_site: - redirect: community.general.cloud.webfaction.webfaction_site - xattr: - redirect: community.general.files.xattr - xbps: - redirect: community.general.packaging.os.xbps - xcc_redfish_command: - redirect: community.general.remote_management.lenovoxcc.xcc_redfish_command - xenserver_facts: - redirect: community.general.cloud.misc.xenserver_facts - xenserver_guest: - redirect: community.general.cloud.xenserver.xenserver_guest + tombstone: + removal_version: 9.0.0 + warning_text: This module relied on HTTPS APIs that do not exist anymore and + there is no clear path to update. xenserver_guest_facts: tombstone: removal_version: 3.0.0 warning_text: Use community.general.xenserver_guest_info instead. - xenserver_guest_info: - redirect: community.general.cloud.xenserver.xenserver_guest_info - xenserver_guest_powerstate: - redirect: community.general.cloud.xenserver.xenserver_guest_powerstate - xfconf: - redirect: community.general.system.xfconf - xfconf_info: - redirect: community.general.system.xfconf_info - xfs_quota: - redirect: community.general.system.xfs_quota - xml: - redirect: community.general.files.xml - yarn: - redirect: community.general.packaging.language.yarn - yum_versionlock: - redirect: community.general.packaging.os.yum_versionlock - zfs: - redirect: community.general.storage.zfs.zfs - zfs_delegate_admin: - redirect: community.general.storage.zfs.zfs_delegate_admin - zfs_facts: - redirect: community.general.storage.zfs.zfs_facts - znode: - redirect: community.general.clustering.znode - zpool_facts: - redirect: community.general.storage.zfs.zpool_facts - zypper: - redirect: community.general.packaging.os.zypper - zypper_repository: - redirect: community.general.packaging.os.zypper_repository doc_fragments: _gcp: redirect: community.google._gcp @@ -1664,8 +1052,46 @@ plugin_routing: redirect: community.kubevirt.kubevirt_vm_options nios: redirect: infoblox.nios_modules.nios + oracle: + deprecation: + removal_version: 13.0.0 + warning_text: 
Code is unmaintained here and official Oracle collection is available for a number of years. + oracle_creatable_resource: + deprecation: + removal_version: 13.0.0 + warning_text: Code is unmaintained here and official Oracle collection is available for a number of years. + oracle_display_name_option: + deprecation: + removal_version: 13.0.0 + warning_text: Code is unmaintained here and official Oracle collection is available for a number of years. + oracle_name_option: + deprecation: + removal_version: 13.0.0 + warning_text: Code is unmaintained here and official Oracle collection is available for a number of years. + oracle_tags: + deprecation: + removal_version: 13.0.0 + warning_text: Code is unmaintained here and official Oracle collection is available for a number of years. + oracle_wait_options: + deprecation: + removal_version: 13.0.0 + warning_text: Code is unmaintained here and official Oracle collection is available for a number of years. postgresql: redirect: community.postgresql.postgresql + proxmox: + redirect: community.proxmox.proxmox + deprecation: + removal_version: 15.0.0 + warning_text: The proxmox content has been moved to community.proxmox. + purestorage: + tombstone: + removal_version: 12.0.0 + warning_text: The modules for purestorage were removed in community.general 3.0.0, this document fragment was left behind. + rackspace: + tombstone: + removal_version: 9.0.0 + warning_text: This doc fragment was used by rax modules, that relied on the deprecated + package pyrax. module_utils: docker.common: redirect: community.docker.common @@ -1683,37 +1109,45 @@ plugin_routing: redirect: community.kubevirt.kubevirt net_tools.nios.api: redirect: infoblox.nios_modules.api + oci_utils: + deprecation: + removal_version: 13.0.0 + warning_text: Code is unmaintained here and official Oracle collection is available for a number of years. 
postgresql: redirect: community.postgresql.postgresql + proxmox: + redirect: community.proxmox.proxmox + deprecation: + removal_version: 15.0.0 + warning_text: The proxmox content has been moved to community.proxmox. + pure: + tombstone: + removal_version: 12.0.0 + warning_text: The modules for purestorage were removed in community.general 3.0.0, this module util was left behind. + rax: + tombstone: + removal_version: 9.0.0 + warning_text: This module util relied on the deprecated package pyrax. remote_management.dellemc.dellemc_idrac: redirect: dellemc.openmanage.dellemc_idrac remote_management.dellemc.ome: redirect: dellemc.openmanage.ome - callback: - actionable: - tombstone: - removal_version: 2.0.0 - warning_text: Use the 'default' callback plugin with 'display_skipped_hosts - = no' and 'display_ok_hosts = no' options. - full_skip: - tombstone: - removal_version: 2.0.0 - warning_text: Use the 'default' callback plugin with 'display_skipped_hosts - = no' option. - osx_say: - redirect: community.general.say - stderr: - tombstone: - removal_version: 2.0.0 - warning_text: Use the 'default' callback plugin with 'display_failed_stderr - = yes' option. inventory: docker_machine: redirect: community.docker.docker_machine docker_swarm: redirect: community.docker.docker_swarm + proxmox: + redirect: community.proxmox.proxmox + deprecation: + removal_version: 15.0.0 + warning_text: The proxmox content has been moved to community.proxmox. kubevirt: redirect: community.kubevirt.kubevirt + stackpath_compute: + tombstone: + removal_version: 11.0.0 + warning_text: The company and the service were sunset in June 2024. filter: path_join: # The ansible.builtin.path_join filter has been added in ansible-base 2.10. @@ -1724,8 +1158,3 @@ plugin_routing: # for Ansible 2.9 or earlier. Now we only will have the redirect until we # eventually will deprecate and then remove it. 
redirect: ansible.builtin.path_join - action: - iptables_state: - redirect: community.general.system.iptables_state - shutdown: - redirect: community.general.system.shutdown diff --git a/noxfile.py b/noxfile.py new file mode 100644 index 0000000000..9b2f92a9e1 --- /dev/null +++ b/noxfile.py @@ -0,0 +1,38 @@ +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later +# SPDX-FileCopyrightText: 2025 Felix Fontein + +# /// script +# dependencies = ["nox>=2025.02.09", "antsibull-nox"] +# /// + +import sys + +import nox + + +try: + import antsibull_nox +except ImportError: + print("You need to install antsibull-nox in the same Python environment as nox.") + sys.exit(1) + + +antsibull_nox.load_antsibull_nox_toml() + + +@nox.session(name="aliases", python=False, default=True) +def aliases(session: nox.Session) -> None: + session.run("python", "tests/sanity/extra/aliases.py") + + +@nox.session(name="botmeta", default=True) +def botmeta(session: nox.Session) -> None: + session.install("PyYAML", "voluptuous") + session.run("python", "tests/sanity/extra/botmeta.py") + + +# Allow to run the noxfile with `python noxfile.py`, `pipx run noxfile.py`, or similar. 
+# Requires nox >= 2025.02.09 +if __name__ == "__main__": + nox.main() diff --git a/plugins/action/system/iptables_state.py b/plugins/action/iptables_state.py similarity index 72% rename from plugins/action/system/iptables_state.py rename to plugins/action/iptables_state.py index b8ae1a5dea..dd6724476f 100644 --- a/plugins/action/system/iptables_state.py +++ b/plugins/action/iptables_state.py @@ -1,9 +1,8 @@ -# -*- coding: utf-8 -*- -# Copyright: (c) 2020, quidame -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# Copyright (c) 2020, quidame +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations import time @@ -21,29 +20,37 @@ class ActionModule(ActionBase): _VALID_ARGS = frozenset(('path', 'state', 'table', 'noflush', 'counters', 'modprobe', 'ip_version', 'wait')) DEFAULT_SUDOABLE = True - MSG_ERROR__ASYNC_AND_POLL_NOT_ZERO = ( - "This module doesn't support async>0 and poll>0 when its 'state' param " - "is set to 'restored'. To enable its rollback feature (that needs the " - "module to run asynchronously on the remote), please set task attribute " - "'poll' (=%s) to 0, and 'async' (=%s) to a value >2 and not greater than " - "'ansible_timeout' (=%s) (recommended).") - MSG_WARNING__NO_ASYNC_IS_NO_ROLLBACK = ( - "Attempts to restore iptables state without rollback in case of mistake " - "may lead the ansible controller to loose access to the hosts and never " - "regain it before fixing firewall rules through a serial console, or any " - "other way except SSH. 
Please set task attribute 'poll' (=%s) to 0, and " - "'async' (=%s) to a value >2 and not greater than 'ansible_timeout' (=%s) " - "(recommended).") - MSG_WARNING__ASYNC_GREATER_THAN_TIMEOUT = ( - "You attempt to restore iptables state with rollback in case of mistake, " - "but with settings that will lead this rollback to happen AFTER that the " - "controller will reach its own timeout. Please set task attribute 'poll' " - "(=%s) to 0, and 'async' (=%s) to a value >2 and not greater than " - "'ansible_timeout' (=%s) (recommended).") + @staticmethod + def msg_error__async_and_poll_not_zero(task_poll, task_async, max_timeout): + return ( + "This module doesn't support async>0 and poll>0 when its 'state' param " + "is set to 'restored'. To enable its rollback feature (that needs the " + "module to run asynchronously on the remote), please set task attribute " + f"'poll' (={task_poll}) to 0, and 'async' (={task_async}) to a value >2 and not greater than " + f"'ansible_timeout' (={max_timeout}) (recommended).") + + @staticmethod + def msg_warning__no_async_is_no_rollback(task_poll, task_async, max_timeout): + return ( + "Attempts to restore iptables state without rollback in case of mistake " + "may lead the ansible controller to loose access to the hosts and never " + "regain it before fixing firewall rules through a serial console, or any " + f"other way except SSH. Please set task attribute 'poll' (={task_poll}) to 0, and " + f"'async' (={task_async}) to a value >2 and not greater than 'ansible_timeout' (={max_timeout}) " + "(recommended).") + + @staticmethod + def msg_warning__async_greater_than_timeout(task_poll, task_async, max_timeout): + return ( + "You attempt to restore iptables state with rollback in case of mistake, " + "but with settings that will lead this rollback to happen AFTER that the " + "controller will reach its own timeout. 
Please set task attribute 'poll' " + f"(={task_poll}) to 0, and 'async' (={task_async}) to a value >2 and not greater than " + f"'ansible_timeout' (={max_timeout}) (recommended).") def _async_result(self, async_status_args, task_vars, timeout): ''' - Retrieve results of the asynchonous task, and display them in place of + Retrieve results of the asynchronous task, and display them in place of the async wrapper results (those with the ansible_job_id key). ''' async_status = self._task.copy() @@ -87,21 +94,25 @@ class ActionModule(ActionBase): max_timeout = self._connection._play_context.timeout module_args = self._task.args + async_status_args = {} + starter_cmd = None + confirm_cmd = None + if module_args.get('state', None) == 'restored': if not wrap_async: if not check_mode: - display.warning(self.MSG_WARNING__NO_ASYNC_IS_NO_ROLLBACK % ( + display.warning(self.msg_error__async_and_poll_not_zero( task_poll, task_async, max_timeout)) elif task_poll: - raise AnsibleActionFail(self.MSG_ERROR__ASYNC_AND_POLL_NOT_ZERO % ( + raise AnsibleActionFail(self.msg_warning__no_async_is_no_rollback( task_poll, task_async, max_timeout)) else: if task_async > max_timeout and not check_mode: - display.warning(self.MSG_WARNING__ASYNC_GREATER_THAN_TIMEOUT % ( + display.warning(self.msg_warning__async_greater_than_timeout( task_poll, task_async, max_timeout)) @@ -114,10 +125,10 @@ class ActionModule(ActionBase): # remote and local sides (if not the same, make the loop # longer on the controller); and set a backup file path. module_args['_timeout'] = task_async - module_args['_back'] = '%s/iptables.state' % async_dir + module_args['_back'] = f'{async_dir}/iptables.state' async_status_args = dict(mode='status') - confirm_cmd = 'rm -f %s' % module_args['_back'] - starter_cmd = 'touch %s.starter' % module_args['_back'] + confirm_cmd = f"rm -f {module_args['_back']}" + starter_cmd = f"touch {module_args['_back']}.starter" remaining_time = max(task_async, max_timeout) # do work! 
diff --git a/plugins/action/system/shutdown.py b/plugins/action/shutdown.py similarity index 60% rename from plugins/action/system/shutdown.py rename to plugins/action/shutdown.py index 19813b0847..d2a9d3c2b7 100644 --- a/plugins/action/system/shutdown.py +++ b/plugins/action/shutdown.py @@ -1,11 +1,11 @@ -# -*- coding: utf-8 -*- -# Copyright: (c) 2020, Amin Vakil -# Copyright: (c) 2016-2018, Matt Davis -# Copyright: (c) 2018, Sam Doran -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# Copyright (c) 2020, Amin Vakil +# Copyright (c) 2016-2018, Matt Davis +# Copyright (c) 2018, Sam Doran +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type from ansible.errors import AnsibleError, AnsibleConnectionFailure from ansible.module_utils.common.text.converters import to_native, to_text @@ -16,6 +16,10 @@ from ansible.utils.display import Display display = Display() +def fmt(mapping, key): + return to_native(mapping[key]).strip() + + class TimedOutException(Exception): pass @@ -43,7 +47,7 @@ class ActionModule(ActionBase): SHUTDOWN_COMMAND_ARGS = { 'alpine': '', 'void': '-h +{delay_min} "{message}"', - 'freebsd': '-h +{delay_sec}s "{message}"', + 'freebsd': '-p +{delay_sec}s "{message}"', 'linux': DEFAULT_SHUTDOWN_COMMAND_ARGS, 'macosx': '-h +{delay_min} "{message}"', 'openbsd': '-h +{delay_min} "{message}"', @@ -79,35 +83,41 @@ class ActionModule(ActionBase): getattr(self, default_value)))) return value - def get_shutdown_command_args(self, distribution): - args = self._get_value_from_facts('SHUTDOWN_COMMAND_ARGS', distribution, 'DEFAULT_SHUTDOWN_COMMAND_ARGS') - # Convert seconds to minutes. If less that 60, set it to 0. 
- delay_sec = self.delay - shutdown_message = self._task.args.get('msg', self.DEFAULT_SHUTDOWN_MESSAGE) - return args.format(delay_sec=delay_sec, delay_min=delay_sec // 60, message=shutdown_message) - def get_distribution(self, task_vars): # FIXME: only execute the module if we don't already have the facts we need distribution = {} - display.debug('{action}: running setup module to get distribution'.format(action=self._task.action)) + display.debug(f'{self._task.action}: running setup module to get distribution') module_output = self._execute_module( task_vars=task_vars, module_name='ansible.legacy.setup', module_args={'gather_subset': 'min'}) try: if module_output.get('failed', False): - raise AnsibleError('Failed to determine system distribution. {0}, {1}'.format( - to_native(module_output['module_stdout']).strip(), - to_native(module_output['module_stderr']).strip())) + raise AnsibleError(f"Failed to determine system distribution. {fmt(module_output, 'module_stdout')}, {fmt(module_output, 'module_stderr')}") distribution['name'] = module_output['ansible_facts']['ansible_distribution'].lower() - distribution['version'] = to_text(module_output['ansible_facts']['ansible_distribution_version'].split('.')[0]) + distribution['version'] = to_text( + module_output['ansible_facts']['ansible_distribution_version'].split('.')[0]) distribution['family'] = to_text(module_output['ansible_facts']['ansible_os_family'].lower()) - display.debug("{action}: distribution: {dist}".format(action=self._task.action, dist=distribution)) + display.debug(f"{self._task.action}: distribution: {distribution}") return distribution except KeyError as ke: - raise AnsibleError('Failed to get distribution information. Missing "{0}" in output.'.format(ke.args[0])) + raise AnsibleError(f'Failed to get distribution information. 
Missing "{ke.args[0]}" in output.') def get_shutdown_command(self, task_vars, distribution): + def find_command(command, find_search_paths): + display.debug(f'{self._task.action}: running find module looking in {find_search_paths} to get path for "{command}"') + find_result = self._execute_module( + task_vars=task_vars, + # prevent collection search by calling with ansible.legacy (still allows library/ override of find) + module_name='ansible.legacy.find', + module_args={ + 'paths': find_search_paths, + 'patterns': [command], + 'file_type': 'any' + } + ) + return [x['path'] for x in find_result['files']] + shutdown_bin = self._get_value_from_facts('SHUTDOWN_COMMANDS', distribution, 'DEFAULT_SHUTDOWN_COMMAND') default_search_paths = ['/sbin', '/usr/sbin', '/usr/local/sbin'] search_paths = self._task.args.get('search_paths', default_search_paths) @@ -117,62 +127,61 @@ class ActionModule(ActionBase): if is_string(search_paths): search_paths = [search_paths] - # Error if we didn't get a list - err_msg = "'search_paths' must be a string or flat list of strings, got {0}" try: incorrect_type = any(not is_string(x) for x in search_paths) if not isinstance(search_paths, list) or incorrect_type: raise TypeError except TypeError: - raise AnsibleError(err_msg.format(search_paths)) + # Error if we didn't get a list + err_msg = f"'search_paths' must be a string or flat list of strings, got {search_paths}" + raise AnsibleError(err_msg) - display.debug('{action}: running find module looking in {paths} to get path for "{command}"'.format( - action=self._task.action, - command=shutdown_bin, - paths=search_paths)) - find_result = self._execute_module( - task_vars=task_vars, - # prevent collection search by calling with ansible.legacy (still allows library/ override of find) - module_name='ansible.legacy.find', - module_args={ - 'paths': search_paths, - 'patterns': [shutdown_bin], - 'file_type': 'any' - } - ) + full_path = find_command(shutdown_bin, search_paths) # find the path to 
the shutdown command + if not full_path: # if we could not find the shutdown command - full_path = [x['path'] for x in find_result['files']] - if not full_path: - raise AnsibleError('Unable to find command "{0}" in search paths: {1}'.format(shutdown_bin, search_paths)) - self._shutdown_command = full_path[0] - return self._shutdown_command + # tell the user we will try with systemd + display.vvv(f'Unable to find command "{shutdown_bin}" in search paths: {search_paths}, will attempt a shutdown using systemd directly.') + systemctl_search_paths = ['/bin', '/usr/bin'] + full_path = find_command('systemctl', systemctl_search_paths) # find the path to the systemctl command + if not full_path: # if we couldn't find systemctl + raise AnsibleError( + f'Could not find command "{shutdown_bin}" in search paths: {search_paths} or systemctl' + f' command in search paths: {systemctl_search_paths}, unable to shutdown.') # we give up here + else: + return f"{full_path[0]} poweroff" # done, since we cannot use args with systemd shutdown + + # systemd case taken care of, here we add args to the command + args = self._get_value_from_facts('SHUTDOWN_COMMAND_ARGS', distribution, 'DEFAULT_SHUTDOWN_COMMAND_ARGS') + # Convert seconds to minutes. If less that 60, set it to 0. 
+ delay_sec = self.delay + shutdown_message = self._task.args.get('msg', self.DEFAULT_SHUTDOWN_MESSAGE) + + af = args.format(delay_sec=delay_sec, delay_min=delay_sec // 60, message=shutdown_message) + return f'{full_path[0]} {af}' def perform_shutdown(self, task_vars, distribution): result = {} shutdown_result = {} - shutdown_command = self.get_shutdown_command(task_vars, distribution) - shutdown_command_args = self.get_shutdown_command_args(distribution) - shutdown_command_exec = '{0} {1}'.format(shutdown_command, shutdown_command_args) + shutdown_command_exec = self.get_shutdown_command(task_vars, distribution) self.cleanup(force=True) try: - display.vvv("{action}: shutting down server...".format(action=self._task.action)) - display.debug("{action}: shutting down server with command '{command}'".format(action=self._task.action, command=shutdown_command_exec)) + display.vvv(f"{self._task.action}: shutting down server...") + display.debug(f"{self._task.action}: shutting down server with command '{shutdown_command_exec}'") if self._play_context.check_mode: shutdown_result['rc'] = 0 else: shutdown_result = self._low_level_execute_command(shutdown_command_exec, sudoable=self.DEFAULT_SUDOABLE) except AnsibleConnectionFailure as e: # If the connection is closed too quickly due to the system being shutdown, carry on - display.debug('{action}: AnsibleConnectionFailure caught and handled: {error}'.format(action=self._task.action, error=to_text(e))) + display.debug( + f'{self._task.action}: AnsibleConnectionFailure caught and handled: {e}') shutdown_result['rc'] = 0 if shutdown_result['rc'] != 0: result['failed'] = True result['shutdown'] = False - result['msg'] = "Shutdown command failed. Error was {stdout}, {stderr}".format( - stdout=to_native(shutdown_result['stdout'].strip()), - stderr=to_native(shutdown_result['stderr'].strip())) + result['msg'] = f"Shutdown command failed. 
Error was {fmt(shutdown_result, 'stdout')}, {fmt(shutdown_result, 'stderr')}" return result result['failed'] = False @@ -185,7 +194,7 @@ class ActionModule(ActionBase): # If running with local connection, fail so we don't shutdown ourself if self._connection.transport == 'local' and (not self._play_context.check_mode): - msg = 'Running {0} with local connection would shutdown the control node.'.format(self._task.action) + msg = f'Running {self._task.action} with local connection would shutdown the control node.' return {'changed': False, 'elapsed': 0, 'shutdown': False, 'failed': True, 'msg': msg} if task_vars is None: diff --git a/plugins/become/doas.py b/plugins/become/doas.py index 7cf4a79c7b..84efe31ac4 100644 --- a/plugins/become/doas.py +++ b/plugins/become/doas.py @@ -1,83 +1,91 @@ -# -*- coding: utf-8 -*- -# Copyright: (c) 2018, Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +# Copyright (c) 2018, Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later +from __future__ import annotations -DOCUMENTATION = ''' - name: doas - short_description: Do As user +DOCUMENTATION = r""" +name: doas +short_description: Do As user +description: + - This become plugins allows your remote/login user to execute commands as another user using the C(doas) utility. +author: Ansible Core Team +options: + become_user: + description: User you 'become' to execute the task. + type: string + ini: + - section: privilege_escalation + key: become_user + - section: doas_become_plugin + key: user + vars: + - name: ansible_become_user + - name: ansible_doas_user + env: + - name: ANSIBLE_BECOME_USER + - name: ANSIBLE_DOAS_USER + become_exe: + description: C(doas) executable. 
+ type: string + default: doas + ini: + - section: privilege_escalation + key: become_exe + - section: doas_become_plugin + key: executable + vars: + - name: ansible_become_exe + - name: ansible_doas_exe + env: + - name: ANSIBLE_BECOME_EXE + - name: ANSIBLE_DOAS_EXE + become_flags: + description: Options to pass to C(doas). + type: string + default: '' + ini: + - section: privilege_escalation + key: become_flags + - section: doas_become_plugin + key: flags + vars: + - name: ansible_become_flags + - name: ansible_doas_flags + env: + - name: ANSIBLE_BECOME_FLAGS + - name: ANSIBLE_DOAS_FLAGS + become_pass: + description: Password for C(doas) prompt. + type: string + required: false + vars: + - name: ansible_become_password + - name: ansible_become_pass + - name: ansible_doas_pass + env: + - name: ANSIBLE_BECOME_PASS + - name: ANSIBLE_DOAS_PASS + ini: + - section: doas_become_plugin + key: password + prompt_l10n: description: - - This become plugins allows your remote/login user to execute commands as another user via the doas utility. 
- author: Ansible Core Team - options: - become_user: - description: User you 'become' to execute the task - ini: - - section: privilege_escalation - key: become_user - - section: doas_become_plugin - key: user - vars: - - name: ansible_become_user - - name: ansible_doas_user - env: - - name: ANSIBLE_BECOME_USER - - name: ANSIBLE_DOAS_USER - become_exe: - description: Doas executable - default: doas - ini: - - section: privilege_escalation - key: become_exe - - section: doas_become_plugin - key: executable - vars: - - name: ansible_become_exe - - name: ansible_doas_exe - env: - - name: ANSIBLE_BECOME_EXE - - name: ANSIBLE_DOAS_EXE - become_flags: - description: Options to pass to doas - default: '' - ini: - - section: privilege_escalation - key: become_flags - - section: doas_become_plugin - key: flags - vars: - - name: ansible_become_flags - - name: ansible_doas_flags - env: - - name: ANSIBLE_BECOME_FLAGS - - name: ANSIBLE_DOAS_FLAGS - become_pass: - description: password for doas prompt - required: False - vars: - - name: ansible_become_password - - name: ansible_become_pass - - name: ansible_doas_pass - env: - - name: ANSIBLE_BECOME_PASS - - name: ANSIBLE_DOAS_PASS - ini: - - section: doas_become_plugin - key: password - prompt_l10n: - description: - - List of localized strings to match for prompt detection - - If empty we'll use the built in one - default: [] - ini: - - section: doas_become_plugin - key: localized_prompts - vars: - - name: ansible_doas_prompt_l10n - env: - - name: ANSIBLE_DOAS_PROMPT_L10N -''' + - List of localized strings to match for prompt detection. + - If empty the plugin uses the built-in one. + type: list + elements: string + default: [] + ini: + - section: doas_become_plugin + key: localized_prompts + vars: + - name: ansible_doas_prompt_l10n + env: + - name: ANSIBLE_DOAS_PROMPT_L10N +notes: + - This become plugin does not work when connection pipelining is enabled. With ansible-core 2.19+, using it automatically + disables pipelining. 
On ansible-core 2.18 and before, pipelining must explicitly be disabled by the user. +""" import re @@ -93,6 +101,10 @@ class BecomeModule(BecomeBase): fail = ('Permission denied',) missing = ('Authorization required',) + # See https://github.com/ansible-collections/community.general/issues/9977, + # https://github.com/ansible/ansible/pull/78111 + pipelining = False + def check_password_prompt(self, b_output): ''' checks if the expected password prompt exists in b_output ''' @@ -118,9 +130,9 @@ class BecomeModule(BecomeBase): flags += ' -n' become_user = self.get_option('become_user') - user = '-u %s' % (become_user) if become_user else '' + user = f'-u {become_user}' if become_user else '' success_cmd = self._build_success_command(cmd, shell, noexe=True) executable = getattr(shell, 'executable', shell.SHELL_FAMILY) - return '%s %s %s %s -c %s' % (become_exe, flags, user, executable, success_cmd) + return f'{become_exe} {flags} {user} {executable} -c {success_cmd}' diff --git a/plugins/become/dzdo.py b/plugins/become/dzdo.py index 1aef8edb69..dad05eb34e 100644 --- a/plugins/become/dzdo.py +++ b/plugins/become/dzdo.py @@ -1,70 +1,74 @@ -# -*- coding: utf-8 -*- -# Copyright: (c) 2018, Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +# Copyright (c) 2018, Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later +from __future__ import annotations -DOCUMENTATION = ''' - name: dzdo - short_description: Centrify's Direct Authorize - description: - - This become plugins allows your remote/login user to execute commands as another user via the dzdo utility. 
- author: Ansible Core Team - options: - become_user: - description: User you 'become' to execute the task - ini: - - section: privilege_escalation - key: become_user - - section: dzdo_become_plugin - key: user - vars: - - name: ansible_become_user - - name: ansible_dzdo_user - env: - - name: ANSIBLE_BECOME_USER - - name: ANSIBLE_DZDO_USER - become_exe: - description: Dzdo executable - default: dzdo - ini: - - section: privilege_escalation - key: become_exe - - section: dzdo_become_plugin - key: executable - vars: - - name: ansible_become_exe - - name: ansible_dzdo_exe - env: - - name: ANSIBLE_BECOME_EXE - - name: ANSIBLE_DZDO_EXE - become_flags: - description: Options to pass to dzdo - default: -H -S -n - ini: - - section: privilege_escalation - key: become_flags - - section: dzdo_become_plugin - key: flags - vars: - - name: ansible_become_flags - - name: ansible_dzdo_flags - env: - - name: ANSIBLE_BECOME_FLAGS - - name: ANSIBLE_DZDO_FLAGS - become_pass: - description: Options to pass to dzdo - required: False - vars: - - name: ansible_become_password - - name: ansible_become_pass - - name: ansible_dzdo_pass - env: - - name: ANSIBLE_BECOME_PASS - - name: ANSIBLE_DZDO_PASS - ini: - - section: dzdo_become_plugin - key: password -''' +DOCUMENTATION = r""" +name: dzdo +short_description: Centrify's Direct Authorize +description: + - This become plugins allows your remote/login user to execute commands as another user using the C(dzdo) utility. +author: Ansible Core Team +options: + become_user: + description: User you 'become' to execute the task. + type: string + ini: + - section: privilege_escalation + key: become_user + - section: dzdo_become_plugin + key: user + vars: + - name: ansible_become_user + - name: ansible_dzdo_user + env: + - name: ANSIBLE_BECOME_USER + - name: ANSIBLE_DZDO_USER + become_exe: + description: C(dzdo) executable. 
+ type: string + default: dzdo + ini: + - section: privilege_escalation + key: become_exe + - section: dzdo_become_plugin + key: executable + vars: + - name: ansible_become_exe + - name: ansible_dzdo_exe + env: + - name: ANSIBLE_BECOME_EXE + - name: ANSIBLE_DZDO_EXE + become_flags: + description: Options to pass to C(dzdo). + type: string + default: -H -S -n + ini: + - section: privilege_escalation + key: become_flags + - section: dzdo_become_plugin + key: flags + vars: + - name: ansible_become_flags + - name: ansible_dzdo_flags + env: + - name: ANSIBLE_BECOME_FLAGS + - name: ANSIBLE_DZDO_FLAGS + become_pass: + description: Options to pass to C(dzdo). + type: string + required: false + vars: + - name: ansible_become_password + - name: ansible_become_pass + - name: ansible_dzdo_pass + env: + - name: ANSIBLE_BECOME_PASS + - name: ANSIBLE_DZDO_PASS + ini: + - section: dzdo_become_plugin + key: password +""" from ansible.plugins.become import BecomeBase @@ -86,10 +90,10 @@ class BecomeModule(BecomeBase): flags = self.get_option('become_flags') if self.get_option('become_pass'): - self.prompt = '[dzdo via ansible, key=%s] password:' % self._id - flags = '%s -p "%s"' % (flags.replace('-n', ''), self.prompt) + self.prompt = f'[dzdo via ansible, key={self._id}] password:' + flags = f"{flags.replace('-n', '')} -p \"{self.prompt}\"" become_user = self.get_option('become_user') - user = '-u %s' % (become_user) if become_user else '' + user = f'-u {become_user}' if become_user else '' - return ' '.join([becomecmd, flags, user, self._build_success_command(cmd, shell)]) + return f"{becomecmd} {flags} {user} {self._build_success_command(cmd, shell)}" diff --git a/plugins/become/ksu.py b/plugins/become/ksu.py index 1ee47b0fa3..0ffba62385 100644 --- a/plugins/become/ksu.py +++ b/plugins/become/ksu.py @@ -1,84 +1,89 @@ -# -*- coding: utf-8 -*- -# Copyright: (c) 2018, Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from 
__future__ import (absolute_import, division, print_function) -__metaclass__ = type +# Copyright (c) 2018, Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later +from __future__ import annotations -DOCUMENTATION = ''' - name: ksu - short_description: Kerberos substitute user +DOCUMENTATION = r""" +name: ksu +short_description: Kerberos substitute user +description: + - This become plugins allows your remote/login user to execute commands as another user using the C(ksu) utility. +author: Ansible Core Team +options: + become_user: + description: User you 'become' to execute the task. + type: string + ini: + - section: privilege_escalation + key: become_user + - section: ksu_become_plugin + key: user + vars: + - name: ansible_become_user + - name: ansible_ksu_user + env: + - name: ANSIBLE_BECOME_USER + - name: ANSIBLE_KSU_USER + required: true + become_exe: + description: C(ksu) executable. + type: string + default: ksu + ini: + - section: privilege_escalation + key: become_exe + - section: ksu_become_plugin + key: executable + vars: + - name: ansible_become_exe + - name: ansible_ksu_exe + env: + - name: ANSIBLE_BECOME_EXE + - name: ANSIBLE_KSU_EXE + become_flags: + description: Options to pass to C(ksu). + type: string + default: '' + ini: + - section: privilege_escalation + key: become_flags + - section: ksu_become_plugin + key: flags + vars: + - name: ansible_become_flags + - name: ansible_ksu_flags + env: + - name: ANSIBLE_BECOME_FLAGS + - name: ANSIBLE_KSU_FLAGS + become_pass: + description: C(ksu) password. 
+ type: string + required: false + vars: + - name: ansible_ksu_pass + - name: ansible_become_pass + - name: ansible_become_password + env: + - name: ANSIBLE_BECOME_PASS + - name: ANSIBLE_KSU_PASS + ini: + - section: ksu_become_plugin + key: password + prompt_l10n: description: - - This become plugins allows your remote/login user to execute commands as another user via the ksu utility. - author: Ansible Core Team - options: - become_user: - description: User you 'become' to execute the task - ini: - - section: privilege_escalation - key: become_user - - section: ksu_become_plugin - key: user - vars: - - name: ansible_become_user - - name: ansible_ksu_user - env: - - name: ANSIBLE_BECOME_USER - - name: ANSIBLE_KSU_USER - required: True - become_exe: - description: Su executable - default: ksu - ini: - - section: privilege_escalation - key: become_exe - - section: ksu_become_plugin - key: executable - vars: - - name: ansible_become_exe - - name: ansible_ksu_exe - env: - - name: ANSIBLE_BECOME_EXE - - name: ANSIBLE_KSU_EXE - become_flags: - description: Options to pass to ksu - default: '' - ini: - - section: privilege_escalation - key: become_flags - - section: ksu_become_plugin - key: flags - vars: - - name: ansible_become_flags - - name: ansible_ksu_flags - env: - - name: ANSIBLE_BECOME_FLAGS - - name: ANSIBLE_KSU_FLAGS - become_pass: - description: ksu password - required: False - vars: - - name: ansible_ksu_pass - - name: ansible_become_pass - - name: ansible_become_password - env: - - name: ANSIBLE_BECOME_PASS - - name: ANSIBLE_KSU_PASS - ini: - - section: ksu_become_plugin - key: password - prompt_l10n: - description: - - List of localized strings to match for prompt detection - - If empty we'll use the built in one - default: [] - ini: - - section: ksu_become_plugin - key: localized_prompts - vars: - - name: ansible_ksu_prompt_l10n - env: - - name: ANSIBLE_KSU_PROMPT_L10N -''' + - List of localized strings to match for prompt detection. 
+ - If empty the plugin uses the built-in one. + type: list + elements: string + default: [] + ini: + - section: ksu_become_plugin + key: localized_prompts + vars: + - name: ansible_ksu_prompt_l10n + env: + - name: ANSIBLE_KSU_PROMPT_L10N +""" import re @@ -117,4 +122,4 @@ class BecomeModule(BecomeBase): flags = self.get_option('become_flags') user = self.get_option('become_user') - return '%s %s %s -e %s ' % (exe, user, flags, self._build_success_command(cmd, shell)) + return f'{exe} {user} {flags} -e {self._build_success_command(cmd, shell)} ' diff --git a/plugins/become/machinectl.py b/plugins/become/machinectl.py index aebb0891b0..685f39f5d8 100644 --- a/plugins/become/machinectl.py +++ b/plugins/become/machinectl.py @@ -1,80 +1,121 @@ -# -*- coding: utf-8 -*- -# Copyright: (c) 2018, Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +# Copyright (c) 2018, Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later +from __future__ import annotations -DOCUMENTATION = ''' - name: machinectl - short_description: Systemd's machinectl privilege escalation - description: - - This become plugins allows your remote/login user to execute commands as another user via the machinectl utility. 
- author: Ansible Core Team - options: - become_user: - description: User you 'become' to execute the task - default: '' - ini: - - section: privilege_escalation - key: become_user - - section: machinectl_become_plugin - key: user - vars: - - name: ansible_become_user - - name: ansible_machinectl_user - env: - - name: ANSIBLE_BECOME_USER - - name: ANSIBLE_MACHINECTL_USER - become_exe: - description: Machinectl executable - default: machinectl - ini: - - section: privilege_escalation - key: become_exe - - section: machinectl_become_plugin - key: executable - vars: - - name: ansible_become_exe - - name: ansible_machinectl_exe - env: - - name: ANSIBLE_BECOME_EXE - - name: ANSIBLE_MACHINECTL_EXE - become_flags: - description: Options to pass to machinectl - default: '' - ini: - - section: privilege_escalation - key: become_flags - - section: machinectl_become_plugin - key: flags - vars: - - name: ansible_become_flags - - name: ansible_machinectl_flags - env: - - name: ANSIBLE_BECOME_FLAGS - - name: ANSIBLE_MACHINECTL_FLAGS - become_pass: - description: Password for machinectl - required: False - vars: - - name: ansible_become_password - - name: ansible_become_pass - - name: ansible_machinectl_pass - env: - - name: ANSIBLE_BECOME_PASS - - name: ANSIBLE_MACHINECTL_PASS - ini: - - section: machinectl_become_plugin - key: password -''' +DOCUMENTATION = r""" +name: machinectl +short_description: Systemd's machinectl privilege escalation +description: + - This become plugins allows your remote/login user to execute commands as another user using the C(machinectl) utility. +author: Ansible Core Team +options: + become_user: + description: User you 'become' to execute the task. 
+ type: string + default: '' + ini: + - section: privilege_escalation + key: become_user + - section: machinectl_become_plugin + key: user + vars: + - name: ansible_become_user + - name: ansible_machinectl_user + env: + - name: ANSIBLE_BECOME_USER + - name: ANSIBLE_MACHINECTL_USER + become_exe: + description: C(machinectl) executable. + type: string + default: machinectl + ini: + - section: privilege_escalation + key: become_exe + - section: machinectl_become_plugin + key: executable + vars: + - name: ansible_become_exe + - name: ansible_machinectl_exe + env: + - name: ANSIBLE_BECOME_EXE + - name: ANSIBLE_MACHINECTL_EXE + become_flags: + description: Options to pass to C(machinectl). + type: string + default: '' + ini: + - section: privilege_escalation + key: become_flags + - section: machinectl_become_plugin + key: flags + vars: + - name: ansible_become_flags + - name: ansible_machinectl_flags + env: + - name: ANSIBLE_BECOME_FLAGS + - name: ANSIBLE_MACHINECTL_FLAGS + become_pass: + description: Password for C(machinectl). + type: string + required: false + vars: + - name: ansible_become_password + - name: ansible_become_pass + - name: ansible_machinectl_pass + env: + - name: ANSIBLE_BECOME_PASS + - name: ANSIBLE_MACHINECTL_PASS + ini: + - section: machinectl_become_plugin + key: password +notes: + - When not using this plugin with user V(root), it only works correctly with a polkit rule which alters the behaviour + of C(machinectl). This rule must alter the prompt behaviour to ask directly for the user credentials, if the user is allowed + to perform the action (take a look at the examples section). If such a rule is not present the plugin only works if it + is used in context with the root user, because then no further prompt is shown by C(machinectl). + - This become plugin does not work when connection pipelining is enabled. With ansible-core 2.19+, using it automatically + disables pipelining. 
On ansible-core 2.18 and before, pipelining must explicitly be disabled by the user. +""" + +EXAMPLES = r""" +# A polkit rule needed to use the module with a non-root user. +# See the Notes section for details. +/etc/polkit-1/rules.d/60-machinectl-fast-user-auth.rules: |- + polkit.addRule(function(action, subject) { + if(action.id == "org.freedesktop.machine1.host-shell" && + subject.isInGroup("wheel")) { + return polkit.Result.AUTH_SELF_KEEP; + } + }); +""" + +from re import compile as re_compile from ansible.plugins.become import BecomeBase +from ansible.module_utils.common.text.converters import to_bytes + + +ansi_color_codes = re_compile(to_bytes(r'\x1B\[[0-9;]+m')) class BecomeModule(BecomeBase): name = 'community.general.machinectl' + prompt = 'Password: ' + fail = ('==== AUTHENTICATION FAILED ====',) + success = ('==== AUTHENTICATION COMPLETE ====',) + require_tty = True # see https://github.com/ansible-collections/community.general/issues/6932 + + # See https://github.com/ansible/ansible/issues/81254, + # https://github.com/ansible/ansible/pull/78111 + pipelining = False + + @staticmethod + def remove_ansi_codes(line): + return ansi_color_codes.sub(b"", line) + def build_become_command(self, cmd, shell): super(BecomeModule, self).build_become_command(cmd, shell) @@ -85,4 +126,16 @@ class BecomeModule(BecomeBase): flags = self.get_option('become_flags') user = self.get_option('become_user') - return '%s -q shell %s %s@ %s' % (become, flags, user, cmd) + return f'{become} -q shell {flags} {user}@ {self._build_success_command(cmd, shell)}' + + def check_success(self, b_output): + b_output = self.remove_ansi_codes(b_output) + return super().check_success(b_output) + + def check_incorrect_password(self, b_output): + b_output = self.remove_ansi_codes(b_output) + return super().check_incorrect_password(b_output) + + def check_missing_password(self, b_output): + b_output = self.remove_ansi_codes(b_output) + return super().check_missing_password(b_output) diff --git 
a/plugins/become/pbrun.py b/plugins/become/pbrun.py index fe28e61c2b..c9eb975427 100644 --- a/plugins/become/pbrun.py +++ b/plugins/become/pbrun.py @@ -1,83 +1,86 @@ -# -*- coding: utf-8 -*- -# Copyright: (c) 2018, Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +# Copyright (c) 2018, Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later +from __future__ import annotations -DOCUMENTATION = ''' - name: pbrun - short_description: PowerBroker run - description: - - This become plugins allows your remote/login user to execute commands as another user via the pbrun utility. - author: Ansible Core Team - options: - become_user: - description: User you 'become' to execute the task - default: '' - ini: - - section: privilege_escalation - key: become_user - - section: pbrun_become_plugin - key: user - vars: - - name: ansible_become_user - - name: ansible_pbrun_user - env: - - name: ANSIBLE_BECOME_USER - - name: ANSIBLE_PBRUN_USER - become_exe: - description: Sudo executable - default: pbrun - ini: - - section: privilege_escalation - key: become_exe - - section: pbrun_become_plugin - key: executable - vars: - - name: ansible_become_exe - - name: ansible_pbrun_exe - env: - - name: ANSIBLE_BECOME_EXE - - name: ANSIBLE_PBRUN_EXE - become_flags: - description: Options to pass to pbrun - default: '' - ini: - - section: privilege_escalation - key: become_flags - - section: pbrun_become_plugin - key: flags - vars: - - name: ansible_become_flags - - name: ansible_pbrun_flags - env: - - name: ANSIBLE_BECOME_FLAGS - - name: ANSIBLE_PBRUN_FLAGS - become_pass: - description: Password for pbrun - required: False - vars: - - name: ansible_become_password - - name: ansible_become_pass - - name: ansible_pbrun_pass - env: 
- - name: ANSIBLE_BECOME_PASS - - name: ANSIBLE_PBRUN_PASS - ini: - - section: pbrun_become_plugin - key: password - wrap_exe: - description: Toggle to wrap the command pbrun calls in 'shell -c' or not - default: False - type: bool - ini: - - section: pbrun_become_plugin - key: wrap_execution - vars: - - name: ansible_pbrun_wrap_execution - env: - - name: ANSIBLE_PBRUN_WRAP_EXECUTION -''' +DOCUMENTATION = r""" +name: pbrun +short_description: PowerBroker run +description: + - This become plugins allows your remote/login user to execute commands as another user using the C(pbrun) utility. +author: Ansible Core Team +options: + become_user: + description: User you 'become' to execute the task. + type: string + default: '' + ini: + - section: privilege_escalation + key: become_user + - section: pbrun_become_plugin + key: user + vars: + - name: ansible_become_user + - name: ansible_pbrun_user + env: + - name: ANSIBLE_BECOME_USER + - name: ANSIBLE_PBRUN_USER + become_exe: + description: C(pbrun) executable. + type: string + default: pbrun + ini: + - section: privilege_escalation + key: become_exe + - section: pbrun_become_plugin + key: executable + vars: + - name: ansible_become_exe + - name: ansible_pbrun_exe + env: + - name: ANSIBLE_BECOME_EXE + - name: ANSIBLE_PBRUN_EXE + become_flags: + description: Options to pass to C(pbrun). + type: string + default: '' + ini: + - section: privilege_escalation + key: become_flags + - section: pbrun_become_plugin + key: flags + vars: + - name: ansible_become_flags + - name: ansible_pbrun_flags + env: + - name: ANSIBLE_BECOME_FLAGS + - name: ANSIBLE_PBRUN_FLAGS + become_pass: + description: Password for C(pbrun). 
+ type: string + required: false + vars: + - name: ansible_become_password + - name: ansible_become_pass + - name: ansible_pbrun_pass + env: + - name: ANSIBLE_BECOME_PASS + - name: ANSIBLE_PBRUN_PASS + ini: + - section: pbrun_become_plugin + key: password + wrap_exe: + description: Toggle to wrap the command C(pbrun) calls in C(shell -c) or not. + default: false + type: bool + ini: + - section: pbrun_become_plugin + key: wrap_execution + vars: + - name: ansible_pbrun_wrap_execution + env: + - name: ANSIBLE_PBRUN_WRAP_EXECUTION +""" from ansible.plugins.become import BecomeBase @@ -98,7 +101,7 @@ class BecomeModule(BecomeBase): flags = self.get_option('become_flags') become_user = self.get_option('become_user') - user = '-u %s' % (become_user) if become_user else '' + user = f'-u {become_user}' if become_user else '' noexe = not self.get_option('wrap_exe') - return ' '.join([become_exe, flags, user, self._build_success_command(cmd, shell, noexe=noexe)]) + return f"{become_exe} {flags} {user} {self._build_success_command(cmd, shell, noexe=noexe)}" diff --git a/plugins/become/pfexec.py b/plugins/become/pfexec.py index 2b37044c93..2e7df0f6c0 100644 --- a/plugins/become/pfexec.py +++ b/plugins/become/pfexec.py @@ -1,88 +1,91 @@ -# -*- coding: utf-8 -*- -# Copyright: (c) 2018, Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +# Copyright (c) 2018, Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later +from __future__ import annotations -DOCUMENTATION = ''' - name: pfexec - short_description: profile based execution +DOCUMENTATION = r""" +name: pfexec +short_description: Profile based execution +description: + - This become plugins allows your remote/login user to execute commands as another user using 
the C(pfexec) utility. +author: Ansible Core Team +options: + become_user: description: - - This become plugins allows your remote/login user to execute commands as another user via the pfexec utility. - author: Ansible Core Team - options: - become_user: - description: - - User you 'become' to execute the task - - This plugin ignores this setting as pfexec uses it's own C(exec_attr) to figure this out, - but it is supplied here for Ansible to make decisions needed for the task execution, like file permissions. - default: root - ini: - - section: privilege_escalation - key: become_user - - section: pfexec_become_plugin - key: user - vars: - - name: ansible_become_user - - name: ansible_pfexec_user - env: - - name: ANSIBLE_BECOME_USER - - name: ANSIBLE_PFEXEC_USER - become_exe: - description: Sudo executable - default: pfexec - ini: - - section: privilege_escalation - key: become_exe - - section: pfexec_become_plugin - key: executable - vars: - - name: ansible_become_exe - - name: ansible_pfexec_exe - env: - - name: ANSIBLE_BECOME_EXE - - name: ANSIBLE_PFEXEC_EXE - become_flags: - description: Options to pass to pfexec - default: -H -S -n - ini: - - section: privilege_escalation - key: become_flags - - section: pfexec_become_plugin - key: flags - vars: - - name: ansible_become_flags - - name: ansible_pfexec_flags - env: - - name: ANSIBLE_BECOME_FLAGS - - name: ANSIBLE_PFEXEC_FLAGS - become_pass: - description: pfexec password - required: False - vars: - - name: ansible_become_password - - name: ansible_become_pass - - name: ansible_pfexec_pass - env: - - name: ANSIBLE_BECOME_PASS - - name: ANSIBLE_PFEXEC_PASS - ini: - - section: pfexec_become_plugin - key: password - wrap_exe: - description: Toggle to wrap the command pfexec calls in 'shell -c' or not - default: False - type: bool - ini: - - section: pfexec_become_plugin - key: wrap_execution - vars: - - name: ansible_pfexec_wrap_execution - env: - - name: ANSIBLE_PFEXEC_WRAP_EXECUTION - notes: - - This plugin 
ignores I(become_user) as pfexec uses it's own C(exec_attr) to figure this out. -''' + - User you 'become' to execute the task. + - This plugin ignores this setting as pfexec uses its own C(exec_attr) to figure this out, but it is supplied here for + Ansible to make decisions needed for the task execution, like file permissions. + type: string + default: root + ini: + - section: privilege_escalation + key: become_user + - section: pfexec_become_plugin + key: user + vars: + - name: ansible_become_user + - name: ansible_pfexec_user + env: + - name: ANSIBLE_BECOME_USER + - name: ANSIBLE_PFEXEC_USER + become_exe: + description: C(pfexec) executable. + type: string + default: pfexec + ini: + - section: privilege_escalation + key: become_exe + - section: pfexec_become_plugin + key: executable + vars: + - name: ansible_become_exe + - name: ansible_pfexec_exe + env: + - name: ANSIBLE_BECOME_EXE + - name: ANSIBLE_PFEXEC_EXE + become_flags: + description: Options to pass to C(pfexec). + type: string + default: -H -S -n + ini: + - section: privilege_escalation + key: become_flags + - section: pfexec_become_plugin + key: flags + vars: + - name: ansible_become_flags + - name: ansible_pfexec_flags + env: + - name: ANSIBLE_BECOME_FLAGS + - name: ANSIBLE_PFEXEC_FLAGS + become_pass: + description: C(pfexec) password. + type: string + required: false + vars: + - name: ansible_become_password + - name: ansible_become_pass + - name: ansible_pfexec_pass + env: + - name: ANSIBLE_BECOME_PASS + - name: ANSIBLE_PFEXEC_PASS + ini: + - section: pfexec_become_plugin + key: password + wrap_exe: + description: Toggle to wrap the command C(pfexec) calls in C(shell -c) or not. + default: false + type: bool + ini: + - section: pfexec_become_plugin + key: wrap_execution + vars: + - name: ansible_pfexec_wrap_execution + env: + - name: ANSIBLE_PFEXEC_WRAP_EXECUTION +notes: + - This plugin ignores O(become_user) as pfexec uses its own C(exec_attr) to figure this out. 
+""" from ansible.plugins.become import BecomeBase @@ -101,4 +104,4 @@ class BecomeModule(BecomeBase): flags = self.get_option('become_flags') noexe = not self.get_option('wrap_exe') - return '%s %s "%s"' % (exe, flags, self._build_success_command(cmd, shell, noexe=noexe)) + return f'{exe} {flags} {self._build_success_command(cmd, shell, noexe=noexe)}' diff --git a/plugins/become/pmrun.py b/plugins/become/pmrun.py index 8cb24fa937..413600cdbf 100644 --- a/plugins/become/pmrun.py +++ b/plugins/become/pmrun.py @@ -1,63 +1,65 @@ -# -*- coding: utf-8 -*- -# Copyright: (c) 2018, Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +# Copyright (c) 2018, Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later +from __future__ import annotations -DOCUMENTATION = ''' - name: pmrun - short_description: Privilege Manager run - description: - - This become plugins allows your remote/login user to execute commands as another user via the pmrun utility. 
- author: Ansible Core Team - options: - become_exe: - description: Sudo executable - default: pmrun - ini: - - section: privilege_escalation - key: become_exe - - section: pmrun_become_plugin - key: executable - vars: - - name: ansible_become_exe - - name: ansible_pmrun_exe - env: - - name: ANSIBLE_BECOME_EXE - - name: ANSIBLE_PMRUN_EXE - become_flags: - description: Options to pass to pmrun - default: '' - ini: - - section: privilege_escalation - key: become_flags - - section: pmrun_become_plugin - key: flags - vars: - - name: ansible_become_flags - - name: ansible_pmrun_flags - env: - - name: ANSIBLE_BECOME_FLAGS - - name: ANSIBLE_PMRUN_FLAGS - become_pass: - description: pmrun password - required: False - vars: - - name: ansible_become_password - - name: ansible_become_pass - - name: ansible_pmrun_pass - env: - - name: ANSIBLE_BECOME_PASS - - name: ANSIBLE_PMRUN_PASS - ini: - - section: pmrun_become_plugin - key: password - notes: - - This plugin ignores the become_user supplied and uses pmrun's own configuration to select the user. -''' +DOCUMENTATION = r""" +name: pmrun +short_description: Privilege Manager run +description: + - This become plugins allows your remote/login user to execute commands as another user using the C(pmrun) utility. +author: Ansible Core Team +options: + become_exe: + description: C(pmrun) executable. + type: string + default: pmrun + ini: + - section: privilege_escalation + key: become_exe + - section: pmrun_become_plugin + key: executable + vars: + - name: ansible_become_exe + - name: ansible_pmrun_exe + env: + - name: ANSIBLE_BECOME_EXE + - name: ANSIBLE_PMRUN_EXE + become_flags: + description: Options to pass to C(pmrun). 
+ type: string + default: '' + ini: + - section: privilege_escalation + key: become_flags + - section: pmrun_become_plugin + key: flags + vars: + - name: ansible_become_flags + - name: ansible_pmrun_flags + env: + - name: ANSIBLE_BECOME_FLAGS + - name: ANSIBLE_PMRUN_FLAGS + become_pass: + description: C(pmrun) password. + type: string + required: false + vars: + - name: ansible_become_password + - name: ansible_become_pass + - name: ansible_pmrun_pass + env: + - name: ANSIBLE_BECOME_PASS + - name: ANSIBLE_PMRUN_PASS + ini: + - section: pmrun_become_plugin + key: password +notes: + - This plugin ignores the C(become_user) supplied and uses C(pmrun)'s own configuration to select the user. +""" +from shlex import quote as shlex_quote from ansible.plugins.become import BecomeBase -from ansible.module_utils.six.moves import shlex_quote class BecomeModule(BecomeBase): @@ -74,4 +76,4 @@ class BecomeModule(BecomeBase): become = self.get_option('become_exe') flags = self.get_option('become_flags') - return '%s %s %s' % (become, flags, shlex_quote(self._build_success_command(cmd, shell))) + return f'{become} {flags} {shlex_quote(self._build_success_command(cmd, shell))}' diff --git a/plugins/become/run0.py b/plugins/become/run0.py new file mode 100644 index 0000000000..4362d53ebf --- /dev/null +++ b/plugins/become/run0.py @@ -0,0 +1,126 @@ +# Copyright (c) 2024, Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + + +DOCUMENTATION = r""" +name: run0 +short_description: Systemd's run0 +description: + - This become plugins allows your remote/login user to execute commands as another user using the C(run0) utility. +author: + - Thomas Sjögren (@konstruktoid) +version_added: '9.0.0' +options: + become_user: + description: User you 'become' to execute the task. 
+ default: root + ini: + - section: privilege_escalation + key: become_user + - section: run0_become_plugin + key: user + vars: + - name: ansible_become_user + - name: ansible_run0_user + env: + - name: ANSIBLE_BECOME_USER + - name: ANSIBLE_RUN0_USER + type: string + become_exe: + description: C(run0) executable. + default: run0 + ini: + - section: privilege_escalation + key: become_exe + - section: run0_become_plugin + key: executable + vars: + - name: ansible_become_exe + - name: ansible_run0_exe + env: + - name: ANSIBLE_BECOME_EXE + - name: ANSIBLE_RUN0_EXE + type: string + become_flags: + description: Options to pass to C(run0). + default: '' + ini: + - section: privilege_escalation + key: become_flags + - section: run0_become_plugin + key: flags + vars: + - name: ansible_become_flags + - name: ansible_run0_flags + env: + - name: ANSIBLE_BECOME_FLAGS + - name: ANSIBLE_RUN0_FLAGS + type: string +notes: + - This plugin only works when a C(polkit) rule is in place. +""" + +EXAMPLES = r""" +# An example polkit rule that allows the user 'ansible' in the 'wheel' group +# to execute commands using run0 without authentication. 
+/etc/polkit-1/rules.d/60-run0-fast-user-auth.rules: |- + polkit.addRule(function(action, subject) { + if(action.id == "org.freedesktop.systemd1.manage-units" && + subject.isInGroup("wheel") && + subject.user == "ansible") { + return polkit.Result.YES; + } + }); +""" + +from re import compile as re_compile + +from ansible.plugins.become import BecomeBase +from ansible.module_utils.common.text.converters import to_bytes + +ansi_color_codes = re_compile(to_bytes(r"\x1B\[[0-9;]+m")) + + +class BecomeModule(BecomeBase): + + name = "community.general.run0" + + prompt = "Password: " + fail = ("==== AUTHENTICATION FAILED ====",) + success = ("==== AUTHENTICATION COMPLETE ====",) + require_tty = ( + True # see https://github.com/ansible-collections/community.general/issues/6932 + ) + + @staticmethod + def remove_ansi_codes(line): + return ansi_color_codes.sub(b"", line) + + def build_become_command(self, cmd, shell): + super().build_become_command(cmd, shell) + + if not cmd: + return cmd + + become = self.get_option("become_exe") + flags = self.get_option("become_flags") + user = self.get_option("become_user") + + return ( + f"{become} --user={user} {flags} {self._build_success_command(cmd, shell)}" + ) + + def check_success(self, b_output): + b_output = self.remove_ansi_codes(b_output) + return super().check_success(b_output) + + def check_incorrect_password(self, b_output): + b_output = self.remove_ansi_codes(b_output) + return super().check_incorrect_password(b_output) + + def check_missing_password(self, b_output): + b_output = self.remove_ansi_codes(b_output) + return super().check_missing_password(b_output) diff --git a/plugins/become/sesu.py b/plugins/become/sesu.py index 7113b19442..ecd29c83c5 100644 --- a/plugins/become/sesu.py +++ b/plugins/become/sesu.py @@ -1,72 +1,75 @@ -# -*- coding: utf-8 -*- -# Copyright: (c) 2018, Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import 
(absolute_import, division, print_function) -__metaclass__ = type +# Copyright (c) 2018, Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later +from __future__ import annotations -DOCUMENTATION = ''' - name: sesu - short_description: CA Privileged Access Manager - description: - - This become plugins allows your remote/login user to execute commands as another user via the sesu utility. - author: ansible (@nekonyuu) - options: - become_user: - description: User you 'become' to execute the task - default: '' - ini: - - section: privilege_escalation - key: become_user - - section: sesu_become_plugin - key: user - vars: - - name: ansible_become_user - - name: ansible_sesu_user - env: - - name: ANSIBLE_BECOME_USER - - name: ANSIBLE_SESU_USER - become_exe: - description: sesu executable - default: sesu - ini: - - section: privilege_escalation - key: become_exe - - section: sesu_become_plugin - key: executable - vars: - - name: ansible_become_exe - - name: ansible_sesu_exe - env: - - name: ANSIBLE_BECOME_EXE - - name: ANSIBLE_SESU_EXE - become_flags: - description: Options to pass to sesu - default: -H -S -n - ini: - - section: privilege_escalation - key: become_flags - - section: sesu_become_plugin - key: flags - vars: - - name: ansible_become_flags - - name: ansible_sesu_flags - env: - - name: ANSIBLE_BECOME_FLAGS - - name: ANSIBLE_SESU_FLAGS - become_pass: - description: Password to pass to sesu - required: False - vars: - - name: ansible_become_password - - name: ansible_become_pass - - name: ansible_sesu_pass - env: - - name: ANSIBLE_BECOME_PASS - - name: ANSIBLE_SESU_PASS - ini: - - section: sesu_become_plugin - key: password -''' +DOCUMENTATION = r""" +name: sesu +short_description: CA Privileged Access Manager +description: + - This become plugins allows your remote/login user to execute commands as another user using the C(sesu) utility. 
+author: ansible (@nekonyuu) +options: + become_user: + description: User you 'become' to execute the task. + type: string + default: '' + ini: + - section: privilege_escalation + key: become_user + - section: sesu_become_plugin + key: user + vars: + - name: ansible_become_user + - name: ansible_sesu_user + env: + - name: ANSIBLE_BECOME_USER + - name: ANSIBLE_SESU_USER + become_exe: + description: C(sesu) executable. + type: string + default: sesu + ini: + - section: privilege_escalation + key: become_exe + - section: sesu_become_plugin + key: executable + vars: + - name: ansible_become_exe + - name: ansible_sesu_exe + env: + - name: ANSIBLE_BECOME_EXE + - name: ANSIBLE_SESU_EXE + become_flags: + description: Options to pass to C(sesu). + type: string + default: -H -S -n + ini: + - section: privilege_escalation + key: become_flags + - section: sesu_become_plugin + key: flags + vars: + - name: ansible_become_flags + - name: ansible_sesu_flags + env: + - name: ANSIBLE_BECOME_FLAGS + - name: ANSIBLE_SESU_FLAGS + become_pass: + description: Password to pass to C(sesu). 
+ type: string + required: false + vars: + - name: ansible_become_password + - name: ansible_become_pass + - name: ansible_sesu_pass + env: + - name: ANSIBLE_BECOME_PASS + - name: ANSIBLE_SESU_PASS + ini: + - section: sesu_become_plugin + key: password +""" from ansible.plugins.become import BecomeBase @@ -88,4 +91,4 @@ class BecomeModule(BecomeBase): flags = self.get_option('become_flags') user = self.get_option('become_user') - return '%s %s %s -c %s' % (become, flags, user, self._build_success_command(cmd, shell)) + return f'{become} {flags} {user} -c {self._build_success_command(cmd, shell)}' diff --git a/plugins/become/sudosu.py b/plugins/become/sudosu.py index 410b881b96..3b5d4d8b7f 100644 --- a/plugins/become/sudosu.py +++ b/plugins/become/sudosu.py @@ -1,59 +1,77 @@ -# -*- coding: utf-8 -*- -# Copyright: (c) 2021, Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +# Copyright (c) 2021, Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later +from __future__ import annotations -DOCUMENTATION = """ - name: sudosu - short_description: Run tasks using sudo su - +DOCUMENTATION = r""" +name: sudosu +short_description: Run tasks using sudo su - +description: + - This become plugin allows your remote/login user to execute commands as another user using the C(sudo) and C(su) utilities + combined. +author: + - Dag Wieers (@dagwieers) +version_added: 2.4.0 +options: + become_user: + description: User you 'become' to execute the task. 
+ type: string + default: root + ini: + - section: privilege_escalation + key: become_user + - section: sudo_become_plugin + key: user + vars: + - name: ansible_become_user + - name: ansible_sudo_user + env: + - name: ANSIBLE_BECOME_USER + - name: ANSIBLE_SUDO_USER + become_flags: + description: Options to pass to C(sudo). + type: string + default: -H -S -n + ini: + - section: privilege_escalation + key: become_flags + - section: sudo_become_plugin + key: flags + vars: + - name: ansible_become_flags + - name: ansible_sudo_flags + env: + - name: ANSIBLE_BECOME_FLAGS + - name: ANSIBLE_SUDO_FLAGS + become_pass: + description: Password to pass to C(sudo). + type: string + required: false + vars: + - name: ansible_become_password + - name: ansible_become_pass + - name: ansible_sudo_pass + env: + - name: ANSIBLE_BECOME_PASS + - name: ANSIBLE_SUDO_PASS + ini: + - section: sudo_become_plugin + key: password + alt_method: description: - - This become plugins allows your remote/login user to execute commands as another user via the C(sudo) and C(su) utilities combined. - author: - - Dag Wieers (@dagwieers) - version_added: 2.4.0 - options: - become_user: - description: User you 'become' to execute the task. - default: root - ini: - - section: privilege_escalation - key: become_user - - section: sudo_become_plugin - key: user - vars: - - name: ansible_become_user - - name: ansible_sudo_user - env: - - name: ANSIBLE_BECOME_USER - - name: ANSIBLE_SUDO_USER - become_flags: - description: Options to pass to C(sudo). - default: -H -S -n - ini: - - section: privilege_escalation - key: become_flags - - section: sudo_become_plugin - key: flags - vars: - - name: ansible_become_flags - - name: ansible_sudo_flags - env: - - name: ANSIBLE_BECOME_FLAGS - - name: ANSIBLE_SUDO_FLAGS - become_pass: - description: Password to pass to C(sudo). 
- required: false - vars: - - name: ansible_become_password - - name: ansible_become_pass - - name: ansible_sudo_pass - env: - - name: ANSIBLE_BECOME_PASS - - name: ANSIBLE_SUDO_PASS - ini: - - section: sudo_become_plugin - key: password + - Whether to use an alternative method to call C(su). Instead of running C(su -l user /path/to/shell -c command), it + runs C(su -l user -c command). + - Use this when the default one is not working on your system. + required: false + type: boolean + ini: + - section: community.general.sudosu + key: alternative_method + vars: + - name: ansible_sudosu_alt_method + env: + - name: ANSIBLE_SUDOSU_ALT_METHOD + version_added: 9.2.0 """ @@ -79,13 +97,16 @@ class BecomeModule(BecomeBase): flags = self.get_option('become_flags') or '' prompt = '' if self.get_option('become_pass'): - self.prompt = '[sudo via ansible, key=%s] password:' % self._id + self.prompt = f'[sudo via ansible, key={self._id}] password:' if flags: # this could be simplified, but kept as is for now for backwards string matching flags = flags.replace('-n', '') - prompt = '-p "%s"' % (self.prompt) + prompt = f'-p "{self.prompt}"' user = self.get_option('become_user') or '' if user: - user = '%s' % (user) + user = f'{user}' - return ' '.join([becomecmd, flags, prompt, 'su -l', user, self._build_success_command(cmd, shell)]) + if self.get_option('alt_method'): + return f"{becomecmd} {flags} {prompt} su -l {user} -c {self._build_success_command(cmd, shell, True)}" + else: + return f"{becomecmd} {flags} {prompt} su -l {user} {self._build_success_command(cmd, shell)}" diff --git a/plugins/cache/memcached.py b/plugins/cache/memcached.py index f2ea098d4d..28011e8cab 100644 --- a/plugins/cache/memcached.py +++ b/plugins/cache/memcached.py @@ -1,49 +1,50 @@ -# -*- coding: utf-8 -*- -# (c) 2014, Brian Coca, Josh Drake, et al -# (c) 2017 Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# Copyright (c) 2014, Brian Coca, 
Josh Drake, et al +# Copyright (c) 2017 Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = ''' - author: Unknown (!UNKNOWN) - name: memcached - short_description: Use memcached DB for cache +DOCUMENTATION = r""" +author: Unknown (!UNKNOWN) +name: memcached +short_description: Use memcached DB for cache +description: + - This cache uses JSON formatted, per host records saved in memcached. +requirements: + - memcache (python lib) +options: + _uri: description: - - This cache uses JSON formatted, per host records saved in memcached. - requirements: - - memcache (python lib) - options: - _uri: - description: - - List of connection information for the memcached DBs - default: ['127.0.0.1:11211'] - type: list - elements: string - env: - - name: ANSIBLE_CACHE_PLUGIN_CONNECTION - ini: - - key: fact_caching_connection - section: defaults - _prefix: - description: User defined prefix to use when creating the DB entries - default: ansible_facts - env: - - name: ANSIBLE_CACHE_PLUGIN_PREFIX - ini: - - key: fact_caching_prefix - section: defaults - _timeout: - default: 86400 - description: Expiration timeout in seconds for the cache plugin data. Set to 0 to never expire - env: - - name: ANSIBLE_CACHE_PLUGIN_TIMEOUT - ini: - - key: fact_caching_timeout - section: defaults - type: integer -''' + - List of connection information for the memcached DBs. + default: ['127.0.0.1:11211'] + type: list + elements: string + env: + - name: ANSIBLE_CACHE_PLUGIN_CONNECTION + ini: + - key: fact_caching_connection + section: defaults + _prefix: + description: User defined prefix to use when creating the DB entries. 
+ type: string + default: ansible_facts + env: + - name: ANSIBLE_CACHE_PLUGIN_PREFIX + ini: + - key: fact_caching_prefix + section: defaults + _timeout: + default: 86400 + type: integer + # TODO: determine whether it is OK to change to: type: float + description: Expiration timeout in seconds for the cache plugin data. Set to 0 to never expire. + env: + - name: ANSIBLE_CACHE_PLUGIN_TIMEOUT + ini: + - key: fact_caching_timeout + section: defaults +""" import collections import os @@ -51,11 +52,9 @@ import time from multiprocessing import Lock from itertools import chain -from ansible import constants as C from ansible.errors import AnsibleError -from ansible.module_utils.common._collections_compat import MutableSet +from collections.abc import MutableSet from ansible.plugins.cache import BaseCacheModule -from ansible.release import __version__ as ansible_base_version from ansible.utils.display import Display try: @@ -190,7 +189,7 @@ class CacheModule(BaseCacheModule): self._keys = CacheModuleKeys(self._db, self._db.get(CacheModuleKeys.PREFIX) or []) def _make_key(self, key): - return "{0}{1}".format(self._prefix, key) + return f"{self._prefix}{key}" def _expire_keys(self): if self._timeout > 0: diff --git a/plugins/cache/pickle.py b/plugins/cache/pickle.py index 1e549d4d66..6c053138c8 100644 --- a/plugins/cache/pickle.py +++ b/plugins/cache/pickle.py @@ -1,51 +1,49 @@ -# -*- coding: utf-8 -*- -# (c) 2017, Brian Coca -# (c) 2017 Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# Copyright (c) 2017, Brian Coca +# Copyright (c) 2017 Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later # Make coding more python3-ish -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = ''' - name: pickle - 
short_description: Pickle formatted files. +DOCUMENTATION = r""" +name: pickle +short_description: Pickle formatted files +description: + - This cache uses Python's pickle serialization format, in per host files, saved to the filesystem. +author: Brian Coca (@bcoca) +options: + _uri: + required: true description: - - This cache uses Python's pickle serialization format, in per host files, saved to the filesystem. - author: Brian Coca (@bcoca) - options: - _uri: - required: True - description: - - Path in which the cache plugin will save the files - env: - - name: ANSIBLE_CACHE_PLUGIN_CONNECTION - ini: - - key: fact_caching_connection - section: defaults - _prefix: - description: User defined prefix to use when creating the files - env: - - name: ANSIBLE_CACHE_PLUGIN_PREFIX - ini: - - key: fact_caching_prefix - section: defaults - _timeout: - default: 86400 - description: Expiration timeout in seconds for the cache plugin data. Set to 0 to never expire - env: - - name: ANSIBLE_CACHE_PLUGIN_TIMEOUT - ini: - - key: fact_caching_timeout - section: defaults -''' + - Path in which the cache plugin saves the files. + env: + - name: ANSIBLE_CACHE_PLUGIN_CONNECTION + ini: + - key: fact_caching_connection + section: defaults + type: path + _prefix: + description: User defined prefix to use when creating the files. + env: + - name: ANSIBLE_CACHE_PLUGIN_PREFIX + ini: + - key: fact_caching_prefix + section: defaults + type: string + _timeout: + default: 86400 + description: Expiration timeout in seconds for the cache plugin data. Set to 0 to never expire. + env: + - name: ANSIBLE_CACHE_PLUGIN_TIMEOUT + ini: + - key: fact_caching_timeout + section: defaults + type: float +""" -try: - import cPickle as pickle -except ImportError: - import pickle +import pickle -from ansible.module_utils.six import PY3 from ansible.plugins.cache import BaseFileCacheModule @@ -53,14 +51,12 @@ class CacheModule(BaseFileCacheModule): """ A caching module backed by pickle files. 
""" + _persistent = False # prevent unnecessary JSON serialization and key munging def _load(self, filepath): # Pickle is a binary format with open(filepath, 'rb') as f: - if PY3: - return pickle.load(f, encoding='bytes') - else: - return pickle.load(f) + return pickle.load(f, encoding='bytes') def _dump(self, value, filepath): with open(filepath, 'wb') as f: diff --git a/plugins/cache/redis.py b/plugins/cache/redis.py index 6c2edb5f61..d7b596bb32 100644 --- a/plugins/cache/redis.py +++ b/plugins/cache/redis.py @@ -1,77 +1,78 @@ -# -*- coding: utf-8 -*- -# (c) 2014, Brian Coca, Josh Drake, et al -# (c) 2017 Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +# Copyright (c) 2014, Brian Coca, Josh Drake, et al +# Copyright (c) 2017 Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later +from __future__ import annotations -DOCUMENTATION = ''' - author: Unknown (!UNKNOWN) - name: redis - short_description: Use Redis DB for cache +DOCUMENTATION = r""" +author: Unknown (!UNKNOWN) +name: redis +short_description: Use Redis DB for cache +description: + - This cache uses JSON formatted, per host records saved in Redis. +requirements: + - redis>=2.4.5 (python lib) +options: + _uri: description: - - This cache uses JSON formatted, per host records saved in Redis. - requirements: - - redis>=2.4.5 (python lib) - options: - _uri: - description: - - A colon separated string of connection information for Redis. - - The format is C(host:port:db:password), for example C(localhost:6379:0:changeme). - - To use encryption in transit, prefix the connection with C(tls://), as in C(tls://localhost:6379:0:changeme). - - To use redis sentinel, use separator C(;), for example C(localhost:26379;localhost:26379;0:changeme). 
Requires redis>=2.9.0. - required: True - env: - - name: ANSIBLE_CACHE_PLUGIN_CONNECTION - ini: - - key: fact_caching_connection - section: defaults - _prefix: - description: User defined prefix to use when creating the DB entries - default: ansible_facts - env: - - name: ANSIBLE_CACHE_PLUGIN_PREFIX - ini: - - key: fact_caching_prefix - section: defaults - _keyset_name: - description: User defined name for cache keyset name. - default: ansible_cache_keys - env: - - name: ANSIBLE_CACHE_REDIS_KEYSET_NAME - ini: - - key: fact_caching_redis_keyset_name - section: defaults - version_added: 1.3.0 - _sentinel_service_name: - description: The redis sentinel service name (or referenced as cluster name). - env: - - name: ANSIBLE_CACHE_REDIS_SENTINEL - ini: - - key: fact_caching_redis_sentinel - section: defaults - version_added: 1.3.0 - _timeout: - default: 86400 - description: Expiration timeout in seconds for the cache plugin data. Set to 0 to never expire - env: - - name: ANSIBLE_CACHE_PLUGIN_TIMEOUT - ini: - - key: fact_caching_timeout - section: defaults - type: integer -''' + - A colon separated string of connection information for Redis. + - The format is V(host:port:db:password), for example V(localhost:6379:0:changeme). + - To use encryption in transit, prefix the connection with V(tls://), as in V(tls://localhost:6379:0:changeme). + - To use redis sentinel, use separator V(;), for example V(localhost:26379;localhost:26379;0:changeme). Requires redis>=2.9.0. + type: string + required: true + env: + - name: ANSIBLE_CACHE_PLUGIN_CONNECTION + ini: + - key: fact_caching_connection + section: defaults + _prefix: + description: User defined prefix to use when creating the DB entries. + type: string + default: ansible_facts + env: + - name: ANSIBLE_CACHE_PLUGIN_PREFIX + ini: + - key: fact_caching_prefix + section: defaults + _keyset_name: + description: User defined name for cache keyset name. 
+ type: string + default: ansible_cache_keys + env: + - name: ANSIBLE_CACHE_REDIS_KEYSET_NAME + ini: + - key: fact_caching_redis_keyset_name + section: defaults + version_added: 1.3.0 + _sentinel_service_name: + description: The redis sentinel service name (or referenced as cluster name). + type: string + env: + - name: ANSIBLE_CACHE_REDIS_SENTINEL + ini: + - key: fact_caching_redis_sentinel + section: defaults + version_added: 1.3.0 + _timeout: + default: 86400 + type: integer + # TODO: determine whether it is OK to change to: type: float + description: Expiration timeout in seconds for the cache plugin data. Set to 0 to never expire. + env: + - name: ANSIBLE_CACHE_PLUGIN_TIMEOUT + ini: + - key: fact_caching_timeout + section: defaults +""" import re import time import json -from ansible import constants as C from ansible.errors import AnsibleError -from ansible.module_utils.common.text.converters import to_native from ansible.parsing.ajson import AnsibleJSONEncoder, AnsibleJSONDecoder from ansible.plugins.cache import BaseCacheModule -from ansible.release import __version__ as ansible_base_version from ansible.utils.display import Display try: @@ -127,7 +128,7 @@ class CacheModule(BaseCacheModule): connection = self._parse_connection(self.re_url_conn, uri) self._db = StrictRedis(*connection, **kw) - display.vv('Redis connection: %s' % self._db) + display.vv(f'Redis connection: {self._db}') @staticmethod def _parse_connection(re_patt, uri): @@ -151,7 +152,7 @@ class CacheModule(BaseCacheModule): # format: "localhost:26379;localhost2:26379;0:changeme" connections = uri.split(';') connection_args = connections.pop(-1) - if len(connection_args) > 0: # hanle if no db nr is given + if len(connection_args) > 0: # handle if no db nr is given connection_args = connection_args.split(':') kw['db'] = connection_args.pop(0) try: @@ -160,12 +161,12 @@ class CacheModule(BaseCacheModule): pass # password is optional sentinels = [self._parse_connection(self.re_sent_conn, shost) 
for shost in connections] - display.vv('\nUsing redis sentinels: %s' % sentinels) + display.vv(f'\nUsing redis sentinels: {sentinels}') scon = Sentinel(sentinels, **kw) try: return scon.master_for(self._sentinel_service_name, socket_timeout=0.2) except Exception as exc: - raise AnsibleError('Could not connect to redis sentinel: %s' % to_native(exc)) + raise AnsibleError(f'Could not connect to redis sentinel: {exc}') def _make_key(self, key): return self._prefix + key @@ -223,7 +224,7 @@ class CacheModule(BaseCacheModule): def copy(self): # TODO: there is probably a better way to do this in redis - ret = dict([(k, self.get(k)) for k in self.keys()]) + ret = {k: self.get(k) for k in self.keys()} return ret def __getstate__(self): diff --git a/plugins/cache/yaml.py b/plugins/cache/yaml.py index e5062b16d1..52cbf887de 100644 --- a/plugins/cache/yaml.py +++ b/plugins/cache/yaml.py @@ -1,48 +1,49 @@ -# -*- coding: utf-8 -*- -# (c) 2017, Brian Coca -# (c) 2017 Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# Copyright (c) 2017, Brian Coca +# Copyright (c) 2017 Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later # Make coding more python3-ish -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = ''' - name: yaml - short_description: YAML formatted files. +DOCUMENTATION = r""" +name: yaml +short_description: YAML formatted files +description: + - This cache uses YAML formatted, per host, files saved to the filesystem. +author: Brian Coca (@bcoca) +options: + _uri: + required: true description: - - This cache uses YAML formatted, per host, files saved to the filesystem. 
- author: Brian Coca (@bcoca) - options: - _uri: - required: True - description: - - Path in which the cache plugin will save the files - env: - - name: ANSIBLE_CACHE_PLUGIN_CONNECTION - ini: - - key: fact_caching_connection - section: defaults - _prefix: - description: User defined prefix to use when creating the files - env: - - name: ANSIBLE_CACHE_PLUGIN_PREFIX - ini: - - key: fact_caching_prefix - section: defaults - _timeout: - default: 86400 - description: Expiration timeout in seconds for the cache plugin data. Set to 0 to never expire - env: - - name: ANSIBLE_CACHE_PLUGIN_TIMEOUT - ini: - - key: fact_caching_timeout - section: defaults - type: integer -''' + - Path in which the cache plugin saves the files. + env: + - name: ANSIBLE_CACHE_PLUGIN_CONNECTION + ini: + - key: fact_caching_connection + section: defaults + type: string + _prefix: + description: User defined prefix to use when creating the files. + env: + - name: ANSIBLE_CACHE_PLUGIN_PREFIX + ini: + - key: fact_caching_prefix + section: defaults + type: string + _timeout: + default: 86400 + description: Expiration timeout in seconds for the cache plugin data. Set to 0 to never expire. 
+ env: + - name: ANSIBLE_CACHE_PLUGIN_TIMEOUT + ini: + - key: fact_caching_timeout + section: defaults + type: integer + # TODO: determine whether it is OK to change to: type: float +""" - -import codecs +import os import yaml @@ -57,9 +58,9 @@ class CacheModule(BaseFileCacheModule): """ def _load(self, filepath): - with codecs.open(filepath, 'r', encoding='utf-8') as f: + with open(os.path.abspath(filepath), 'r', encoding='utf-8') as f: return AnsibleLoader(f).get_single_data() def _dump(self, value, filepath): - with codecs.open(filepath, 'w', encoding='utf-8') as f: + with open(os.path.abspath(filepath), 'w', encoding='utf-8') as f: yaml.dump(value, f, Dumper=AnsibleDumper, default_flow_style=False) diff --git a/plugins/callback/cgroup_memory_recap.py b/plugins/callback/cgroup_memory_recap.py index 0334bee664..294ee4b378 100644 --- a/plugins/callback/cgroup_memory_recap.py +++ b/plugins/callback/cgroup_memory_recap.py @@ -1,43 +1,45 @@ -# -*- coding: utf-8 -*- -# (c) 2018 Matt Martz -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# Copyright (c) 2018 Matt Martz +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later # Make coding more python3-ish -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = ''' - author: Unknown (!UNKNOWN) - name: cgroup_memory_recap - type: aggregate - requirements: - - whitelist in configuration - - cgroups - short_description: Profiles maximum memory usage of tasks and full execution using cgroups - description: - - This is an ansible callback plugin that profiles maximum memory usage of ansible and individual tasks, and displays a recap at the end using cgroups - notes: - - Requires ansible to be run from within a cgroup, such as with C(cgexec -g memory:ansible_profile ansible-playbook ...) 
- - This cgroup should only be used by ansible to get accurate results - - To create the cgroup, first use a command such as C(sudo cgcreate -a ec2-user:ec2-user -t ec2-user:ec2-user -g memory:ansible_profile) - options: - max_mem_file: - required: True - description: Path to cgroups C(memory.max_usage_in_bytes) file. Example C(/sys/fs/cgroup/memory/ansible_profile/memory.max_usage_in_bytes) - env: - - name: CGROUP_MAX_MEM_FILE - ini: - - section: callback_cgroupmemrecap - key: max_mem_file - cur_mem_file: - required: True - description: Path to C(memory.usage_in_bytes) file. Example C(/sys/fs/cgroup/memory/ansible_profile/memory.usage_in_bytes) - env: - - name: CGROUP_CUR_MEM_FILE - ini: - - section: callback_cgroupmemrecap - key: cur_mem_file -''' +DOCUMENTATION = r""" +author: Unknown (!UNKNOWN) +name: cgroup_memory_recap +type: aggregate +requirements: + - whitelist in configuration + - cgroups +short_description: Profiles maximum memory usage of tasks and full execution using cgroups +description: + - This is an Ansible callback plugin that profiles maximum memory usage of Ansible and individual tasks, and displays a + recap at the end using cgroups. +notes: + - Requires ansible to be run from within a C(cgroup), such as with C(cgexec -g memory:ansible_profile ansible-playbook ...). + - This C(cgroup) should only be used by Ansible to get accurate results. + - To create the C(cgroup), first use a command such as C(sudo cgcreate -a ec2-user:ec2-user -t ec2-user:ec2-user -g memory:ansible_profile). +options: + max_mem_file: + required: true + description: Path to cgroups C(memory.max_usage_in_bytes) file. Example V(/sys/fs/cgroup/memory/ansible_profile/memory.max_usage_in_bytes). + type: str + env: + - name: CGROUP_MAX_MEM_FILE + ini: + - section: callback_cgroupmemrecap + key: max_mem_file + cur_mem_file: + required: true + description: Path to C(memory.usage_in_bytes) file. Example V(/sys/fs/cgroup/memory/ansible_profile/memory.usage_in_bytes). 
+ type: str + env: + - name: CGROUP_CUR_MEM_FILE + ini: + - section: callback_cgroupmemrecap + key: cur_mem_file +""" import time import threading @@ -111,7 +113,7 @@ class CallbackModule(CallbackBase): max_results = int(f.read().strip()) / 1024 / 1024 self._display.banner('CGROUP MEMORY RECAP') - self._display.display('Execution Maximum: %0.2fMB\n\n' % max_results) + self._display.display(f'Execution Maximum: {max_results:0.2f}MB\n\n') for task, memory in self.task_results: - self._display.display('%s (%s): %0.2fMB' % (task.get_name(), task._uuid, memory)) + self._display.display(f'{task.get_name()} ({task._uuid}): {memory:0.2f}MB') diff --git a/plugins/callback/context_demo.py b/plugins/callback/context_demo.py index c85cc60cda..f390a947a4 100644 --- a/plugins/callback/context_demo.py +++ b/plugins/callback/context_demo.py @@ -1,22 +1,21 @@ -# -*- coding: utf-8 -*- -# (C) 2012, Michael DeHaan, -# (c) 2017 Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# Copyright (C) 2012, Michael DeHaan, +# Copyright (c) 2017 Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = ''' - author: Unknown (!UNKNOWN) - name: context_demo - type: aggregate - short_description: demo callback that adds play/task context - description: - - Displays some play and task context along with normal output - - This is mostly for demo purposes - requirements: - - whitelist in configuration -''' +DOCUMENTATION = r""" +author: Unknown (!UNKNOWN) +name: context_demo +type: aggregate +short_description: Demo callback that adds play/task context +description: + - Displays some play and task context along with normal output. + - This is mostly for demo purposes. 
+requirements: + - whitelist in configuration +""" from ansible.plugins.callback import CallbackBase @@ -37,15 +36,15 @@ class CallbackModule(CallbackBase): self.play = None def v2_on_any(self, *args, **kwargs): - self._display.display("--- play: {0} task: {1} ---".format(getattr(self.play, 'name', None), self.task)) + self._display.display(f"--- play: {getattr(self.play, 'name', None)} task: {self.task} ---") self._display.display(" --- ARGS ") for i, a in enumerate(args): - self._display.display(' %s: %s' % (i, a)) + self._display.display(f' {i}: {a}') self._display.display(" --- KWARGS ") for k in kwargs: - self._display.display(' %s: %s' % (k, kwargs[k])) + self._display.display(f' {k}: {kwargs[k]}') def v2_playbook_on_play_start(self, play): self.play = play diff --git a/plugins/callback/counter_enabled.py b/plugins/callback/counter_enabled.py index 38d71df69e..d5fe334a49 100644 --- a/plugins/callback/counter_enabled.py +++ b/plugins/callback/counter_enabled.py @@ -1,32 +1,30 @@ -# -*- coding: utf-8 -*- -# (c) 2018, Ivan Aragones Muniesa -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# Copyright (c) 2018, Ivan Aragones Muniesa +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later ''' Counter enabled Ansible callback plugin (See DOCUMENTATION for more information) ''' -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = ''' - author: Unknown (!UNKNOWN) - name: counter_enabled - type: stdout - short_description: adds counters to the output items (tasks and hosts/task) - description: - - Use this callback when you need a kind of progress bar on a large environments. - - You will know how many tasks has the playbook to run, and which one is actually running. 
-      - You will know how many hosts may run a task, and which of them is actually running.
-    extends_documentation_fragment:
-      - default_callback
-    requirements:
-      - set as stdout callback in ansible.cfg (stdout_callback = counter_enabled)
-'''
+DOCUMENTATION = r"""
+author: Unknown (!UNKNOWN)
+name: counter_enabled
+type: stdout
+short_description: Adds counters to the output items (tasks and hosts/task)
+description:
+  - Use this callback when you need a kind of progress bar in large environments.
+  - You can see how many tasks the playbook has to run, and which one is actually running.
+  - You can see how many hosts may run a task, and which of them is actually running.
+extends_documentation_fragment:
+  - default_callback
+requirements:
+  - set as stdout callback in C(ansible.cfg) (C(stdout_callback = counter_enabled))
+"""
 
 from ansible import constants as C
 from ansible.plugins.callback import CallbackBase
 from ansible.utils.color import colorize, hostcolor
-from ansible.template import Templar
 from ansible.playbook.task_include import TaskInclude
 
@@ -69,9 +67,9 @@ class CallbackModule(CallbackBase):
     def v2_playbook_on_play_start(self, play):
         name = play.get_name().strip()
         if not name:
-            msg = u"play"
+            msg = "play"
         else:
-            msg = u"PLAY [%s]" % name
+            msg = f"PLAY [{name}]"
 
         self._play = play
 
@@ -91,25 +89,17 @@
 
         for host in hosts:
             stat = stats.summarize(host)
-            self._display.display(u"%s : %s %s %s %s %s %s" % (
-                hostcolor(host, stat),
-                colorize(u'ok', stat['ok'], C.COLOR_OK),
-                colorize(u'changed', stat['changed'], C.COLOR_CHANGED),
-                colorize(u'unreachable', stat['unreachable'], C.COLOR_UNREACHABLE),
-                colorize(u'failed', stat['failures'], C.COLOR_ERROR),
-                colorize(u'rescued', stat['rescued'], C.COLOR_OK),
-                colorize(u'ignored', stat['ignored'], C.COLOR_WARN)),
+            self._display.display(
+                f"{hostcolor(host, stat)} : {colorize('ok', stat['ok'], C.COLOR_OK)} {colorize('changed', stat['changed'], C.COLOR_CHANGED)} " +
f"{colorize('unreachable', stat['unreachable'], C.COLOR_UNREACHABLE)} {colorize('failed', stat['failures'], C.COLOR_ERROR)} " + f"{colorize('rescued', stat['rescued'], C.COLOR_OK)} {colorize('ignored', stat['ignored'], C.COLOR_WARN)}", screen_only=True ) - self._display.display(u"%s : %s %s %s %s %s %s" % ( - hostcolor(host, stat, False), - colorize(u'ok', stat['ok'], None), - colorize(u'changed', stat['changed'], None), - colorize(u'unreachable', stat['unreachable'], None), - colorize(u'failed', stat['failures'], None), - colorize(u'rescued', stat['rescued'], None), - colorize(u'ignored', stat['ignored'], None)), + self._display.display( + f"{hostcolor(host, stat, False)} : {colorize('ok', stat['ok'], None)} {colorize('changed', stat['changed'], None)} " + f"{colorize('unreachable', stat['unreachable'], None)} {colorize('failed', stat['failures'], None)} " + f"{colorize('rescued', stat['rescued'], None)} {colorize('ignored', stat['ignored'], None)}", log_only=True ) @@ -124,12 +114,14 @@ class CallbackModule(CallbackBase): for k in sorted(stats.custom.keys()): if k == '_run': continue - self._display.display('\t%s: %s' % (k, self._dump_results(stats.custom[k], indent=1).replace('\n', ''))) + _custom_stats = self._dump_results(stats.custom[k], indent=1).replace('\n', '') + self._display.display(f'\t{k}: {_custom_stats}') # print per run custom stats if '_run' in stats.custom: self._display.display("", screen_only=True) - self._display.display('\tRUN: %s' % self._dump_results(stats.custom['_run'], indent=1).replace('\n', '')) + _custom_stats_run = self._dump_results(stats.custom['_run'], indent=1).replace('\n', '') + self._display.display(f'\tRUN: {_custom_stats_run}') self._display.display("", screen_only=True) def v2_playbook_on_task_start(self, task, is_conditional): @@ -143,13 +135,13 @@ class CallbackModule(CallbackBase): # that they can secure this if they feel that their stdout is insecure # (shoulder surfing, logging stdout straight to a file, etc). 
         if not task.no_log and C.DISPLAY_ARGS_TO_STDOUT:
-            args = ', '.join(('%s=%s' % a for a in task.args.items()))
-            args = ' %s' % args
-            self._display.banner("TASK %d/%d [%s%s]" % (self._task_counter, self._task_total, task.get_name().strip(), args))
+            args = ', '.join((f'{k}={v}' for k, v in task.args.items()))
+            args = f' {args}'
+            self._display.banner(f"TASK {self._task_counter}/{self._task_total} [{task.get_name().strip()}{args}]")
         if self._display.verbosity >= 2:
             path = task.get_path()
             if path:
-                self._display.display("task path: %s" % path, color=C.COLOR_DEBUG)
+                self._display.display(f"task path: {path}", color=C.COLOR_DEBUG)
 
         self._host_counter = self._previous_batch_total
         self._task_counter += 1
@@ -166,15 +158,15 @@
             return
         elif result._result.get('changed', False):
             if delegated_vars:
-                msg = "changed: %d/%d [%s -> %s]" % (self._host_counter, self._host_total, result._host.get_name(), delegated_vars['ansible_host'])
+                msg = f"changed: {self._host_counter}/{self._host_total} [{result._host.get_name()} -> {delegated_vars['ansible_host']}]"
             else:
-                msg = "changed: %d/%d [%s]" % (self._host_counter, self._host_total, result._host.get_name())
+                msg = f"changed: {self._host_counter}/{self._host_total} [{result._host.get_name()}]"
             color = C.COLOR_CHANGED
         else:
             if delegated_vars:
-                msg = "ok: %d/%d [%s -> %s]" % (self._host_counter, self._host_total, result._host.get_name(), delegated_vars['ansible_host'])
+                msg = f"ok: {self._host_counter}/{self._host_total} [{result._host.get_name()} -> {delegated_vars['ansible_host']}]"
             else:
-                msg = "ok: %d/%d [%s]" % (self._host_counter, self._host_total, result._host.get_name())
+                msg = f"ok: {self._host_counter}/{self._host_total} [{result._host.get_name()}]"
             color = C.COLOR_OK
 
         self._handle_warnings(result._result)
@@ -185,7 +177,7 @@
         self._clean_results(result._result, result._task.action)
 
         if self._run_is_verbose(result):
-            msg += " => %s" %
(self._dump_results(result._result),) + msg += f" => {self._dump_results(result._result)}" self._display.display(msg, color=color) def v2_runner_on_failed(self, result, ignore_errors=False): @@ -206,14 +198,16 @@ class CallbackModule(CallbackBase): else: if delegated_vars: - self._display.display("fatal: %d/%d [%s -> %s]: FAILED! => %s" % (self._host_counter, self._host_total, - result._host.get_name(), delegated_vars['ansible_host'], - self._dump_results(result._result)), - color=C.COLOR_ERROR) + self._display.display( + f"fatal: {self._host_counter}/{self._host_total} [{result._host.get_name()} -> " + f"{delegated_vars['ansible_host']}]: FAILED! => {self._dump_results(result._result)}", + color=C.COLOR_ERROR + ) else: - self._display.display("fatal: %d/%d [%s]: FAILED! => %s" % (self._host_counter, self._host_total, - result._host.get_name(), self._dump_results(result._result)), - color=C.COLOR_ERROR) + self._display.display( + f"fatal: {self._host_counter}/{self._host_total} [{result._host.get_name()}]: FAILED! => {self._dump_results(result._result)}", + color=C.COLOR_ERROR + ) if ignore_errors: self._display.display("...ignoring", color=C.COLOR_SKIP) @@ -231,9 +225,9 @@ class CallbackModule(CallbackBase): if result._task.loop and 'results' in result._result: self._process_items(result) else: - msg = "skipping: %d/%d [%s]" % (self._host_counter, self._host_total, result._host.get_name()) + msg = f"skipping: {self._host_counter}/{self._host_total} [{result._host.get_name()}]" if self._run_is_verbose(result): - msg += " => %s" % self._dump_results(result._result) + msg += f" => {self._dump_results(result._result)}" self._display.display(msg, color=C.COLOR_SKIP) def v2_runner_on_unreachable(self, result): @@ -244,11 +238,13 @@ class CallbackModule(CallbackBase): delegated_vars = result._result.get('_ansible_delegated_vars', None) if delegated_vars: - self._display.display("fatal: %d/%d [%s -> %s]: UNREACHABLE! 
=> %s" % (self._host_counter, self._host_total, - result._host.get_name(), delegated_vars['ansible_host'], - self._dump_results(result._result)), - color=C.COLOR_UNREACHABLE) + self._display.display( + f"fatal: {self._host_counter}/{self._host_total} [{result._host.get_name()} -> " + f"{delegated_vars['ansible_host']}]: UNREACHABLE! => {self._dump_results(result._result)}", + color=C.COLOR_UNREACHABLE + ) else: - self._display.display("fatal: %d/%d [%s]: UNREACHABLE! => %s" % (self._host_counter, self._host_total, - result._host.get_name(), self._dump_results(result._result)), - color=C.COLOR_UNREACHABLE) + self._display.display( + f"fatal: {self._host_counter}/{self._host_total} [{result._host.get_name()}]: UNREACHABLE! => {self._dump_results(result._result)}", + color=C.COLOR_UNREACHABLE + ) diff --git a/plugins/callback/default_without_diff.py b/plugins/callback/default_without_diff.py new file mode 100644 index 0000000000..b0315829b5 --- /dev/null +++ b/plugins/callback/default_without_diff.py @@ -0,0 +1,43 @@ + +# Copyright (c) 2024, Felix Fontein +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + +DOCUMENTATION = r""" +name: default_without_diff +type: stdout +short_description: The default ansible callback without diff output +version_added: 8.4.0 +description: + - This is basically the default ansible callback plugin (P(ansible.builtin.default#callback)) without showing diff output. + This can be useful when using another callback which sends more detailed information to another service, like the L(ARA, + https://ara.recordsansible.org/) callback, and you want diff output sent to that plugin but not shown on the console output. 
+author: Felix Fontein (@felixfontein) +extends_documentation_fragment: + - ansible.builtin.default_callback + - ansible.builtin.result_format_callback +""" + +EXAMPLES = r""" +# Enable callback in ansible.cfg: +ansible_config: | + [defaults] + stdout_callback = community.general.default_without_diff + +# Enable callback with environment variables: +environment_variable: |- + ANSIBLE_STDOUT_CALLBACK=community.general.default_without_diff +""" + +from ansible.plugins.callback.default import CallbackModule as Default + + +class CallbackModule(Default): + CALLBACK_VERSION = 2.0 + CALLBACK_TYPE = 'stdout' + CALLBACK_NAME = 'community.general.default_without_diff' + + def v2_on_file_diff(self, result): + pass diff --git a/plugins/callback/dense.py b/plugins/callback/dense.py index af8464631c..de50d97ce1 100644 --- a/plugins/callback/dense.py +++ b/plugins/callback/dense.py @@ -1,24 +1,23 @@ -# -*- coding: utf-8 -*- -# (c) 2016, Dag Wieers -# (c) 2017 Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# Copyright (c) 2016, Dag Wieers +# Copyright (c) 2017 Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = ''' +DOCUMENTATION = r""" name: dense type: stdout -short_description: minimal stdout output +short_description: Minimal stdout output extends_documentation_fragment: -- default_callback + - default_callback description: -- When in verbose mode it will act the same as the default callback + - When in verbose mode it acts the same as the default callback. 
author: -- Dag Wieers (@dagwieers) + - Dag Wieers (@dagwieers) requirements: -- set as stdout in configuration -''' + - set as stdout in configuration +""" HAS_OD = False try: @@ -27,8 +26,7 @@ try: except ImportError: pass -from ansible.module_utils.six import binary_type, text_type -from ansible.module_utils.common._collections_compat import MutableMapping, MutableSequence +from collections.abc import MutableMapping, MutableSequence from ansible.plugins.callback.default import CallbackModule as CallbackModule_default from ansible.utils.color import colorize, hostcolor from ansible.utils.display import Display @@ -194,7 +192,7 @@ class CallbackModule(CallbackModule_default): self.disabled = True def __del__(self): - sys.stdout.write(vt100.restore + vt100.reset + '\n' + vt100.save + vt100.clearline) + sys.stdout.write(f"{vt100.restore}{vt100.reset}\n{vt100.save}{vt100.clearline}") def _add_host(self, result, status): name = result._host.get_name() @@ -232,17 +230,17 @@ class CallbackModule(CallbackModule_default): # Remove non-essential attributes for attr in self.removed_attributes: if attr in result: - del(result[attr]) + del result[attr] # Remove empty attributes (list, dict, str) for attr in result.copy(): - if isinstance(result[attr], (MutableSequence, MutableMapping, binary_type, text_type)): + if isinstance(result[attr], (MutableSequence, MutableMapping, bytes, str)): if not result[attr]: - del(result[attr]) + del result[attr] def _handle_exceptions(self, result): if 'exception' in result: - # Remove the exception from the result so it's not shown every time + # Remove the exception from the result so it is not shown every time del result['exception'] if self._display.verbosity == 1: @@ -251,7 +249,7 @@ class CallbackModule(CallbackModule_default): def _display_progress(self, result=None): # Always rewrite the complete line sys.stdout.write(vt100.restore + vt100.reset + vt100.clearline + vt100.nolinewrap + vt100.underline) - sys.stdout.write('%s %d:' % 
(self.type, self.count[self.type])) + sys.stdout.write(f'{self.type} {self.count[self.type]}:') sys.stdout.write(vt100.reset) sys.stdout.flush() @@ -259,22 +257,18 @@ class CallbackModule(CallbackModule_default): for name in self.hosts: sys.stdout.write(' ') if self.hosts[name].get('delegate', None): - sys.stdout.write(self.hosts[name]['delegate'] + '>') + sys.stdout.write(f"{self.hosts[name]['delegate']}>") sys.stdout.write(colors[self.hosts[name]['state']] + name + vt100.reset) sys.stdout.flush() -# if result._result.get('diff', False): -# sys.stdout.write('\n' + vt100.linewrap) sys.stdout.write(vt100.linewrap) -# self.keep = True - def _display_task_banner(self): if not self.shown_title: self.shown_title = True sys.stdout.write(vt100.restore + vt100.reset + vt100.clearline + vt100.underline) - sys.stdout.write('%s %d: %s' % (self.type, self.count[self.type], self.task.get_name().strip())) - sys.stdout.write(vt100.restore + vt100.reset + '\n' + vt100.save + vt100.clearline) + sys.stdout.write(f'{self.type} {self.count[self.type]}: {self.task.get_name().strip()}') + sys.stdout.write(f"{vt100.restore}{vt100.reset}\n{vt100.save}{vt100.clearline}") sys.stdout.flush() else: sys.stdout.write(vt100.restore + vt100.reset + vt100.clearline) @@ -283,7 +277,7 @@ class CallbackModule(CallbackModule_default): def _display_results(self, result, status): # Leave the previous task on screen (as it has changes/errors) if self._display.verbosity == 0 and self.keep: - sys.stdout.write(vt100.restore + vt100.reset + '\n' + vt100.save + vt100.clearline) + sys.stdout.write(f"{vt100.restore}{vt100.reset}\n{vt100.save}{vt100.clearline}") else: sys.stdout.write(vt100.restore + vt100.reset + vt100.clearline) self.keep = False @@ -308,16 +302,16 @@ class CallbackModule(CallbackModule_default): if result._task.loop and 'results' in result._result: self._process_items(result) else: - sys.stdout.write(colors[status] + status + ': ') + sys.stdout.write(f"{colors[status] + status}: ") 
delegated_vars = result._result.get('_ansible_delegated_vars', None) if delegated_vars: - sys.stdout.write(vt100.reset + result._host.get_name() + '>' + colors[status] + delegated_vars['ansible_host']) + sys.stdout.write(f"{vt100.reset}{result._host.get_name()}>{colors[status]}{delegated_vars['ansible_host']}") else: sys.stdout.write(result._host.get_name()) - sys.stdout.write(': ' + dump + '\n') - sys.stdout.write(vt100.reset + vt100.save + vt100.clearline) + sys.stdout.write(f": {dump}\n") + sys.stdout.write(f"{vt100.reset}{vt100.save}{vt100.clearline}") sys.stdout.flush() if status == 'changed': @@ -326,7 +320,7 @@ class CallbackModule(CallbackModule_default): def v2_playbook_on_play_start(self, play): # Leave the previous task on screen (as it has changes/errors) if self._display.verbosity == 0 and self.keep: - sys.stdout.write(vt100.restore + vt100.reset + '\n' + vt100.save + vt100.clearline + vt100.bold) + sys.stdout.write(f"{vt100.restore}{vt100.reset}\n{vt100.save}{vt100.clearline}{vt100.bold}") else: sys.stdout.write(vt100.restore + vt100.reset + vt100.clearline + vt100.bold) @@ -340,14 +334,14 @@ class CallbackModule(CallbackModule_default): name = play.get_name().strip() if not name: name = 'unnamed' - sys.stdout.write('PLAY %d: %s' % (self.count['play'], name.upper())) - sys.stdout.write(vt100.restore + vt100.reset + '\n' + vt100.save + vt100.clearline) + sys.stdout.write(f"PLAY {self.count['play']}: {name.upper()}") + sys.stdout.write(f"{vt100.restore}{vt100.reset}\n{vt100.save}{vt100.clearline}") sys.stdout.flush() def v2_playbook_on_task_start(self, task, is_conditional): # Leave the previous task on screen (as it has changes/errors) if self._display.verbosity == 0 and self.keep: - sys.stdout.write(vt100.restore + vt100.reset + '\n' + vt100.save + vt100.clearline + vt100.underline) + sys.stdout.write(f"{vt100.restore}{vt100.reset}\n{vt100.save}{vt100.clearline}{vt100.underline}") else: # Do not clear line, since we want to retain the previous output 
sys.stdout.write(vt100.restore + vt100.reset + vt100.underline) @@ -364,14 +358,14 @@ class CallbackModule(CallbackModule_default): self.count['task'] += 1 # Write the next task on screen (behind the prompt is the previous output) - sys.stdout.write('%s %d.' % (self.type, self.count[self.type])) + sys.stdout.write(f'{self.type} {self.count[self.type]}.') sys.stdout.write(vt100.reset) sys.stdout.flush() def v2_playbook_on_handler_task_start(self, task): # Leave the previous task on screen (as it has changes/errors) if self._display.verbosity == 0 and self.keep: - sys.stdout.write(vt100.restore + vt100.reset + '\n' + vt100.save + vt100.clearline + vt100.underline) + sys.stdout.write(f"{vt100.restore}{vt100.reset}\n{vt100.save}{vt100.clearline}{vt100.underline}") else: sys.stdout.write(vt100.restore + vt100.reset + vt100.clearline + vt100.underline) @@ -387,7 +381,7 @@ class CallbackModule(CallbackModule_default): self.count[self.type] += 1 # Write the next task on screen (behind the prompt is the previous output) - sys.stdout.write('%s %d.' 
% (self.type, self.count[self.type])) + sys.stdout.write(f'{self.type} {self.count[self.type]}.') sys.stdout.write(vt100.reset) sys.stdout.flush() @@ -450,13 +444,13 @@ class CallbackModule(CallbackModule_default): def v2_playbook_on_no_hosts_remaining(self): if self._display.verbosity == 0 and self.keep: - sys.stdout.write(vt100.restore + vt100.reset + '\n' + vt100.save + vt100.clearline) + sys.stdout.write(f"{vt100.restore}{vt100.reset}\n{vt100.save}{vt100.clearline}") else: sys.stdout.write(vt100.restore + vt100.reset + vt100.clearline) self.keep = False - sys.stdout.write(vt100.white + vt100.redbg + 'NO MORE HOSTS LEFT') - sys.stdout.write(vt100.restore + vt100.reset + '\n' + vt100.save + vt100.clearline) + sys.stdout.write(f"{vt100.white + vt100.redbg}NO MORE HOSTS LEFT") + sys.stdout.write(f"{vt100.restore}{vt100.reset}\n{vt100.save}{vt100.clearline}") sys.stdout.flush() def v2_playbook_on_include(self, included_file): @@ -464,7 +458,7 @@ class CallbackModule(CallbackModule_default): def v2_playbook_on_stats(self, stats): if self._display.verbosity == 0 and self.keep: - sys.stdout.write(vt100.restore + vt100.reset + '\n' + vt100.save + vt100.clearline) + sys.stdout.write(f"{vt100.restore}{vt100.reset}\n{vt100.save}{vt100.clearline}") else: sys.stdout.write(vt100.restore + vt100.reset + vt100.clearline) @@ -475,22 +469,16 @@ class CallbackModule(CallbackModule_default): sys.stdout.write(vt100.bold + vt100.underline) sys.stdout.write('SUMMARY') - sys.stdout.write(vt100.restore + vt100.reset + '\n' + vt100.save + vt100.clearline) + sys.stdout.write(f"{vt100.restore}{vt100.reset}\n{vt100.save}{vt100.clearline}") sys.stdout.flush() hosts = sorted(stats.processed.keys()) for h in hosts: t = stats.summarize(h) self._display.display( - u"%s : %s %s %s %s %s %s" % ( - hostcolor(h, t), - colorize(u'ok', t['ok'], C.COLOR_OK), - colorize(u'changed', t['changed'], C.COLOR_CHANGED), - colorize(u'unreachable', t['unreachable'], C.COLOR_UNREACHABLE), - colorize(u'failed', 
t['failures'], C.COLOR_ERROR), - colorize(u'rescued', t['rescued'], C.COLOR_OK), - colorize(u'ignored', t['ignored'], C.COLOR_WARN), - ), + f"{hostcolor(h, t)} : {colorize('ok', t['ok'], C.COLOR_OK)} {colorize('changed', t['changed'], C.COLOR_CHANGED)} " + f"{colorize('unreachable', t['unreachable'], C.COLOR_UNREACHABLE)} {colorize('failed', t['failures'], C.COLOR_ERROR)} " + f"{colorize('rescued', t['rescued'], C.COLOR_OK)} {colorize('ignored', t['ignored'], C.COLOR_WARN)}", screen_only=True ) diff --git a/plugins/callback/diy.py b/plugins/callback/diy.py index b288ee4b97..c94fe25093 100644 --- a/plugins/callback/diy.py +++ b/plugins/callback/diy.py @@ -1,607 +1,601 @@ -# -*- coding: utf-8 -*- -# Copyright: (c) 2019, Trevor Highfill -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# Copyright (c) 2019, Trevor Highfill +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = r''' - name: diy - type: stdout - short_description: Customize the output - version_added: 0.2.0 - description: - - Callback plugin that allows you to supply your own custom callback templates to be output. - author: Trevor Highfill (@theque5t) - extends_documentation_fragment: - - default_callback - notes: - - Uses the C(default) callback plugin output when a custom callback message(C(msg)) is not provided. - - Makes the callback event data available via the C(ansible_callback_diy) dictionary, which can be used in the templating context for the options. - The dictionary is only available in the templating context for the options. It is not a variable that is available via the other - various execution contexts, such as playbook, play, task etc. 
- - Options being set by their respective variable input can only be set using the variable if the variable was set in a context that is available to the - respective callback. - Use the C(ansible_callback_diy) dictionary to see what is available to a callback. Additionally, C(ansible_callback_diy.top_level_var_names) will output - the top level variable names available to the callback. - - Each option value is rendered as a template before being evaluated. This allows for the dynamic usage of an option. For example, - C("{{ 'yellow' if ansible_callback_diy.result.is_changed else 'bright green' }}") - - "**Condition** for all C(msg) options: - if value C(is None or omit), - then the option is not being used. - **Effect**: use of the C(default) callback plugin for output" - - "**Condition** for all C(msg) options: - if value C(is not None and not omit and length is not greater than 0), - then the option is being used without output. - **Effect**: suppress output" - - "**Condition** for all C(msg) options: - if value C(is not None and not omit and length is greater than 0), - then the option is being used with output. - **Effect**: render value as template and output" - - "Valid color values: C(black), C(bright gray), C(blue), C(white), C(green), C(bright blue), C(cyan), C(bright green), C(red), C(bright cyan), - C(purple), C(bright red), C(yellow), C(bright purple), C(dark gray), C(bright yellow), C(magenta), C(bright magenta), C(normal)" - seealso: - - name: default – default Ansible screen output - description: The official documentation on the B(default) callback plugin. - link: https://docs.ansible.com/ansible/latest/plugins/callback/default.html - requirements: - - set as stdout_callback in configuration - options: - on_any_msg: - description: Output to be used for callback on_any. 
- ini: - - section: callback_diy - key: on_any_msg - env: - - name: ANSIBLE_CALLBACK_DIY_ON_ANY_MSG - vars: - - name: ansible_callback_diy_on_any_msg - type: str +DOCUMENTATION = r""" +name: diy +type: stdout +short_description: Customize the output +version_added: 0.2.0 +description: + - Callback plugin that allows you to supply your own custom callback templates to be output. +author: Trevor Highfill (@theque5t) +extends_documentation_fragment: + - default_callback +notes: + - Uses the P(ansible.builtin.default#callback) callback plugin output when a custom callback V(message(msg\)) is not provided. + - Makes the callback event data available using the C(ansible_callback_diy) dictionary, which can be used in the templating + context for the options. The dictionary is only available in the templating context for the options. It is not a variable + that is available using the other various execution contexts, such as playbook, play, task, and so on so forth. + - Options being set by their respective variable input can only be set using the variable if the variable was set in a context + that is available to the respective callback. Use the C(ansible_callback_diy) dictionary to see what is available to a + callback. Additionally, C(ansible_callback_diy.top_level_var_names) outputs the top level variable names available + to the callback. + - Each option value is rendered as a template before being evaluated. This allows for the dynamic usage of an option. For + example, V("{{ 'yellow' if ansible_callback_diy.result.is_changed else 'bright green' }}"). + - 'B(Condition) for all C(msg) options: if value V(is None or omit), then the option is not being used. B(Effect): use of + the C(default) callback plugin for output.' + - 'B(Condition) for all C(msg) options: if value V(is not None and not omit and length is not greater than 0), then the + option is being used without output. B(Effect): suppress output.' 
+ - 'B(Condition) for all C(msg) options: if value V(is not None and not omit and length is greater than 0), then the option + is being used with output. B(Effect): render value as template and output.' + - 'Valid color values: V(black), V(bright gray), V(blue), V(white), V(green), V(bright blue), V(cyan), V(bright green), + V(red), V(bright cyan), V(purple), V(bright red), V(yellow), V(bright purple), V(dark gray), V(bright yellow), V(magenta), + V(bright magenta), V(normal).' +seealso: + - name: default – default Ansible screen output + description: The official documentation on the B(default) callback plugin. + link: https://docs.ansible.com/ansible/latest/plugins/callback/default.html +requirements: + - set as stdout_callback in configuration +options: + on_any_msg: + description: Output to be used for callback on_any. + ini: + - section: callback_diy + key: on_any_msg + env: + - name: ANSIBLE_CALLBACK_DIY_ON_ANY_MSG + vars: + - name: ansible_callback_diy_on_any_msg + type: str - on_any_msg_color: - description: - - Output color to be used for I(on_any_msg). - - Template should render a L(valid color value,#notes). - ini: - - section: callback_diy - key: on_any_msg_color - env: - - name: ANSIBLE_CALLBACK_DIY_ON_ANY_MSG_COLOR - vars: - - name: ansible_callback_diy_on_any_msg_color - type: str + on_any_msg_color: + description: + - Output color to be used for O(on_any_msg). + - Template should render a L(valid color value,#notes). + ini: + - section: callback_diy + key: on_any_msg_color + env: + - name: ANSIBLE_CALLBACK_DIY_ON_ANY_MSG_COLOR + vars: + - name: ansible_callback_diy_on_any_msg_color + type: str - runner_on_failed_msg: - description: Output to be used for callback runner_on_failed. 
- ini: - - section: callback_diy - key: runner_on_failed_msg - env: - - name: ANSIBLE_CALLBACK_DIY_RUNNER_ON_FAILED_MSG - vars: - - name: ansible_callback_diy_runner_on_failed_msg - type: str + runner_on_failed_msg: + description: Output to be used for callback runner_on_failed. + ini: + - section: callback_diy + key: runner_on_failed_msg + env: + - name: ANSIBLE_CALLBACK_DIY_RUNNER_ON_FAILED_MSG + vars: + - name: ansible_callback_diy_runner_on_failed_msg + type: str - runner_on_failed_msg_color: - description: - - Output color to be used for I(runner_on_failed_msg). - - Template should render a L(valid color value,#notes). - ini: - - section: callback_diy - key: runner_on_failed_msg_color - env: - - name: ANSIBLE_CALLBACK_DIY_RUNNER_ON_FAILED_MSG_COLOR - vars: - - name: ansible_callback_diy_runner_on_failed_msg_color - type: str + runner_on_failed_msg_color: + description: + - Output color to be used for O(runner_on_failed_msg). + - Template should render a L(valid color value,#notes). + ini: + - section: callback_diy + key: runner_on_failed_msg_color + env: + - name: ANSIBLE_CALLBACK_DIY_RUNNER_ON_FAILED_MSG_COLOR + vars: + - name: ansible_callback_diy_runner_on_failed_msg_color + type: str - runner_on_ok_msg: - description: Output to be used for callback runner_on_ok. - ini: - - section: callback_diy - key: runner_on_ok_msg - env: - - name: ANSIBLE_CALLBACK_DIY_RUNNER_ON_OK_MSG - vars: - - name: ansible_callback_diy_runner_on_ok_msg - type: str + runner_on_ok_msg: + description: Output to be used for callback runner_on_ok. + ini: + - section: callback_diy + key: runner_on_ok_msg + env: + - name: ANSIBLE_CALLBACK_DIY_RUNNER_ON_OK_MSG + vars: + - name: ansible_callback_diy_runner_on_ok_msg + type: str - runner_on_ok_msg_color: - description: - - Output color to be used for I(runner_on_ok_msg). - - Template should render a L(valid color value,#notes). 
- ini: - - section: callback_diy - key: runner_on_ok_msg_color - env: - - name: ANSIBLE_CALLBACK_DIY_RUNNER_ON_OK_MSG_COLOR - vars: - - name: ansible_callback_diy_runner_on_ok_msg_color - type: str + runner_on_ok_msg_color: + description: + - Output color to be used for O(runner_on_ok_msg). + - Template should render a L(valid color value,#notes). + ini: + - section: callback_diy + key: runner_on_ok_msg_color + env: + - name: ANSIBLE_CALLBACK_DIY_RUNNER_ON_OK_MSG_COLOR + vars: + - name: ansible_callback_diy_runner_on_ok_msg_color + type: str - runner_on_skipped_msg: - description: Output to be used for callback runner_on_skipped. - ini: - - section: callback_diy - key: runner_on_skipped_msg - env: - - name: ANSIBLE_CALLBACK_DIY_RUNNER_ON_SKIPPED_MSG - vars: - - name: ansible_callback_diy_runner_on_skipped_msg - type: str + runner_on_skipped_msg: + description: Output to be used for callback runner_on_skipped. + ini: + - section: callback_diy + key: runner_on_skipped_msg + env: + - name: ANSIBLE_CALLBACK_DIY_RUNNER_ON_SKIPPED_MSG + vars: + - name: ansible_callback_diy_runner_on_skipped_msg + type: str - runner_on_skipped_msg_color: - description: - - Output color to be used for I(runner_on_skipped_msg). - - Template should render a L(valid color value,#notes). - ini: - - section: callback_diy - key: runner_on_skipped_msg_color - env: - - name: ANSIBLE_CALLBACK_DIY_RUNNER_ON_SKIPPED_MSG_COLOR - vars: - - name: ansible_callback_diy_runner_on_skipped_msg_color - type: str + runner_on_skipped_msg_color: + description: + - Output color to be used for O(runner_on_skipped_msg). + - Template should render a L(valid color value,#notes). + ini: + - section: callback_diy + key: runner_on_skipped_msg_color + env: + - name: ANSIBLE_CALLBACK_DIY_RUNNER_ON_SKIPPED_MSG_COLOR + vars: + - name: ansible_callback_diy_runner_on_skipped_msg_color + type: str - runner_on_unreachable_msg: - description: Output to be used for callback runner_on_unreachable. 
- ini: - - section: callback_diy - key: runner_on_unreachable_msg - env: - - name: ANSIBLE_CALLBACK_DIY_RUNNER_ON_UNREACHABLE_MSG - vars: - - name: ansible_callback_diy_runner_on_unreachable_msg - type: str + runner_on_unreachable_msg: + description: Output to be used for callback runner_on_unreachable. + ini: + - section: callback_diy + key: runner_on_unreachable_msg + env: + - name: ANSIBLE_CALLBACK_DIY_RUNNER_ON_UNREACHABLE_MSG + vars: + - name: ansible_callback_diy_runner_on_unreachable_msg + type: str - runner_on_unreachable_msg_color: - description: - - Output color to be used for I(runner_on_unreachable_msg). - - Template should render a L(valid color value,#notes). - ini: - - section: callback_diy - key: runner_on_unreachable_msg_color - env: - - name: ANSIBLE_CALLBACK_DIY_RUNNER_ON_UNREACHABLE_MSG_COLOR - vars: - - name: ansible_callback_diy_runner_on_unreachable_msg_color - type: str + runner_on_unreachable_msg_color: + description: + - Output color to be used for O(runner_on_unreachable_msg). + - Template should render a L(valid color value,#notes). + ini: + - section: callback_diy + key: runner_on_unreachable_msg_color + env: + - name: ANSIBLE_CALLBACK_DIY_RUNNER_ON_UNREACHABLE_MSG_COLOR + vars: + - name: ansible_callback_diy_runner_on_unreachable_msg_color + type: str - playbook_on_start_msg: - description: Output to be used for callback playbook_on_start. - ini: - - section: callback_diy - key: playbook_on_start_msg - env: - - name: ANSIBLE_CALLBACK_DIY_PLAYBOOK_ON_START_MSG - vars: - - name: ansible_callback_diy_playbook_on_start_msg - type: str + playbook_on_start_msg: + description: Output to be used for callback playbook_on_start. + ini: + - section: callback_diy + key: playbook_on_start_msg + env: + - name: ANSIBLE_CALLBACK_DIY_PLAYBOOK_ON_START_MSG + vars: + - name: ansible_callback_diy_playbook_on_start_msg + type: str - playbook_on_start_msg_color: - description: - - Output color to be used for I(playbook_on_start_msg). 
- - Template should render a L(valid color value,#notes). - ini: - - section: callback_diy - key: playbook_on_start_msg_color - env: - - name: ANSIBLE_CALLBACK_DIY_PLAYBOOK_ON_START_MSG_COLOR - vars: - - name: ansible_callback_diy_playbook_on_start_msg_color - type: str + playbook_on_start_msg_color: + description: + - Output color to be used for O(playbook_on_start_msg). + - Template should render a L(valid color value,#notes). + ini: + - section: callback_diy + key: playbook_on_start_msg_color + env: + - name: ANSIBLE_CALLBACK_DIY_PLAYBOOK_ON_START_MSG_COLOR + vars: + - name: ansible_callback_diy_playbook_on_start_msg_color + type: str - playbook_on_notify_msg: - description: Output to be used for callback playbook_on_notify. - ini: - - section: callback_diy - key: playbook_on_notify_msg - env: - - name: ANSIBLE_CALLBACK_DIY_PLAYBOOK_ON_NOTIFY_MSG - vars: - - name: ansible_callback_diy_playbook_on_notify_msg - type: str + playbook_on_notify_msg: + description: Output to be used for callback playbook_on_notify. + ini: + - section: callback_diy + key: playbook_on_notify_msg + env: + - name: ANSIBLE_CALLBACK_DIY_PLAYBOOK_ON_NOTIFY_MSG + vars: + - name: ansible_callback_diy_playbook_on_notify_msg + type: str - playbook_on_notify_msg_color: - description: - - Output color to be used for I(playbook_on_notify_msg). - - Template should render a L(valid color value,#notes). - ini: - - section: callback_diy - key: playbook_on_notify_msg_color - env: - - name: ANSIBLE_CALLBACK_DIY_PLAYBOOK_ON_NOTIFY_MSG_COLOR - vars: - - name: ansible_callback_diy_playbook_on_notify_msg_color - type: str + playbook_on_notify_msg_color: + description: + - Output color to be used for O(playbook_on_notify_msg). + - Template should render a L(valid color value,#notes). 
+ ini: + - section: callback_diy + key: playbook_on_notify_msg_color + env: + - name: ANSIBLE_CALLBACK_DIY_PLAYBOOK_ON_NOTIFY_MSG_COLOR + vars: + - name: ansible_callback_diy_playbook_on_notify_msg_color + type: str - playbook_on_no_hosts_matched_msg: - description: Output to be used for callback playbook_on_no_hosts_matched. - ini: - - section: callback_diy - key: playbook_on_no_hosts_matched_msg - env: - - name: ANSIBLE_CALLBACK_DIY_PLAYBOOK_ON_NO_HOSTS_MATCHED_MSG - vars: - - name: ansible_callback_diy_playbook_on_no_hosts_matched_msg - type: str + playbook_on_no_hosts_matched_msg: + description: Output to be used for callback playbook_on_no_hosts_matched. + ini: + - section: callback_diy + key: playbook_on_no_hosts_matched_msg + env: + - name: ANSIBLE_CALLBACK_DIY_PLAYBOOK_ON_NO_HOSTS_MATCHED_MSG + vars: + - name: ansible_callback_diy_playbook_on_no_hosts_matched_msg + type: str - playbook_on_no_hosts_matched_msg_color: - description: - - Output color to be used for I(playbook_on_no_hosts_matched_msg). - - Template should render a L(valid color value,#notes). - ini: - - section: callback_diy - key: playbook_on_no_hosts_matched_msg_color - env: - - name: ANSIBLE_CALLBACK_DIY_PLAYBOOK_ON_NO_HOSTS_MATCHED_MSG_COLOR - vars: - - name: ansible_callback_diy_playbook_on_no_hosts_matched_msg_color - type: str + playbook_on_no_hosts_matched_msg_color: + description: + - Output color to be used for O(playbook_on_no_hosts_matched_msg). + - Template should render a L(valid color value,#notes). + ini: + - section: callback_diy + key: playbook_on_no_hosts_matched_msg_color + env: + - name: ANSIBLE_CALLBACK_DIY_PLAYBOOK_ON_NO_HOSTS_MATCHED_MSG_COLOR + vars: + - name: ansible_callback_diy_playbook_on_no_hosts_matched_msg_color + type: str - playbook_on_no_hosts_remaining_msg: - description: Output to be used for callback playbook_on_no_hosts_remaining. 
- ini: - - section: callback_diy - key: playbook_on_no_hosts_remaining_msg - env: - - name: ANSIBLE_CALLBACK_DIY_PLAYBOOK_ON_NO_HOSTS_REMAINING_MSG - vars: - - name: ansible_callback_diy_playbook_on_no_hosts_remaining_msg - type: str + playbook_on_no_hosts_remaining_msg: + description: Output to be used for callback playbook_on_no_hosts_remaining. + ini: + - section: callback_diy + key: playbook_on_no_hosts_remaining_msg + env: + - name: ANSIBLE_CALLBACK_DIY_PLAYBOOK_ON_NO_HOSTS_REMAINING_MSG + vars: + - name: ansible_callback_diy_playbook_on_no_hosts_remaining_msg + type: str - playbook_on_no_hosts_remaining_msg_color: - description: - - Output color to be used for I(playbook_on_no_hosts_remaining_msg). - - Template should render a L(valid color value,#notes). - ini: - - section: callback_diy - key: playbook_on_no_hosts_remaining_msg_color - env: - - name: ANSIBLE_CALLBACK_DIY_PLAYBOOK_ON_NO_HOSTS_REMAINING_MSG_COLOR - vars: - - name: ansible_callback_diy_playbook_on_no_hosts_remaining_msg_color - type: str + playbook_on_no_hosts_remaining_msg_color: + description: + - Output color to be used for O(playbook_on_no_hosts_remaining_msg). + - Template should render a L(valid color value,#notes). + ini: + - section: callback_diy + key: playbook_on_no_hosts_remaining_msg_color + env: + - name: ANSIBLE_CALLBACK_DIY_PLAYBOOK_ON_NO_HOSTS_REMAINING_MSG_COLOR + vars: + - name: ansible_callback_diy_playbook_on_no_hosts_remaining_msg_color + type: str - playbook_on_task_start_msg: - description: Output to be used for callback playbook_on_task_start. - ini: - - section: callback_diy - key: playbook_on_task_start_msg - env: - - name: ANSIBLE_CALLBACK_DIY_PLAYBOOK_ON_TASK_START_MSG - vars: - - name: ansible_callback_diy_playbook_on_task_start_msg - type: str + playbook_on_task_start_msg: + description: Output to be used for callback playbook_on_task_start. 
+ ini: + - section: callback_diy + key: playbook_on_task_start_msg + env: + - name: ANSIBLE_CALLBACK_DIY_PLAYBOOK_ON_TASK_START_MSG + vars: + - name: ansible_callback_diy_playbook_on_task_start_msg + type: str - playbook_on_task_start_msg_color: - description: - - Output color to be used for I(playbook_on_task_start_msg). - - Template should render a L(valid color value,#notes). - ini: - - section: callback_diy - key: playbook_on_task_start_msg_color - env: - - name: ANSIBLE_CALLBACK_DIY_PLAYBOOK_ON_TASK_START_MSG_COLOR - vars: - - name: ansible_callback_diy_playbook_on_task_start_msg_color - type: str + playbook_on_task_start_msg_color: + description: + - Output color to be used for O(playbook_on_task_start_msg). + - Template should render a L(valid color value,#notes). + ini: + - section: callback_diy + key: playbook_on_task_start_msg_color + env: + - name: ANSIBLE_CALLBACK_DIY_PLAYBOOK_ON_TASK_START_MSG_COLOR + vars: + - name: ansible_callback_diy_playbook_on_task_start_msg_color + type: str - playbook_on_handler_task_start_msg: - description: Output to be used for callback playbook_on_handler_task_start. - ini: - - section: callback_diy - key: playbook_on_handler_task_start_msg - env: - - name: ANSIBLE_CALLBACK_DIY_PLAYBOOK_ON_HANDLER_TASK_START_MSG - vars: - - name: ansible_callback_diy_playbook_on_handler_task_start_msg - type: str + playbook_on_handler_task_start_msg: + description: Output to be used for callback playbook_on_handler_task_start. + ini: + - section: callback_diy + key: playbook_on_handler_task_start_msg + env: + - name: ANSIBLE_CALLBACK_DIY_PLAYBOOK_ON_HANDLER_TASK_START_MSG + vars: + - name: ansible_callback_diy_playbook_on_handler_task_start_msg + type: str - playbook_on_handler_task_start_msg_color: - description: - - Output color to be used for I(playbook_on_handler_task_start_msg). - - Template should render a L(valid color value,#notes). 
- ini: - - section: callback_diy - key: playbook_on_handler_task_start_msg_color - env: - - name: ANSIBLE_CALLBACK_DIY_PLAYBOOK_ON_HANDLER_TASK_START_MSG_COLOR - vars: - - name: ansible_callback_diy_playbook_on_handler_task_start_msg_color - type: str + playbook_on_handler_task_start_msg_color: + description: + - Output color to be used for O(playbook_on_handler_task_start_msg). + - Template should render a L(valid color value,#notes). + ini: + - section: callback_diy + key: playbook_on_handler_task_start_msg_color + env: + - name: ANSIBLE_CALLBACK_DIY_PLAYBOOK_ON_HANDLER_TASK_START_MSG_COLOR + vars: + - name: ansible_callback_diy_playbook_on_handler_task_start_msg_color + type: str - playbook_on_vars_prompt_msg: - description: Output to be used for callback playbook_on_vars_prompt. - ini: - - section: callback_diy - key: playbook_on_vars_prompt_msg - env: - - name: ANSIBLE_CALLBACK_DIY_PLAYBOOK_ON_VARS_PROMPT_MSG - vars: - - name: ansible_callback_diy_playbook_on_vars_prompt_msg - type: str + playbook_on_vars_prompt_msg: + description: Output to be used for callback playbook_on_vars_prompt. + ini: + - section: callback_diy + key: playbook_on_vars_prompt_msg + env: + - name: ANSIBLE_CALLBACK_DIY_PLAYBOOK_ON_VARS_PROMPT_MSG + vars: + - name: ansible_callback_diy_playbook_on_vars_prompt_msg + type: str - playbook_on_vars_prompt_msg_color: - description: - - Output color to be used for I(playbook_on_vars_prompt_msg). - - Template should render a L(valid color value,#notes). - ini: - - section: callback_diy - key: playbook_on_vars_prompt_msg_color - env: - - name: ANSIBLE_CALLBACK_DIY_PLAYBOOK_ON_VARS_PROMPT_MSG_COLOR - vars: - - name: ansible_callback_diy_playbook_on_vars_prompt_msg_color - type: str + playbook_on_vars_prompt_msg_color: + description: + - Output color to be used for O(playbook_on_vars_prompt_msg). + - Template should render a L(valid color value,#notes). 
+ ini: + - section: callback_diy + key: playbook_on_vars_prompt_msg_color + env: + - name: ANSIBLE_CALLBACK_DIY_PLAYBOOK_ON_VARS_PROMPT_MSG_COLOR + vars: + - name: ansible_callback_diy_playbook_on_vars_prompt_msg_color + type: str - playbook_on_play_start_msg: - description: Output to be used for callback playbook_on_play_start. - ini: - - section: callback_diy - key: playbook_on_play_start_msg - env: - - name: ANSIBLE_CALLBACK_DIY_PLAYBOOK_ON_PLAY_START_MSG - vars: - - name: ansible_callback_diy_playbook_on_play_start_msg - type: str + playbook_on_play_start_msg: + description: Output to be used for callback playbook_on_play_start. + ini: + - section: callback_diy + key: playbook_on_play_start_msg + env: + - name: ANSIBLE_CALLBACK_DIY_PLAYBOOK_ON_PLAY_START_MSG + vars: + - name: ansible_callback_diy_playbook_on_play_start_msg + type: str - playbook_on_play_start_msg_color: - description: - - Output color to be used for I(playbook_on_play_start_msg). - - Template should render a L(valid color value,#notes). - ini: - - section: callback_diy - key: playbook_on_play_start_msg_color - env: - - name: ANSIBLE_CALLBACK_DIY_PLAYBOOK_ON_PLAY_START_MSG_COLOR - vars: - - name: ansible_callback_diy_playbook_on_play_start_msg_color - type: str + playbook_on_play_start_msg_color: + description: + - Output color to be used for O(playbook_on_play_start_msg). + - Template should render a L(valid color value,#notes). + ini: + - section: callback_diy + key: playbook_on_play_start_msg_color + env: + - name: ANSIBLE_CALLBACK_DIY_PLAYBOOK_ON_PLAY_START_MSG_COLOR + vars: + - name: ansible_callback_diy_playbook_on_play_start_msg_color + type: str - playbook_on_stats_msg: - description: Output to be used for callback playbook_on_stats. 
- ini: - - section: callback_diy - key: playbook_on_stats_msg - env: - - name: ANSIBLE_CALLBACK_DIY_PLAYBOOK_ON_STATS_MSG - vars: - - name: ansible_callback_diy_playbook_on_stats_msg - type: str + playbook_on_stats_msg: + description: Output to be used for callback playbook_on_stats. + ini: + - section: callback_diy + key: playbook_on_stats_msg + env: + - name: ANSIBLE_CALLBACK_DIY_PLAYBOOK_ON_STATS_MSG + vars: + - name: ansible_callback_diy_playbook_on_stats_msg + type: str - playbook_on_stats_msg_color: - description: - - Output color to be used for I(playbook_on_stats_msg). - - Template should render a L(valid color value,#notes). - ini: - - section: callback_diy - key: playbook_on_stats_msg_color - env: - - name: ANSIBLE_CALLBACK_DIY_PLAYBOOK_ON_STATS_MSG_COLOR - vars: - - name: ansible_callback_diy_playbook_on_stats_msg_color - type: str + playbook_on_stats_msg_color: + description: + - Output color to be used for O(playbook_on_stats_msg). + - Template should render a L(valid color value,#notes). + ini: + - section: callback_diy + key: playbook_on_stats_msg_color + env: + - name: ANSIBLE_CALLBACK_DIY_PLAYBOOK_ON_STATS_MSG_COLOR + vars: + - name: ansible_callback_diy_playbook_on_stats_msg_color + type: str - on_file_diff_msg: - description: Output to be used for callback on_file_diff. - ini: - - section: callback_diy - key: on_file_diff_msg - env: - - name: ANSIBLE_CALLBACK_DIY_ON_FILE_DIFF_MSG - vars: - - name: ansible_callback_diy_on_file_diff_msg - type: str + on_file_diff_msg: + description: Output to be used for callback on_file_diff. + ini: + - section: callback_diy + key: on_file_diff_msg + env: + - name: ANSIBLE_CALLBACK_DIY_ON_FILE_DIFF_MSG + vars: + - name: ansible_callback_diy_on_file_diff_msg + type: str - on_file_diff_msg_color: - description: - - Output color to be used for I(on_file_diff_msg). - - Template should render a L(valid color value,#notes). 
- ini: - - section: callback_diy - key: on_file_diff_msg_color - env: - - name: ANSIBLE_CALLBACK_DIY_ON_FILE_DIFF_MSG_COLOR - vars: - - name: ansible_callback_diy_on_file_diff_msg_color - type: str + on_file_diff_msg_color: + description: + - Output color to be used for O(on_file_diff_msg). + - Template should render a L(valid color value,#notes). + ini: + - section: callback_diy + key: on_file_diff_msg_color + env: + - name: ANSIBLE_CALLBACK_DIY_ON_FILE_DIFF_MSG_COLOR + vars: + - name: ansible_callback_diy_on_file_diff_msg_color + type: str - playbook_on_include_msg: - description: Output to be used for callback playbook_on_include. - ini: - - section: callback_diy - key: playbook_on_include_msg - env: - - name: ANSIBLE_CALLBACK_DIY_PLAYBOOK_ON_INCLUDE_MSG - vars: - - name: ansible_callback_diy_playbook_on_include_msg - type: str + playbook_on_include_msg: + description: Output to be used for callback playbook_on_include. + ini: + - section: callback_diy + key: playbook_on_include_msg + env: + - name: ANSIBLE_CALLBACK_DIY_PLAYBOOK_ON_INCLUDE_MSG + vars: + - name: ansible_callback_diy_playbook_on_include_msg + type: str - playbook_on_include_msg_color: - description: - - Output color to be used for I(playbook_on_include_msg). - - Template should render a L(valid color value,#notes). - ini: - - section: callback_diy - key: playbook_on_include_msg_color - env: - - name: ANSIBLE_CALLBACK_DIY_PLAYBOOK_ON_INCLUDE_MSG_COLOR - vars: - - name: ansible_callback_diy_playbook_on_include_msg_color - type: str + playbook_on_include_msg_color: + description: + - Output color to be used for O(playbook_on_include_msg). + - Template should render a L(valid color value,#notes). + ini: + - section: callback_diy + key: playbook_on_include_msg_color + env: + - name: ANSIBLE_CALLBACK_DIY_PLAYBOOK_ON_INCLUDE_MSG_COLOR + vars: + - name: ansible_callback_diy_playbook_on_include_msg_color + type: str - runner_item_on_ok_msg: - description: Output to be used for callback runner_item_on_ok. 
- ini: - - section: callback_diy - key: runner_item_on_ok_msg - env: - - name: ANSIBLE_CALLBACK_DIY_RUNNER_ITEM_ON_OK_MSG - vars: - - name: ansible_callback_diy_runner_item_on_ok_msg - type: str + runner_item_on_ok_msg: + description: Output to be used for callback runner_item_on_ok. + ini: + - section: callback_diy + key: runner_item_on_ok_msg + env: + - name: ANSIBLE_CALLBACK_DIY_RUNNER_ITEM_ON_OK_MSG + vars: + - name: ansible_callback_diy_runner_item_on_ok_msg + type: str - runner_item_on_ok_msg_color: - description: - - Output color to be used for I(runner_item_on_ok_msg). - - Template should render a L(valid color value,#notes). - ini: - - section: callback_diy - key: runner_item_on_ok_msg_color - env: - - name: ANSIBLE_CALLBACK_DIY_RUNNER_ITEM_ON_OK_MSG_COLOR - vars: - - name: ansible_callback_diy_runner_item_on_ok_msg_color - type: str + runner_item_on_ok_msg_color: + description: + - Output color to be used for O(runner_item_on_ok_msg). + - Template should render a L(valid color value,#notes). + ini: + - section: callback_diy + key: runner_item_on_ok_msg_color + env: + - name: ANSIBLE_CALLBACK_DIY_RUNNER_ITEM_ON_OK_MSG_COLOR + vars: + - name: ansible_callback_diy_runner_item_on_ok_msg_color + type: str - runner_item_on_failed_msg: - description: Output to be used for callback runner_item_on_failed. - ini: - - section: callback_diy - key: runner_item_on_failed_msg - env: - - name: ANSIBLE_CALLBACK_DIY_RUNNER_ITEM_ON_FAILED_MSG - vars: - - name: ansible_callback_diy_runner_item_on_failed_msg - type: str + runner_item_on_failed_msg: + description: Output to be used for callback runner_item_on_failed. + ini: + - section: callback_diy + key: runner_item_on_failed_msg + env: + - name: ANSIBLE_CALLBACK_DIY_RUNNER_ITEM_ON_FAILED_MSG + vars: + - name: ansible_callback_diy_runner_item_on_failed_msg + type: str - runner_item_on_failed_msg_color: - description: - - Output color to be used for I(runner_item_on_failed_msg). 
- - Template should render a L(valid color value,#notes). - ini: - - section: callback_diy - key: runner_item_on_failed_msg_color - env: - - name: ANSIBLE_CALLBACK_DIY_RUNNER_ITEM_ON_FAILED_MSG_COLOR - vars: - - name: ansible_callback_diy_runner_item_on_failed_msg_color - type: str + runner_item_on_failed_msg_color: + description: + - Output color to be used for O(runner_item_on_failed_msg). + - Template should render a L(valid color value,#notes). + ini: + - section: callback_diy + key: runner_item_on_failed_msg_color + env: + - name: ANSIBLE_CALLBACK_DIY_RUNNER_ITEM_ON_FAILED_MSG_COLOR + vars: + - name: ansible_callback_diy_runner_item_on_failed_msg_color + type: str - runner_item_on_skipped_msg: - description: Output to be used for callback runner_item_on_skipped. - ini: - - section: callback_diy - key: runner_item_on_skipped_msg - env: - - name: ANSIBLE_CALLBACK_DIY_RUNNER_ITEM_ON_SKIPPED_MSG - vars: - - name: ansible_callback_diy_runner_item_on_skipped_msg - type: str + runner_item_on_skipped_msg: + description: Output to be used for callback runner_item_on_skipped. + ini: + - section: callback_diy + key: runner_item_on_skipped_msg + env: + - name: ANSIBLE_CALLBACK_DIY_RUNNER_ITEM_ON_SKIPPED_MSG + vars: + - name: ansible_callback_diy_runner_item_on_skipped_msg + type: str - runner_item_on_skipped_msg_color: - description: - - Output color to be used for I(runner_item_on_skipped_msg). - - Template should render a L(valid color value,#notes). - ini: - - section: callback_diy - key: runner_item_on_skipped_msg_color - env: - - name: ANSIBLE_CALLBACK_DIY_RUNNER_ITEM_ON_SKIPPED_MSG_COLOR - vars: - - name: ansible_callback_diy_runner_item_on_skipped_msg_color - type: str + runner_item_on_skipped_msg_color: + description: + - Output color to be used for O(runner_item_on_skipped_msg). + - Template should render a L(valid color value,#notes). 
+ ini: + - section: callback_diy + key: runner_item_on_skipped_msg_color + env: + - name: ANSIBLE_CALLBACK_DIY_RUNNER_ITEM_ON_SKIPPED_MSG_COLOR + vars: + - name: ansible_callback_diy_runner_item_on_skipped_msg_color + type: str - runner_retry_msg: - description: Output to be used for callback runner_retry. - ini: - - section: callback_diy - key: runner_retry_msg - env: - - name: ANSIBLE_CALLBACK_DIY_RUNNER_RETRY_MSG - vars: - - name: ansible_callback_diy_runner_retry_msg - type: str + runner_retry_msg: + description: Output to be used for callback runner_retry. + ini: + - section: callback_diy + key: runner_retry_msg + env: + - name: ANSIBLE_CALLBACK_DIY_RUNNER_RETRY_MSG + vars: + - name: ansible_callback_diy_runner_retry_msg + type: str - runner_retry_msg_color: - description: - - Output color to be used for I(runner_retry_msg). - - Template should render a L(valid color value,#notes). - ini: - - section: callback_diy - key: runner_retry_msg_color - env: - - name: ANSIBLE_CALLBACK_DIY_RUNNER_RETRY_MSG_COLOR - vars: - - name: ansible_callback_diy_runner_retry_msg_color - type: str + runner_retry_msg_color: + description: + - Output color to be used for O(runner_retry_msg). + - Template should render a L(valid color value,#notes). + ini: + - section: callback_diy + key: runner_retry_msg_color + env: + - name: ANSIBLE_CALLBACK_DIY_RUNNER_RETRY_MSG_COLOR + vars: + - name: ansible_callback_diy_runner_retry_msg_color + type: str - runner_on_start_msg: - description: Output to be used for callback runner_on_start. - ini: - - section: callback_diy - key: runner_on_start_msg - env: - - name: ANSIBLE_CALLBACK_DIY_RUNNER_ON_START_MSG - vars: - - name: ansible_callback_diy_runner_on_start_msg - type: str + runner_on_start_msg: + description: Output to be used for callback runner_on_start. 
+ ini: + - section: callback_diy + key: runner_on_start_msg + env: + - name: ANSIBLE_CALLBACK_DIY_RUNNER_ON_START_MSG + vars: + - name: ansible_callback_diy_runner_on_start_msg + type: str - runner_on_start_msg_color: - description: - - Output color to be used for I(runner_on_start_msg). - - Template should render a L(valid color value,#notes). - ini: - - section: callback_diy - key: runner_on_start_msg_color - env: - - name: ANSIBLE_CALLBACK_DIY_RUNNER_ON_START_MSG_COLOR - vars: - - name: ansible_callback_diy_runner_on_start_msg_color - type: str + runner_on_start_msg_color: + description: + - Output color to be used for O(runner_on_start_msg). + - Template should render a L(valid color value,#notes). + ini: + - section: callback_diy + key: runner_on_start_msg_color + env: + - name: ANSIBLE_CALLBACK_DIY_RUNNER_ON_START_MSG_COLOR + vars: + - name: ansible_callback_diy_runner_on_start_msg_color + type: str - runner_on_no_hosts_msg: - description: Output to be used for callback runner_on_no_hosts. - ini: - - section: callback_diy - key: runner_on_no_hosts_msg - env: - - name: ANSIBLE_CALLBACK_DIY_RUNNER_ON_NO_HOSTS_MSG - vars: - - name: ansible_callback_diy_runner_on_no_hosts_msg - type: str + runner_on_no_hosts_msg: + description: Output to be used for callback runner_on_no_hosts. + ini: + - section: callback_diy + key: runner_on_no_hosts_msg + env: + - name: ANSIBLE_CALLBACK_DIY_RUNNER_ON_NO_HOSTS_MSG + vars: + - name: ansible_callback_diy_runner_on_no_hosts_msg + type: str - runner_on_no_hosts_msg_color: - description: - - Output color to be used for I(runner_on_no_hosts_msg). - - Template should render a L(valid color value,#notes). - ini: - - section: callback_diy - key: runner_on_no_hosts_msg_color - env: - - name: ANSIBLE_CALLBACK_DIY_RUNNER_ON_NO_HOSTS_MSG_COLOR - vars: - - name: ansible_callback_diy_runner_on_no_hosts_msg_color - type: str + runner_on_no_hosts_msg_color: + description: + - Output color to be used for O(runner_on_no_hosts_msg). 
+ - Template should render a L(valid color value,#notes). + ini: + - section: callback_diy + key: runner_on_no_hosts_msg_color + env: + - name: ANSIBLE_CALLBACK_DIY_RUNNER_ON_NO_HOSTS_MSG_COLOR + vars: + - name: ansible_callback_diy_runner_on_no_hosts_msg_color + type: str - playbook_on_setup_msg: - description: Output to be used for callback playbook_on_setup. - ini: - - section: callback_diy - key: playbook_on_setup_msg - env: - - name: ANSIBLE_CALLBACK_DIY_PLAYBOOK_ON_SETUP_MSG - vars: - - name: ansible_callback_diy_playbook_on_setup_msg - type: str + playbook_on_setup_msg: + description: Output to be used for callback playbook_on_setup. + ini: + - section: callback_diy + key: playbook_on_setup_msg + env: + - name: ANSIBLE_CALLBACK_DIY_PLAYBOOK_ON_SETUP_MSG + vars: + - name: ansible_callback_diy_playbook_on_setup_msg + type: str - playbook_on_setup_msg_color: - description: - - Output color to be used for I(playbook_on_setup_msg). - - Template should render a L(valid color value,#notes). - ini: - - section: callback_diy - key: playbook_on_setup_msg_color - env: - - name: ANSIBLE_CALLBACK_DIY_PLAYBOOK_ON_SETUP_MSG_COLOR - vars: - - name: ansible_callback_diy_playbook_on_setup_msg_color - type: str -''' + playbook_on_setup_msg_color: + description: + - Output color to be used for O(playbook_on_setup_msg). + - Template should render a L(valid color value,#notes). 
+ ini: + - section: callback_diy + key: playbook_on_setup_msg_color + env: + - name: ANSIBLE_CALLBACK_DIY_PLAYBOOK_ON_SETUP_MSG_COLOR + vars: + - name: ansible_callback_diy_playbook_on_setup_msg_color + type: str +""" -EXAMPLES = r''' +EXAMPLES = r""" ansible.cfg: > # Enable plugin [defaults] @@ -622,11 +616,11 @@ ansible.cfg: > # Newline after every callback # on_any_msg='{{ " " | join("\n") }}' -playbook.yml: > +playbook.yml: >- --- - name: "Default plugin output: play example" hosts: localhost - gather_facts: no + gather_facts: false tasks: - name: Default plugin output ansible.builtin.debug: @@ -634,7 +628,7 @@ playbook.yml: > - name: Override from play vars hosts: localhost - gather_facts: no + gather_facts: false vars: ansible_connection: local green: "\e[0m\e[38;5;82m" @@ -712,7 +706,7 @@ playbook.yml: > - name: Using alias vars (see ansible.cfg) ansible.builtin.debug: msg: - when: False + when: false vars: ansible_callback_diy_playbook_on_task_start_msg: "" on_skipped_msg: "DIY output(via task vars): skipped example:\n\e[0m\e[38;5;4m\u25b6\u25b6 {{ ansible_callback_diy.result.task.name }}\n" @@ -781,19 +775,21 @@ playbook.yml: > {{ white }}{{ ansible_callback_diy[key] }} {% endfor %} -''' +""" import sys from contextlib import contextmanager -from ansible import constants as C -from ansible.playbook.task_include import TaskInclude -from ansible.plugins.callback import CallbackBase -from ansible.utils.color import colorize, hostcolor from ansible.template import Templar from ansible.vars.manager import VariableManager from ansible.plugins.callback.default import CallbackModule as Default from ansible.module_utils.common.text.converters import to_text +try: + from ansible.template import trust_as_template # noqa: F401, pylint: disable=unused-import + SUPPORTS_DATA_TAGGING = True +except ImportError: + SUPPORTS_DATA_TAGGING = False + class DummyStdout(object): def flush(self): @@ -831,9 +827,9 @@ class CallbackModule(Default): _callback_options = ['msg', 
'msg_color'] for option in _callback_options: - _option_name = '%s_%s' % (_callback_type, option) + _option_name = f'{_callback_type}_{option}' _option_template = variables.get( - self.DIY_NS + "_" + _option_name, + f"{self.DIY_NS}_{_option_name}", self.get_option(_option_name) ) _ret.update({option: self._template( @@ -847,7 +843,10 @@ class CallbackModule(Default): return _ret def _using_diy(self, spec): - return (spec['msg'] is not None) and (spec['msg'] != spec['vars']['omit']) + sentinel = object() + omit = spec['vars'].get('omit', sentinel) + # With Data Tagging, omit is sentinel + return (spec['msg'] is not None) and (spec['msg'] != omit or omit is sentinel) def _parent_has_callback(self): return hasattr(super(CallbackModule, self), sys._getframe(1).f_code.co_name) @@ -870,7 +869,7 @@ class CallbackModule(Default): handler=None, result=None, stats=None, remove_attr_ref_loop=True): def _get_value(obj, attr=None, method=None): if attr: - return getattr(obj, attr, getattr(obj, "_" + attr, None)) + return getattr(obj, attr, getattr(obj, f"_{attr}", None)) if method: _method = getattr(obj, method) @@ -903,7 +902,7 @@ class CallbackModule(Default): ) _ret.update(_all) - _ret.update(_ret.get(self.DIY_NS, {self.DIY_NS: CallbackDIYDict()})) + _ret.update(_ret.get(self.DIY_NS, {self.DIY_NS: {} if SUPPORTS_DATA_TAGGING else CallbackDIYDict()})) _ret[self.DIY_NS].update({'playbook': {}}) _playbook_attributes = ['entries', 'file_name', 'basedir'] diff --git a/plugins/callback/elastic.py b/plugins/callback/elastic.py index 095c0993ca..82478b9e7d 100644 --- a/plugins/callback/elastic.py +++ b/plugins/callback/elastic.py @@ -1,72 +1,72 @@ -# (C) 2021, Victor Martinez -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# Copyright (c) 2021, Victor Martinez +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ 
import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = ''' - author: Victor Martinez (@v1v) - name: elastic - type: notification - short_description: Create distributed traces for each Ansible task in Elastic APM - version_added: 3.8.0 +DOCUMENTATION = r""" +author: Victor Martinez (@v1v) +name: elastic +type: notification +short_description: Create distributed traces for each Ansible task in Elastic APM +version_added: 3.8.0 +description: + - This callback creates distributed traces for each Ansible task in Elastic APM. + - You can configure the plugin with environment variables. + - See U(https://www.elastic.co/guide/en/apm/agent/python/current/configuration.html). +options: + hide_task_arguments: + default: false + type: bool description: - - This callback creates distributed traces for each Ansible task in Elastic APM. - - You can configure the plugin with environment variables. - - See U(https://www.elastic.co/guide/en/apm/agent/python/current/configuration.html). - options: - hide_task_arguments: - default: false - type: bool - description: - - Hide the arguments for a task. - env: - - name: ANSIBLE_OPENTELEMETRY_HIDE_TASK_ARGUMENTS - apm_service_name: - default: ansible - type: str - description: - - The service name resource attribute. - env: - - name: ELASTIC_APM_SERVICE_NAME - apm_server_url: - type: str - description: - - Use the APM server and its environment variables. - env: - - name: ELASTIC_APM_SERVER_URL - apm_secret_token: - type: str - description: - - Use the APM server token - env: - - name: ELASTIC_APM_SECRET_TOKEN - apm_api_key: - type: str - description: - - Use the APM API key - env: - - name: ELASTIC_APM_API_KEY - apm_verify_server_cert: - default: true - type: bool - description: - - Verifies the SSL certificate if an HTTPS connection. 
- env: - - name: ELASTIC_APM_VERIFY_SERVER_CERT - traceparent: - type: str - description: - - The L(W3C Trace Context header traceparent,https://www.w3.org/TR/trace-context-1/#traceparent-header). - env: - - name: TRACEPARENT - requirements: - - elastic-apm (Python library) -''' + - Hide the arguments for a task. + env: + - name: ANSIBLE_OPENTELEMETRY_HIDE_TASK_ARGUMENTS + apm_service_name: + default: ansible + type: str + description: + - The service name resource attribute. + env: + - name: ELASTIC_APM_SERVICE_NAME + apm_server_url: + type: str + description: + - Use the APM server and its environment variables. + env: + - name: ELASTIC_APM_SERVER_URL + apm_secret_token: + type: str + description: + - Use the APM server token. + env: + - name: ELASTIC_APM_SECRET_TOKEN + apm_api_key: + type: str + description: + - Use the APM API key. + env: + - name: ELASTIC_APM_API_KEY + apm_verify_server_cert: + default: true + type: bool + description: + - Verifies the SSL certificate if an HTTPS connection. + env: + - name: ELASTIC_APM_VERIFY_SERVER_CERT + traceparent: + type: str + description: + - The L(W3C Trace Context header traceparent,https://www.w3.org/TR/trace-context-1/#traceparent-header). 
+ env: + - name: TRACEPARENT +requirements: + - elastic-apm (Python library) +""" -EXAMPLES = ''' -examples: | +EXAMPLES = r""" +examples: |- Enable the plugin in ansible.cfg: [defaults] callbacks_enabled = community.general.elastic @@ -75,7 +75,7 @@ examples: | export ELASTIC_APM_SERVER_URL= export ELASTIC_APM_SERVICE_NAME=your_service_name export ELASTIC_APM_API_KEY=your_APM_API_KEY -''' +""" import getpass import socket @@ -83,10 +83,11 @@ import time import uuid from collections import OrderedDict +from contextlib import closing from os.path import basename from ansible.errors import AnsibleError, AnsibleRuntimeError -from ansible.module_utils.six import raise_from +from ansible.module_utils.ansible_release import __version__ as ansible_version from ansible.plugins.callback import CallbackBase try: @@ -116,7 +117,7 @@ class TaskData: if host.uuid in self.host_data: if host.status == 'included': # concatenate task include output from multiple items - host.result = '%s\n%s' % (self.host_data[host.uuid].result, host.result) + host.result = f'{self.host_data[host.uuid].result}\n{host.result}' else: return @@ -139,7 +140,6 @@ class HostData: class ElasticSource(object): def __init__(self, display): self.ansible_playbook = "" - self.ansible_version = None self.session = str(uuid.uuid4()) self.host = socket.gethostname() try: @@ -164,7 +164,7 @@ class ElasticSource(object): args = None if not task.no_log and not hide_task_arguments: - args = ', '.join(('%s=%s' % a for a in task.args.items())) + args = ', '.join((f'{k}={v}' for k, v in task.args.items())) tasks_data[uuid] = TaskData(uuid, name, path, play_name, action, args) @@ -182,9 +182,6 @@ class ElasticSource(object): task = tasks_data[task_uuid] - if self.ansible_version is None and result._task_fields['args'].get('_ansible_version'): - self.ansible_version = result._task_fields['args'].get('_ansible_version') - task.add_host(HostData(host_uuid, host_name, status, result)) def generate_distributed_traces(self, 
tasks_data, status, end_time, traceparent, apm_service_name, @@ -200,29 +197,29 @@ class ElasticSource(object): apm_cli = self.init_apm_client(apm_server_url, apm_service_name, apm_verify_server_cert, apm_secret_token, apm_api_key) if apm_cli: - instrument() # Only call this once, as early as possible. - if traceparent: - parent = trace_parent_from_string(traceparent) - apm_cli.begin_transaction("Session", trace_parent=parent, start=parent_start_time) - else: - apm_cli.begin_transaction("Session", start=parent_start_time) - # Populate trace metadata attributes - if self.ansible_version is not None: - label(ansible_version=self.ansible_version) - label(ansible_session=self.session, ansible_host_name=self.host, ansible_host_user=self.user) - if self.ip_address is not None: - label(ansible_host_ip=self.ip_address) + with closing(apm_cli): + instrument() # Only call this once, as early as possible. + if traceparent: + parent = trace_parent_from_string(traceparent) + apm_cli.begin_transaction("Session", trace_parent=parent, start=parent_start_time) + else: + apm_cli.begin_transaction("Session", start=parent_start_time) + # Populate trace metadata attributes + label(ansible_version=ansible_version) + label(ansible_session=self.session, ansible_host_name=self.host, ansible_host_user=self.user) + if self.ip_address is not None: + label(ansible_host_ip=self.ip_address) - for task_data in tasks: - for host_uuid, host_data in task_data.host_data.items(): - self.create_span_data(apm_cli, task_data, host_data) + for task_data in tasks: + for host_uuid, host_data in task_data.host_data.items(): + self.create_span_data(apm_cli, task_data, host_data) - apm_cli.end_transaction(name=__name__, result=status, duration=end_time - parent_start_time) + apm_cli.end_transaction(name=__name__, result=status, duration=end_time - parent_start_time) def create_span_data(self, apm_cli, task_data, host_data): """ create the span with the given TaskData and HostData """ - name = '[%s] %s: %s' % 
(host_data.name, task_data.play, task_data.name) + name = f'[{host_data.name}] {task_data.play}: {task_data.name}' message = "success" status = "success" @@ -256,7 +253,7 @@ class ElasticSource(object): "ansible.task.host.status": host_data.status}) as span: span.outcome = status if 'failure' in status: - exception = AnsibleRuntimeError(message="{0}: {1} failed with error message {2}".format(task_data.action, name, enriched_error_message)) + exception = AnsibleRuntimeError(message=f"{task_data.action}: {name} failed with error message {enriched_error_message}") apm_cli.capture_exception(exc_info=(type(exception), exception, exception.__traceback__), handled=True) def init_apm_client(self, apm_server_url, apm_service_name, apm_verify_server_cert, apm_secret_token, apm_api_key): @@ -285,7 +282,7 @@ class ElasticSource(object): message = result.get('msg', 'failed') exception = result.get('exception') stderr = result.get('stderr') - return ('message: "{0}"\nexception: "{1}"\nstderr: "{2}"').format(message, exception, stderr) + return f"message: \"{message}\"\nexception: \"{exception}\"\nstderr: \"{stderr}\"" class CallbackModule(CallbackBase): @@ -310,9 +307,7 @@ class CallbackModule(CallbackBase): self.disabled = False if ELASTIC_LIBRARY_IMPORT_ERROR: - raise_from( - AnsibleError('The `elastic-apm` must be installed to use this plugin'), - ELASTIC_LIBRARY_IMPORT_ERROR) + raise AnsibleError('The `elastic-apm` must be installed to use this plugin') from ELASTIC_LIBRARY_IMPORT_ERROR self.tasks_data = OrderedDict() diff --git a/plugins/callback/hipchat.py b/plugins/callback/hipchat.py deleted file mode 100644 index c64b892d9b..0000000000 --- a/plugins/callback/hipchat.py +++ /dev/null @@ -1,228 +0,0 @@ -# -*- coding: utf-8 -*- -# (C) 2014, Matt Martz -# (c) 2017 Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - 
-DOCUMENTATION = ''' - author: Unknown (!UNKNOWN) - name: hipchat - type: notification - requirements: - - whitelist in configuration. - - prettytable (python lib) - short_description: post task events to hipchat - description: - - This callback plugin sends status updates to a HipChat channel during playbook execution. - - Before 2.4 only environment variables were available for configuring this plugin. - options: - token: - description: HipChat API token for v1 or v2 API. - required: True - env: - - name: HIPCHAT_TOKEN - ini: - - section: callback_hipchat - key: token - api_version: - description: HipChat API version, v1 or v2. - required: False - default: v1 - env: - - name: HIPCHAT_API_VERSION - ini: - - section: callback_hipchat - key: api_version - room: - description: HipChat room to post in. - default: ansible - env: - - name: HIPCHAT_ROOM - ini: - - section: callback_hipchat - key: room - from: - description: Name to post as - default: ansible - env: - - name: HIPCHAT_FROM - ini: - - section: callback_hipchat - key: from - notify: - description: Add notify flag to important messages - type: bool - default: True - env: - - name: HIPCHAT_NOTIFY - ini: - - section: callback_hipchat - key: notify - -''' - -import os -import json - -try: - import prettytable - HAS_PRETTYTABLE = True -except ImportError: - HAS_PRETTYTABLE = False - -from ansible.plugins.callback import CallbackBase -from ansible.module_utils.six.moves.urllib.parse import urlencode -from ansible.module_utils.urls import open_url - - -class CallbackModule(CallbackBase): - """This is an example ansible callback plugin that sends status - updates to a HipChat channel during playbook execution. 
- """ - - CALLBACK_VERSION = 2.0 - CALLBACK_TYPE = 'notification' - CALLBACK_NAME = 'community.general.hipchat' - CALLBACK_NEEDS_WHITELIST = True - - API_V1_URL = 'https://api.hipchat.com/v1/rooms/message' - API_V2_URL = 'https://api.hipchat.com/v2/' - - def __init__(self): - - super(CallbackModule, self).__init__() - - if not HAS_PRETTYTABLE: - self.disabled = True - self._display.warning('The `prettytable` python module is not installed. ' - 'Disabling the HipChat callback plugin.') - self.printed_playbook = False - self.playbook_name = None - self.play = None - - def set_options(self, task_keys=None, var_options=None, direct=None): - super(CallbackModule, self).set_options(task_keys=task_keys, var_options=var_options, direct=direct) - - self.token = self.get_option('token') - self.api_version = self.get_option('api_version') - self.from_name = self.get_option('from') - self.allow_notify = self.get_option('notify') - self.room = self.get_option('room') - - if self.token is None: - self.disabled = True - self._display.warning('HipChat token could not be loaded. The HipChat ' - 'token can be provided using the `HIPCHAT_TOKEN` ' - 'environment variable.') - - # Pick the request handler. 
- if self.api_version == 'v2': - self.send_msg = self.send_msg_v2 - else: - self.send_msg = self.send_msg_v1 - - def send_msg_v2(self, msg, msg_format='text', color='yellow', notify=False): - """Method for sending a message to HipChat""" - - headers = {'Authorization': 'Bearer %s' % self.token, 'Content-Type': 'application/json'} - - body = {} - body['room_id'] = self.room - body['from'] = self.from_name[:15] # max length is 15 - body['message'] = msg - body['message_format'] = msg_format - body['color'] = color - body['notify'] = self.allow_notify and notify - - data = json.dumps(body) - url = self.API_V2_URL + "room/{room_id}/notification".format(room_id=self.room) - try: - response = open_url(url, data=data, headers=headers, method='POST') - return response.read() - except Exception as ex: - self._display.warning('Could not submit message to hipchat: {0}'.format(ex)) - - def send_msg_v1(self, msg, msg_format='text', color='yellow', notify=False): - """Method for sending a message to HipChat""" - - params = {} - params['room_id'] = self.room - params['from'] = self.from_name[:15] # max length is 15 - params['message'] = msg - params['message_format'] = msg_format - params['color'] = color - params['notify'] = int(self.allow_notify and notify) - - url = ('%s?auth_token=%s' % (self.API_V1_URL, self.token)) - try: - response = open_url(url, data=urlencode(params)) - return response.read() - except Exception as ex: - self._display.warning('Could not submit message to hipchat: {0}'.format(ex)) - - def v2_playbook_on_play_start(self, play): - """Display Playbook and play start messages""" - - self.play = play - name = play.name - # This block sends information about a playbook when it starts - # The playbook object is not immediately available at - # playbook_on_start so we grab it via the play - # - # Displays info about playbook being started by a person on an - # inventory, as well as Tags, Skip Tags and Limits - if not self.printed_playbook: - self.playbook_name, 
dummy = os.path.splitext(os.path.basename(self.play.playbook.filename)) - host_list = self.play.playbook.inventory.host_list - inventory = os.path.basename(os.path.realpath(host_list)) - self.send_msg("%s: Playbook initiated by %s against %s" % - (self.playbook_name, - self.play.playbook.remote_user, - inventory), notify=True) - self.printed_playbook = True - subset = self.play.playbook.inventory._subset - skip_tags = self.play.playbook.skip_tags - self.send_msg("%s:\nTags: %s\nSkip Tags: %s\nLimit: %s" % - (self.playbook_name, - ', '.join(self.play.playbook.only_tags), - ', '.join(skip_tags) if skip_tags else None, - ', '.join(subset) if subset else subset)) - - # This is where we actually say we are starting a play - self.send_msg("%s: Starting play: %s" % - (self.playbook_name, name)) - - def playbook_on_stats(self, stats): - """Display info about playbook statistics""" - hosts = sorted(stats.processed.keys()) - - t = prettytable.PrettyTable(['Host', 'Ok', 'Changed', 'Unreachable', - 'Failures']) - - failures = False - unreachable = False - - for h in hosts: - s = stats.summarize(h) - - if s['failures'] > 0: - failures = True - if s['unreachable'] > 0: - unreachable = True - - t.add_row([h] + [s[k] for k in ['ok', 'changed', 'unreachable', - 'failures']]) - - self.send_msg("%s: Playbook complete" % self.playbook_name, - notify=True) - - if failures or unreachable: - color = 'red' - self.send_msg("%s: Failures detected" % self.playbook_name, - color=color, notify=True) - else: - color = 'green' - - self.send_msg("/code %s:\n%s" % (self.playbook_name, t), color=color) diff --git a/plugins/callback/jabber.py b/plugins/callback/jabber.py index b535fa9540..319611d460 100644 --- a/plugins/callback/jabber.py +++ b/plugins/callback/jabber.py @@ -1,43 +1,46 @@ -# -*- coding: utf-8 -*- # Copyright (C) 2016 maxn nikolaev.makc@gmail.com # Copyright (c) 2017 Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# GNU 
General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = ''' - author: Unknown (!UNKNOWN) - name: jabber - type: notification - short_description: post task events to a jabber server - description: - - The chatty part of ChatOps with a Hipchat server as a target - - This callback plugin sends status updates to a HipChat channel during playbook execution. - requirements: - - xmpp (python lib https://github.com/ArchipelProject/xmpppy) - options: - server: - description: connection info to jabber server - required: True - env: - - name: JABBER_SERV - user: - description: Jabber user to authenticate as - required: True - env: - - name: JABBER_USER - password: - description: Password for the user to the jabber server - required: True - env: - - name: JABBER_PASS - to: - description: chat identifier that will receive the message - required: True - env: - - name: JABBER_TO -''' +DOCUMENTATION = r""" +author: Unknown (!UNKNOWN) +name: jabber +type: notification +short_description: Post task events to a Jabber server +description: + - The chatty part of ChatOps with a Hipchat server as a target. + - This callback plugin sends status updates to a HipChat channel during playbook execution. +requirements: + - xmpp (Python library U(https://github.com/ArchipelProject/xmpppy)) +options: + server: + description: Connection info to Jabber server. + type: str + required: true + env: + - name: JABBER_SERV + user: + description: Jabber user to authenticate as. + type: str + required: true + env: + - name: JABBER_USER + password: + description: Password for the user to the Jabber server. + type: str + required: true + env: + - name: JABBER_PASS + to: + description: Chat identifier that receives the message. 
+ type: str + required: true + env: + - name: JABBER_TO +""" import os @@ -97,7 +100,7 @@ class CallbackModule(CallbackBase): """Display Playbook and play start messages""" self.play = play name = play.name - self.send_msg("Ansible starting play: %s" % (name)) + self.send_msg(f"Ansible starting play: {name}") def playbook_on_stats(self, stats): name = self.play @@ -113,7 +116,7 @@ class CallbackModule(CallbackBase): if failures or unreachable: out = self.debug - self.send_msg("%s: Failures detected \n%s \nHost: %s\n Failed at:\n%s" % (name, self.task, h, out)) + self.send_msg(f"{name}: Failures detected \n{self.task} \nHost: {h}\n Failed at:\n{out}") else: out = self.debug - self.send_msg("Great! \n Playbook %s completed:\n%s \n Last task debug:\n %s" % (name, s, out)) + self.send_msg(f"Great! \n Playbook {name} completed:\n{s} \n Last task debug:\n {out}") diff --git a/plugins/callback/log_plays.py b/plugins/callback/log_plays.py index 2539bd9ade..89ec8cbff3 100644 --- a/plugins/callback/log_plays.py +++ b/plugins/callback/log_plays.py @@ -1,31 +1,31 @@ -# -*- coding: utf-8 -*- -# (C) 2012, Michael DeHaan, -# (c) 2017 Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# Copyright (c) 2012, Michael DeHaan, +# Copyright (c) 2017 Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = ''' - author: Unknown (!UNKNOWN) - name: log_plays - type: notification - short_description: write playbook output to log file - description: - - This callback writes playbook output to a file per host in the `/var/log/ansible/hosts` directory - requirements: - - Whitelist in configuration - - A writeable /var/log/ansible/hosts directory by the user executing Ansible 
on the controller - options: - log_folder: - default: /var/log/ansible/hosts - description: The folder where log files will be created. - env: - - name: ANSIBLE_LOG_FOLDER - ini: - - section: callback_log_plays - key: log_folder -''' +DOCUMENTATION = r""" +author: Unknown (!UNKNOWN) +name: log_plays +type: notification +short_description: Write playbook output to log file +description: + - This callback writes playbook output to a file per host in the C(/var/log/ansible/hosts) directory. +requirements: + - Whitelist in configuration + - A writeable C(/var/log/ansible/hosts) directory by the user executing Ansible on the controller +options: + log_folder: + default: /var/log/ansible/hosts + description: The folder where log files are created. + type: str + env: + - name: ANSIBLE_LOG_FOLDER + ini: + - section: callback_log_plays + key: log_folder +""" import os import time @@ -33,7 +33,7 @@ import json from ansible.utils.path import makedirs_safe from ansible.module_utils.common.text.converters import to_bytes -from ansible.module_utils.common._collections_compat import MutableMapping +from collections.abc import MutableMapping from ansible.parsing.ajson import AnsibleJSONEncoder from ansible.plugins.callback import CallbackBase @@ -55,7 +55,10 @@ class CallbackModule(CallbackBase): CALLBACK_NEEDS_WHITELIST = True TIME_FORMAT = "%b %d %Y %H:%M:%S" - MSG_FORMAT = "%(now)s - %(playbook)s - %(task_name)s - %(task_action)s - %(category)s - %(data)s\n\n" + + @staticmethod + def _make_msg(now, playbook, task_name, task_action, category, data): + return f"{now} - {playbook} - {task_name} - {task_action} - {category} - {data}\n\n" def __init__(self): @@ -80,22 +83,12 @@ class CallbackModule(CallbackBase): invocation = data.pop('invocation', None) data = json.dumps(data, cls=AnsibleJSONEncoder) if invocation is not None: - data = json.dumps(invocation) + " => %s " % data + data = f"{json.dumps(invocation)} => {data} " path = os.path.join(self.log_folder, 
result._host.get_name()) now = time.strftime(self.TIME_FORMAT, time.localtime()) - msg = to_bytes( - self.MSG_FORMAT - % dict( - now=now, - playbook=self.playbook, - task_name=result._task.name, - task_action=result._task.action, - category=category, - data=data, - ) - ) + msg = to_bytes(self._make_msg(now, self.playbook, result._task.name, result._task.action, category, data)) with open(path, "ab") as fd: fd.write(msg) diff --git a/plugins/callback/loganalytics.py b/plugins/callback/loganalytics.py index 04fc646dc4..05996f2492 100644 --- a/plugins/callback/loganalytics.py +++ b/plugins/callback/loganalytics.py @@ -1,42 +1,44 @@ -# -*- coding: utf-8 -*- -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# Copyright (c) Ansible project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = ''' - name: loganalytics - type: aggregate - short_description: Posts task results to Azure Log Analytics - author: "Cyrus Li (@zhcli) " - description: - - This callback plugin will post task results in JSON formatted to an Azure Log Analytics workspace. - - Credits to authors of splunk callback plugin. - version_added: "2.4.0" - requirements: - - Whitelisting this callback plugin. - - An Azure log analytics work space has been established. - options: - workspace_id: - description: Workspace ID of the Azure log analytics workspace. - required: true - env: - - name: WORKSPACE_ID - ini: - - section: callback_loganalytics - key: workspace_id - shared_key: - description: Shared key to connect to Azure log analytics workspace. 
- required: true - env: - - name: WORKSPACE_SHARED_KEY - ini: - - section: callback_loganalytics - key: shared_key -''' +DOCUMENTATION = r""" +name: loganalytics +type: notification +short_description: Posts task results to Azure Log Analytics +author: "Cyrus Li (@zhcli) " +description: + - This callback plugin posts task results in JSON formatted to an Azure Log Analytics workspace. + - Credits to authors of splunk callback plugin. +version_added: "2.4.0" +requirements: + - Whitelisting this callback plugin. + - An Azure log analytics work space has been established. +options: + workspace_id: + description: Workspace ID of the Azure log analytics workspace. + type: str + required: true + env: + - name: WORKSPACE_ID + ini: + - section: callback_loganalytics + key: workspace_id + shared_key: + description: Shared key to connect to Azure log analytics workspace. + type: str + required: true + env: + - name: WORKSPACE_SHARED_KEY + ini: + - section: callback_loganalytics + key: shared_key +""" -EXAMPLES = ''' -examples: | +EXAMPLES = r""" +examples: |- Whitelist the plugin in ansible.cfg: [defaults] callback_whitelist = community.general.loganalytics @@ -47,30 +49,32 @@ examples: | [callback_loganalytics] workspace_id = 01234567-0123-0123-0123-01234567890a shared_key = dZD0kCbKl3ehZG6LHFMuhtE0yHiFCmetzFMc2u+roXIUQuatqU924SsAAAAPemhjbGlAemhjbGktTUJQAQIDBA== -''' +""" import hashlib import hmac import base64 -import logging import json import uuid import socket import getpass -from datetime import datetime from os.path import basename +from ansible.module_utils.ansible_release import __version__ as ansible_version from ansible.module_utils.urls import open_url from ansible.parsing.ajson import AnsibleJSONEncoder from ansible.plugins.callback import CallbackBase +from ansible_collections.community.general.plugins.module_utils.datetime import ( + now, +) + class AzureLogAnalyticsSource(object): def __init__(self): self.ansible_check_mode = False self.ansible_playbook = "" 
- self.ansible_version = "" self.session = str(uuid.uuid4()) self.host = socket.gethostname() self.user = getpass.getuser() @@ -78,30 +82,25 @@ class AzureLogAnalyticsSource(object): def __build_signature(self, date, workspace_id, shared_key, content_length): # Build authorisation signature for Azure log analytics API call - sigs = "POST\n{0}\napplication/json\nx-ms-date:{1}\n/api/logs".format( - str(content_length), date) + sigs = f"POST\n{content_length}\napplication/json\nx-ms-date:{date}\n/api/logs" utf8_sigs = sigs.encode('utf-8') decoded_shared_key = base64.b64decode(shared_key) hmac_sha256_sigs = hmac.new( decoded_shared_key, utf8_sigs, digestmod=hashlib.sha256).digest() encoded_hash = base64.b64encode(hmac_sha256_sigs).decode('utf-8') - signature = "SharedKey {0}:{1}".format(workspace_id, encoded_hash) + signature = f"SharedKey {workspace_id}:{encoded_hash}" return signature def __build_workspace_url(self, workspace_id): - return "https://{0}.ods.opinsights.azure.com/api/logs?api-version=2016-04-01".format(workspace_id) + return f"https://{workspace_id}.ods.opinsights.azure.com/api/logs?api-version=2016-04-01" def __rfc1123date(self): - return datetime.utcnow().strftime('%a, %d %b %Y %H:%M:%S GMT') + return now().strftime('%a, %d %b %Y %H:%M:%S GMT') def send_event(self, workspace_id, shared_key, state, result, runtime): if result._task_fields['args'].get('_ansible_check_mode') is True: self.ansible_check_mode = True - if result._task_fields['args'].get('_ansible_version'): - self.ansible_version = \ - result._task_fields['args'].get('_ansible_version') - if result._task._role: ansible_role = str(result._task._role) else: @@ -115,7 +114,7 @@ class AzureLogAnalyticsSource(object): data['host'] = self.host data['user'] = self.user data['runtime'] = runtime - data['ansible_version'] = self.ansible_version + data['ansible_version'] = ansible_version data['ansible_check_mode'] = self.ansible_check_mode data['ansible_host'] = result._host.name 
data['ansible_playbook'] = self.ansible_playbook @@ -153,7 +152,7 @@ class AzureLogAnalyticsSource(object): class CallbackModule(CallbackBase): CALLBACK_VERSION = 2.0 - CALLBACK_TYPE = 'aggregate' + CALLBACK_TYPE = 'notification' CALLBACK_NAME = 'loganalytics' CALLBACK_NEEDS_WHITELIST = True @@ -166,7 +165,7 @@ class CallbackModule(CallbackBase): def _seconds_since_start(self, result): return ( - datetime.utcnow() - + now() - self.start_datetimes[result._task._uuid] ).total_seconds() @@ -184,10 +183,10 @@ class CallbackModule(CallbackBase): self.loganalytics.ansible_playbook = basename(playbook._file_name) def v2_playbook_on_task_start(self, task, is_conditional): - self.start_datetimes[task._uuid] = datetime.utcnow() + self.start_datetimes[task._uuid] = now() def v2_playbook_on_handler_task_start(self, task): - self.start_datetimes[task._uuid] = datetime.utcnow() + self.start_datetimes[task._uuid] = now() def v2_runner_on_ok(self, result, **kwargs): self.loganalytics.send_event( diff --git a/plugins/callback/logdna.py b/plugins/callback/logdna.py index 138b612de8..09d8b38dcb 100644 --- a/plugins/callback/logdna.py +++ b/plugins/callback/logdna.py @@ -1,60 +1,59 @@ -# -*- coding: utf-8 -*- -# (c) 2018, Samir Musali -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# Copyright (c) 2018, Samir Musali +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = ''' - author: Unknown (!UNKNOWN) - name: logdna - type: aggregate - short_description: Sends playbook logs to LogDNA - description: - - This callback will report logs from playbook actions, tasks, and events to LogDNA (https://app.logdna.com) - requirements: - - LogDNA Python Library (https://github.com/logdna/python) - - 
whitelisting in configuration - options: - conf_key: - required: True - description: LogDNA Ingestion Key - type: string - env: - - name: LOGDNA_INGESTION_KEY - ini: - - section: callback_logdna - key: conf_key - plugin_ignore_errors: - required: False - description: Whether to ignore errors on failing or not - type: boolean - env: - - name: ANSIBLE_IGNORE_ERRORS - ini: - - section: callback_logdna - key: plugin_ignore_errors - default: False - conf_hostname: - required: False - description: Alternative Host Name; the current host name by default - type: string - env: - - name: LOGDNA_HOSTNAME - ini: - - section: callback_logdna - key: conf_hostname - conf_tags: - required: False - description: Tags - type: string - env: - - name: LOGDNA_TAGS - ini: - - section: callback_logdna - key: conf_tags - default: ansible -''' +DOCUMENTATION = r""" +author: Unknown (!UNKNOWN) +name: logdna +type: notification +short_description: Sends playbook logs to LogDNA +description: + - This callback reports logs from playbook actions, tasks, and events to LogDNA (U(https://app.logdna.com)). +requirements: + - LogDNA Python Library (U(https://github.com/logdna/python)) + - whitelisting in configuration +options: + conf_key: + required: true + description: LogDNA Ingestion Key. + type: string + env: + - name: LOGDNA_INGESTION_KEY + ini: + - section: callback_logdna + key: conf_key + plugin_ignore_errors: + required: false + description: Whether to ignore errors on failing or not. + type: boolean + env: + - name: ANSIBLE_IGNORE_ERRORS + ini: + - section: callback_logdna + key: plugin_ignore_errors + default: false + conf_hostname: + required: false + description: Alternative Host Name; the current host name by default. + type: string + env: + - name: LOGDNA_HOSTNAME + ini: + - section: callback_logdna + key: conf_hostname + conf_tags: + required: false + description: Tags. 
+ type: string + env: + - name: LOGDNA_TAGS + ini: + - section: callback_logdna + key: conf_tags + default: ansible +""" import logging import json @@ -72,7 +71,7 @@ except ImportError: # Getting MAC Address of system: def get_mac(): - mac = "%012x" % getnode() + mac = f"{getnode():012x}" return ":".join(map(lambda index: mac[index:index + 2], range(int(len(mac) / 2)))) @@ -110,7 +109,7 @@ def isJSONable(obj): class CallbackModule(CallbackBase): CALLBACK_VERSION = 0.1 - CALLBACK_TYPE = 'aggregate' + CALLBACK_TYPE = 'notification' CALLBACK_NAME = 'community.general.logdna' CALLBACK_NEEDS_WHITELIST = True @@ -160,7 +159,7 @@ class CallbackModule(CallbackBase): if ninvalidKeys > 0: for key in invalidKeys: del meta[key] - meta['__errors'] = 'These keys have been sanitized: ' + ', '.join(invalidKeys) + meta['__errors'] = f"These keys have been sanitized: {', '.join(invalidKeys)}" return meta def sanitizeJSON(self, data): diff --git a/plugins/callback/logentries.py b/plugins/callback/logentries.py index ad71a6d448..8fbcef4dd6 100644 --- a/plugins/callback/logentries.py +++ b/plugins/callback/logentries.py @@ -1,79 +1,80 @@ -# -*- coding: utf-8 -*- -# (c) 2015, Logentries.com, Jimmy Tang -# (c) 2017 Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +# Copyright (c) 2015, Logentries.com, Jimmy Tang +# Copyright (c) 2017 Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later +from __future__ import annotations -DOCUMENTATION = ''' - author: Unknown (!UNKNOWN) - name: logentries - type: notification - short_description: Sends events to Logentries +DOCUMENTATION = r""" +author: Unknown (!UNKNOWN) +name: logentries +type: notification +short_description: Sends events to Logentries +description: + - This 
callback plugin generates JSON objects and send them to Logentries using TCP for auditing/debugging purposes. +requirements: + - whitelisting in configuration + - certifi (Python library) + - flatdict (Python library), if you want to use the O(flatten) option +options: + api: + description: URI to the Logentries API. + type: str + env: + - name: LOGENTRIES_API + default: data.logentries.com + ini: + - section: callback_logentries + key: api + port: + description: HTTP port to use when connecting to the API. + type: int + env: + - name: LOGENTRIES_PORT + default: 80 + ini: + - section: callback_logentries + key: port + tls_port: + description: Port to use when connecting to the API when TLS is enabled. + type: int + env: + - name: LOGENTRIES_TLS_PORT + default: 443 + ini: + - section: callback_logentries + key: tls_port + token: + description: The logentries C(TCP token). + type: str + env: + - name: LOGENTRIES_ANSIBLE_TOKEN + required: true + ini: + - section: callback_logentries + key: token + use_tls: description: - - This callback plugin will generate JSON objects and send them to Logentries via TCP for auditing/debugging purposes. - - Before 2.4, if you wanted to use an ini configuration, the file must be placed in the same directory as this plugin and named logentries.ini - - In 2.4 and above you can just put it in the main Ansible configuration file. 
- requirements: - - whitelisting in configuration - - certifi (python library) - - flatdict (python library), if you want to use the 'flatten' option - options: - api: - description: URI to the Logentries API - env: - - name: LOGENTRIES_API - default: data.logentries.com - ini: - - section: callback_logentries - key: api - port: - description: HTTP port to use when connecting to the API - env: - - name: LOGENTRIES_PORT - default: 80 - ini: - - section: callback_logentries - key: port - tls_port: - description: Port to use when connecting to the API when TLS is enabled - env: - - name: LOGENTRIES_TLS_PORT - default: 443 - ini: - - section: callback_logentries - key: tls_port - token: - description: The logentries "TCP token" - env: - - name: LOGENTRIES_ANSIBLE_TOKEN - required: True - ini: - - section: callback_logentries - key: token - use_tls: - description: - - Toggle to decide whether to use TLS to encrypt the communications with the API server - env: - - name: LOGENTRIES_USE_TLS - default: False - type: boolean - ini: - - section: callback_logentries - key: use_tls - flatten: - description: flatten complex data structures into a single dictionary with complex keys - type: boolean - default: False - env: - - name: LOGENTRIES_FLATTEN - ini: - - section: callback_logentries - key: flatten -''' + - Toggle to decide whether to use TLS to encrypt the communications with the API server. + env: + - name: LOGENTRIES_USE_TLS + default: false + type: boolean + ini: + - section: callback_logentries + key: use_tls + flatten: + description: Flatten complex data structures into a single dictionary with complex keys. 
+ type: boolean + default: false + env: + - name: LOGENTRIES_FLATTEN + ini: + - section: callback_logentries + key: flatten +""" -EXAMPLES = ''' -examples: > +EXAMPLES = r""" +examples: >- To enable, add this to your ansible.cfg file in the defaults block [defaults] @@ -89,10 +90,10 @@ examples: > api = data.logentries.com port = 10000 tls_port = 20000 - use_tls = no + use_tls = true token = dd21fc88-f00a-43ff-b977-e3a4233c53af - flatten = False -''' + flatten = false +""" import os import socket @@ -130,7 +131,7 @@ class PlainTextSocketAppender(object): # Error message displayed when an incorrect Token has been detected self.INVALID_TOKEN = "\n\nIt appears the LOGENTRIES_TOKEN parameter you entered is incorrect!\n\n" # Unicode Line separator character \u2028 - self.LINE_SEP = u'\u2028' + self.LINE_SEP = '\u2028' self._display = display self._conn = None @@ -148,7 +149,7 @@ class PlainTextSocketAppender(object): self.open_connection() return except Exception as e: - self._display.vvvv(u"Unable to connect to Logentries: %s" % to_text(e)) + self._display.vvvv(f"Unable to connect to Logentries: {e}") root_delay *= 2 if root_delay > self.MAX_DELAY: @@ -157,7 +158,7 @@ class PlainTextSocketAppender(object): wait_for = root_delay + random.uniform(0, root_delay) try: - self._display.vvvv("sleeping %s before retry" % wait_for) + self._display.vvvv(f"sleeping {wait_for} before retry") time.sleep(wait_for) except KeyboardInterrupt: raise @@ -170,8 +171,8 @@ class PlainTextSocketAppender(object): # Replace newlines with Unicode line separator # for multi-line events data = to_text(data, errors='surrogate_or_strict') - multiline = data.replace(u'\n', self.LINE_SEP) - multiline += u"\n" + multiline = data.replace('\n', self.LINE_SEP) + multiline += "\n" # Send data, reconnect if needed while True: try: @@ -195,15 +196,11 @@ else: class TLSSocketAppender(PlainTextSocketAppender): def open_connection(self): sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) - sock = 
ssl.wrap_socket( + context = ssl.create_default_context( + purpose=ssl.Purpose.SERVER_AUTH, + cafile=certifi.where(), ) + sock = context.wrap_socket( sock=sock, - keyfile=None, - certfile=None, - server_side=False, - cert_reqs=ssl.CERT_REQUIRED, - ssl_version=getattr( - ssl, 'PROTOCOL_TLSv1_2', ssl.PROTOCOL_TLSv1), - ca_certs=certifi.where(), do_handshake_on_connect=True, suppress_ragged_eofs=True, ) sock.connect((self.LE_API, self.LE_TLS_PORT)) @@ -248,7 +245,7 @@ class CallbackModule(CallbackBase): self.use_tls = self.get_option('use_tls') self.flatten = self.get_option('flatten') except KeyError as e: - self._display.warning(u"Missing option for Logentries callback plugin: %s" % to_text(e)) + self._display.warning(f"Missing option for Logentries callback plugin: {e}") self.disabled = True try: @@ -267,10 +264,10 @@ class CallbackModule(CallbackBase): if not self.disabled: if self.use_tls: - self._display.vvvv("Connecting to %s:%s with TLS" % (self.api_url, self.api_tls_port)) + self._display.vvvv(f"Connecting to {self.api_url}:{self.api_tls_port} with TLS") self._appender = TLSSocketAppender(display=self._display, LE_API=self.api_url, LE_TLS_PORT=self.api_tls_port) else: - self._display.vvvv("Connecting to %s:%s" % (self.api_url, self.api_port)) + self._display.vvvv(f"Connecting to {self.api_url}:{self.api_port}") self._appender = PlainTextSocketAppender(display=self._display, LE_API=self.api_url, LE_PORT=self.api_port) self._appender.reopen_connection() @@ -283,7 +280,7 @@ class CallbackModule(CallbackBase): def emit(self, record): msg = record.rstrip('\n') - msg = "{0} {1}".format(self.token, msg) + msg = f"{self.token} {msg}" self._appender.put(msg) self._display.vvvv("Sent event to logentries") diff --git a/plugins/callback/logstash.py b/plugins/callback/logstash.py index 133010cbdb..f2279929f0 100644 --- a/plugins/callback/logstash.py +++ b/plugins/callback/logstash.py @@ -1,96 +1,98 @@ -# -*- coding: utf-8 -*- -# (C) 2020, Yevhen Khmelenko -# (C) 2017 
Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# Copyright (c) 2020, Yevhen Khmelenko +# Copyright (c) 2017 Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = r''' - author: Yevhen Khmelenko (@ujenmr) - name: logstash - type: notification - short_description: Sends events to Logstash - description: - - This callback will report facts and task events to Logstash https://www.elastic.co/products/logstash - requirements: - - whitelisting in configuration - - logstash (python library) - options: - server: - description: Address of the Logstash server - env: - - name: LOGSTASH_SERVER - ini: - - section: callback_logstash - key: server - version_added: 1.0.0 - default: localhost - port: - description: Port on which logstash is listening - env: - - name: LOGSTASH_PORT - ini: - - section: callback_logstash - key: port - version_added: 1.0.0 - default: 5000 - type: - description: Message type - env: - - name: LOGSTASH_TYPE - ini: - - section: callback_logstash - key: type - version_added: 1.0.0 - default: ansible - pre_command: - description: Executes command before run and its result is added to the C(ansible_pre_command_output) logstash field. 
- version_added: 2.0.0 - ini: - - section: callback_logstash - key: pre_command - env: - - name: LOGSTASH_PRE_COMMAND - format_version: - description: Logging format - type: str - version_added: 2.0.0 - ini: - - section: callback_logstash - key: format_version - env: - - name: LOGSTASH_FORMAT_VERSION - default: v1 - choices: - - v1 - - v2 +DOCUMENTATION = r""" +author: Yevhen Khmelenko (@ujenmr) +name: logstash +type: notification +short_description: Sends events to Logstash +description: + - This callback reports facts and task events to Logstash U(https://www.elastic.co/products/logstash). +requirements: + - whitelisting in configuration + - logstash (Python library) +options: + server: + description: Address of the Logstash server. + type: str + env: + - name: LOGSTASH_SERVER + ini: + - section: callback_logstash + key: server + version_added: 1.0.0 + default: localhost + port: + description: Port on which logstash is listening. + type: int + env: + - name: LOGSTASH_PORT + ini: + - section: callback_logstash + key: port + version_added: 1.0.0 + default: 5000 + type: + description: Message type. + type: str + env: + - name: LOGSTASH_TYPE + ini: + - section: callback_logstash + key: type + version_added: 1.0.0 + default: ansible + pre_command: + description: Executes command before run and its result is added to the C(ansible_pre_command_output) logstash field. + type: str + version_added: 2.0.0 + ini: + - section: callback_logstash + key: pre_command + env: + - name: LOGSTASH_PRE_COMMAND + format_version: + description: Logging format. 
+ type: str + version_added: 2.0.0 + ini: + - section: callback_logstash + key: format_version + env: + - name: LOGSTASH_FORMAT_VERSION + default: v1 + choices: + - v1 + - v2 +""" -''' - -EXAMPLES = r''' +EXAMPLES = r""" ansible.cfg: | - # Enable Callback plugin - [defaults] - callback_whitelist = community.general.logstash + # Enable Callback plugin + [defaults] + callback_whitelist = community.general.logstash - [callback_logstash] - server = logstash.example.com - port = 5000 - pre_command = git rev-parse HEAD - type = ansible + [callback_logstash] + server = logstash.example.com + port = 5000 + pre_command = git rev-parse HEAD + type = ansible -11-input-tcp.conf: | - # Enable Logstash TCP Input - input { - tcp { - port => 5000 - codec => json - add_field => { "[@metadata][beat]" => "notify" } - add_field => { "[@metadata][type]" => "ansible" } - } - } -''' +11-input-tcp.conf: |- + # Enable Logstash TCP Input + input { + tcp { + port => 5000 + codec => json + add_field => { "[@metadata][beat]" => "notify" } + add_field => { "[@metadata][type]" => "ansible" } + } + } +""" import os import json @@ -98,7 +100,6 @@ from ansible import context import socket import uuid import logging -from datetime import datetime try: import logstash @@ -108,11 +109,15 @@ except ImportError: from ansible.plugins.callback import CallbackBase +from ansible_collections.community.general.plugins.module_utils.datetime import ( + now, +) + class CallbackModule(CallbackBase): CALLBACK_VERSION = 2.0 - CALLBACK_TYPE = 'aggregate' + CALLBACK_TYPE = 'notification' CALLBACK_NAME = 'community.general.logstash' CALLBACK_NEEDS_WHITELIST = True @@ -121,11 +126,9 @@ class CallbackModule(CallbackBase): if not HAS_LOGSTASH: self.disabled = True - self._display.warning("The required python-logstash/python3-logstash is not installed. 
" - "pip install python-logstash for Python 2" - "pip install python3-logstash for Python 3") + self._display.warning("The required python3-logstash is not installed.") - self.start_time = datetime.utcnow() + self.start_time = now() def _init_plugin(self): if not self.disabled: @@ -176,7 +179,7 @@ class CallbackModule(CallbackBase): data['status'] = "OK" data['ansible_playbook'] = playbook._file_name - if (self.ls_format_version == "v2"): + if self.ls_format_version == "v2": self.logger.info( "START PLAYBOOK | %s", data['ansible_playbook'], extra=data ) @@ -184,7 +187,7 @@ class CallbackModule(CallbackBase): self.logger.info("ansible start", extra=data) def v2_playbook_on_stats(self, stats): - end_time = datetime.utcnow() + end_time = now() runtime = end_time - self.start_time summarize_stat = {} for host in stats.processed.keys(): @@ -201,7 +204,7 @@ class CallbackModule(CallbackBase): data['ansible_playbook_duration'] = runtime.total_seconds() data['ansible_result'] = json.dumps(summarize_stat) # deprecated field - if (self.ls_format_version == "v2"): + if self.ls_format_version == "v2": self.logger.info( "FINISH PLAYBOOK | %s", json.dumps(summarize_stat), extra=data ) @@ -220,7 +223,7 @@ class CallbackModule(CallbackBase): data['ansible_play_id'] = self.play_id data['ansible_play_name'] = self.play_name - if (self.ls_format_version == "v2"): + if self.ls_format_version == "v2": self.logger.info("START PLAY | %s", self.play_name, extra=data) else: self.logger.info("ansible play", extra=data) @@ -245,7 +248,7 @@ class CallbackModule(CallbackBase): data['ansible_task'] = task_name data['ansible_facts'] = self._dump_results(result._result) - if (self.ls_format_version == "v2"): + if self.ls_format_version == "v2": self.logger.info( "SETUP FACTS | %s", self._dump_results(result._result), extra=data ) @@ -266,7 +269,7 @@ class CallbackModule(CallbackBase): data['ansible_task_id'] = self.task_id data['ansible_result'] = self._dump_results(result._result) - if 
(self.ls_format_version == "v2"): + if self.ls_format_version == "v2": self.logger.info( "TASK OK | %s | RESULT | %s", task_name, self._dump_results(result._result), extra=data @@ -287,7 +290,7 @@ class CallbackModule(CallbackBase): data['ansible_task_id'] = self.task_id data['ansible_result'] = self._dump_results(result._result) - if (self.ls_format_version == "v2"): + if self.ls_format_version == "v2": self.logger.info("TASK SKIPPED | %s", task_name, extra=data) else: self.logger.info("ansible skipped", extra=data) @@ -301,7 +304,7 @@ class CallbackModule(CallbackBase): data['ansible_play_name'] = self.play_name data['imported_file'] = imported_file - if (self.ls_format_version == "v2"): + if self.ls_format_version == "v2": self.logger.info("IMPORT | %s", imported_file, extra=data) else: self.logger.info("ansible import", extra=data) @@ -315,7 +318,7 @@ class CallbackModule(CallbackBase): data['ansible_play_name'] = self.play_name data['imported_file'] = missing_file - if (self.ls_format_version == "v2"): + if self.ls_format_version == "v2": self.logger.info("NOT IMPORTED | %s", missing_file, extra=data) else: self.logger.info("ansible import", extra=data) @@ -339,7 +342,7 @@ class CallbackModule(CallbackBase): data['ansible_result'] = self._dump_results(result._result) self.errors += 1 - if (self.ls_format_version == "v2"): + if self.ls_format_version == "v2": self.logger.error( "TASK FAILED | %s | HOST | %s | RESULT | %s", task_name, self.hostname, @@ -362,7 +365,7 @@ class CallbackModule(CallbackBase): data['ansible_result'] = self._dump_results(result._result) self.errors += 1 - if (self.ls_format_version == "v2"): + if self.ls_format_version == "v2": self.logger.error( "UNREACHABLE | %s | HOST | %s | RESULT | %s", task_name, self.hostname, @@ -385,7 +388,7 @@ class CallbackModule(CallbackBase): data['ansible_result'] = self._dump_results(result._result) self.errors += 1 - if (self.ls_format_version == "v2"): + if self.ls_format_version == "v2": 
self.logger.error( "ASYNC FAILED | %s | HOST | %s | RESULT | %s", task_name, self.hostname, diff --git a/plugins/callback/mail.py b/plugins/callback/mail.py index 3805bae508..7afb08e3f0 100644 --- a/plugins/callback/mail.py +++ b/plugins/callback/mail.py @@ -1,75 +1,84 @@ -# -*- coding: utf-8 -*- -# Copyright: (c) 2012, Dag Wieers -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# Copyright (c) 2012, Dag Wieers +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = ''' +DOCUMENTATION = r""" name: mail type: notification -short_description: Sends failure events via email +short_description: Sends failure events through email description: -- This callback will report failures via email. + - This callback reports failures through email. author: -- Dag Wieers (@dagwieers) + - Dag Wieers (@dagwieers) requirements: -- whitelisting in configuration + - whitelisting in configuration options: mta: description: - - Mail Transfer Agent, server that accepts SMTP. + - Mail Transfer Agent, server that accepts SMTP. type: str env: - - name: SMTPHOST + - name: SMTPHOST ini: - - section: callback_mail - key: smtphost + - section: callback_mail + key: smtphost default: localhost mtaport: description: - - Mail Transfer Agent Port. - - Port at which server SMTP. + - Mail Transfer Agent Port. + - Port at which server SMTP. type: int ini: - - section: callback_mail - key: smtpport + - section: callback_mail + key: smtpport default: 25 to: description: - - Mail recipient. + - Mail recipient. type: list elements: str ini: - - section: callback_mail - key: to + - section: callback_mail + key: to default: [root] sender: description: - - Mail sender. 
- - Note that this will be required from community.general 6.0.0 on. + - Mail sender. + - This is required since community.general 6.0.0. type: str + required: true ini: - - section: callback_mail - key: sender + - section: callback_mail + key: sender cc: description: - - CC'd recipients. + - CC'd recipients. type: list elements: str ini: - - section: callback_mail - key: cc + - section: callback_mail + key: cc bcc: description: - - BCC'd recipients. + - BCC'd recipients. type: list elements: str ini: - - section: callback_mail - key: bcc -''' + - section: callback_mail + key: bcc + message_id_domain: + description: + - The domain name to use for the L(Message-ID header, https://en.wikipedia.org/wiki/Message-ID). + - The default is the hostname of the control node. + type: str + ini: + - section: callback_mail + key: message_id_domain + version_added: 8.2.0 +""" import json import os @@ -77,7 +86,6 @@ import re import email.utils import smtplib -from ansible.module_utils.six import string_types from ansible.module_utils.common.text.converters import to_bytes from ansible.parsing.ajson import AnsibleJSONEncoder from ansible.plugins.callback import CallbackBase @@ -104,10 +112,6 @@ class CallbackModule(CallbackBase): super(CallbackModule, self).set_options(task_keys=task_keys, var_options=var_options, direct=direct) self.sender = self.get_option('sender') - if self.sender is None: - self._display.deprecated( - 'The sender for the mail callback has not been specified. 
This will be an error in the future', - version='6.0.0', collection_name='community.general') self.to = self.get_option('to') self.smtphost = self.get_option('mta') self.smtpport = self.get_option('mtaport') @@ -128,14 +132,14 @@ class CallbackModule(CallbackBase): if self.bcc: bcc_addresses = email.utils.getaddresses(self.bcc) - content = 'Date: %s\n' % email.utils.formatdate() - content += 'From: %s\n' % email.utils.formataddr(sender_address) + content = f'Date: {email.utils.formatdate()}\n' + content += f'From: {email.utils.formataddr(sender_address)}\n' if self.to: - content += 'To: %s\n' % ', '.join([email.utils.formataddr(pair) for pair in to_addresses]) + content += f"To: {', '.join([email.utils.formataddr(pair) for pair in to_addresses])}\n" if self.cc: - content += 'Cc: %s\n' % ', '.join([email.utils.formataddr(pair) for pair in cc_addresses]) - content += 'Message-ID: %s\n' % email.utils.make_msgid() - content += 'Subject: %s\n\n' % subject.strip() + content += f"Cc: {', '.join([email.utils.formataddr(pair) for pair in cc_addresses])}\n" + content += f"Message-ID: {email.utils.make_msgid(domain=self.get_option('message_id_domain'))}\n" + content += f'Subject: {subject.strip()}\n\n' content += body addresses = to_addresses @@ -152,23 +156,22 @@ class CallbackModule(CallbackBase): smtp.quit() def subject_msg(self, multiline, failtype, linenr): - return '%s: %s' % (failtype, multiline.strip('\r\n').splitlines()[linenr]) + msg = multiline.strip('\r\n').splitlines()[linenr] + return f'{failtype}: {msg}' def indent(self, multiline, indent=8): return re.sub('^', ' ' * indent, multiline, flags=re.MULTILINE) def body_blob(self, multiline, texttype): ''' Turn some text output in a well-indented block for sending in a mail body ''' - intro = 'with the following %s:\n\n' % texttype - blob = '' - for line in multiline.strip('\r\n').splitlines(): - blob += '%s\n' % line - return intro + self.indent(blob) + '\n' + intro = f'with the following {texttype}:\n\n' + blob = 
"\n".join(multiline.strip('\r\n').splitlines()) + return f"{intro}{self.indent(blob)}\n" def mail_result(self, result, failtype): host = result._host.get_name() if not self.sender: - self.sender = '"Ansible: %s" ' % host + self.sender = f'"Ansible: {host}" ' # Add subject if self.itembody: @@ -184,31 +187,33 @@ class CallbackModule(CallbackBase): elif result._result.get('exception'): # Unrelated exceptions are added to output :-/ subject = self.subject_msg(result._result['exception'], failtype, -1) else: - subject = '%s: %s' % (failtype, result._task.name or result._task.action) + subject = f'{failtype}: {result._task.name or result._task.action}' # Make playbook name visible (e.g. in Outlook/Gmail condensed view) - body = 'Playbook: %s\n' % os.path.basename(self.playbook._file_name) + body = f'Playbook: {os.path.basename(self.playbook._file_name)}\n' if result._task.name: - body += 'Task: %s\n' % result._task.name - body += 'Module: %s\n' % result._task.action - body += 'Host: %s\n' % host + body += f'Task: {result._task.name}\n' + body += f'Module: {result._task.action}\n' + body += f'Host: {host}\n' body += '\n' # Add task information (as much as possible) body += 'The following task failed:\n\n' if 'invocation' in result._result: - body += self.indent('%s: %s\n' % (result._task.action, json.dumps(result._result['invocation']['module_args'], indent=4))) + body += self.indent(f"{result._task.action}: {json.dumps(result._result['invocation']['module_args'], indent=4)}\n") elif result._task.name: - body += self.indent('%s (%s)\n' % (result._task.name, result._task.action)) + body += self.indent(f'{result._task.name} ({result._task.action})\n') else: - body += self.indent('%s\n' % result._task.action) + body += self.indent(f'{result._task.action}\n') body += '\n' # Add item / message if self.itembody: body += self.itembody elif result._result.get('failed_when_result') is True: - body += "due to the following condition:\n\n" + self.indent('failed_when:\n- ' + '\n- 
'.join(result._task.failed_when)) + '\n\n' + fail_cond_list = '\n- '.join(result._task.failed_when) + fail_cond = self.indent(f"failed_when:\n- {fail_cond_list}") + body += f"due to the following condition:\n\n{fail_cond}\n\n" elif result._result.get('msg'): body += self.body_blob(result._result['msg'], 'message') @@ -221,13 +226,13 @@ class CallbackModule(CallbackBase): body += self.body_blob(result._result['exception'], 'exception') if result._result.get('warnings'): for i in range(len(result._result.get('warnings'))): - body += self.body_blob(result._result['warnings'][i], 'exception %d' % (i + 1)) + body += self.body_blob(result._result['warnings'][i], f'exception {i + 1}') if result._result.get('deprecations'): for i in range(len(result._result.get('deprecations'))): - body += self.body_blob(result._result['deprecations'][i], 'exception %d' % (i + 1)) + body += self.body_blob(result._result['deprecations'][i], f'exception {i + 1}') body += 'and a complete dump of the error:\n\n' - body += self.indent('%s: %s' % (failtype, json.dumps(result._result, cls=AnsibleJSONEncoder, indent=4))) + body += self.indent(f'{failtype}: {json.dumps(result._result, cls=AnsibleJSONEncoder, indent=4)}') self.mail(subject=subject, body=body) @@ -250,4 +255,4 @@ class CallbackModule(CallbackBase): def v2_runner_item_on_failed(self, result): # Pass item information to task failure self.itemsubject = result._result['msg'] - self.itembody += self.body_blob(json.dumps(result._result, cls=AnsibleJSONEncoder, indent=4), "failed item dump '%(item)s'" % result._result) + self.itembody += self.body_blob(json.dumps(result._result, cls=AnsibleJSONEncoder, indent=4), f"failed item dump '{result._result['item']}'") diff --git a/plugins/callback/nrdp.py b/plugins/callback/nrdp.py index 08096cab72..6f1b5e2f5b 100644 --- a/plugins/callback/nrdp.py +++ b/plugins/callback/nrdp.py @@ -1,75 +1,72 @@ -# -*- coding: utf-8 -*- -# (c) 2018 Remi Verchere -# GNU General Public License v3.0+ (see COPYING or 
https://www.gnu.org/licenses/gpl-3.0.txt) +# Copyright (c) 2018 Remi Verchere +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later # Make coding more python3-ish -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = ''' - name: nrdp - type: notification - author: "Remi VERCHERE (@rverchere)" - short_description: Post task results to a Nagios server through nrdp - description: - - This callback send playbook result to Nagios. - - Nagios shall use NRDP to recive passive events. - - The passive check is sent to a dedicated host/service for Ansible. - options: - url: - description: URL of the nrdp server. - required: true - env: - - name : NRDP_URL - ini: - - section: callback_nrdp - key: url - type: string - validate_certs: - description: Validate the SSL certificate of the nrdp server. (Used for HTTPS URLs.) - env: - - name: NRDP_VALIDATE_CERTS - ini: - - section: callback_nrdp - key: validate_nrdp_certs - - section: callback_nrdp - key: validate_certs - type: boolean - default: false - aliases: [ validate_nrdp_certs ] - token: - description: Token to be allowed to push nrdp events. - required: true - env: - - name: NRDP_TOKEN - ini: - - section: callback_nrdp - key: token - type: string - hostname: - description: Hostname where the passive check is linked to. - required: true - env: - - name : NRDP_HOSTNAME - ini: - - section: callback_nrdp - key: hostname - type: string - servicename: - description: Service where the passive check is linked to. 
- required: true - env: - - name : NRDP_SERVICENAME - ini: - - section: callback_nrdp - key: servicename - type: string -''' +DOCUMENTATION = r""" +name: nrdp +type: notification +author: "Remi VERCHERE (@rverchere)" +short_description: Post task results to a Nagios server through nrdp +description: + - This callback send playbook result to Nagios. + - Nagios shall use NRDP to receive passive events. + - The passive check is sent to a dedicated host/service for Ansible. +options: + url: + description: URL of the nrdp server. + required: true + env: + - name: NRDP_URL + ini: + - section: callback_nrdp + key: url + type: string + validate_certs: + description: Validate the SSL certificate of the nrdp server. (Used for HTTPS URLs). + env: + - name: NRDP_VALIDATE_CERTS + ini: + - section: callback_nrdp + key: validate_nrdp_certs + - section: callback_nrdp + key: validate_certs + type: boolean + default: false + aliases: [validate_nrdp_certs] + token: + description: Token to be allowed to push nrdp events. + required: true + env: + - name: NRDP_TOKEN + ini: + - section: callback_nrdp + key: token + type: string + hostname: + description: Hostname where the passive check is linked to. + required: true + env: + - name: NRDP_HOSTNAME + ini: + - section: callback_nrdp + key: hostname + type: string + servicename: + description: Service where the passive check is linked to. 
+ required: true + env: + - name: NRDP_SERVICENAME + ini: + - section: callback_nrdp + key: servicename + type: string +""" -import os -import json +from urllib.parse import urlencode -from ansible.module_utils.six.moves.urllib.parse import urlencode from ansible.module_utils.common.text.converters import to_bytes from ansible.module_utils.urls import open_url from ansible.plugins.callback import CallbackBase @@ -134,10 +131,10 @@ class CallbackModule(CallbackBase): xmldata = "\n" xmldata += "\n" xmldata += "\n" - xmldata += "%s\n" % self.hostname - xmldata += "%s\n" % self.servicename - xmldata += "%d\n" % state - xmldata += "%s\n" % msg + xmldata += f"{self.hostname}\n" + xmldata += f"{self.servicename}\n" + xmldata += f"{state}\n" + xmldata += f"{msg}\n" xmldata += "\n" xmldata += "\n" @@ -154,7 +151,7 @@ class CallbackModule(CallbackBase): validate_certs=self.validate_nrdp_certs) return response.read() except Exception as ex: - self._display.warning("NRDP callback cannot send result {0}".format(ex)) + self._display.warning(f"NRDP callback cannot send result {ex}") def v2_playbook_on_play_start(self, play): ''' @@ -172,17 +169,16 @@ class CallbackModule(CallbackBase): critical = warning = 0 for host in hosts: stat = stats.summarize(host) - gstats += "'%s_ok'=%d '%s_changed'=%d \ - '%s_unreachable'=%d '%s_failed'=%d " % \ - (host, stat['ok'], host, stat['changed'], - host, stat['unreachable'], host, stat['failures']) + gstats += ( + f"'{host}_ok'={stat['ok']} '{host}_changed'={stat['changed']} '{host}_unreachable'={stat['unreachable']} '{host}_failed'={stat['failures']} " + ) # Critical when failed tasks or unreachable host critical += stat['failures'] critical += stat['unreachable'] # Warning when changed tasks warning += stat['changed'] - msg = "%s | %s" % (name, gstats) + msg = f"{name} | {gstats}" if critical: # Send Critical self._send_nrdp(self.CRITICAL, msg) diff --git a/plugins/callback/null.py b/plugins/callback/null.py index 13ea65b438..3074a698d0 
100644 --- a/plugins/callback/null.py +++ b/plugins/callback/null.py @@ -1,21 +1,20 @@ -# -*- coding: utf-8 -*- -# (c) 2017 Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# Copyright (c) 2017 Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later # Make coding more python3-ish -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = ''' - author: Unknown (!UNKNOWN) - name: 'null' - type: stdout - requirements: - - set as main display callback - short_description: Don't display stuff to screen - description: - - This callback prevents outputing events to screen -''' +DOCUMENTATION = r""" +author: Unknown (!UNKNOWN) +name: 'null' +type: stdout +requirements: + - set as main display callback +short_description: Do not display stuff to screen +description: + - This callback prevents outputting events to screen. +""" from ansible.plugins.callback import CallbackBase @@ -23,7 +22,7 @@ from ansible.plugins.callback import CallbackBase class CallbackModule(CallbackBase): ''' - This callback wont print messages to stdout when new callback events are received. + This callback won't print messages to stdout when new callback events are received. 
''' CALLBACK_VERSION = 2.0 diff --git a/plugins/callback/opentelemetry.py b/plugins/callback/opentelemetry.py index 1ea6e79622..ca6ec2b916 100644 --- a/plugins/callback/opentelemetry.py +++ b/plugins/callback/opentelemetry.py @@ -1,100 +1,166 @@ -# -*- coding: utf-8 -*- -# (C) 2021, Victor Martinez -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# Copyright (c) 2021, Victor Martinez +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = ''' - author: Victor Martinez (@v1v) - name: opentelemetry - type: notification - short_description: Create distributed traces with OpenTelemetry - version_added: 3.7.0 +DOCUMENTATION = r""" +author: Victor Martinez (@v1v) +name: opentelemetry +type: notification +short_description: Create distributed traces with OpenTelemetry +version_added: 3.7.0 +description: + - This callback creates distributed traces for each Ansible task with OpenTelemetry. + - You can configure the OpenTelemetry exporter and SDK with environment variables. + - See U(https://opentelemetry-python.readthedocs.io/en/latest/exporter/otlp/otlp.html). + - See + U(https://opentelemetry-python.readthedocs.io/en/latest/sdk/environment_variables.html#opentelemetry-sdk-environment-variables). +options: + hide_task_arguments: + default: false + type: bool description: - - This callback creates distributed traces for each Ansible task with OpenTelemetry. - - You can configure the OpenTelemetry exporter and SDK with environment variables. - - See U(https://opentelemetry-python.readthedocs.io/en/latest/exporter/otlp/otlp.html). - - See U(https://opentelemetry-python.readthedocs.io/en/latest/sdk/environment_variables.html#opentelemetry-sdk-environment-variables). 
- options: - hide_task_arguments: - default: false - type: bool - description: - - Hide the arguments for a task. - env: - - name: ANSIBLE_OPENTELEMETRY_HIDE_TASK_ARGUMENTS - enable_from_environment: - type: str - description: - - Whether to enable this callback only if the given environment variable exists and it is set to C(true). - - This is handy when you use Configuration as Code and want to send distributed traces - if running in the CI rather when running Ansible locally. - - For such, it evaluates the given I(enable_from_environment) value as environment variable - and if set to true this plugin will be enabled. - env: - - name: ANSIBLE_OPENTELEMETRY_ENABLE_FROM_ENVIRONMENT - version_added: 3.8.0 - otel_service_name: - default: ansible - type: str - description: - - The service name resource attribute. - env: - - name: OTEL_SERVICE_NAME - traceparent: - default: None - type: str - description: - - The L(W3C Trace Context header traceparent,https://www.w3.org/TR/trace-context-1/#traceparent-header). - env: - - name: TRACEPARENT - requirements: - - opentelemetry-api (Python library) - - opentelemetry-exporter-otlp (Python library) - - opentelemetry-sdk (Python library) -''' + - Hide the arguments for a task. + env: + - name: ANSIBLE_OPENTELEMETRY_HIDE_TASK_ARGUMENTS + ini: + - section: callback_opentelemetry + key: hide_task_arguments + version_added: 5.3.0 + enable_from_environment: + type: str + description: + - Whether to enable this callback only if the given environment variable exists and it is set to V(true). + - This is handy when you use Configuration as Code and want to send distributed traces if running in the CI rather when + running Ansible locally. + - For such, it evaluates the given O(enable_from_environment) value as environment variable and if set to V(true) this + plugin is enabled. 
+ env: + - name: ANSIBLE_OPENTELEMETRY_ENABLE_FROM_ENVIRONMENT + ini: + - section: callback_opentelemetry + key: enable_from_environment + version_added: 5.3.0 + version_added: 3.8.0 + otel_service_name: + default: ansible + type: str + description: + - The service name resource attribute. + env: + - name: OTEL_SERVICE_NAME + ini: + - section: callback_opentelemetry + key: otel_service_name + version_added: 5.3.0 + traceparent: + default: None + type: str + description: + - The L(W3C Trace Context header traceparent,https://www.w3.org/TR/trace-context-1/#traceparent-header). + env: + - name: TRACEPARENT + disable_logs: + default: false + type: bool + description: + - Disable sending logs. + env: + - name: ANSIBLE_OPENTELEMETRY_DISABLE_LOGS + ini: + - section: callback_opentelemetry + key: disable_logs + version_added: 5.8.0 + disable_attributes_in_logs: + default: false + type: bool + description: + - Disable populating span attributes to the logs. + env: + - name: ANSIBLE_OPENTELEMETRY_DISABLE_ATTRIBUTES_IN_LOGS + ini: + - section: callback_opentelemetry + key: disable_attributes_in_logs + version_added: 7.1.0 + store_spans_in_file: + type: str + description: + - It stores the exported spans in the given file. + env: + - name: ANSIBLE_OPENTELEMETRY_STORE_SPANS_IN_FILE + ini: + - section: callback_opentelemetry + key: store_spans_in_file + version_added: 9.0.0 + otel_exporter_otlp_traces_protocol: + type: str + description: + - E(OTEL_EXPORTER_OTLP_TRACES_PROTOCOL) represents the transport protocol for spans. + - See + U(https://opentelemetry-python.readthedocs.io/en/latest/sdk/environment_variables.html#envvar-OTEL_EXPORTER_OTLP_TRACES_PROTOCOL). 
+ default: grpc + choices: + - grpc + - http/protobuf + env: + - name: OTEL_EXPORTER_OTLP_TRACES_PROTOCOL + ini: + - section: callback_opentelemetry + key: otel_exporter_otlp_traces_protocol + version_added: 9.0.0 +requirements: + - opentelemetry-api (Python library) + - opentelemetry-exporter-otlp (Python library) + - opentelemetry-sdk (Python library) +""" -EXAMPLES = ''' -examples: | +EXAMPLES = r""" +examples: |- Enable the plugin in ansible.cfg: [defaults] callbacks_enabled = community.general.opentelemetry + [callback_opentelemetry] + enable_from_environment = ANSIBLE_OPENTELEMETRY_ENABLED Set the environment variable: export OTEL_EXPORTER_OTLP_ENDPOINT= export OTEL_EXPORTER_OTLP_HEADERS="authorization=Bearer your_otel_token" export OTEL_SERVICE_NAME=your_service_name -''' + export ANSIBLE_OPENTELEMETRY_ENABLED=true +""" import getpass +import json import os import socket -import sys -import time import uuid - from collections import OrderedDict from os.path import basename +from time import time_ns +from urllib.parse import urlparse from ansible.errors import AnsibleError -from ansible.module_utils.six import raise_from -from ansible.module_utils.six.moves.urllib.parse import urlparse +from ansible.module_utils.ansible_release import __version__ as ansible_version from ansible.plugins.callback import CallbackBase try: from opentelemetry import trace from opentelemetry.trace import SpanKind - from opentelemetry.exporter.otlp.proto.grpc.trace_exporter import OTLPSpanExporter + from opentelemetry.exporter.otlp.proto.grpc.trace_exporter import OTLPSpanExporter as GRPCOTLPSpanExporter + from opentelemetry.exporter.otlp.proto.http.trace_exporter import OTLPSpanExporter as HTTPOTLPSpanExporter from opentelemetry.sdk.resources import SERVICE_NAME, Resource from opentelemetry.trace.status import Status, StatusCode from opentelemetry.trace.propagation.tracecontext import TraceContextTextMapPropagator from opentelemetry.sdk.trace import TracerProvider from 
opentelemetry.sdk.trace.export import ( - BatchSpanProcessor + BatchSpanProcessor, + SimpleSpanProcessor + ) + from opentelemetry.sdk.trace.export.in_memory_span_exporter import ( + InMemorySpanExporter ) - from opentelemetry.util._time import _time_ns except ImportError as imp_exc: OTEL_LIBRARY_IMPORT_ERROR = imp_exc else: @@ -112,18 +178,16 @@ class TaskData: self.path = path self.play = play self.host_data = OrderedDict() - if sys.version_info >= (3, 7): - self.start = time.time_ns() - else: - self.start = _time_ns() + self.start = time_ns() self.action = action self.args = args + self.dump = None def add_host(self, host): if host.uuid in self.host_data: if host.status == 'included': # concatenate task include output from multiple items - host.result = '%s\n%s' % (self.host_data[host.uuid].result, host.result) + host.result = f'{self.host_data[host.uuid].result}\n{host.result}' else: return @@ -140,16 +204,12 @@ class HostData: self.name = name self.status = status self.result = result - if sys.version_info >= (3, 7): - self.finish = time.time_ns() - else: - self.finish = _time_ns() + self.finish = time_ns() class OpenTelemetrySource(object): def __init__(self, display): self.ansible_playbook = "" - self.ansible_version = None self.session = str(uuid.uuid4()) self.host = socket.gethostname() try: @@ -183,7 +243,7 @@ class OpenTelemetrySource(object): tasks_data[uuid] = TaskData(uuid, name, path, play_name, action, args) - def finish_task(self, tasks_data, status, result): + def finish_task(self, tasks_data, status, result, dump): """ record the results of a task for a single host """ task_uuid = result._task._uuid @@ -197,12 +257,19 @@ class OpenTelemetrySource(object): task = tasks_data[task_uuid] - if self.ansible_version is None and hasattr(result, '_task_fields') and result._task_fields['args'].get('_ansible_version'): - self.ansible_version = result._task_fields['args'].get('_ansible_version') - + task.dump = dump task.add_host(HostData(host_uuid, 
host_name, status, result)) - def generate_distributed_traces(self, otel_service_name, ansible_playbook, tasks_data, status, traceparent): + def generate_distributed_traces(self, + otel_service_name, + ansible_playbook, + tasks_data, + status, + traceparent, + disable_logs, + disable_attributes_in_logs, + otel_exporter_otlp_traces_protocol, + store_spans_in_file): """ generate distributed traces from the collected TaskData and HostData """ tasks = [] @@ -218,7 +285,16 @@ class OpenTelemetrySource(object): ) ) - processor = BatchSpanProcessor(OTLPSpanExporter()) + otel_exporter = None + if store_spans_in_file: + otel_exporter = InMemorySpanExporter() + processor = SimpleSpanProcessor(otel_exporter) + else: + if otel_exporter_otlp_traces_protocol == 'grpc': + otel_exporter = GRPCOTLPSpanExporter() + else: + otel_exporter = HTTPOTLPSpanExporter() + processor = BatchSpanProcessor(otel_exporter) trace.get_tracer_provider().add_span_processor(processor) @@ -228,8 +304,7 @@ class OpenTelemetrySource(object): start_time=parent_start_time, kind=SpanKind.SERVER) as parent: parent.set_status(status) # Populate trace metadata attributes - if self.ansible_version is not None: - parent.set_attribute("ansible.version", self.ansible_version) + parent.set_attribute("ansible.version", ansible_version) parent.set_attribute("ansible.session", self.session) parent.set_attribute("ansible.host.name", self.host) if self.ip_address is not None: @@ -238,12 +313,14 @@ class OpenTelemetrySource(object): for task in tasks: for host_uuid, host_data in task.host_data.items(): with tracer.start_as_current_span(task.name, start_time=task.start, end_on_exit=False) as span: - self.update_span_data(task, host_data, span) + self.update_span_data(task, host_data, span, disable_logs, disable_attributes_in_logs) - def update_span_data(self, task_data, host_data, span): + return otel_exporter + + def update_span_data(self, task_data, host_data, span, disable_logs, disable_attributes_in_logs): """ update 
the span with the given TaskData and HostData """ - name = '[%s] %s: %s' % (host_data.name, task_data.play, task_data.name) + name = f'[{host_data.name}] {task_data.play}: {task_data.name}' message = 'success' res = {} @@ -251,6 +328,7 @@ class OpenTelemetrySource(object): status = Status(status_code=StatusCode.OK) if host_data.status != 'included': # Support loops + enriched_error_message = None if 'results' in host_data.result._result: if host_data.status == 'failed': message = self.get_error_message_from_results(host_data.result._result['results'], task_data.action) @@ -273,36 +351,48 @@ class OpenTelemetrySource(object): status = Status(status_code=StatusCode.UNSET) span.set_status(status) + + # Create the span and log attributes + attributes = { + "ansible.task.module": task_data.action, + "ansible.task.message": message, + "ansible.task.name": name, + "ansible.task.result": rc, + "ansible.task.host.name": host_data.name, + "ansible.task.host.status": host_data.status + } if isinstance(task_data.args, dict) and "gather_facts" not in task_data.action: names = tuple(self.transform_ansible_unicode_to_str(k) for k in task_data.args.keys()) values = tuple(self.transform_ansible_unicode_to_str(k) for k in task_data.args.values()) - self.set_span_attribute(span, ("ansible.task.args.name"), names) - self.set_span_attribute(span, ("ansible.task.args.value"), values) - self.set_span_attribute(span, "ansible.task.module", task_data.action) - self.set_span_attribute(span, "ansible.task.message", message) - self.set_span_attribute(span, "ansible.task.name", name) - self.set_span_attribute(span, "ansible.task.result", rc) - self.set_span_attribute(span, "ansible.task.host.name", host_data.name) - self.set_span_attribute(span, "ansible.task.host.status", host_data.status) + attributes[("ansible.task.args.name")] = names + attributes[("ansible.task.args.value")] = values + + self.set_span_attributes(span, attributes) + # This will allow to enrich the service map 
self.add_attributes_for_service_map_if_possible(span, task_data) + # Send logs + if not disable_logs: + # This will avoid populating span attributes to the logs + span.add_event(task_data.dump, attributes={} if disable_attributes_in_logs else attributes) + # Close span always span.end(end_time=host_data.finish) - def set_span_attribute(self, span, attributeName, attributeValue): - """ update the span attribute with the given attribute and value if not None """ + def set_span_attributes(self, span, attributes): + """ update the span attributes with the given attributes if not None """ if span is None and self._display is not None: self._display.warning('span object is None. Please double check if that is expected.') else: - if attributeValue is not None: - span.set_attribute(attributeName, attributeValue) + if attributes is not None: + span.set_attributes(attributes) def add_attributes_for_service_map_if_possible(self, span, task_data): """Update the span attributes with the service that the task interacted with, if possible.""" redacted_url = self.parse_and_redact_url_if_possible(task_data.args) if redacted_url: - self.set_span_attribute(span, "http.url", redacted_url.geturl()) + span.set_attribute("http.url", redacted_url.geturl()) @staticmethod def parse_and_redact_url_if_possible(args): @@ -353,7 +443,7 @@ class OpenTelemetrySource(object): def get_error_message_from_results(results, action): for result in results: if result.get('failed', False): - return ('{0}({1}) - {2}').format(action, result.get('item', 'none'), OpenTelemetrySource.get_error_message(result)) + return f"{action}({result.get('item', 'none')}) - {OpenTelemetrySource.get_error_message(result)}" @staticmethod def _last_line(text): @@ -365,14 +455,14 @@ class OpenTelemetrySource(object): message = result.get('msg', 'failed') exception = result.get('exception') stderr = result.get('stderr') - return ('message: "{0}"\nexception: "{1}"\nstderr: "{2}"').format(message, exception, stderr) + return 
f"message: \"{message}\"\nexception: \"{exception}\"\nstderr: \"{stderr}\"" @staticmethod def enrich_error_message_from_results(results, action): message = "" for result in results: if result.get('failed', False): - message = ('{0}({1}) - {2}\n{3}').format(action, result.get('item', 'none'), OpenTelemetrySource.enrich_error_message(result), message) + message = f"{action}({result.get('item', 'none')}) - {OpenTelemetrySource.enrich_error_message(result)}\n{message}" return message @@ -389,6 +479,8 @@ class CallbackModule(CallbackBase): def __init__(self, display=None): super(CallbackModule, self).__init__(display=display) self.hide_task_arguments = None + self.disable_attributes_in_logs = None + self.disable_logs = None self.otel_service_name = None self.ansible_playbook = None self.play_name = None @@ -396,11 +488,13 @@ class CallbackModule(CallbackBase): self.errors = 0 self.disabled = False self.traceparent = False + self.store_spans_in_file = False + self.otel_exporter_otlp_traces_protocol = None if OTEL_LIBRARY_IMPORT_ERROR: - raise_from( - AnsibleError('The `opentelemetry-api`, `opentelemetry-exporter-otlp` or `opentelemetry-sdk` must be installed to use this plugin'), - OTEL_LIBRARY_IMPORT_ERROR) + raise AnsibleError( + 'The `opentelemetry-api`, `opentelemetry-exporter-otlp` or `opentelemetry-sdk` must be installed to use this plugin' + ) from OTEL_LIBRARY_IMPORT_ERROR self.tasks_data = OrderedDict() @@ -414,11 +508,18 @@ class CallbackModule(CallbackBase): environment_variable = self.get_option('enable_from_environment') if environment_variable is not None and os.environ.get(environment_variable, 'false').lower() != 'true': self.disabled = True - self._display.warning("The `enable_from_environment` option has been set and {0} is not enabled. " - "Disabling the `opentelemetry` callback plugin.".format(environment_variable)) + self._display.warning( + f"The `enable_from_environment` option has been set and {environment_variable} is not enabled. 
Disabling the `opentelemetry` callback plugin." + ) self.hide_task_arguments = self.get_option('hide_task_arguments') + self.disable_attributes_in_logs = self.get_option('disable_attributes_in_logs') + + self.disable_logs = self.get_option('disable_logs') + + self.store_spans_in_file = self.get_option('store_spans_in_file') + self.otel_service_name = self.get_option('otel_service_name') if not self.otel_service_name: @@ -427,6 +528,22 @@ class CallbackModule(CallbackBase): # See https://github.com/open-telemetry/opentelemetry-specification/issues/740 self.traceparent = self.get_option('traceparent') + self.otel_exporter_otlp_traces_protocol = self.get_option('otel_exporter_otlp_traces_protocol') + + def dump_results(self, task, result): + """ dump the results if disable_logs is not enabled """ + if self.disable_logs: + return "" + # ansible.builtin.uri contains the response in the json field + save = dict(result._result) + + if "json" in save and task.action in ("ansible.builtin.uri", "ansible.legacy.uri", "uri"): + save.pop("json") + # ansible.builtin.slurp contains the response in the content field + if "content" in save and task.action in ("ansible.builtin.slurp", "ansible.legacy.slurp", "slurp"): + save.pop("content") + return self._dump_results(save) + def v2_playbook_on_start(self, playbook): self.ansible_playbook = basename(playbook._file_name) @@ -475,28 +592,32 @@ class CallbackModule(CallbackBase): self.opentelemetry.finish_task( self.tasks_data, status, - result + result, + self.dump_results(self.tasks_data[result._task._uuid], result) ) def v2_runner_on_ok(self, result): self.opentelemetry.finish_task( self.tasks_data, 'ok', - result + result, + self.dump_results(self.tasks_data[result._task._uuid], result) ) def v2_runner_on_skipped(self, result): self.opentelemetry.finish_task( self.tasks_data, 'skipped', - result + result, + self.dump_results(self.tasks_data[result._task._uuid], result) ) def v2_playbook_on_include(self, included_file): 
self.opentelemetry.finish_task( self.tasks_data, 'included', - included_file + included_file, + "" ) def v2_playbook_on_stats(self, stats): @@ -504,13 +625,22 @@ class CallbackModule(CallbackBase): status = Status(status_code=StatusCode.OK) else: status = Status(status_code=StatusCode.ERROR) - self.opentelemetry.generate_distributed_traces( + otel_exporter = self.opentelemetry.generate_distributed_traces( self.otel_service_name, self.ansible_playbook, self.tasks_data, status, - self.traceparent + self.traceparent, + self.disable_logs, + self.disable_attributes_in_logs, + self.otel_exporter_otlp_traces_protocol, + self.store_spans_in_file ) + if self.store_spans_in_file: + spans = [json.loads(span.to_json()) for span in otel_exporter.get_finished_spans()] + with open(self.store_spans_in_file, "w", encoding="utf-8") as output: + json.dump({"spans": spans}, output, indent=4) + def v2_runner_on_async_failed(self, result, **kwargs): self.errors += 1 diff --git a/plugins/callback/print_task.py b/plugins/callback/print_task.py new file mode 100644 index 0000000000..f6008c817f --- /dev/null +++ b/plugins/callback/print_task.py @@ -0,0 +1,62 @@ +# Copyright (c) 2025, Max Mitschke +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + +DOCUMENTATION = r""" +name: print_task +type: aggregate +short_description: Prints playbook task snippet to job output +description: + - This plugin prints the currently executing playbook task to the job output. 
+version_added: 10.7.0 +requirements: + - enable in configuration +""" + +EXAMPLES = r""" +ansible.cfg: |- + # Enable plugin + [defaults] + callbacks_enabled=community.general.print_task +""" + +from yaml import load, dump + +try: + from yaml import CSafeDumper as SafeDumper + from yaml import CSafeLoader as SafeLoader +except ImportError: + from yaml import SafeDumper, SafeLoader + +from ansible.plugins.callback import CallbackBase + + +class CallbackModule(CallbackBase): + """ + This callback module tells you how long your plays ran for. + """ + CALLBACK_VERSION = 2.0 + CALLBACK_TYPE = 'aggregate' + CALLBACK_NAME = 'community.general.print_task' + + CALLBACK_NEEDS_ENABLED = True + + def __init__(self): + super(CallbackModule, self).__init__() + self._printed_message = False + + def _print_task(self, task): + if hasattr(task, '_ds'): + task_snippet = load(str([task._ds.copy()]), Loader=SafeLoader) + task_yaml = dump(task_snippet, sort_keys=False, Dumper=SafeDumper) + self._display.display(f"\n{task_yaml}\n") + self._printed_message = True + + def v2_playbook_on_task_start(self, task, is_conditional): + self._printed_message = False + + def v2_runner_on_start(self, host, task): + if not self._printed_message: + self._print_task(task) diff --git a/plugins/callback/say.py b/plugins/callback/say.py index 8d67e4336a..0455ee69e6 100644 --- a/plugins/callback/say.py +++ b/plugins/callback/say.py @@ -1,25 +1,22 @@ -# -*- coding: utf-8 -*- -# (c) 2012, Michael DeHaan, -# (c) 2017 Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# Copyright (c) 2012, Michael DeHaan, +# Copyright (c) 2017 Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later # Make coding more python3-ish -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import 
annotations -DOCUMENTATION = ''' - author: Unknown (!UNKNOWN) - name: say - type: notification - requirements: - - whitelisting in configuration - - the '/usr/bin/say' command line program (standard on macOS) or 'espeak' command line program - short_description: notify using software speech synthesizer - description: - - This plugin will use the 'say' or 'espeak' program to "speak" about play events. - notes: - - In 2.8, this callback has been renamed from C(osx_say) into M(community.general.say). -''' +DOCUMENTATION = r""" +author: Unknown (!UNKNOWN) +name: say +type: notification +requirements: + - whitelisting in configuration + - the C(/usr/bin/say) command line program (standard on macOS) or C(espeak) command line program +short_description: Notify using software speech synthesizer +description: + - This plugin uses C(say) or C(espeak) to "speak" about play events. +""" import platform import subprocess @@ -51,7 +48,7 @@ class CallbackModule(CallbackBase): self.synthesizer = get_bin_path('say') if platform.system() != 'Darwin': # 'say' binary available, it might be GNUstep tool which doesn't support 'voice' parameter - self._display.warning("'say' executable found but system is '%s': ignoring voice parameter" % platform.system()) + self._display.warning(f"'say' executable found but system is '{platform.system()}': ignoring voice parameter") else: self.FAILED_VOICE = 'Zarvox' self.REGULAR_VOICE = 'Trinoids' @@ -70,7 +67,7 @@ class CallbackModule(CallbackBase): # ansible will not call any callback if disabled is set to True if not self.synthesizer: self.disabled = True - self._display.warning("Unable to find either 'say' or 'espeak' executable, plugin %s disabled" % os.path.basename(__file__)) + self._display.warning(f"Unable to find either 'say' or 'espeak' executable, plugin {os.path.basename(__file__)} disabled") def say(self, msg, voice): cmd = [self.synthesizer, msg] @@ -79,7 +76,7 @@ class CallbackModule(CallbackBase): subprocess.call(cmd) def 
runner_on_failed(self, host, res, ignore_errors=False): - self.say("Failure on host %s" % host, self.FAILED_VOICE) + self.say(f"Failure on host {host}", self.FAILED_VOICE) def runner_on_ok(self, host, res): self.say("pew", self.LASER_VOICE) @@ -88,13 +85,13 @@ class CallbackModule(CallbackBase): self.say("pew", self.LASER_VOICE) def runner_on_unreachable(self, host, res): - self.say("Failure on host %s" % host, self.FAILED_VOICE) + self.say(f"Failure on host {host}", self.FAILED_VOICE) def runner_on_async_ok(self, host, res, jid): self.say("pew", self.LASER_VOICE) def runner_on_async_failed(self, host, res, jid): - self.say("Failure on host %s" % host, self.FAILED_VOICE) + self.say(f"Failure on host {host}", self.FAILED_VOICE) def playbook_on_start(self): self.say("Running Playbook", self.REGULAR_VOICE) @@ -104,15 +101,15 @@ class CallbackModule(CallbackBase): def playbook_on_task_start(self, name, is_conditional): if not is_conditional: - self.say("Starting task: %s" % name, self.REGULAR_VOICE) + self.say(f"Starting task: {name}", self.REGULAR_VOICE) else: - self.say("Notifying task: %s" % name, self.REGULAR_VOICE) + self.say(f"Notifying task: {name}", self.REGULAR_VOICE) def playbook_on_setup(self): self.say("Gathering facts", self.REGULAR_VOICE) def playbook_on_play_start(self, name): - self.say("Starting play: %s" % name, self.HAPPY_VOICE) + self.say(f"Starting play: {name}", self.HAPPY_VOICE) def playbook_on_stats(self, stats): self.say("Play complete", self.HAPPY_VOICE) diff --git a/plugins/callback/selective.py b/plugins/callback/selective.py index 403eb84b33..2a7dd07a3e 100644 --- a/plugins/callback/selective.py +++ b/plugins/callback/selective.py @@ -1,40 +1,39 @@ -# -*- coding: utf-8 -*- -# (c) Fastly, inc 2016 -# (c) 2017 Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# Copyright (c) Fastly, inc 2016 +# Copyright (c) 2017 Ansible Project +# GNU General Public License v3.0+ (see 
LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = ''' - author: Unknown (!UNKNOWN) - name: selective - type: stdout - requirements: - - set as main display callback - short_description: only print certain tasks - description: - - This callback only prints tasks that have been tagged with `print_action` or that have failed. - This allows operators to focus on the tasks that provide value only. - - Tasks that are not printed are placed with a '.'. - - If you increase verbosity all tasks are printed. - options: - nocolor: - default: False - description: This setting allows suppressing colorizing output - env: - - name: ANSIBLE_NOCOLOR - - name: ANSIBLE_SELECTIVE_DONT_COLORIZE - ini: - - section: defaults - key: nocolor - type: boolean -''' +DOCUMENTATION = r""" +author: Unknown (!UNKNOWN) +name: selective +type: stdout +requirements: + - set as main display callback +short_description: Only print certain tasks +description: + - This callback only prints tasks that have been tagged with C(print_action) or that have failed. This allows operators + to focus on the tasks that provide value only. + - Tasks that are not printed are placed with a C(.). + - If you increase verbosity all tasks are printed. +options: + nocolor: + default: false + description: This setting allows suppressing colorizing output. 
+ env: + - name: ANSIBLE_NOCOLOR + - name: ANSIBLE_SELECTIVE_DONT_COLORIZE + ini: + - section: defaults + key: nocolor + type: boolean +""" -EXAMPLES = """ - - ansible.builtin.debug: msg="This will not be printed" - - ansible.builtin.debug: msg="But this will" - tags: [print_action] +EXAMPLES = r""" +- ansible.builtin.debug: msg="This will not be printed" +- ansible.builtin.debug: msg="But this will" + tags: [print_action] """ import difflib @@ -43,26 +42,17 @@ from ansible import constants as C from ansible.plugins.callback import CallbackBase from ansible.module_utils.common.text.converters import to_text -try: - codeCodes = C.COLOR_CODES -except AttributeError: - # This constant was moved to ansible.constants in - # https://github.com/ansible/ansible/commit/1202dd000f10b0e8959019484f1c3b3f9628fc67 - # (will be included in ansible-core 2.11.0). For older Ansible/ansible-base versions, - # we include from the original location. - from ansible.utils.color import codeCodes - DONT_COLORIZE = False COLORS = { 'normal': '\033[0m', - 'ok': '\033[{0}m'.format(codeCodes[C.COLOR_OK]), + 'ok': f'\x1b[{C.COLOR_CODES[C.COLOR_OK]}m', 'bold': '\033[1m', 'not_so_bold': '\033[1m\033[34m', - 'changed': '\033[{0}m'.format(codeCodes[C.COLOR_CHANGED]), - 'failed': '\033[{0}m'.format(codeCodes[C.COLOR_ERROR]), + 'changed': f'\x1b[{C.COLOR_CODES[C.COLOR_CHANGED]}m', + 'failed': f'\x1b[{C.COLOR_CODES[C.COLOR_ERROR]}m', 'endc': '\033[0m', - 'skipped': '\033[{0}m'.format(codeCodes[C.COLOR_SKIP]), + 'skipped': f'\x1b[{C.COLOR_CODES[C.COLOR_SKIP]}m', } @@ -81,7 +71,7 @@ def colorize(msg, color): if DONT_COLORIZE: return msg else: - return '{0}{1}{2}'.format(COLORS[color], msg, COLORS['endc']) + return f"{COLORS[color]}{msg}{COLORS['endc']}" class CallbackModule(CallbackBase): @@ -114,15 +104,15 @@ class CallbackModule(CallbackBase): line_length = 120 if self.last_skipped: print() - msg = colorize("# {0} {1}".format(task_name, - '*' * (line_length - len(task_name))), 'bold') + line = f"# 
{task_name} " + msg = colorize(f"{line}{'*' * (line_length - len(line))}", 'bold') print(msg) def _indent_text(self, text, indent_level): lines = text.splitlines() result_lines = [] for l in lines: - result_lines.append("{0}{1}".format(' ' * indent_level, l)) + result_lines.append(f"{' ' * indent_level}{l}") return '\n'.join(result_lines) def _print_diff(self, diff, indent_level): @@ -155,19 +145,19 @@ class CallbackModule(CallbackBase): change_string = colorize('FAILED!!!', color) else: color = 'changed' if changed else 'ok' - change_string = colorize("changed={0}".format(changed), color) + change_string = colorize(f"changed={changed}", color) msg = colorize(msg, color) line_length = 120 spaces = ' ' * (40 - len(name) - indent_level) - line = "{0} * {1}{2}- {3}".format(' ' * indent_level, name, spaces, change_string) + line = f"{' ' * indent_level} * {name}{spaces}- {change_string}" if len(msg) < 50: - line += ' -- {0}'.format(msg) - print("{0} {1}---------".format(line, '-' * (line_length - len(line)))) + line += f' -- {msg}' + print(f"{line} {'-' * (line_length - len(line))}---------") else: - print("{0} {1}".format(line, '-' * (line_length - len(line)))) + print(f"{line} {'-' * (line_length - len(line))}") print(self._indent_text(msg, indent_level + 4)) if diff: @@ -217,7 +207,7 @@ class CallbackModule(CallbackBase): stderr = [r.get('exception', None), r.get('module_stderr', None)] stderr = "\n".join([e for e in stderr if e]).strip() - self._print_host_or_item(r['item'], + self._print_host_or_item(r[r['ansible_loop_var']], r.get('changed', False), to_text(r.get('msg', '')), r.get('diff', None), @@ -247,8 +237,10 @@ class CallbackModule(CallbackBase): else: color = 'ok' - msg = '{0} : ok={1}\tchanged={2}\tfailed={3}\tunreachable={4}\trescued={5}\tignored={6}'.format( - host, s['ok'], s['changed'], s['failures'], s['unreachable'], s['rescued'], s['ignored']) + msg = ( + f"{host} : ok={s['ok']}\tchanged={s['changed']}\tfailed={s['failures']}\tunreachable=" + 
f"{s['unreachable']}\trescued={s['rescued']}\tignored={s['ignored']}" + ) print(colorize(msg, color)) def v2_runner_on_skipped(self, result, **kwargs): @@ -260,17 +252,15 @@ class CallbackModule(CallbackBase): line_length = 120 spaces = ' ' * (31 - len(result._host.name) - 4) - line = " * {0}{1}- {2}".format(colorize(result._host.name, 'not_so_bold'), - spaces, - colorize("skipped", 'skipped'),) + line = f" * {colorize(result._host.name, 'not_so_bold')}{spaces}- {colorize('skipped', 'skipped')}" reason = result._result.get('skipped_reason', '') or \ result._result.get('skip_reason', '') if len(reason) < 50: - line += ' -- {0}'.format(reason) - print("{0} {1}---------".format(line, '-' * (line_length - len(line)))) + line += f' -- {reason}' + print(f"{line} {'-' * (line_length - len(line))}---------") else: - print("{0} {1}".format(line, '-' * (line_length - len(line)))) + print(f"{line} {'-' * (line_length - len(line))}") print(self._indent_text(reason, 8)) print(reason) diff --git a/plugins/callback/slack.py b/plugins/callback/slack.py index 5cb402b109..e1d95abe06 100644 --- a/plugins/callback/slack.py +++ b/plugins/callback/slack.py @@ -1,65 +1,70 @@ -# -*- coding: utf-8 -*- -# (C) 2014-2015, Matt Martz -# (C) 2017 Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# Copyright (c) 2014-2015, Matt Martz +# Copyright (c) 2017 Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later # Make coding more python3-ish -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = ''' - author: Unknown (!UNKNOWN) - name: slack - type: notification - requirements: - - whitelist in configuration - - prettytable (python library) - short_description: Sends play events to a Slack channel +DOCUMENTATION = r""" +author: 
Unknown (!UNKNOWN) +name: slack +type: notification +requirements: + - whitelist in configuration + - prettytable (python library) +short_description: Sends play events to a Slack channel +description: + - This is an ansible callback plugin that sends status updates to a Slack channel during playbook execution. +options: + http_agent: description: - - This is an ansible callback plugin that sends status updates to a Slack channel during playbook execution. - - Before 2.4 only environment variables were available for configuring this plugin - options: - webhook_url: - required: True - description: Slack Webhook URL - env: - - name: SLACK_WEBHOOK_URL - ini: - - section: callback_slack - key: webhook_url - channel: - default: "#ansible" - description: Slack room to post in. - env: - - name: SLACK_CHANNEL - ini: - - section: callback_slack - key: channel - username: - description: Username to post as. - env: - - name: SLACK_USERNAME - default: ansible - ini: - - section: callback_slack - key: username - validate_certs: - description: validate the SSL certificate of the Slack server. (For HTTPS URLs) - env: - - name: SLACK_VALIDATE_CERTS - ini: - - section: callback_slack - key: validate_certs - default: True - type: bool -''' + - HTTP user agent to use for requests to Slack. + type: string + version_added: "10.5.0" + webhook_url: + required: true + description: Slack Webhook URL. + type: str + env: + - name: SLACK_WEBHOOK_URL + ini: + - section: callback_slack + key: webhook_url + channel: + default: "#ansible" + description: Slack room to post in. + type: str + env: + - name: SLACK_CHANNEL + ini: + - section: callback_slack + key: channel + username: + description: Username to post as. + type: str + env: + - name: SLACK_USERNAME + default: ansible + ini: + - section: callback_slack + key: username + validate_certs: + description: Validate the SSL certificate of the Slack server for HTTPS URLs. 
+ env: + - name: SLACK_VALIDATE_CERTS + ini: + - section: callback_slack + key: validate_certs + default: true + type: bool +""" import json import os import uuid from ansible import context -from ansible.module_utils.common.text.converters import to_text from ansible.module_utils.urls import open_url from ansible.plugins.callback import CallbackBase @@ -105,7 +110,7 @@ class CallbackModule(CallbackBase): self.username = self.get_option('username') self.show_invocation = (self._display.verbosity > 1) self.validate_certs = self.get_option('validate_certs') - + self.http_agent = self.get_option('http_agent') if self.webhook_url is None: self.disabled = True self._display.warning('Slack Webhook URL was not provided. The ' @@ -131,18 +136,22 @@ class CallbackModule(CallbackBase): self._display.debug(data) self._display.debug(self.webhook_url) try: - response = open_url(self.webhook_url, data=data, validate_certs=self.validate_certs, - headers=headers) + response = open_url( + self.webhook_url, + data=data, + validate_certs=self.validate_certs, + headers=headers, + http_agent=self.http_agent, + ) return response.read() except Exception as e: - self._display.warning(u'Could not submit message to Slack: %s' % - to_text(e)) + self._display.warning(f'Could not submit message to Slack: {e}') def v2_playbook_on_start(self, playbook): self.playbook_name = os.path.basename(playbook._file_name) title = [ - '*Playbook initiated* (_%s_)' % self.guid + f'*Playbook initiated* (_{self.guid}_)' ] invocation_items = [] @@ -153,23 +162,23 @@ class CallbackModule(CallbackBase): subset = context.CLIARGS['subset'] inventory = [os.path.abspath(i) for i in context.CLIARGS['inventory']] - invocation_items.append('Inventory: %s' % ', '.join(inventory)) + invocation_items.append(f"Inventory: {', '.join(inventory)}") if tags and tags != ['all']: - invocation_items.append('Tags: %s' % ', '.join(tags)) + invocation_items.append(f"Tags: {', '.join(tags)}") if skip_tags: - 
invocation_items.append('Skip Tags: %s' % ', '.join(skip_tags)) + invocation_items.append(f"Skip Tags: {', '.join(skip_tags)}") if subset: - invocation_items.append('Limit: %s' % subset) + invocation_items.append(f'Limit: {subset}') if extra_vars: - invocation_items.append('Extra Vars: %s' % - ' '.join(extra_vars)) + invocation_items.append(f"Extra Vars: {' '.join(extra_vars)}") - title.append('by *%s*' % context.CLIARGS['remote_user']) + title.append(f"by *{context.CLIARGS['remote_user']}*") - title.append('\n\n*%s*' % self.playbook_name) + title.append(f'\n\n*{self.playbook_name}*') msg_items = [' '.join(title)] if invocation_items: - msg_items.append('```\n%s\n```' % '\n'.join(invocation_items)) + _inv_item = '\n'.join(invocation_items) + msg_items.append(f'```\n{_inv_item}\n```') msg = '\n'.join(msg_items) @@ -189,8 +198,8 @@ class CallbackModule(CallbackBase): def v2_playbook_on_play_start(self, play): """Display Play start messages""" - name = play.name or 'Play name not specified (%s)' % play._uuid - msg = '*Starting play* (_%s_)\n\n*%s*' % (self.guid, name) + name = play.name or f'Play name not specified ({play._uuid})' + msg = f'*Starting play* (_{self.guid}_)\n\n*{name}*' attachments = [ { 'fallback': msg, @@ -225,7 +234,7 @@ class CallbackModule(CallbackBase): attachments = [] msg_items = [ - '*Playbook Complete* (_%s_)' % self.guid + f'*Playbook Complete* (_{self.guid}_)' ] if failures or unreachable: color = 'danger' @@ -234,7 +243,7 @@ class CallbackModule(CallbackBase): color = 'good' msg_items.append('\n*Success!*') - msg_items.append('```\n%s\n```' % t) + msg_items.append(f'```\n{t}\n```') msg = '\n'.join(msg_items) diff --git a/plugins/callback/splunk.py b/plugins/callback/splunk.py index cb63d3b23f..635a3109bc 100644 --- a/plugins/callback/splunk.py +++ b/plugins/callback/splunk.py @@ -1,87 +1,76 @@ -# -*- coding: utf-8 -*- -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms 
of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . +# Copyright (c) Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = ''' - name: splunk - type: aggregate - short_description: Sends task result events to Splunk HTTP Event Collector - author: "Stuart Hirst (!UNKNOWN) " +DOCUMENTATION = r""" +name: splunk +type: notification +short_description: Sends task result events to Splunk HTTP Event Collector +author: "Stuart Hirst (!UNKNOWN) " +description: + - This callback plugin sends task results as JSON formatted events to a Splunk HTTP collector. + - The companion Splunk Monitoring & Diagnostics App is available here U(https://splunkbase.splunk.com/app/4023/). + - Credit to "Ryan Currah (@ryancurrah)" for original source upon which this is based. +requirements: + - Whitelisting this callback plugin + - 'Create a HTTP Event Collector in Splunk' + - 'Define the URL and token in C(ansible.cfg)' +options: + url: + description: URL to the Splunk HTTP collector source. + type: str + env: + - name: SPLUNK_URL + ini: + - section: callback_splunk + key: url + authtoken: + description: Token to authenticate the connection to the Splunk HTTP collector. 
+ type: str + env: + - name: SPLUNK_AUTHTOKEN + ini: + - section: callback_splunk + key: authtoken + validate_certs: + description: Whether to validate certificates for connections to HEC. It is not recommended to set to V(false) except + when you are sure that nobody can intercept the connection between this plugin and HEC, as setting it to V(false) allows + man-in-the-middle attacks! + env: + - name: SPLUNK_VALIDATE_CERTS + ini: + - section: callback_splunk + key: validate_certs + type: bool + default: true + version_added: '1.0.0' + include_milliseconds: + description: Whether to include milliseconds as part of the generated timestamp field in the event sent to the Splunk + HTTP collector. + env: + - name: SPLUNK_INCLUDE_MILLISECONDS + ini: + - section: callback_splunk + key: include_milliseconds + type: bool + default: false + version_added: 2.0.0 + batch: description: - - This callback plugin will send task results as JSON formatted events to a Splunk HTTP collector. - - The companion Splunk Monitoring & Diagnostics App is available here "https://splunkbase.splunk.com/app/4023/" - - Credit to "Ryan Currah (@ryancurrah)" for original source upon which this is based. - requirements: - - Whitelisting this callback plugin - - 'Create a HTTP Event Collector in Splunk' - - 'Define the url and token in ansible.cfg' - options: - url: - description: URL to the Splunk HTTP collector source - env: - - name: SPLUNK_URL - ini: - - section: callback_splunk - key: url - authtoken: - description: Token to authenticate the connection to the Splunk HTTP collector - env: - - name: SPLUNK_AUTHTOKEN - ini: - - section: callback_splunk - key: authtoken - validate_certs: - description: Whether to validate certificates for connections to HEC. It is not recommended to set to - C(false) except when you are sure that nobody can intercept the connection - between this plugin and HEC, as setting it to C(false) allows man-in-the-middle attacks! 
- env: - - name: SPLUNK_VALIDATE_CERTS - ini: - - section: callback_splunk - key: validate_certs - type: bool - default: true - version_added: '1.0.0' - include_milliseconds: - description: Whether to include milliseconds as part of the generated timestamp field in the event - sent to the Splunk HTTP collector - env: - - name: SPLUNK_INCLUDE_MILLISECONDS - ini: - - section: callback_splunk - key: include_milliseconds - type: bool - default: false - version_added: 2.0.0 - batch: - description: - - Correlation ID which can be set across multiple playbook executions. - env: - - name: SPLUNK_BATCH - ini: - - section: callback_splunk - key: batch - type: str - version_added: 3.3.0 -''' + - Correlation ID which can be set across multiple playbook executions. + env: + - name: SPLUNK_BATCH + ini: + - section: callback_splunk + key: batch + type: str + version_added: 3.3.0 +""" -EXAMPLES = ''' -examples: > +EXAMPLES = r""" +examples: >- To enable, add this to your ansible.cfg file in the defaults block [defaults] callback_whitelist = community.general.splunk @@ -92,26 +81,29 @@ examples: > [callback_splunk] url = http://mysplunkinstance.datapaas.io:8088/services/collector/event authtoken = f23blad6-5965-4537-bf69-5b5a545blabla88 -''' +""" import json import uuid import socket import getpass -from datetime import datetime from os.path import basename +from ansible.module_utils.ansible_release import __version__ as ansible_version from ansible.module_utils.urls import open_url from ansible.parsing.ajson import AnsibleJSONEncoder from ansible.plugins.callback import CallbackBase +from ansible_collections.community.general.plugins.module_utils.datetime import ( + now, +) + class SplunkHTTPCollectorSource(object): def __init__(self): self.ansible_check_mode = False self.ansible_playbook = "" - self.ansible_version = "" self.session = str(uuid.uuid4()) self.host = socket.gethostname() self.ip_address = socket.gethostbyname(socket.gethostname()) @@ -121,10 +113,6 @@ class 
SplunkHTTPCollectorSource(object): if result._task_fields['args'].get('_ansible_check_mode') is True: self.ansible_check_mode = True - if result._task_fields['args'].get('_ansible_version'): - self.ansible_version = \ - result._task_fields['args'].get('_ansible_version') - if result._task._role: ansible_role = str(result._task._role) else: @@ -145,12 +133,12 @@ class SplunkHTTPCollectorSource(object): else: time_format = '%Y-%m-%d %H:%M:%S +0000' - data['timestamp'] = datetime.utcnow().strftime(time_format) + data['timestamp'] = now().strftime(time_format) data['host'] = self.host data['ip_address'] = self.ip_address data['user'] = self.user data['runtime'] = runtime - data['ansible_version'] = self.ansible_version + data['ansible_version'] = ansible_version data['ansible_check_mode'] = self.ansible_check_mode data['ansible_host'] = result._host.name data['ansible_playbook'] = self.ansible_playbook @@ -159,15 +147,14 @@ class SplunkHTTPCollectorSource(object): data['ansible_result'] = result._result # This wraps the json payload in and outer json event needed by Splunk - jsondata = json.dumps(data, cls=AnsibleJSONEncoder, sort_keys=True) - jsondata = '{"event":' + jsondata + "}" + jsondata = json.dumps({"event": data}, cls=AnsibleJSONEncoder, sort_keys=True) open_url( url, jsondata, headers={ 'Content-type': 'application/json', - 'Authorization': 'Splunk ' + authtoken + 'Authorization': f"Splunk {authtoken}" }, method='POST', validate_certs=validate_certs @@ -176,7 +163,7 @@ class SplunkHTTPCollectorSource(object): class CallbackModule(CallbackBase): CALLBACK_VERSION = 2.0 - CALLBACK_TYPE = 'aggregate' + CALLBACK_TYPE = 'notification' CALLBACK_NAME = 'community.general.splunk' CALLBACK_NEEDS_WHITELIST = True @@ -192,7 +179,7 @@ class CallbackModule(CallbackBase): def _runtime(self, result): return ( - datetime.utcnow() - + now() - self.start_datetimes[result._task._uuid] ).total_seconds() @@ -231,10 +218,10 @@ class CallbackModule(CallbackBase): 
self.splunk.ansible_playbook = basename(playbook._file_name) def v2_playbook_on_task_start(self, task, is_conditional): - self.start_datetimes[task._uuid] = datetime.utcnow() + self.start_datetimes[task._uuid] = now() def v2_playbook_on_handler_task_start(self, task): - self.start_datetimes[task._uuid] = datetime.utcnow() + self.start_datetimes[task._uuid] = now() def v2_runner_on_ok(self, result, **kwargs): self.splunk.send_event( diff --git a/plugins/callback/sumologic.py b/plugins/callback/sumologic.py index b1ce85af77..3f99bf216a 100644 --- a/plugins/callback/sumologic.py +++ b/plugins/callback/sumologic.py @@ -1,45 +1,33 @@ -# -*- coding: utf-8 -*- -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . 
+# Copyright (c) Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = ''' +DOCUMENTATION = r""" name: sumologic -type: aggregate +type: notification short_description: Sends task result events to Sumologic author: "Ryan Currah (@ryancurrah)" description: - - This callback plugin will send task results as JSON formatted events to a Sumologic HTTP collector source + - This callback plugin sends task results as JSON formatted events to a Sumologic HTTP collector source. requirements: - Whitelisting this callback plugin - - 'Create a HTTP collector source in Sumologic and specify a custom timestamp format of C(yyyy-MM-dd HH:mm:ss ZZZZ) and a custom timestamp locator - of C("timestamp": "(.*)")' + - 'Create a HTTP collector source in Sumologic and specify a custom timestamp format of V(yyyy-MM-dd HH:mm:ss ZZZZ) and + a custom timestamp locator of V("timestamp": "(.*\)")' options: url: - description: URL to the Sumologic HTTP collector source + description: URL to the Sumologic HTTP collector source. 
+ type: str env: - name: SUMOLOGIC_URL ini: - section: callback_sumologic key: url -''' +""" -EXAMPLES = ''' -examples: > +EXAMPLES = r""" +examples: |- To enable, add this to your ansible.cfg file in the defaults block [defaults] callback_whitelist = community.general.sumologic @@ -50,26 +38,29 @@ examples: > Set the ansible.cfg variable in the callback_sumologic block [callback_sumologic] url = https://endpoint1.collection.us2.sumologic.com/receiver/v1/http/R8moSv1d8EW9LAUFZJ6dbxCFxwLH6kfCdcBfddlfxCbLuL-BN5twcTpMk__pYy_cDmp== -''' +""" import json import uuid import socket import getpass -from datetime import datetime from os.path import basename +from ansible.module_utils.ansible_release import __version__ as ansible_version from ansible.module_utils.urls import open_url from ansible.parsing.ajson import AnsibleJSONEncoder from ansible.plugins.callback import CallbackBase +from ansible_collections.community.general.plugins.module_utils.datetime import ( + now, +) + class SumologicHTTPCollectorSource(object): def __init__(self): self.ansible_check_mode = False self.ansible_playbook = "" - self.ansible_version = "" self.session = str(uuid.uuid4()) self.host = socket.gethostname() self.ip_address = socket.gethostbyname(socket.gethostname()) @@ -79,10 +70,6 @@ class SumologicHTTPCollectorSource(object): if result._task_fields['args'].get('_ansible_check_mode') is True: self.ansible_check_mode = True - if result._task_fields['args'].get('_ansible_version'): - self.ansible_version = \ - result._task_fields['args'].get('_ansible_version') - if result._task._role: ansible_role = str(result._task._role) else: @@ -95,13 +82,12 @@ class SumologicHTTPCollectorSource(object): data['uuid'] = result._task._uuid data['session'] = self.session data['status'] = state - data['timestamp'] = datetime.utcnow().strftime('%Y-%m-%d %H:%M:%S ' - '+0000') + data['timestamp'] = now().strftime('%Y-%m-%d %H:%M:%S +0000') data['host'] = self.host data['ip_address'] = self.ip_address 
data['user'] = self.user data['runtime'] = runtime - data['ansible_version'] = self.ansible_version + data['ansible_version'] = ansible_version data['ansible_check_mode'] = self.ansible_check_mode data['ansible_host'] = result._host.name data['ansible_playbook'] = self.ansible_playbook @@ -122,7 +108,7 @@ class SumologicHTTPCollectorSource(object): class CallbackModule(CallbackBase): CALLBACK_VERSION = 2.0 - CALLBACK_TYPE = 'aggregate' + CALLBACK_TYPE = 'notification' CALLBACK_NAME = 'community.general.sumologic' CALLBACK_NEEDS_WHITELIST = True @@ -134,7 +120,7 @@ class CallbackModule(CallbackBase): def _runtime(self, result): return ( - datetime.utcnow() - + now() - self.start_datetimes[result._task._uuid] ).total_seconds() @@ -155,10 +141,10 @@ class CallbackModule(CallbackBase): self.sumologic.ansible_playbook = basename(playbook._file_name) def v2_playbook_on_task_start(self, task, is_conditional): - self.start_datetimes[task._uuid] = datetime.utcnow() + self.start_datetimes[task._uuid] = now() def v2_playbook_on_handler_task_start(self, task): - self.start_datetimes[task._uuid] = datetime.utcnow() + self.start_datetimes[task._uuid] = now() def v2_runner_on_ok(self, result, **kwargs): self.sumologic.send_event( diff --git a/plugins/callback/syslog_json.py b/plugins/callback/syslog_json.py index e6fc1ee261..657ca017f6 100644 --- a/plugins/callback/syslog_json.py +++ b/plugins/callback/syslog_json.py @@ -1,60 +1,58 @@ -# -*- coding: utf-8 -*- -# (c) 2017 Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# Copyright (c) 2017 Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later # Make coding more python3-ish -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = ''' - author: Unknown (!UNKNOWN) - 
name: syslog_json - type: notification - requirements: - - whitelist in configuration - short_description: sends JSON events to syslog - description: - - This plugin logs ansible-playbook and ansible runs to a syslog server in JSON format - - Before Ansible 2.9 only environment variables were available for configuration - options: - server: - description: syslog server that will receive the event - env: - - name: SYSLOG_SERVER - default: localhost - ini: - - section: callback_syslog_json - key: syslog_server - port: - description: port on which the syslog server is listening - env: - - name: SYSLOG_PORT - default: 514 - ini: - - section: callback_syslog_json - key: syslog_port - facility: - description: syslog facility to log as - env: - - name: SYSLOG_FACILITY - default: user - ini: - - section: callback_syslog_json - key: syslog_facility - setup: - description: Log setup tasks. - env: - - name: ANSIBLE_SYSLOG_SETUP - type: bool - default: true - ini: - - section: callback_syslog_json - key: syslog_setup - version_added: 4.5.0 -''' - -import os -import json +DOCUMENTATION = r""" +author: Unknown (!UNKNOWN) +name: syslog_json +type: notification +requirements: + - whitelist in configuration +short_description: Sends JSON events to syslog +description: + - This plugin logs ansible-playbook and ansible runs to a syslog server in JSON format. +options: + server: + description: Syslog server that receives the event. + type: str + env: + - name: SYSLOG_SERVER + default: localhost + ini: + - section: callback_syslog_json + key: syslog_server + port: + description: Port on which the syslog server is listening. + type: int + env: + - name: SYSLOG_PORT + default: 514 + ini: + - section: callback_syslog_json + key: syslog_port + facility: + description: Syslog facility to log as. + type: str + env: + - name: SYSLOG_FACILITY + default: user + ini: + - section: callback_syslog_json + key: syslog_facility + setup: + description: Log setup tasks. 
+ env: + - name: ANSIBLE_SYSLOG_SETUP + type: bool + default: true + ini: + - section: callback_syslog_json + key: syslog_setup + version_added: 4.5.0 +""" import logging import logging.handlers @@ -70,7 +68,7 @@ class CallbackModule(CallbackBase): """ CALLBACK_VERSION = 2.0 - CALLBACK_TYPE = 'aggregate' + CALLBACK_TYPE = 'notification' CALLBACK_NAME = 'community.general.syslog_json' CALLBACK_NEEDS_WHITELIST = True diff --git a/plugins/callback/tasks_only.py b/plugins/callback/tasks_only.py new file mode 100644 index 0000000000..3de81fc2db --- /dev/null +++ b/plugins/callback/tasks_only.py @@ -0,0 +1,68 @@ + +# Copyright (c) 2025, Felix Fontein +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + +DOCUMENTATION = r""" +author: Felix Fontein (@felixfontein) +name: tasks_only +type: stdout +version_added: 11.1.0 +short_description: Only show tasks +description: + - Removes play start and stats marker from P(ansible.builtin.default#callback)'s output. + - Can be used to generate output for documentation examples. + For this, the O(number_of_columns) option should be set to an explicit value. +extends_documentation_fragment: + - ansible.builtin.default_callback + - ansible.builtin.result_format_callback +options: + number_of_columns: + description: + - Sets the number of columns for Ansible's display. 
+ type: int + env: + - name: ANSIBLE_COLLECTIONS_TASKS_ONLY_NUMBER_OF_COLUMNS + result_format: + # Part of the ansible.builtin.result_format_callback doc fragment + version_added: 11.2.0 + pretty_results: + # Part of the ansible.builtin.result_format_callback doc fragment + version_added: 11.2.0 +""" + +EXAMPLES = r""" +--- +# Enable callback in ansible.cfg: +ansible_config: |- + [defaults] + stdout_callback = community.general.tasks_only + +--- +# Enable callback with environment variables: +environment_variable: |- + ANSIBLE_STDOUT_CALLBACK=community.general.tasks_only +""" + +from ansible.plugins.callback.default import CallbackModule as Default + + +class CallbackModule(Default): + CALLBACK_VERSION = 2.0 + CALLBACK_TYPE = 'stdout' + CALLBACK_NAME = 'community.general.tasks_only' + + def v2_playbook_on_play_start(self, play): + pass + + def v2_playbook_on_stats(self, stats): + pass + + def set_options(self, *args, **kwargs): + result = super(CallbackModule, self).set_options(*args, **kwargs) + self.number_of_columns = self.get_option("number_of_columns") + if self.number_of_columns is not None: + self._display.columns = self.number_of_columns + return result diff --git a/plugins/callback/timestamp.py b/plugins/callback/timestamp.py new file mode 100644 index 0000000000..f733fa8cb7 --- /dev/null +++ b/plugins/callback/timestamp.py @@ -0,0 +1,124 @@ + +# Copyright (c) 2024, kurokobo +# Copyright (c) 2014, Michael DeHaan +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + + +DOCUMENTATION = r""" +name: timestamp +type: stdout +short_description: Adds simple timestamp for each header +version_added: 9.0.0 +description: + - This callback adds simple timestamp for each header. +author: kurokobo (@kurokobo) +options: + timezone: + description: + - Timezone to use for the timestamp in IANA time zone format. 
+ - For example V(America/New_York), V(Asia/Tokyo)). Ignored on Python < 3.9. + ini: + - section: callback_timestamp + key: timezone + env: + - name: ANSIBLE_CALLBACK_TIMESTAMP_TIMEZONE + type: string + format_string: + description: + - Format of the timestamp shown to user in 1989 C standard format. + - Refer to L(the Python documentation,https://docs.python.org/3/library/datetime.html#strftime-and-strptime-format-codes) + for the available format codes. + ini: + - section: callback_timestamp + key: format_string + env: + - name: ANSIBLE_CALLBACK_TIMESTAMP_FORMAT_STRING + default: "%H:%M:%S" + type: string +seealso: + - plugin: ansible.posix.profile_tasks + plugin_type: callback + description: >- + You can use P(ansible.posix.profile_tasks#callback) callback plugin to time individual tasks and overall execution time + with detailed timestamps. +extends_documentation_fragment: + - ansible.builtin.default_callback + - ansible.builtin.result_format_callback +""" + + +from ansible.plugins.callback.default import CallbackModule as Default +from ansible.utils.display import get_text_width +from ansible.module_utils.common.text.converters import to_text +from datetime import datetime +import types +import sys + +# Store whether the zoneinfo module is available +_ZONEINFO_AVAILABLE = sys.version_info >= (3, 9) + + +def get_datetime_now(tz): + """ + Returns the current timestamp with the specified timezone + """ + return datetime.now(tz=tz) + + +def banner(self, msg, color=None, cows=True): + """ + Prints a header-looking line with cowsay or stars with length depending on terminal width (3 minimum) with trailing timestamp + + Based on the banner method of Display class from ansible.utils.display + + https://github.com/ansible/ansible/blob/4403519afe89138042108e237aef317fd5f09c33/lib/ansible/utils/display.py#L511 + """ + timestamp = get_datetime_now(self.timestamp_tzinfo).strftime(self.timestamp_format_string) + timestamp_len = get_text_width(timestamp) + 1 # +1 for leading 
space + + msg = to_text(msg) + if self.b_cowsay and cows: + try: + self.banner_cowsay(f"{msg} @ {timestamp}") + return + except OSError: + self.warning("somebody cleverly deleted cowsay or something during the PB run. heh.") + + msg = msg.strip() + try: + star_len = self.columns - get_text_width(msg) - timestamp_len + except EnvironmentError: + star_len = self.columns - len(msg) - timestamp_len + if star_len <= 3: + star_len = 3 + stars = "*" * star_len + self.display(f"\n{msg} {stars} {timestamp}", color=color) + + +class CallbackModule(Default): + CALLBACK_VERSION = 2.0 + CALLBACK_TYPE = "stdout" + CALLBACK_NAME = "community.general.timestamp" + + def __init__(self): + super(CallbackModule, self).__init__() + + # Replace the banner method of the display object with the custom one + self._display.banner = types.MethodType(banner, self._display) + + def set_options(self, task_keys=None, var_options=None, direct=None): + super(CallbackModule, self).set_options(task_keys=task_keys, var_options=var_options, direct=direct) + + # Store zoneinfo for specified timezone if available + tzinfo = None + if _ZONEINFO_AVAILABLE and self.get_option("timezone"): + from zoneinfo import ZoneInfo + + tzinfo = ZoneInfo(self.get_option("timezone")) + + # Inject options into the display object + setattr(self._display, "timestamp_tzinfo", tzinfo) + setattr(self._display, "timestamp_format_string", self.get_option("format_string")) diff --git a/plugins/callback/unixy.py b/plugins/callback/unixy.py index fd00fae71b..d155aefc66 100644 --- a/plugins/callback/unixy.py +++ b/plugins/callback/unixy.py @@ -1,24 +1,23 @@ -# -*- coding: utf-8 -*- -# Copyright: (c) 2017, Allyson Bowles <@akatch> -# Copyright: (c) 2012-2014, Michael DeHaan -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# Copyright (c) 2023, Al Bowles <@akatch> +# Copyright (c) 2012-2014, Michael DeHaan +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or 
https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later # Make coding more python3-ish -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = ''' - name: unixy - type: stdout - author: Allyson Bowles (@akatch) - short_description: condensed Ansible output - description: - - Consolidated Ansible output in the style of LINUX/UNIX startup logs. - extends_documentation_fragment: - - default_callback - requirements: - - set as stdout in configuration -''' +DOCUMENTATION = r""" +name: unixy +type: stdout +author: Al Bowles (@akatch) +short_description: Condensed Ansible output +description: + - Consolidated Ansible output in the style of LINUX/UNIX startup logs. +extends_documentation_fragment: + - default_callback +requirements: + - set as stdout in configuration +""" from os.path import basename from ansible import constants as C @@ -39,7 +38,6 @@ class CallbackModule(CallbackModule_default): - Only display task names if the task runs on at least one host - Add option to display all hostnames on a single line in the appropriate result color (failures may have a separate line) - Consolidate stats display - - Display whether run is in --check mode - Don't show play name if no hosts found ''' @@ -62,59 +60,71 @@ class CallbackModule(CallbackModule_default): def _preprocess_result(self, result): self.delegated_vars = result._result.get('_ansible_delegated_vars', None) - self._handle_exception(result._result, use_stderr=self.display_failed_stderr) + self._handle_exception(result._result, use_stderr=self.get_option('display_failed_stderr')) self._handle_warnings(result._result) def _process_result_output(self, result, msg): task_host = result._host.get_name() - task_result = "%s %s" % (task_host, msg) + task_result = f"{task_host} {msg}" if self._run_is_verbose(result): - task_result = "%s %s: %s" % (task_host, msg, self._dump_results(result._result, 
indent=4)) + task_result = f"{task_host} {msg}: {self._dump_results(result._result, indent=4)}" return task_result if self.delegated_vars: task_delegate_host = self.delegated_vars['ansible_host'] - task_result = "%s -> %s %s" % (task_host, task_delegate_host, msg) + task_result = f"{task_host} -> {task_delegate_host} {msg}" if result._result.get('msg') and result._result.get('msg') != "All items completed": - task_result += " | msg: " + to_text(result._result.get('msg')) + task_result += f" | msg: {to_text(result._result.get('msg'))}" if result._result.get('stdout'): - task_result += " | stdout: " + result._result.get('stdout') + task_result += f" | stdout: {result._result.get('stdout')}" if result._result.get('stderr'): - task_result += " | stderr: " + result._result.get('stderr') + task_result += f" | stderr: {result._result.get('stderr')}" return task_result def v2_playbook_on_task_start(self, task, is_conditional): self._get_task_display_name(task) if self.task_display_name is not None: - self._display.display("%s..." % self.task_display_name) + if task.check_mode and self.get_option('check_mode_markers'): + self._display.display(f"{self.task_display_name} (check mode)...") + else: + self._display.display(f"{self.task_display_name}...") def v2_playbook_on_handler_task_start(self, task): self._get_task_display_name(task) if self.task_display_name is not None: - self._display.display("%s (via handler)... " % self.task_display_name) + if task.check_mode and self.get_option('check_mode_markers'): + self._display.display(f"{self.task_display_name} (via handler in check mode)... ") + else: + self._display.display(f"{self.task_display_name} (via handler)... 
") def v2_playbook_on_play_start(self, play): name = play.get_name().strip() - if name and play.hosts: - msg = u"\n- %s on hosts: %s -" % (name, ",".join(play.hosts)) + if play.check_mode and self.get_option('check_mode_markers'): + if name and play.hosts: + msg = f"\n- {name} (in check mode) on hosts: {','.join(play.hosts)} -" + else: + msg = "- check mode -" else: - msg = u"---" + if name and play.hosts: + msg = f"\n- {name} on hosts: {','.join(play.hosts)} -" + else: + msg = "---" self._display.display(msg) def v2_runner_on_skipped(self, result, ignore_errors=False): - if self.display_skipped_hosts: + if self.get_option('display_skipped_hosts'): self._preprocess_result(result) display_color = C.COLOR_SKIP msg = "skipped" task_result = self._process_result_output(result, msg) - self._display.display(" " + task_result, display_color) + self._display.display(f" {task_result}", display_color) else: return @@ -124,10 +134,10 @@ class CallbackModule(CallbackModule_default): msg = "failed" item_value = self._get_item_label(result._result) if item_value: - msg += " | item: %s" % (item_value,) + msg += f" | item: {item_value}" task_result = self._process_result_output(result, msg) - self._display.display(" " + task_result, display_color, stderr=self.display_failed_stderr) + self._display.display(f" {task_result}", display_color, stderr=self.get_option('display_failed_stderr')) def v2_runner_on_ok(self, result, msg="ok", display_color=C.COLOR_OK): self._preprocess_result(result) @@ -137,13 +147,13 @@ class CallbackModule(CallbackModule_default): msg = "done" item_value = self._get_item_label(result._result) if item_value: - msg += " | item: %s" % (item_value,) + msg += f" | item: {item_value}" display_color = C.COLOR_CHANGED task_result = self._process_result_output(result, msg) - self._display.display(" " + task_result, display_color) - elif self.display_ok_hosts: + self._display.display(f" {task_result}", display_color) + elif self.get_option('display_ok_hosts'): 
task_result = self._process_result_output(result, msg) - self._display.display(" " + task_result, display_color) + self._display.display(f" {task_result}", display_color) def v2_runner_item_on_skipped(self, result): self.v2_runner_on_skipped(result) @@ -161,7 +171,7 @@ class CallbackModule(CallbackModule_default): display_color = C.COLOR_UNREACHABLE task_result = self._process_result_output(result, msg) - self._display.display(" " + task_result, display_color, stderr=self.display_failed_stderr) + self._display.display(f" {task_result}", display_color, stderr=self.get_option('display_failed_stderr')) def v2_on_file_diff(self, result): if result._task.loop and 'results' in result._result: @@ -183,40 +193,34 @@ class CallbackModule(CallbackModule_default): # TODO how else can we display these? t = stats.summarize(h) - self._display.display(u" %s : %s %s %s %s %s %s" % ( - hostcolor(h, t), - colorize(u'ok', t['ok'], C.COLOR_OK), - colorize(u'changed', t['changed'], C.COLOR_CHANGED), - colorize(u'unreachable', t['unreachable'], C.COLOR_UNREACHABLE), - colorize(u'failed', t['failures'], C.COLOR_ERROR), - colorize(u'rescued', t['rescued'], C.COLOR_OK), - colorize(u'ignored', t['ignored'], C.COLOR_WARN)), + self._display.display( + f" {hostcolor(h, t)} : {colorize('ok', t['ok'], C.COLOR_OK)} {colorize('changed', t['changed'], C.COLOR_CHANGED)} " + f"{colorize('unreachable', t['unreachable'], C.COLOR_UNREACHABLE)} {colorize('failed', t['failures'], C.COLOR_ERROR)} " + f"{colorize('rescued', t['rescued'], C.COLOR_OK)} {colorize('ignored', t['ignored'], C.COLOR_WARN)}", screen_only=True ) - self._display.display(u" %s : %s %s %s %s %s %s" % ( - hostcolor(h, t, False), - colorize(u'ok', t['ok'], None), - colorize(u'changed', t['changed'], None), - colorize(u'unreachable', t['unreachable'], None), - colorize(u'failed', t['failures'], None), - colorize(u'rescued', t['rescued'], None), - colorize(u'ignored', t['ignored'], None)), + self._display.display( + f" {hostcolor(h, t, 
False)} : {colorize('ok', t['ok'], None)} {colorize('changed', t['changed'], None)} " + f"{colorize('unreachable', t['unreachable'], None)} {colorize('failed', t['failures'], None)} {colorize('rescued', t['rescued'], None)} " + f"{colorize('ignored', t['ignored'], None)}", log_only=True ) - if stats.custom and self.show_custom_stats: + if stats.custom and self.get_option('show_custom_stats'): self._display.banner("CUSTOM STATS: ") # per host # TODO: come up with 'pretty format' for k in sorted(stats.custom.keys()): if k == '_run': continue - self._display.display('\t%s: %s' % (k, self._dump_results(stats.custom[k], indent=1).replace('\n', ''))) + stat_val = self._dump_results(stats.custom[k], indent=1).replace('\n', '') + self._display.display(f'\t{k}: {stat_val}') # print per run custom stats if '_run' in stats.custom: self._display.display("", screen_only=True) - self._display.display('\tRUN: %s' % self._dump_results(stats.custom['_run'], indent=1).replace('\n', '')) + stat_val_run = self._dump_results(stats.custom['_run'], indent=1).replace('\n', '') + self._display.display(f'\tRUN: {stat_val_run}') self._display.display("", screen_only=True) def v2_playbook_on_no_hosts_matched(self): @@ -226,22 +230,24 @@ class CallbackModule(CallbackModule_default): self._display.display(" Ran out of hosts!", color=C.COLOR_ERROR) def v2_playbook_on_start(self, playbook): - # TODO display whether this run is happening in check mode - self._display.display("Executing playbook %s" % basename(playbook._file_name)) + if context.CLIARGS['check'] and self.get_option('check_mode_markers'): + self._display.display(f"Executing playbook {basename(playbook._file_name)} in check mode") + else: + self._display.display(f"Executing playbook {basename(playbook._file_name)}") # show CLI arguments if self._display.verbosity > 3: if context.CLIARGS.get('args'): - self._display.display('Positional arguments: %s' % ' '.join(context.CLIARGS['args']), + self._display.display(f"Positional arguments: 
{' '.join(context.CLIARGS['args'])}", color=C.COLOR_VERBOSE, screen_only=True) for argument in (a for a in context.CLIARGS if a != 'args'): val = context.CLIARGS[argument] if val: - self._display.vvvv('%s: %s' % (argument, val)) + self._display.vvvv(f'{argument}: {val}') def v2_runner_retry(self, result): - msg = " Retrying... (%d of %d)" % (result._result['attempts'], result._result['retries']) + msg = f" Retrying... ({result._result['attempts']} of {result._result['retries']})" if self._run_is_verbose(result): - msg += "Result was: %s" % self._dump_results(result._result) + msg += f"Result was: {self._dump_results(result._result)}" self._display.display(msg, color=C.COLOR_DEBUG) diff --git a/plugins/callback/yaml.py b/plugins/callback/yaml.py deleted file mode 100644 index 59fb350934..0000000000 --- a/plugins/callback/yaml.py +++ /dev/null @@ -1,131 +0,0 @@ -# -*- coding: utf-8 -*- -# (c) 2017 Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -# Make coding more python3-ish -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - -DOCUMENTATION = ''' - author: Unknown (!UNKNOWN) - name: yaml - type: stdout - short_description: yaml-ized Ansible screen output - description: - - Ansible output that can be quite a bit easier to read than the - default JSON formatting. 
- extends_documentation_fragment: - - default_callback - requirements: - - set as stdout in configuration -''' - -import yaml -import json -import re -import string -import sys - -from ansible.module_utils.common.text.converters import to_bytes, to_text -from ansible.module_utils.six import string_types -from ansible.parsing.yaml.dumper import AnsibleDumper -from ansible.plugins.callback import CallbackBase, strip_internal_keys, module_response_deepcopy -from ansible.plugins.callback.default import CallbackModule as Default - - -# from http://stackoverflow.com/a/15423007/115478 -def should_use_block(value): - """Returns true if string should be in block format""" - for c in u"\u000a\u000d\u001c\u001d\u001e\u0085\u2028\u2029": - if c in value: - return True - return False - - -class MyDumper(AnsibleDumper): - def represent_scalar(self, tag, value, style=None): - """Uses block style for multi-line strings""" - if style is None: - if should_use_block(value): - style = '|' - # we care more about readable than accuracy, so... - # ...no trailing space - value = value.rstrip() - # ...and non-printable characters - value = ''.join(x for x in value if x in string.printable or ord(x) >= 0xA0) - # ...tabs prevent blocks from expanding - value = value.expandtabs() - # ...and odd bits of whitespace - value = re.sub(r'[\x0b\x0c\r]', '', value) - # ...as does trailing space - value = re.sub(r' +\n', '\n', value) - else: - style = self.default_style - node = yaml.representer.ScalarNode(tag, value, style=style) - if self.alias_key is not None: - self.represented_objects[self.alias_key] = node - return node - - -class CallbackModule(Default): - - """ - Variation of the Default output which uses nicely readable YAML instead - of JSON for printing results. 
- """ - - CALLBACK_VERSION = 2.0 - CALLBACK_TYPE = 'stdout' - CALLBACK_NAME = 'community.general.yaml' - - def __init__(self): - super(CallbackModule, self).__init__() - - def _dump_results(self, result, indent=None, sort_keys=True, keep_invocation=False): - if result.get('_ansible_no_log', False): - return json.dumps(dict(censored="The output has been hidden due to the fact that 'no_log: true' was specified for this result")) - - # All result keys stating with _ansible_ are internal, so remove them from the result before we output anything. - abridged_result = strip_internal_keys(module_response_deepcopy(result)) - - # remove invocation unless specifically wanting it - if not keep_invocation and self._display.verbosity < 3 and 'invocation' in result: - del abridged_result['invocation'] - - # remove diff information from screen output - if self._display.verbosity < 3 and 'diff' in result: - del abridged_result['diff'] - - # remove exception from screen output - if 'exception' in abridged_result: - del abridged_result['exception'] - - dumped = '' - - # put changed and skipped into a header line - if 'changed' in abridged_result: - dumped += 'changed=' + str(abridged_result['changed']).lower() + ' ' - del abridged_result['changed'] - - if 'skipped' in abridged_result: - dumped += 'skipped=' + str(abridged_result['skipped']).lower() + ' ' - del abridged_result['skipped'] - - # if we already have stdout, we don't need stdout_lines - if 'stdout' in abridged_result and 'stdout_lines' in abridged_result: - abridged_result['stdout_lines'] = '' - - # if we already have stderr, we don't need stderr_lines - if 'stderr' in abridged_result and 'stderr_lines' in abridged_result: - abridged_result['stderr_lines'] = '' - - if abridged_result: - dumped += '\n' - dumped += to_text(yaml.dump(abridged_result, allow_unicode=True, width=1000, Dumper=MyDumper, default_flow_style=False)) - - # indent by a couple of spaces - dumped = '\n '.join(dumped.split('\n')).rstrip() - return dumped 
- - def _serialize_diff(self, diff): - return to_text(yaml.dump(diff, allow_unicode=True, width=1000, Dumper=AnsibleDumper, default_flow_style=False)) diff --git a/plugins/connection/chroot.py b/plugins/connection/chroot.py index 295bd4046b..35f7312326 100644 --- a/plugins/connection/chroot.py +++ b/plugins/connection/chroot.py @@ -1,61 +1,85 @@ -# -*- coding: utf-8 -*- # Based on local.py (c) 2012, Michael DeHaan # # (c) 2013, Maykel Moya # (c) 2015, Toshio Kuratomi # Copyright (c) 2017 Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = ''' - author: Maykel Moya (!UNKNOWN) - name: chroot - short_description: Interact with local chroot +DOCUMENTATION = r""" +author: Maykel Moya (!UNKNOWN) +name: chroot +short_description: Interact with local chroot +description: + - Run commands or put/fetch files to an existing chroot on the Ansible controller. +options: + remote_addr: description: - - Run commands or put/fetch files to an existing chroot on the Ansible controller. - options: - remote_addr: - description: - - The path of the chroot you want to access. - default: inventory_hostname - vars: - - name: ansible_host - executable: - description: - - User specified executable shell - ini: - - section: defaults - key: executable - env: - - name: ANSIBLE_EXECUTABLE - vars: - - name: ansible_executable - default: /bin/sh - chroot_exe: - description: - - User specified chroot binary - ini: - - section: chroot_connection - key: exe - env: - - name: ANSIBLE_CHROOT_EXE - vars: - - name: ansible_chroot_exe - default: chroot -''' + - The path of the chroot you want to access. 
+ type: string + default: inventory_hostname + vars: + - name: inventory_hostname + - name: ansible_host + executable: + description: + - User specified executable shell. + type: string + ini: + - section: defaults + key: executable + env: + - name: ANSIBLE_EXECUTABLE + vars: + - name: ansible_executable + default: /bin/sh + chroot_exe: + description: + - User specified chroot binary. + type: string + ini: + - section: chroot_connection + key: exe + env: + - name: ANSIBLE_CHROOT_EXE + vars: + - name: ansible_chroot_exe + default: chroot + disable_root_check: + description: + - Do not check that the user is not root. + ini: + - section: chroot_connection + key: disable_root_check + env: + - name: ANSIBLE_CHROOT_DISABLE_ROOT_CHECK + vars: + - name: ansible_chroot_disable_root_check + default: false + type: bool + version_added: 7.3.0 +""" + +EXAMPLES = r""" +- hosts: chroots + connection: community.general.chroot + tasks: + - debug: + msg: "This is coming from chroot environment" +""" import os import os.path import subprocess import traceback +from shlex import quote as shlex_quote from ansible.errors import AnsibleError from ansible.module_utils.basic import is_executable from ansible.module_utils.common.process import get_bin_path -from ansible.module_utils.six.moves import shlex_quote -from ansible.module_utils.common.text.converters import to_bytes, to_native +from ansible.module_utils.common.text.converters import to_bytes from ansible.plugins.connection import ConnectionBase, BUFSIZE from ansible.utils.display import Display @@ -79,31 +103,32 @@ class Connection(ConnectionBase): self.chroot = self._play_context.remote_addr - if os.geteuid() != 0: - raise AnsibleError("chroot connection requires running as root") - - # we're running as root on the local system so do some - # trivial checks for ensuring 'host' is actually a chroot'able dir + # do some trivial checks for ensuring 'host' is actually a chroot'able dir if not os.path.isdir(self.chroot): - raise 
AnsibleError("%s is not a directory" % self.chroot) + raise AnsibleError(f"{self.chroot} is not a directory") chrootsh = os.path.join(self.chroot, 'bin/sh') # Want to check for a usable bourne shell inside the chroot. # is_executable() == True is sufficient. For symlinks it # gets really complicated really fast. So we punt on finding that - # out. As long as it's a symlink we assume that it will work + # out. As long as it is a symlink we assume that it will work if not (is_executable(chrootsh) or (os.path.lexists(chrootsh) and os.path.islink(chrootsh))): - raise AnsibleError("%s does not look like a chrootable dir (/bin/sh missing)" % self.chroot) + raise AnsibleError(f"{self.chroot} does not look like a chrootable dir (/bin/sh missing)") def _connect(self): """ connect to the chroot """ + if not self.get_option('disable_root_check') and os.geteuid() != 0: + raise AnsibleError( + "chroot connection requires running as root. " + "You can override this check with the `disable_root_check` option.") + if os.path.isabs(self.get_option('chroot_exe')): self.chroot_cmd = self.get_option('chroot_exe') else: try: self.chroot_cmd = get_bin_path(self.get_option('chroot_exe')) except ValueError as e: - raise AnsibleError(to_native(e)) + raise AnsibleError(str(e)) super(Connection, self)._connect() if not self._connected: @@ -121,7 +146,7 @@ class Connection(ConnectionBase): executable = self.get_option('executable') local_cmd = [self.chroot_cmd, self.chroot, executable, '-c', cmd] - display.vvv("EXEC %s" % local_cmd, host=self.chroot) + display.vvv(f"EXEC {local_cmd}", host=self.chroot) local_cmd = [to_bytes(i, errors='surrogate_or_strict') for i in local_cmd] p = subprocess.Popen(local_cmd, shell=False, stdin=stdin, stdout=subprocess.PIPE, stderr=subprocess.PIPE) @@ -146,7 +171,7 @@ class Connection(ConnectionBase): exist in any given chroot. So for now we're choosing "/" instead. This also happens to be the former default. 
- Can revisit using $HOME instead if it's a problem + Can revisit using $HOME instead if it is a problem """ if not remote_path.startswith(os.path.sep): remote_path = os.path.join(os.path.sep, remote_path) @@ -155,7 +180,7 @@ class Connection(ConnectionBase): def put_file(self, in_path, out_path): """ transfer a file from local to chroot """ super(Connection, self).put_file(in_path, out_path) - display.vvv("PUT %s TO %s" % (in_path, out_path), host=self.chroot) + display.vvv(f"PUT {in_path} TO {out_path}", host=self.chroot) out_path = shlex_quote(self._prefix_login_path(out_path)) try: @@ -165,27 +190,27 @@ class Connection(ConnectionBase): else: count = '' try: - p = self._buffered_exec_command('dd of=%s bs=%s%s' % (out_path, BUFSIZE, count), stdin=in_file) + p = self._buffered_exec_command(f'dd of={out_path} bs={BUFSIZE}{count}', stdin=in_file) except OSError: raise AnsibleError("chroot connection requires dd command in the chroot") try: stdout, stderr = p.communicate() except Exception: traceback.print_exc() - raise AnsibleError("failed to transfer file %s to %s" % (in_path, out_path)) + raise AnsibleError(f"failed to transfer file {in_path} to {out_path}") if p.returncode != 0: - raise AnsibleError("failed to transfer file %s to %s:\n%s\n%s" % (in_path, out_path, stdout, stderr)) + raise AnsibleError(f"failed to transfer file {in_path} to {out_path}:\n{stdout}\n{stderr}") except IOError: - raise AnsibleError("file or module does not exist at: %s" % in_path) + raise AnsibleError(f"file or module does not exist at: {in_path}") def fetch_file(self, in_path, out_path): """ fetch a file from chroot to local """ super(Connection, self).fetch_file(in_path, out_path) - display.vvv("FETCH %s TO %s" % (in_path, out_path), host=self.chroot) + display.vvv(f"FETCH {in_path} TO {out_path}", host=self.chroot) in_path = shlex_quote(self._prefix_login_path(in_path)) try: - p = self._buffered_exec_command('dd if=%s bs=%s' % (in_path, BUFSIZE)) + p = 
self._buffered_exec_command(f'dd if={in_path} bs={BUFSIZE}') except OSError: raise AnsibleError("chroot connection requires dd command in the chroot") @@ -197,10 +222,10 @@ class Connection(ConnectionBase): chunk = p.stdout.read(BUFSIZE) except Exception: traceback.print_exc() - raise AnsibleError("failed to transfer file %s to %s" % (in_path, out_path)) + raise AnsibleError(f"failed to transfer file {in_path} to {out_path}") stdout, stderr = p.communicate() if p.returncode != 0: - raise AnsibleError("failed to transfer file %s to %s:\n%s\n%s" % (in_path, out_path, stdout, stderr)) + raise AnsibleError(f"failed to transfer file {in_path} to {out_path}:\n{stdout}\n{stderr}") def close(self): """ terminate the connection; nothing to do here """ diff --git a/plugins/connection/funcd.py b/plugins/connection/funcd.py index 94d1a3bd9c..86d050c1db 100644 --- a/plugins/connection/funcd.py +++ b/plugins/connection/funcd.py @@ -1,30 +1,30 @@ -# -*- coding: utf-8 -*- # Based on local.py (c) 2012, Michael DeHaan # Based on chroot.py (c) 2013, Maykel Moya # Copyright (c) 2013, Michael Scherer # Copyright (c) 2017 Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = ''' - author: Michael Scherer (@mscherer) - name: funcd - short_description: Use funcd to connect to target +DOCUMENTATION = r""" +author: Michael Scherer (@mscherer) +name: funcd +short_description: Use funcd to connect to target +description: + - This transport permits you to use Ansible over Func. 
+ - For people who have already setup func and that wish to play with ansible, this permit to move gradually to ansible without + having to redo completely the setup of the network. +options: + remote_addr: description: - - This transport permits you to use Ansible over Func. - - For people who have already setup func and that wish to play with ansible, - this permit to move gradually to ansible without having to redo completely the setup of the network. - options: - remote_addr: - description: - - The path of the chroot you want to access. - default: inventory_hostname - vars: - - name: ansible_host - - name: ansible_func_host -''' + - The path of the chroot you want to access. + type: string + default: inventory_hostname + vars: + - name: ansible_host + - name: ansible_func_host +""" HAVE_FUNC = False try: @@ -63,14 +63,14 @@ class Connection(ConnectionBase): self.client = fc.Client(self.host) return self - def exec_command(self, cmd, become_user=None, sudoable=False, executable='/bin/sh', in_data=None): + def exec_command(self, cmd, in_data=None, sudoable=True): """ run a command on the remote minion """ if in_data: raise AnsibleError("Internal Error: this module does not support optimized module pipelining") - # totally ignores privlege escalation - display.vvv("EXEC %s" % cmd, host=self.host) + # totally ignores privilege escalation + display.vvv(f"EXEC {cmd}", host=self.host) p = self.client.command.run(cmd)[self.host] return p[0], p[1], p[2] @@ -85,14 +85,14 @@ class Connection(ConnectionBase): """ transfer a file from local to remote """ out_path = self._normalize_path(out_path, '/') - display.vvv("PUT %s TO %s" % (in_path, out_path), host=self.host) + display.vvv(f"PUT {in_path} TO {out_path}", host=self.host) self.client.local.copyfile.send(in_path, out_path) def fetch_file(self, in_path, out_path): """ fetch a file from remote to local """ in_path = self._normalize_path(in_path, '/') - display.vvv("FETCH %s TO %s" % (in_path, out_path), host=self.host) + 
display.vvv(f"FETCH {in_path} TO {out_path}", host=self.host) # need to use a tmp dir due to difference of semantic for getfile # ( who take a # directory as destination) and fetch_file, who # take a file directly diff --git a/plugins/connection/incus.py b/plugins/connection/incus.py new file mode 100644 index 0000000000..3dfd37764b --- /dev/null +++ b/plugins/connection/incus.py @@ -0,0 +1,274 @@ +# Based on lxd.py (c) 2016, Matt Clay +# (c) 2023, Stephane Graber +# Copyright (c) 2023 Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + +DOCUMENTATION = r""" +author: Stéphane Graber (@stgraber) +name: incus +short_description: Run tasks in Incus instances using the Incus CLI +description: + - Run commands or put/fetch files to an existing Incus instance using Incus CLI. +version_added: "8.2.0" +options: + remote_addr: + description: + - The instance identifier. + type: string + default: inventory_hostname + vars: + - name: inventory_hostname + - name: ansible_host + - name: ansible_incus_host + executable: + description: + - The shell to use for execution inside the instance. + type: string + default: /bin/sh + vars: + - name: ansible_executable + - name: ansible_incus_executable + incus_become_method: + description: + - Become command used to switch to a non-root user. + - Is only used when O(remote_user) is not V(root). + type: str + default: /bin/su + vars: + - name: incus_become_method + version_added: 10.4.0 + remote: + description: + - The name of the Incus remote to use (per C(incus remote list)). + - Remotes are used to access multiple servers from a single client. + type: string + default: local + vars: + - name: ansible_incus_remote + remote_user: + description: + - User to login/authenticate as. + - Can be set from the CLI with the C(--user) or C(-u) options. 
+ type: string + default: root + vars: + - name: ansible_user + env: + - name: ANSIBLE_REMOTE_USER + ini: + - section: defaults + key: remote_user + keyword: + - name: remote_user + version_added: 10.4.0 + project: + description: + - The name of the Incus project to use (per C(incus project list)). + - Projects are used to divide the instances running on a server. + type: string + default: default + vars: + - name: ansible_incus_project +""" + +import os +from subprocess import call, Popen, PIPE + +from ansible.errors import AnsibleError, AnsibleConnectionFailure, AnsibleFileNotFound +from ansible.module_utils.common.process import get_bin_path +from ansible.module_utils.common.text.converters import to_bytes, to_text +from ansible.plugins.connection import ConnectionBase + + +class Connection(ConnectionBase): + """ Incus based connections """ + + transport = "incus" + has_pipelining = True + + def __init__(self, play_context, new_stdin, *args, **kwargs): + super(Connection, self).__init__(play_context, new_stdin, *args, **kwargs) + + self._incus_cmd = get_bin_path("incus") + + if not self._incus_cmd: + raise AnsibleError("incus command not found in PATH") + + def _connect(self): + """connect to Incus (nothing to do here) """ + super(Connection, self)._connect() + + if not self._connected: + self._display.vvv(f"ESTABLISH Incus CONNECTION FOR USER: {self.get_option('remote_user')}", + host=self._instance()) + self._connected = True + + def _build_command(self, cmd) -> str: + """build the command to execute on the incus host""" + + exec_cmd = [ + self._incus_cmd, + "--project", self.get_option("project"), + "exec", + f"{self.get_option('remote')}:{self._instance()}", + "--"] + + if self.get_option("remote_user") != "root": + self._display.vvv( + f"INFO: Running as non-root user: {self.get_option('remote_user')}, \ + trying to run 'incus exec' with become method: {self.get_option('incus_become_method')}", + host=self._instance(), + ) + exec_cmd.extend( + 
[self.get_option("incus_become_method"), self.get_option("remote_user"), "-c"] + ) + + exec_cmd.extend([self.get_option("executable"), "-c", cmd]) + + return exec_cmd + + def _instance(self): + # Return only the leading part of the FQDN as the instance name + # as Incus instance names cannot be a FQDN. + return self.get_option('remote_addr').split(".")[0] + + def exec_command(self, cmd, in_data=None, sudoable=True): + """ execute a command on the Incus host """ + super(Connection, self).exec_command(cmd, in_data=in_data, sudoable=sudoable) + + self._display.vvv(f"EXEC {cmd}", + host=self._instance()) + + local_cmd = self._build_command(cmd) + self._display.vvvvv(f"EXEC {local_cmd}", host=self._instance()) + + local_cmd = [to_bytes(i, errors='surrogate_or_strict') for i in local_cmd] + in_data = to_bytes(in_data, errors='surrogate_or_strict', nonstring='passthru') + + process = Popen(local_cmd, stdin=PIPE, stdout=PIPE, stderr=PIPE) + stdout, stderr = process.communicate(in_data) + + stdout = to_text(stdout) + stderr = to_text(stderr) + + if stderr.startswith("Error: ") and stderr.rstrip().endswith( + ": Instance is not running" + ): + raise AnsibleConnectionFailure( + f"instance not running: {self._instance()} (remote={self.get_option('remote')}, project={self.get_option('project')})" + ) + + if stderr.startswith("Error: ") and stderr.rstrip().endswith( + ": Instance not found" + ): + raise AnsibleConnectionFailure( + f"instance not found: {self._instance()} (remote={self.get_option('remote')}, project={self.get_option('project')})" + ) + + if ( + stderr.startswith("Error: ") + and ": User does not have permission " in stderr + ): + raise AnsibleConnectionFailure( + f"instance access denied: {self._instance()} (remote={self.get_option('remote')}, project={self.get_option('project')})" + ) + + if ( + stderr.startswith("Error: ") + and ": User does not have entitlement " in stderr + ): + raise AnsibleConnectionFailure( + f"instance access denied: {self._instance()} 
(remote={self.get_option('remote')}, project={self.get_option('project')})" + ) + + return process.returncode, stdout, stderr + + def _get_remote_uid_gid(self) -> tuple[int, int]: + """Get the user and group ID of 'remote_user' from the instance.""" + + rc, uid_out, err = self.exec_command("/bin/id -u") + if rc != 0: + raise AnsibleError( + f"Failed to get remote uid for user {self.get_option('remote_user')}: {err}" + ) + uid = uid_out.strip() + + rc, gid_out, err = self.exec_command("/bin/id -g") + if rc != 0: + raise AnsibleError( + f"Failed to get remote gid for user {self.get_option('remote_user')}: {err}" + ) + gid = gid_out.strip() + + return int(uid), int(gid) + + def put_file(self, in_path, out_path): + """ put a file from local to Incus """ + super(Connection, self).put_file(in_path, out_path) + + self._display.vvv(f"PUT {in_path} TO {out_path}", + host=self._instance()) + + if not os.path.isfile(to_bytes(in_path, errors='surrogate_or_strict')): + raise AnsibleFileNotFound(f"input path is not a file: {in_path}") + + if self.get_option("remote_user") != "root": + uid, gid = self._get_remote_uid_gid() + local_cmd = [ + self._incus_cmd, + "--project", + self.get_option("project"), + "file", + "push", + "--uid", + str(uid), + "--gid", + str(gid), + "--quiet", + in_path, + f"{self.get_option('remote')}:{self._instance()}/{out_path}", + ] + else: + local_cmd = [ + self._incus_cmd, + "--project", + self.get_option("project"), + "file", + "push", + "--quiet", + in_path, + f"{self.get_option('remote')}:{self._instance()}/{out_path}", + ] + + self._display.vvvvv(f"PUT {local_cmd}", host=self._instance()) + + local_cmd = [to_bytes(i, errors='surrogate_or_strict') for i in local_cmd] + + call(local_cmd) + + def fetch_file(self, in_path, out_path): + """ fetch a file from Incus to local """ + super(Connection, self).fetch_file(in_path, out_path) + + self._display.vvv(f"FETCH {in_path} TO {out_path}", + host=self._instance()) + + local_cmd = [ + self._incus_cmd, + 
"--project", self.get_option("project"), + "file", "pull", "--quiet", + f"{self.get_option('remote')}:{self._instance()}/{in_path}", + out_path] + + local_cmd = [to_bytes(i, errors='surrogate_or_strict') for i in local_cmd] + + call(local_cmd) + + def close(self): + """ close the connection (nothing to do here) """ + super(Connection, self).close() + + self._connected = False diff --git a/plugins/connection/iocage.py b/plugins/connection/iocage.py index 2fd74313bc..fa4973bae1 100644 --- a/plugins/connection/iocage.py +++ b/plugins/connection/iocage.py @@ -1,34 +1,35 @@ -# -*- coding: utf-8 -*- # Based on jail.py # (c) 2013, Michael Scherer # (c) 2015, Toshio Kuratomi # (c) 2016, Stephan Lohse # Copyright (c) 2017 Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = ''' - author: Stephan Lohse (!UNKNOWN) - name: iocage - short_description: Run tasks in iocage jails +DOCUMENTATION = r""" +author: Stephan Lohse (!UNKNOWN) +name: iocage +short_description: Run tasks in iocage jails +description: + - Run commands or put/fetch files to an existing iocage jail. +options: + remote_addr: description: - - Run commands or put/fetch files to an existing iocage jail - options: - remote_addr: - description: - - Path to the jail - vars: - - name: ansible_host - - name: ansible_iocage_host - remote_user: - description: - - User to execute as inside the jail - vars: - - name: ansible_user - - name: ansible_iocage_user -''' + - Path to the jail. + type: string + vars: + - name: ansible_host + - name: ansible_iocage_host + remote_user: + description: + - User to execute as inside the jail. 
+ type: string + vars: + - name: ansible_user + - name: ansible_iocage_user +""" import subprocess @@ -52,11 +53,12 @@ class Connection(Jail): jail_uuid = self.get_jail_uuid() - kwargs[Jail.modified_jailname_key] = 'ioc-{0}'.format(jail_uuid) + kwargs[Jail.modified_jailname_key] = f'ioc-{jail_uuid}' - display.vvv(u"Jail {iocjail} has been translated to {rawjail}".format( - iocjail=self.ioc_jail, rawjail=kwargs[Jail.modified_jailname_key]), - host=kwargs[Jail.modified_jailname_key]) + display.vvv( + f"Jail {self.ioc_jail} has been translated to {kwargs[Jail.modified_jailname_key]}", + host=kwargs[Jail.modified_jailname_key] + ) super(Connection, self).__init__(play_context, new_stdin, *args, **kwargs) @@ -78,6 +80,6 @@ class Connection(Jail): p.wait() if p.returncode != 0: - raise AnsibleError(u"iocage returned an error: {0}".format(stdout)) + raise AnsibleError(f"iocage returned an error: {stdout}") return stdout.strip('\n') diff --git a/plugins/connection/jail.py b/plugins/connection/jail.py index 3c820be175..7f25c3fe01 100644 --- a/plugins/connection/jail.py +++ b/plugins/connection/jail.py @@ -1,43 +1,45 @@ -# -*- coding: utf-8 -*- # Based on local.py by Michael DeHaan # and chroot.py by Maykel Moya # Copyright (c) 2013, Michael Scherer # Copyright (c) 2015, Toshio Kuratomi # Copyright (c) 2017 Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = ''' - author: Ansible Core Team - name: jail - short_description: Run tasks in jails +DOCUMENTATION = r""" +author: Ansible Core Team +name: jail +short_description: Run tasks in jails +description: + - Run commands or put/fetch files to an existing jail. 
+options: + remote_addr: description: - - Run commands or put/fetch files to an existing jail - options: - remote_addr: - description: - - Path to the jail - default: inventory_hostname - vars: - - name: ansible_host - - name: ansible_jail_host - remote_user: - description: - - User to execute as inside the jail - vars: - - name: ansible_user - - name: ansible_jail_user -''' + - Path to the jail. + type: string + default: inventory_hostname + vars: + - name: inventory_hostname + - name: ansible_host + - name: ansible_jail_host + remote_user: + description: + - User to execute as inside the jail. + type: string + vars: + - name: ansible_user + - name: ansible_jail_user +""" import os import os.path import subprocess import traceback +from shlex import quote as shlex_quote from ansible.errors import AnsibleError -from ansible.module_utils.six.moves import shlex_quote from ansible.module_utils.common.process import get_bin_path from ansible.module_utils.common.text.converters import to_bytes, to_native, to_text from ansible.plugins.connection import ConnectionBase, BUFSIZE @@ -71,14 +73,14 @@ class Connection(ConnectionBase): self.jexec_cmd = self._search_executable('jexec') if self.jail not in self.list_jails(): - raise AnsibleError("incorrect jail name %s" % self.jail) + raise AnsibleError(f"incorrect jail name {self.jail}") @staticmethod def _search_executable(executable): try: return get_bin_path(executable) except ValueError: - raise AnsibleError("%s command not found in PATH" % executable) + raise AnsibleError(f"{executable} command not found in PATH") def list_jails(self): p = subprocess.Popen([self.jls_cmd, '-q', 'name'], @@ -93,7 +95,7 @@ class Connection(ConnectionBase): """ connect to the jail; nothing to do here """ super(Connection, self)._connect() if not self._connected: - display.vvv(u"ESTABLISH JAIL CONNECTION FOR USER: {0}".format(self._play_context.remote_user), host=self.jail) + display.vvv(f"ESTABLISH JAIL CONNECTION FOR USER: 
{self._play_context.remote_user}", host=self.jail) self._connected = True def _buffered_exec_command(self, cmd, stdin=subprocess.PIPE): @@ -111,11 +113,11 @@ class Connection(ConnectionBase): if self._play_context.remote_user is not None: local_cmd += ['-U', self._play_context.remote_user] # update HOME since -U does not update the jail environment - set_env = 'HOME=~' + self._play_context.remote_user + ' ' + set_env = f"HOME=~{self._play_context.remote_user} " local_cmd += [self.jail, self._play_context.executable, '-c', set_env + cmd] - display.vvv("EXEC %s" % (local_cmd,), host=self.jail) + display.vvv(f"EXEC {local_cmd}", host=self.jail) local_cmd = [to_bytes(i, errors='surrogate_or_strict') for i in local_cmd] p = subprocess.Popen(local_cmd, shell=False, stdin=stdin, stdout=subprocess.PIPE, stderr=subprocess.PIPE) @@ -140,7 +142,7 @@ class Connection(ConnectionBase): exist in any given chroot. So for now we're choosing "/" instead. This also happens to be the former default. - Can revisit using $HOME instead if it's a problem + Can revisit using $HOME instead if it is a problem """ if not remote_path.startswith(os.path.sep): remote_path = os.path.join(os.path.sep, remote_path) @@ -149,7 +151,7 @@ class Connection(ConnectionBase): def put_file(self, in_path, out_path): """ transfer a file from local to jail """ super(Connection, self).put_file(in_path, out_path) - display.vvv("PUT %s TO %s" % (in_path, out_path), host=self.jail) + display.vvv(f"PUT {in_path} TO {out_path}", host=self.jail) out_path = shlex_quote(self._prefix_login_path(out_path)) try: @@ -159,27 +161,27 @@ class Connection(ConnectionBase): else: count = '' try: - p = self._buffered_exec_command('dd of=%s bs=%s%s' % (out_path, BUFSIZE, count), stdin=in_file) + p = self._buffered_exec_command(f'dd of={out_path} bs={BUFSIZE}{count}', stdin=in_file) except OSError: raise AnsibleError("jail connection requires dd command in the jail") try: stdout, stderr = p.communicate() except Exception: 
traceback.print_exc() - raise AnsibleError("failed to transfer file %s to %s" % (in_path, out_path)) + raise AnsibleError(f"failed to transfer file {in_path} to {out_path}") if p.returncode != 0: - raise AnsibleError("failed to transfer file %s to %s:\n%s\n%s" % (in_path, out_path, to_native(stdout), to_native(stderr))) + raise AnsibleError(f"failed to transfer file {in_path} to {out_path}:\n{to_native(stdout)}\n{to_native(stderr)}") except IOError: - raise AnsibleError("file or module does not exist at: %s" % in_path) + raise AnsibleError(f"file or module does not exist at: {in_path}") def fetch_file(self, in_path, out_path): """ fetch a file from jail to local """ super(Connection, self).fetch_file(in_path, out_path) - display.vvv("FETCH %s TO %s" % (in_path, out_path), host=self.jail) + display.vvv(f"FETCH {in_path} TO {out_path}", host=self.jail) in_path = shlex_quote(self._prefix_login_path(in_path)) try: - p = self._buffered_exec_command('dd if=%s bs=%s' % (in_path, BUFSIZE)) + p = self._buffered_exec_command(f'dd if={in_path} bs={BUFSIZE}') except OSError: raise AnsibleError("jail connection requires dd command in the jail") @@ -191,10 +193,10 @@ class Connection(ConnectionBase): chunk = p.stdout.read(BUFSIZE) except Exception: traceback.print_exc() - raise AnsibleError("failed to transfer file %s to %s" % (in_path, out_path)) + raise AnsibleError(f"failed to transfer file {in_path} to {out_path}") stdout, stderr = p.communicate() if p.returncode != 0: - raise AnsibleError("failed to transfer file %s to %s:\n%s\n%s" % (in_path, out_path, to_native(stdout), to_native(stderr))) + raise AnsibleError(f"failed to transfer file {in_path} to {out_path}:\n{to_native(stdout)}\n{to_native(stderr)}") def close(self): """ terminate the connection; nothing to do here """ diff --git a/plugins/connection/lxc.py b/plugins/connection/lxc.py index 2aaf1619dc..e8e28ed804 100644 --- a/plugins/connection/lxc.py +++ b/plugins/connection/lxc.py @@ -1,33 +1,35 @@ -# -*- coding: 
utf-8 -*- # (c) 2015, Joerg Thalheim # Copyright (c) 2017 Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = ''' - author: Joerg Thalheim (!UNKNOWN) - name: lxc - short_description: Run tasks in lxc containers via lxc python library +DOCUMENTATION = r""" +author: Joerg Thalheim (!UNKNOWN) +name: lxc +short_description: Run tasks in LXC containers using lxc python library +description: + - Run commands or put/fetch files to an existing LXC container using lxc python library. +options: + remote_addr: description: - - Run commands or put/fetch files to an existing lxc container using lxc python library - options: - remote_addr: - description: - - Container identifier - default: inventory_hostname - vars: - - name: ansible_host - - name: ansible_lxc_host - executable: - default: /bin/sh - description: - - Shell executable - vars: - - name: ansible_executable - - name: ansible_lxc_executable -''' + - Container identifier. + type: string + default: inventory_hostname + vars: + - name: inventory_hostname + - name: ansible_host + - name: ansible_lxc_host + executable: + default: /bin/sh + description: + - Shell executable. 
+ type: string + vars: + - name: ansible_executable + - name: ansible_lxc_executable +""" import os import shutil @@ -58,7 +60,7 @@ class Connection(ConnectionBase): def __init__(self, play_context, new_stdin, *args, **kwargs): super(Connection, self).__init__(play_context, new_stdin, *args, **kwargs) - self.container_name = self._play_context.remote_addr + self.container_name = None self.container = None def _connect(self): @@ -66,16 +68,19 @@ class Connection(ConnectionBase): super(Connection, self)._connect() if not HAS_LIBLXC: - msg = "lxc bindings for python2 are not installed" + msg = "lxc python bindings are not installed" raise errors.AnsibleError(msg) - if self.container: + container_name = self.get_option('remote_addr') + if self.container and self.container_name == container_name: return + self.container_name = container_name + self._display.vvv("THIS IS A LOCAL LXC DIR", host=self.container_name) self.container = _lxc.Container(self.container_name) if self.container.state == "STOPPED": - raise errors.AnsibleError("%s is not running" % self.container_name) + raise errors.AnsibleError(f"{self.container_name} is not running") @staticmethod def _communicate(pid, in_data, stdin, stdout, stderr): @@ -116,7 +121,7 @@ class Connection(ConnectionBase): super(Connection, self).exec_command(cmd, in_data=in_data, sudoable=sudoable) # python2-lxc needs bytes. python3-lxc needs text. 
- executable = to_native(self._play_context.executable, errors='surrogate_or_strict') + executable = to_native(self.get_option('executable'), errors='surrogate_or_strict') local_cmd = [executable, '-c', to_native(cmd, errors='surrogate_or_strict')] read_stdout, write_stdout = None, None @@ -137,10 +142,10 @@ class Connection(ConnectionBase): read_stdin, write_stdin = os.pipe() kwargs['stdin'] = self._set_nonblocking(read_stdin) - self._display.vvv("EXEC %s" % (local_cmd), host=self.container_name) + self._display.vvv(f"EXEC {local_cmd}", host=self.container_name) pid = self.container.attach(_lxc.attach_run_command, local_cmd, **kwargs) if pid == -1: - msg = "failed to attach to container %s" % self.container_name + msg = f"failed to attach to container {self.container_name}" raise errors.AnsibleError(msg) write_stdout = os.close(write_stdout) @@ -167,18 +172,18 @@ class Connection(ConnectionBase): def put_file(self, in_path, out_path): ''' transfer a file from local to lxc ''' super(Connection, self).put_file(in_path, out_path) - self._display.vvv("PUT %s TO %s" % (in_path, out_path), host=self.container_name) + self._display.vvv(f"PUT {in_path} TO {out_path}", host=self.container_name) in_path = to_bytes(in_path, errors='surrogate_or_strict') out_path = to_bytes(out_path, errors='surrogate_or_strict') if not os.path.exists(in_path): - msg = "file or module does not exist: %s" % in_path + msg = f"file or module does not exist: {in_path}" raise errors.AnsibleFileNotFound(msg) try: src_file = open(in_path, "rb") except IOError: traceback.print_exc() - raise errors.AnsibleError("failed to open input file to %s" % in_path) + raise errors.AnsibleError(f"failed to open input file to {in_path}") try: def write_file(args): with open(out_path, 'wb+') as dst_file: @@ -187,7 +192,7 @@ class Connection(ConnectionBase): self.container.attach_wait(write_file, None) except IOError: traceback.print_exc() - msg = "failed to transfer file to %s" % out_path + msg = f"failed to 
transfer file to {out_path}" raise errors.AnsibleError(msg) finally: src_file.close() @@ -195,7 +200,7 @@ class Connection(ConnectionBase): def fetch_file(self, in_path, out_path): ''' fetch a file from lxc to local ''' super(Connection, self).fetch_file(in_path, out_path) - self._display.vvv("FETCH %s TO %s" % (in_path, out_path), host=self.container_name) + self._display.vvv(f"FETCH {in_path} TO {out_path}", host=self.container_name) in_path = to_bytes(in_path, errors='surrogate_or_strict') out_path = to_bytes(out_path, errors='surrogate_or_strict') @@ -203,7 +208,7 @@ class Connection(ConnectionBase): dst_file = open(out_path, "wb") except IOError: traceback.print_exc() - msg = "failed to open output file %s" % out_path + msg = f"failed to open output file {out_path}" raise errors.AnsibleError(msg) try: def write_file(args): @@ -218,7 +223,7 @@ class Connection(ConnectionBase): self.container.attach_wait(write_file, None) except IOError: traceback.print_exc() - msg = "failed to transfer file from %s to %s" % (in_path, out_path) + msg = f"failed to transfer file from {in_path} to {out_path}" raise errors.AnsibleError(msg) finally: dst_file.close() diff --git a/plugins/connection/lxd.py b/plugins/connection/lxd.py index f3b06e6e39..d4d3b45d0a 100644 --- a/plugins/connection/lxd.py +++ b/plugins/connection/lxd.py @@ -1,46 +1,77 @@ -# -*- coding: utf-8 -*- -# (c) 2016 Matt Clay -# (c) 2017 Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# Copyright (c) 2016 Matt Clay +# Copyright (c) 2017 Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = ''' - author: Matt Clay (@mattclay) - name: lxd - short_description: Run tasks in lxc containers via 
lxc CLI +DOCUMENTATION = r""" +author: Matt Clay (@mattclay) +name: lxd +short_description: Run tasks in LXD instances using C(lxc) CLI +description: + - Run commands or put/fetch files to an existing instance using C(lxc) CLI. +options: + remote_addr: description: - - Run commands or put/fetch files to an existing lxc container using lxc CLI - options: - remote_addr: - description: - - Container identifier. - default: inventory_hostname - vars: - - name: ansible_host - - name: ansible_lxd_host - executable: - description: - - shell to use for execution inside container - default: /bin/sh - vars: - - name: ansible_executable - - name: ansible_lxd_executable - remote: - description: - - Name of the LXD remote to use. - default: local - vars: - - name: ansible_lxd_remote - version_added: 2.0.0 - project: - description: - - Name of the LXD project to use. - vars: - - name: ansible_lxd_project - version_added: 2.0.0 -''' + - Instance (container/VM) identifier. + - Since community.general 8.0.0, a FQDN can be provided; in that case, the first component (the part before C(.)) is + used as the instance identifier. + type: string + default: inventory_hostname + vars: + - name: inventory_hostname + - name: ansible_host + - name: ansible_lxd_host + executable: + description: + - Shell to use for execution inside instance. + type: string + default: /bin/sh + vars: + - name: ansible_executable + - name: ansible_lxd_executable + lxd_become_method: + description: + - Become command used to switch to a non-root user. + - Is only used when O(remote_user) is not V(root). + type: str + default: /bin/su + vars: + - name: lxd_become_method + version_added: 10.4.0 + remote: + description: + - Name of the LXD remote to use. + type: string + default: local + vars: + - name: ansible_lxd_remote + version_added: 2.0.0 + remote_user: + description: + - User to login/authenticate as. + - Can be set from the CLI with the C(--user) or C(-u) options. 
+ type: string + default: root + vars: + - name: ansible_user + env: + - name: ANSIBLE_REMOTE_USER + ini: + - section: defaults + key: remote_user + keyword: + - name: remote_user + version_added: 10.4.0 + project: + description: + - Name of the LXD project to use. + type: string + vars: + - name: ansible_lxd_project + version_added: 2.0.0 +""" import os from subprocess import Popen, PIPE @@ -56,43 +87,59 @@ class Connection(ConnectionBase): transport = 'community.general.lxd' has_pipelining = True - default_user = 'root' def __init__(self, play_context, new_stdin, *args, **kwargs): super(Connection, self).__init__(play_context, new_stdin, *args, **kwargs) - self._host = self._play_context.remote_addr try: self._lxc_cmd = get_bin_path("lxc") except ValueError: raise AnsibleError("lxc command not found in PATH") - if self._play_context.remote_user is not None and self._play_context.remote_user != 'root': - self._display.warning('lxd does not support remote_user, using container default: root') + def _host(self): + """ translate remote_addr to lxd (short) hostname """ + return self.get_option("remote_addr").split(".", 1)[0] def _connect(self): """connect to lxd (nothing to do here) """ super(Connection, self)._connect() if not self._connected: - self._display.vvv(u"ESTABLISH LXD CONNECTION FOR USER: root", host=self._host) + self._display.vvv(f"ESTABLISH LXD CONNECTION FOR USER: {self.get_option('remote_user')}", host=self._host()) self._connected = True + def _build_command(self, cmd) -> str: + """build the command to execute on the lxd host""" + + exec_cmd = [self._lxc_cmd] + + if self.get_option("project"): + exec_cmd.extend(["--project", self.get_option("project")]) + + exec_cmd.extend(["exec", f"{self.get_option('remote')}:{self._host()}", "--"]) + + if self.get_option("remote_user") != "root": + self._display.vvv( + f"INFO: Running as non-root user: {self.get_option('remote_user')}, \ + trying to run 'lxc exec' with become method: 
{self.get_option('lxd_become_method')}", + host=self._host(), + ) + exec_cmd.extend( + [self.get_option("lxd_become_method"), self.get_option("remote_user"), "-c"] + ) + + exec_cmd.extend([self.get_option("executable"), "-c", cmd]) + + return exec_cmd + def exec_command(self, cmd, in_data=None, sudoable=True): """ execute a command on the lxd host """ super(Connection, self).exec_command(cmd, in_data=in_data, sudoable=sudoable) - self._display.vvv(u"EXEC {0}".format(cmd), host=self._host) + self._display.vvv(f"EXEC {cmd}", host=self._host()) - local_cmd = [self._lxc_cmd] - if self.get_option("project"): - local_cmd.extend(["--project", self.get_option("project")]) - local_cmd.extend([ - "exec", - "%s:%s" % (self.get_option("remote"), self.get_option("remote_addr")), - "--", - self.get_option("executable"), "-c", cmd - ]) + local_cmd = self._build_command(cmd) + self._display.vvvvv(f"EXEC {local_cmd}", host=self._host()) local_cmd = [to_bytes(i, errors='surrogate_or_strict') for i in local_cmd] in_data = to_bytes(in_data, errors='surrogate_or_strict', nonstring='passthru') @@ -103,31 +150,73 @@ class Connection(ConnectionBase): stdout = to_text(stdout) stderr = to_text(stderr) - if stderr == "error: Container is not running.\n": - raise AnsibleConnectionFailure("container not running: %s" % self._host) + self._display.vvvvv(f"EXEC lxc output: {stdout} {stderr}", host=self._host()) - if stderr == "error: not found\n": - raise AnsibleConnectionFailure("container not found: %s" % self._host) + if "is not running" in stderr: + raise AnsibleConnectionFailure(f"instance not running: {self._host()}") + + if stderr.strip() == "Error: Instance not found" or stderr.strip() == "error: not found": + raise AnsibleConnectionFailure(f"instance not found: {self._host()}") return process.returncode, stdout, stderr + def _get_remote_uid_gid(self) -> tuple[int, int]: + """Get the user and group ID of 'remote_user' from the instance.""" + + rc, uid_out, err = self.exec_command("/bin/id 
-u") + if rc != 0: + raise AnsibleError( + f"Failed to get remote uid for user {self.get_option('remote_user')}: {err}" + ) + uid = uid_out.strip() + + rc, gid_out, err = self.exec_command("/bin/id -g") + if rc != 0: + raise AnsibleError( + f"Failed to get remote gid for user {self.get_option('remote_user')}: {err}" + ) + gid = gid_out.strip() + + return int(uid), int(gid) + def put_file(self, in_path, out_path): """ put a file from local to lxd """ super(Connection, self).put_file(in_path, out_path) - self._display.vvv(u"PUT {0} TO {1}".format(in_path, out_path), host=self._host) + self._display.vvv(f"PUT {in_path} TO {out_path}", host=self._host()) if not os.path.isfile(to_bytes(in_path, errors='surrogate_or_strict')): - raise AnsibleFileNotFound("input path is not a file: %s" % in_path) + raise AnsibleFileNotFound(f"input path is not a file: {in_path}") local_cmd = [self._lxc_cmd] if self.get_option("project"): local_cmd.extend(["--project", self.get_option("project")]) - local_cmd.extend([ - "file", "push", - in_path, - "%s:%s/%s" % (self.get_option("remote"), self.get_option("remote_addr"), out_path) - ]) + + if self.get_option("remote_user") != "root": + uid, gid = self._get_remote_uid_gid() + local_cmd.extend( + [ + "file", + "push", + "--uid", + str(uid), + "--gid", + str(gid), + in_path, + f"{self.get_option('remote')}:{self._host()}/{out_path}", + ] + ) + else: + local_cmd.extend( + [ + "file", + "push", + in_path, + f"{self.get_option('remote')}:{self._host()}/{out_path}", + ] + ) + + self._display.vvvvv(f"PUT {local_cmd}", host=self._host()) local_cmd = [to_bytes(i, errors='surrogate_or_strict') for i in local_cmd] @@ -138,14 +227,14 @@ class Connection(ConnectionBase): """ fetch a file from lxd to local """ super(Connection, self).fetch_file(in_path, out_path) - self._display.vvv(u"FETCH {0} TO {1}".format(in_path, out_path), host=self._host) + self._display.vvv(f"FETCH {in_path} TO {out_path}", host=self._host()) local_cmd = [self._lxc_cmd] if 
self.get_option("project"): local_cmd.extend(["--project", self.get_option("project")]) local_cmd.extend([ "file", "pull", - "%s:%s/%s" % (self.get_option("remote"), self.get_option("remote_addr"), in_path), + f"{self.get_option('remote')}:{self._host()}/{in_path}", out_path ]) diff --git a/plugins/connection/qubes.py b/plugins/connection/qubes.py index 1de9e10011..8d69594b22 100644 --- a/plugins/connection/qubes.py +++ b/plugins/connection/qubes.py @@ -1,42 +1,42 @@ -# -*- coding: utf-8 -*- # Based on the buildah connection plugin # Copyright (c) 2017 Ansible Project # 2018 Kushal Das -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later # # # Written by: Kushal Das (https://github.com/kushaldas) -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = ''' - name: qubes - short_description: Interact with an existing QubesOS AppVM +DOCUMENTATION = r""" +name: qubes +short_description: Interact with an existing QubesOS AppVM +description: + - Run commands or put/fetch files to an existing Qubes AppVM using qubes tools. +author: Kushal Das (@kushaldas) + + +options: + remote_addr: description: - - Run commands or put/fetch files to an existing Qubes AppVM using qubes tools. - - author: Kushal Das (@kushaldas) - - - options: - remote_addr: - description: - - vm name - default: inventory_hostname - vars: - - name: ansible_host - remote_user: - description: - - The user to execute as inside the vm. - default: The *user* account as default in Qubes OS. - vars: - - name: ansible_user + - VM name. + type: string + default: inventory_hostname + vars: + - name: ansible_host + remote_user: + description: + - The user to execute as inside the VM. 
+ type: string + default: The I(user) account as default in Qubes OS. + vars: + - name: ansible_user # keyword: # - name: hosts -''' +""" import subprocess @@ -75,7 +75,7 @@ class Connection(ConnectionBase): """ display.vvvv("CMD: ", cmd) if not cmd.endswith("\n"): - cmd = cmd + "\n" + cmd = f"{cmd}\n" local_cmd = [] # For dom0 @@ -92,7 +92,7 @@ class Connection(ConnectionBase): display.vvvv("Local cmd: ", local_cmd) - display.vvv("RUN %s" % (local_cmd,), host=self._remote_vmname) + display.vvv(f"RUN {local_cmd}", host=self._remote_vmname) p = subprocess.Popen(local_cmd, shell=False, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE) @@ -111,42 +111,42 @@ class Connection(ConnectionBase): """Run specified command in a running QubesVM """ super(Connection, self).exec_command(cmd, in_data=in_data, sudoable=sudoable) - display.vvvv("CMD IS: %s" % cmd) + display.vvvv(f"CMD IS: {cmd}") rc, stdout, stderr = self._qubes(cmd) - display.vvvvv("STDOUT %r STDERR %r" % (stderr, stderr)) + display.vvvvv(f"STDOUT {stdout!r} STDERR {stderr!r}") return rc, stdout, stderr def put_file(self, in_path, out_path): """ Place a local file located in 'in_path' inside VM at 'out_path' """ super(Connection, self).put_file(in_path, out_path) - display.vvv("PUT %s TO %s" % (in_path, out_path), host=self._remote_vmname) + display.vvv(f"PUT {in_path} TO {out_path}", host=self._remote_vmname) with open(in_path, "rb") as fobj: source_data = fobj.read() - retcode, dummy, dummy = self._qubes('cat > "{0}"\n'.format(out_path), source_data, "qubes.VMRootShell") + retcode, dummy, dummy = self._qubes(f'cat > "{out_path}\"\n', source_data, "qubes.VMRootShell") # if qubes.VMRootShell service not supported, fallback to qubes.VMShell and # hope it will have appropriate permissions if retcode == 127: - retcode, dummy, dummy = self._qubes('cat > "{0}"\n'.format(out_path), source_data) + retcode, dummy, dummy = self._qubes(f'cat > "{out_path}\"\n', source_data) if retcode != 0: - raise 
AnsibleConnectionFailure('Failed to put_file to {0}'.format(out_path)) + raise AnsibleConnectionFailure(f'Failed to put_file to {out_path}') def fetch_file(self, in_path, out_path): """Obtain file specified via 'in_path' from the container and place it at 'out_path' """ super(Connection, self).fetch_file(in_path, out_path) - display.vvv("FETCH %s TO %s" % (in_path, out_path), host=self._remote_vmname) + display.vvv(f"FETCH {in_path} TO {out_path}", host=self._remote_vmname) # We are running in dom0 - cmd_args_list = ["qvm-run", "--pass-io", self._remote_vmname, "cat {0}".format(in_path)] + cmd_args_list = ["qvm-run", "--pass-io", self._remote_vmname, f"cat {in_path}"] with open(out_path, "wb") as fobj: p = subprocess.Popen(cmd_args_list, shell=False, stdout=fobj) p.communicate() if p.returncode != 0: - raise AnsibleConnectionFailure('Failed to fetch file to {0}'.format(out_path)) + raise AnsibleConnectionFailure(f'Failed to fetch file to {out_path}') def close(self): """ Closing the connection """ diff --git a/plugins/connection/saltstack.py b/plugins/connection/saltstack.py index 95870ad2d0..b09ffcd787 100644 --- a/plugins/connection/saltstack.py +++ b/plugins/connection/saltstack.py @@ -1,21 +1,20 @@ -# -*- coding: utf-8 -*- # Based on local.py (c) 2012, Michael DeHaan # Based on chroot.py (c) 2013, Maykel Moya # Based on func.py -# (c) 2014, Michael Scherer -# (c) 2017 Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# Copyright (c) 2014, Michael Scherer +# Copyright (c) 2017 Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = ''' - author: Michael Scherer (@mscherer) - name: saltstack - short_description: Allow ansible to piggyback on 
salt minions - description: - - This allows you to use existing Saltstack infrastructure to connect to targets. -''' +DOCUMENTATION = r""" +author: Michael Scherer (@mscherer) +name: saltstack +short_description: Allow ansible to piggyback on salt minions +description: + - This allows you to use existing Saltstack infrastructure to connect to targets. +""" import os import base64 @@ -58,11 +57,11 @@ class Connection(ConnectionBase): if in_data: raise errors.AnsibleError("Internal Error: this module does not support optimized module pipelining") - self._display.vvv("EXEC %s" % cmd, host=self.host) + self._display.vvv(f"EXEC {cmd}", host=self.host) # need to add 'true;' to work around https://github.com/saltstack/salt/issues/28077 - res = self.client.cmd(self.host, 'cmd.exec_code_all', ['bash', 'true;' + cmd]) + res = self.client.cmd(self.host, 'cmd.exec_code_all', ['bash', f"true;{cmd}"]) if self.host not in res: - raise errors.AnsibleError("Minion %s didn't answer, check if salt-minion is running and the name is correct" % self.host) + raise errors.AnsibleError(f"Minion {self.host} didn't answer, check if salt-minion is running and the name is correct") p = res[self.host] return p['retcode'], p['stdout'], p['stderr'] @@ -80,7 +79,7 @@ class Connection(ConnectionBase): super(Connection, self).put_file(in_path, out_path) out_path = self._normalize_path(out_path, '/') - self._display.vvv("PUT %s TO %s" % (in_path, out_path), host=self.host) + self._display.vvv(f"PUT {in_path} TO {out_path}", host=self.host) with open(in_path, 'rb') as in_fh: content = in_fh.read() self.client.cmd(self.host, 'hashutil.base64_decodefile', [base64.b64encode(content), out_path]) @@ -92,7 +91,7 @@ class Connection(ConnectionBase): super(Connection, self).fetch_file(in_path, out_path) in_path = self._normalize_path(in_path, '/') - self._display.vvv("FETCH %s TO %s" % (in_path, out_path), host=self.host) + self._display.vvv(f"FETCH {in_path} TO {out_path}", host=self.host) content = 
self.client.cmd(self.host, 'cp.get_file_str', [in_path])[self.host] open(out_path, 'wb').write(content) diff --git a/plugins/connection/wsl.py b/plugins/connection/wsl.py new file mode 100644 index 0000000000..3b768eebf8 --- /dev/null +++ b/plugins/connection/wsl.py @@ -0,0 +1,790 @@ +# Derived from ansible/plugins/connection/proxmox_pct_remote.py (c) 2024 Nils Stein (@mietzen) +# Derived from ansible/plugins/connection/paramiko_ssh.py (c) 2012, Michael DeHaan +# Copyright (c) 2025 Rui Lopes (@rgl) +# Copyright (c) 2025 Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + +DOCUMENTATION = r""" +author: Rui Lopes (@rgl) +name: wsl +short_description: Run tasks in WSL distribution using wsl.exe CLI using SSH +requirements: + - paramiko +description: + - Run commands or put/fetch files to an existing WSL distribution using wsl.exe CLI using SSH. + - Uses the Python SSH implementation (Paramiko) to connect to the WSL host. +version_added: "10.6.0" +options: + remote_addr: + description: + - Address of the remote target. + default: inventory_hostname + type: string + vars: + - name: inventory_hostname + - name: ansible_host + - name: ansible_ssh_host + - name: ansible_paramiko_host + port: + description: Remote port to connect to. + type: int + default: 22 + ini: + - section: defaults + key: remote_port + - section: paramiko_connection + key: remote_port + env: + - name: ANSIBLE_REMOTE_PORT + - name: ANSIBLE_REMOTE_PARAMIKO_PORT + vars: + - name: ansible_port + - name: ansible_ssh_port + - name: ansible_paramiko_port + keyword: + - name: port + remote_user: + description: + - User to login/authenticate as. + - Can be set from the CLI with the C(--user) or C(-u) options. 
+ type: string + vars: + - name: ansible_user + - name: ansible_ssh_user + - name: ansible_paramiko_user + env: + - name: ANSIBLE_REMOTE_USER + - name: ANSIBLE_PARAMIKO_REMOTE_USER + ini: + - section: defaults + key: remote_user + - section: paramiko_connection + key: remote_user + keyword: + - name: remote_user + password: + description: + - Secret used to either login the SSH server or as a passphrase for SSH keys that require it. + - Can be set from the CLI with the C(--ask-pass) option. + type: string + vars: + - name: ansible_password + - name: ansible_ssh_pass + - name: ansible_ssh_password + - name: ansible_paramiko_pass + - name: ansible_paramiko_password + use_rsa_sha2_algorithms: + description: + - Whether or not to enable RSA SHA2 algorithms for pubkeys and hostkeys. + - On paramiko versions older than 2.9, this only affects hostkeys. + - For behavior matching paramiko<2.9 set this to V(false). + vars: + - name: ansible_paramiko_use_rsa_sha2_algorithms + ini: + - {key: use_rsa_sha2_algorithms, section: paramiko_connection} + env: + - {name: ANSIBLE_PARAMIKO_USE_RSA_SHA2_ALGORITHMS} + default: true + type: boolean + host_key_auto_add: + description: "Automatically add host keys to C(~/.ssh/known_hosts)." + env: + - name: ANSIBLE_PARAMIKO_HOST_KEY_AUTO_ADD + ini: + - key: host_key_auto_add + section: paramiko_connection + type: boolean + look_for_keys: + default: true + description: "Set to V(false) to disable searching for private key files in C(~/.ssh/)." + env: + - name: ANSIBLE_PARAMIKO_LOOK_FOR_KEYS + ini: + - {key: look_for_keys, section: paramiko_connection} + type: boolean + proxy_command: + default: "" + description: + - Proxy information for running the connection through a jumphost. + - This option is supported by paramiko version 1.9.0 or newer. 
+ type: string + env: + - name: ANSIBLE_PARAMIKO_PROXY_COMMAND + ini: + - {key: proxy_command, section: paramiko_connection} + vars: + - name: ansible_paramiko_proxy_command + record_host_keys: + default: true + description: "Save the host keys to a file." + env: + - name: ANSIBLE_PARAMIKO_RECORD_HOST_KEYS + ini: + - section: paramiko_connection + key: record_host_keys + type: boolean + host_key_checking: + description: "Set this to V(false) if you want to avoid host key checking by the underlying tools Ansible uses to connect + to the host." + type: boolean + default: true + env: + - name: ANSIBLE_HOST_KEY_CHECKING + - name: ANSIBLE_SSH_HOST_KEY_CHECKING + - name: ANSIBLE_PARAMIKO_HOST_KEY_CHECKING + ini: + - section: defaults + key: host_key_checking + - section: paramiko_connection + key: host_key_checking + vars: + - name: ansible_host_key_checking + - name: ansible_ssh_host_key_checking + - name: ansible_paramiko_host_key_checking + use_persistent_connections: + description: "Toggles the use of persistence for connections." + type: boolean + default: false + env: + - name: ANSIBLE_USE_PERSISTENT_CONNECTIONS + ini: + - section: defaults + key: use_persistent_connections + banner_timeout: + type: float + default: 30 + description: + - Configures, in seconds, the amount of time to wait for the SSH banner to be presented. + - This option is supported by paramiko version 1.15.0 or newer. + ini: + - section: paramiko_connection + key: banner_timeout + env: + - name: ANSIBLE_PARAMIKO_BANNER_TIMEOUT + timeout: + type: int + default: 10 + description: + - Number of seconds until the plugin gives up on failing to establish a TCP connection. + - This option is supported by paramiko version 2.2.0 or newer. 
+ ini: + - section: defaults + key: timeout + - section: ssh_connection + key: timeout + - section: paramiko_connection + key: timeout + env: + - name: ANSIBLE_TIMEOUT + - name: ANSIBLE_SSH_TIMEOUT + - name: ANSIBLE_PARAMIKO_TIMEOUT + vars: + - name: ansible_ssh_timeout + - name: ansible_paramiko_timeout + cli: + - name: timeout + lock_file_timeout: + type: int + default: 60 + description: Number of seconds until the plugin gives up on trying to write a lock file when writing SSH known host keys. + vars: + - name: ansible_lock_file_timeout + env: + - name: ANSIBLE_LOCK_FILE_TIMEOUT + private_key_file: + description: + - Path to private key file to use for authentication. + type: path + ini: + - section: defaults + key: private_key_file + - section: paramiko_connection + key: private_key_file + env: + - name: ANSIBLE_PRIVATE_KEY_FILE + - name: ANSIBLE_PARAMIKO_PRIVATE_KEY_FILE + vars: + - name: ansible_private_key_file + - name: ansible_ssh_private_key_file + - name: ansible_paramiko_private_key_file + cli: + - name: private_key_file + option: "--private-key" + user_known_hosts_file: + description: + - Path to the user known hosts file. + - Used to verify the ssh hosts keys. + type: path + default: ~/.ssh/known_hosts + ini: + - section: paramiko_connection + key: user_known_hosts_file + vars: + - name: ansible_paramiko_user_known_hosts_file + wsl_distribution: + description: + - WSL distribution name. + type: string + required: true + vars: + - name: wsl_distribution + wsl_user: + description: + - WSL distribution user. + type: string + vars: + - name: wsl_user + become_user: + description: + - WSL distribution user. + type: string + default: root + vars: + - name: become_user + - name: ansible_become_user + become: + description: + - Whether to use the user defined by O(become_user). 
+ type: bool + default: false + vars: + - name: become + - name: ansible_become +""" + +EXAMPLES = r""" +# ------------------------ +# Inventory: inventory.yml +# ------------------------ +--- +all: + children: + wsl: + hosts: + example-wsl-ubuntu: + ansible_host: 10.0.0.10 + wsl_distribution: ubuntu + wsl_user: ubuntu + vars: + ansible_connection: community.general.wsl + ansible_user: vagrant +# ---------------------- +# Playbook: playbook.yml +# ---------------------- +--- +- name: WSL Example + hosts: wsl + gather_facts: true + become: true + tasks: + - name: Ping + ansible.builtin.ping: + - name: Id (with become false) + become: false + changed_when: false + args: + executable: /bin/bash + ansible.builtin.shell: | + exec 2>&1 + set -x + echo "$0" + pwd + id + - name: Id (with become true) + changed_when: false + args: + executable: /bin/bash + ansible.builtin.shell: | + exec 2>&1 + set -x + echo "$0" + pwd + id + - name: Reboot + ansible.builtin.reboot: + boot_time_command: systemctl show -p ActiveEnterTimestamp init.scope +""" + +import io +import os +import pathlib +import shlex +import socket +import tempfile +import traceback +import typing as t + +from ansible.errors import ( + AnsibleAuthenticationFailure, + AnsibleConnectionFailure, + AnsibleError, +) +from ansible_collections.community.general.plugins.module_utils._filelock import FileLock, LockTimeout +from ansible_collections.community.general.plugins.module_utils.version import LooseVersion +from ansible.module_utils.common.text.converters import to_bytes, to_native, to_text +from ansible.playbook.play_context import PlayContext +from ansible.plugins.connection import ConnectionBase +from ansible.utils.display import Display +from ansible.utils.path import makedirs_safe +from binascii import hexlify +from subprocess import list2cmdline + +try: + import paramiko + PARAMIKO_IMPORT_ERR = None +except ImportError: + paramiko = None + PARAMIKO_IMPORT_ERR = traceback.format_exc() + + +if t.TYPE_CHECKING 
and PARAMIKO_IMPORT_ERR is None: + from paramiko import MissingHostKeyPolicy + from paramiko.client import SSHClient + from paramiko.pkey import PKey +else: + MissingHostKeyPolicy: type = object + SSHClient: type = object + PKey: type = object + + +display = Display() + + +def authenticity_msg(hostname: str, ktype: str, fingerprint: str) -> str: + msg = f""" + paramiko: The authenticity of host '{hostname}' can't be established. + The {ktype} key fingerprint is {fingerprint}. + Are you sure you want to continue connecting (yes/no)? + """ + return msg + + +class MyAddPolicy(MissingHostKeyPolicy): + """ + Based on AutoAddPolicy in paramiko so we can determine when keys are added + + and also prompt for input. + + Policy for automatically adding the hostname and new host key to the + local L{HostKeys} object, and saving it. This is used by L{SSHClient}. + """ + + def __init__(self, connection: Connection) -> None: + self.connection = connection + self._options = connection._options + + def missing_host_key(self, client: SSHClient, hostname: str, key: PKey) -> None: + + if all((self.connection.get_option('host_key_checking'), not self.connection.get_option('host_key_auto_add'))): + + fingerprint = hexlify(key.get_fingerprint()) + ktype = key.get_name() + + if self.connection.get_option('use_persistent_connections') or self.connection.force_persistence: + # don't print the prompt string since the user cannot respond + # to the question anyway + raise AnsibleError(authenticity_msg(hostname, ktype, fingerprint)[1:92]) + + inp = to_text( + display.prompt_until(authenticity_msg(hostname, ktype, fingerprint), private=False), + errors='surrogate_or_strict' + ) + + if inp.lower() not in ['yes', 'y', '']: + raise AnsibleError('host connection rejected by user') + + key._added_by_ansible_this_time = True + + # existing implementation below: + client._host_keys.add(hostname, key.get_name(), key) + + # host keys are actually saved in close() function below + # in order to control 
ordering. + + +class Connection(ConnectionBase): + """ SSH based connections (paramiko) to WSL """ + + transport = 'community.general.wsl' + _log_channel: str | None = None + + def __init__(self, play_context: PlayContext, new_stdin: io.TextIOWrapper | None = None, *args: t.Any, **kwargs: t.Any): + super(Connection, self).__init__(play_context, new_stdin, *args, **kwargs) + + def _set_log_channel(self, name: str) -> None: + """ Mimic paramiko.SSHClient.set_log_channel """ + self._log_channel = name + + def _parse_proxy_command(self, port: int = 22) -> dict[str, t.Any]: + proxy_command = self.get_option('proxy_command') or None + + sock_kwarg = {} + if proxy_command: + replacers: t.Dict[str, str] = { + '%h': self.get_option('remote_addr'), + '%p': str(port), + '%r': self.get_option('remote_user') + } + for find, replace in replacers.items(): + proxy_command = proxy_command.replace(find, replace) + try: + sock_kwarg = {'sock': paramiko.ProxyCommand(proxy_command)} + display.vvv(f'CONFIGURE PROXY COMMAND FOR CONNECTION: {proxy_command}', host=self.get_option('remote_addr')) + except AttributeError: + display.warning('Paramiko ProxyCommand support unavailable. ' + 'Please upgrade to Paramiko 1.9.0 or newer. 
' + 'Not using configured ProxyCommand') + + return sock_kwarg + + def _connect(self) -> Connection: + """ activates the connection object """ + + if PARAMIKO_IMPORT_ERR is not None: + raise AnsibleError(f'paramiko is not installed: {to_native(PARAMIKO_IMPORT_ERR)}') + + port = self.get_option('port') + display.vvv(f'ESTABLISH PARAMIKO SSH CONNECTION FOR USER: {self.get_option("remote_user")} on PORT {to_text(port)} TO {self.get_option("remote_addr")}', + host=self.get_option('remote_addr')) + + ssh = paramiko.SSHClient() + + # Set pubkey and hostkey algorithms to disable, the only manipulation allowed currently + # is keeping or omitting rsa-sha2 algorithms + # default_keys: t.Tuple[str] = () + paramiko_preferred_pubkeys = getattr(paramiko.Transport, '_preferred_pubkeys', ()) + paramiko_preferred_hostkeys = getattr(paramiko.Transport, '_preferred_keys', ()) + use_rsa_sha2_algorithms = self.get_option('use_rsa_sha2_algorithms') + disabled_algorithms: t.Dict[str, t.Iterable[str]] = {} + if not use_rsa_sha2_algorithms: + if paramiko_preferred_pubkeys: + disabled_algorithms['pubkeys'] = tuple(a for a in paramiko_preferred_pubkeys if 'rsa-sha2' in a) + if paramiko_preferred_hostkeys: + disabled_algorithms['keys'] = tuple(a for a in paramiko_preferred_hostkeys if 'rsa-sha2' in a) + + # override paramiko's default logger name + if self._log_channel is not None: + ssh.set_log_channel(self._log_channel) + + self.keyfile = os.path.expanduser(self.get_option('user_known_hosts_file')) + + if self.get_option('host_key_checking'): + for ssh_known_hosts in ('/etc/ssh/ssh_known_hosts', '/etc/openssh/ssh_known_hosts', self.keyfile): + try: + ssh.load_system_host_keys(ssh_known_hosts) + break + except IOError: + pass # file was not found, but not required to function + except paramiko.hostkeys.InvalidHostKey as e: + raise AnsibleConnectionFailure(f'Invalid host key: {to_text(e.line)}') + try: + ssh.load_system_host_keys() + except paramiko.hostkeys.InvalidHostKey as e: + raise 
AnsibleConnectionFailure(f'Invalid host key: {to_text(e.line)}') + + ssh_connect_kwargs = self._parse_proxy_command(port) + ssh.set_missing_host_key_policy(MyAddPolicy(self)) + conn_password = self.get_option('password') + allow_agent = True + + if conn_password is not None: + allow_agent = False + + try: + key_filename = None + if self.get_option('private_key_file'): + key_filename = os.path.expanduser(self.get_option('private_key_file')) + + # paramiko 2.2 introduced auth_timeout parameter + if LooseVersion(paramiko.__version__) >= LooseVersion('2.2.0'): + ssh_connect_kwargs['auth_timeout'] = self.get_option('timeout') + + # paramiko 1.15 introduced banner timeout parameter + if LooseVersion(paramiko.__version__) >= LooseVersion('1.15.0'): + ssh_connect_kwargs['banner_timeout'] = self.get_option('banner_timeout') + + ssh.connect( + self.get_option('remote_addr').lower(), + username=self.get_option('remote_user'), + allow_agent=allow_agent, + look_for_keys=self.get_option('look_for_keys'), + key_filename=key_filename, + password=conn_password, + timeout=self.get_option('timeout'), + port=port, + disabled_algorithms=disabled_algorithms, + **ssh_connect_kwargs, + ) + except paramiko.ssh_exception.BadHostKeyException as e: + raise AnsibleConnectionFailure(f'host key mismatch for {to_text(e.hostname)}') + except paramiko.ssh_exception.AuthenticationException as e: + msg = f'Failed to authenticate: {e}' + raise AnsibleAuthenticationFailure(msg) + except Exception as e: + msg = to_text(e) + if 'PID check failed' in msg: + raise AnsibleError('paramiko version issue, please upgrade paramiko on the machine running ansible') + elif 'Private key file is encrypted' in msg: + msg = ( + f'ssh {self.get_option("remote_user")}@{self.get_options("remote_addr")}:{port} : ' + f'{msg}\nTo connect as a different user, use -u .' 
+ ) + raise AnsibleConnectionFailure(msg) + else: + raise AnsibleConnectionFailure(msg) + self.ssh = ssh + self._connected = True + return self + + def _any_keys_added(self) -> bool: + for hostname, keys in self.ssh._host_keys.items(): + for keytype, key in keys.items(): + added_this_time = getattr(key, '_added_by_ansible_this_time', False) + if added_this_time: + return True + return False + + def _save_ssh_host_keys(self, filename: str) -> None: + """ + not using the paramiko save_ssh_host_keys function as we want to add new SSH keys at the bottom so folks + don't complain about it :) + """ + + if not self._any_keys_added(): + return + + path = os.path.expanduser('~/.ssh') + makedirs_safe(path) + + with open(filename, 'w') as f: + for hostname, keys in self.ssh._host_keys.items(): + for keytype, key in keys.items(): + # was f.write + added_this_time = getattr(key, '_added_by_ansible_this_time', False) + if not added_this_time: + f.write(f'{hostname} {keytype} {key.get_base64()}\n') + + for hostname, keys in self.ssh._host_keys.items(): + for keytype, key in keys.items(): + added_this_time = getattr(key, '_added_by_ansible_this_time', False) + if added_this_time: + f.write(f'{hostname} {keytype} {key.get_base64()}\n') + + def _build_wsl_command(self, cmd: str) -> str: + wsl_distribution = self.get_option('wsl_distribution') + become = self.get_option('become') + become_user = self.get_option('become_user') + if become and become_user: + wsl_user = become_user + else: + wsl_user = self.get_option('wsl_user') + args = ['wsl.exe', '--distribution', wsl_distribution] + if wsl_user: + args.extend(['--user', wsl_user]) + args.extend(['--']) + args.extend(shlex.split(cmd)) + if os.getenv('_ANSIBLE_TEST_WSL_CONNECTION_PLUGIN_Waeri5tepheeSha2fae8'): + return shlex.join(args) + return list2cmdline(args) # see https://github.com/python/cpython/blob/3.11/Lib/subprocess.py#L576 + + def exec_command(self, cmd: str, in_data: bytes | None = None, sudoable: bool = True) -> 
tuple[int, bytes, bytes]: + """ run a command on inside a WSL distribution """ + + cmd = self._build_wsl_command(cmd) + + super(Connection, self).exec_command(cmd, in_data=in_data, sudoable=sudoable) + + bufsize = 4096 + + try: + self.ssh.get_transport().set_keepalive(5) + chan = self.ssh.get_transport().open_session() + except Exception as e: + text_e = to_text(e) + msg = 'Failed to open session' + if text_e: + msg += f': {text_e}' + raise AnsibleConnectionFailure(to_native(msg)) + + display.vvv(f'EXEC {cmd}', host=self.get_option('remote_addr')) + + cmd = to_bytes(cmd, errors='surrogate_or_strict') + + no_prompt_out = b'' + no_prompt_err = b'' + become_output = b'' + + try: + chan.exec_command(cmd) + if self.become and self.become.expect_prompt(): + password_prompt = False + become_success = False + while not (become_success or password_prompt): + display.debug('Waiting for Privilege Escalation input') + + chunk = chan.recv(bufsize) + display.debug(f'chunk is: {to_text(chunk)}') + if not chunk: + if b'unknown user' in become_output: + n_become_user = to_native(self.become.get_option('become_user')) + raise AnsibleError(f'user {n_become_user} does not exist') + else: + break + # raise AnsibleError('ssh connection closed waiting for password prompt') + become_output += chunk + + # need to check every line because we might get lectured + # and we might get the middle of a line in a chunk + for line in become_output.splitlines(True): + if self.become.check_success(line): + become_success = True + break + elif self.become.check_password_prompt(line): + password_prompt = True + break + + if password_prompt: + if self.become: + become_pass = self.become.get_option('become_pass') + chan.sendall(to_bytes(f"{become_pass}\n", errors='surrogate_or_strict')) + else: + raise AnsibleError('A password is required but none was supplied') + else: + no_prompt_out += become_output + no_prompt_err += become_output + + if in_data: + for i in range(0, len(in_data), bufsize): + 
chan.send(in_data[i:i + bufsize]) + chan.shutdown_write() + elif in_data == b'': + chan.shutdown_write() + + except socket.timeout: + raise AnsibleError(f'ssh timed out waiting for privilege escalation.\n{to_text(become_output)}') + + stdout = b''.join(chan.makefile('rb', bufsize)) + stderr = b''.join(chan.makefile_stderr('rb', bufsize)) + returncode = chan.recv_exit_status() + + # NB the full english error message is: + # 'wsl.exe' is not recognized as an internal or external command, + # operable program or batch file. + if "'wsl.exe' is not recognized" in stderr.decode('utf-8'): + raise AnsibleError( + f'wsl.exe not found in path of host: {to_text(self.get_option("remote_addr"))}') + + return (returncode, no_prompt_out + stdout, no_prompt_out + stderr) + + def put_file(self, in_path: str, out_path: str) -> None: + """ transfer a file from local to remote """ + + display.vvv(f'PUT {in_path} TO {out_path}', host=self.get_option('remote_addr')) + try: + with open(in_path, 'rb') as f: + data = f.read() + returncode, stdout, stderr = self.exec_command( + f"{self._shell.executable} -c {self._shell.quote(f'cat > {out_path}')}", + in_data=data, + sudoable=False) + if returncode != 0: + if 'cat: not found' in stderr.decode('utf-8'): + raise AnsibleError( + f'cat not found in path of WSL distribution: {to_text(self.get_option("wsl_distribution"))}') + raise AnsibleError( + f'{to_text(stdout)}\n{to_text(stderr)}') + except Exception as e: + raise AnsibleError( + f'error occurred while putting file from {in_path} to {out_path}!\n{to_text(e)}') + + def fetch_file(self, in_path: str, out_path: str) -> None: + """ save a remote file to the specified path """ + + display.vvv(f'FETCH {in_path} TO {out_path}', host=self.get_option('remote_addr')) + try: + returncode, stdout, stderr = self.exec_command( + f"{self._shell.executable} -c {self._shell.quote(f'cat {in_path}')}", + sudoable=False) + if returncode != 0: + if 'cat: not found' in stderr.decode('utf-8'): + raise 
AnsibleError( + f'cat not found in path of WSL distribution: {to_text(self.get_option("wsl_distribution"))}') + raise AnsibleError( + f'{to_text(stdout)}\n{to_text(stderr)}') + with open(out_path, 'wb') as f: + f.write(stdout) + except Exception as e: + raise AnsibleError( + f'error occurred while fetching file from {in_path} to {out_path}!\n{to_text(e)}') + + def reset(self) -> None: + """ reset the connection """ + + if not self._connected: + return + self.close() + self._connect() + + def close(self) -> None: + """ terminate the connection """ + + if self.get_option('host_key_checking') and self.get_option('record_host_keys') and self._any_keys_added(): + # add any new SSH host keys -- warning -- this could be slow + # (This doesn't acquire the connection lock because it needs + # to exclude only other known_hosts writers, not connections + # that are starting up.) + lockfile = os.path.basename(self.keyfile) + dirname = os.path.dirname(self.keyfile) + makedirs_safe(dirname) + tmp_keyfile_name = None + try: + with FileLock().lock_file(lockfile, dirname, self.get_option('lock_file_timeout')): + # just in case any were added recently + + self.ssh.load_system_host_keys() + self.ssh._host_keys.update(self.ssh._system_host_keys) + + # gather information about the current key file, so + # we can ensure the new file has the correct mode/owner + + key_dir = os.path.dirname(self.keyfile) + if os.path.exists(self.keyfile): + key_stat = os.stat(self.keyfile) + mode = key_stat.st_mode & 0o777 + uid = key_stat.st_uid + gid = key_stat.st_gid + else: + mode = 0o644 + uid = os.getuid() + gid = os.getgid() + + # Save the new keys to a temporary file and move it into place + # rather than rewriting the file. We set delete=False because + # the file will be moved into place rather than cleaned up. 
+ + with tempfile.NamedTemporaryFile(dir=key_dir, delete=False) as tmp_keyfile: + tmp_keyfile_name = tmp_keyfile.name + os.chmod(tmp_keyfile_name, mode) + os.chown(tmp_keyfile_name, uid, gid) + self._save_ssh_host_keys(tmp_keyfile_name) + + os.rename(tmp_keyfile_name, self.keyfile) + except LockTimeout: + raise AnsibleError( + f'writing lock file for {self.keyfile} ran in to the timeout of {self.get_option("lock_file_timeout")}s') + except paramiko.hostkeys.InvalidHostKey as e: + raise AnsibleConnectionFailure(f'Invalid host key: {e.line}') + except Exception as e: + # unable to save keys, including scenario when key was invalid + # and caught earlier + raise AnsibleError( + f'error occurred while writing SSH host keys!\n{to_text(e)}') + finally: + if tmp_keyfile_name is not None: + pathlib.Path(tmp_keyfile_name).unlink(missing_ok=True) + + self.ssh.close() + self._connected = False diff --git a/plugins/connection/zone.py b/plugins/connection/zone.py index d7e127ca38..49b3188f44 100644 --- a/plugins/connection/zone.py +++ b/plugins/connection/zone.py @@ -1,38 +1,38 @@ -# -*- coding: utf-8 -*- # Based on local.py (c) 2012, Michael DeHaan # and chroot.py (c) 2013, Maykel Moya # and jail.py (c) 2013, Michael Scherer # (c) 2015, Dagobert Michelsen # (c) 2015, Toshio Kuratomi # Copyright (c) 2017 Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = ''' - author: Ansible Core Team - name: zone - short_description: Run tasks in a zone instance +DOCUMENTATION = r""" +author: Ansible Core Team +name: zone +short_description: Run tasks in a zone instance +description: + - Run commands or put/fetch files to an existing 
zone. +options: + remote_addr: description: - - Run commands or put/fetch files to an existing zone - options: - remote_addr: - description: - - Zone identifier - default: inventory_hostname - vars: - - name: ansible_host - - name: ansible_zone_host -''' + - Zone identifier. + type: string + default: inventory_hostname + vars: + - name: ansible_host + - name: ansible_zone_host +""" import os import os.path import subprocess import traceback +from shlex import quote as shlex_quote from ansible.errors import AnsibleError -from ansible.module_utils.six.moves import shlex_quote from ansible.module_utils.common.process import get_bin_path from ansible.module_utils.common.text.converters import to_bytes from ansible.plugins.connection import ConnectionBase, BUFSIZE @@ -60,14 +60,14 @@ class Connection(ConnectionBase): self.zlogin_cmd = to_bytes(self._search_executable('zlogin')) if self.zone not in self.list_zones(): - raise AnsibleError("incorrect zone name %s" % self.zone) + raise AnsibleError(f"incorrect zone name {self.zone}") @staticmethod def _search_executable(executable): try: return get_bin_path(executable) except ValueError: - raise AnsibleError("%s command not found in PATH" % executable) + raise AnsibleError(f"{executable} command not found in PATH") def list_zones(self): process = subprocess.Popen([self.zoneadm_cmd, 'list', '-ip'], @@ -92,7 +92,7 @@ class Connection(ConnectionBase): # stdout, stderr = p.communicate() path = process.stdout.readlines()[0].split(':')[3] - return path + '/root' + return f"{path}/root" def _connect(self): """ connect to the zone; nothing to do here """ @@ -115,7 +115,7 @@ class Connection(ConnectionBase): local_cmd = [self.zlogin_cmd, self.zone, cmd] local_cmd = map(to_bytes, local_cmd) - display.vvv("EXEC %s" % (local_cmd), host=self.zone) + display.vvv(f"EXEC {local_cmd}", host=self.zone) p = subprocess.Popen(local_cmd, shell=False, stdin=stdin, stdout=subprocess.PIPE, stderr=subprocess.PIPE) @@ -138,7 +138,7 @@ class 
Connection(ConnectionBase): exist in any given chroot. So for now we're choosing "/" instead. This also happens to be the former default. - Can revisit using $HOME instead if it's a problem + Can revisit using $HOME instead if it is a problem """ if not remote_path.startswith(os.path.sep): remote_path = os.path.join(os.path.sep, remote_path) @@ -147,7 +147,7 @@ class Connection(ConnectionBase): def put_file(self, in_path, out_path): """ transfer a file from local to zone """ super(Connection, self).put_file(in_path, out_path) - display.vvv("PUT %s TO %s" % (in_path, out_path), host=self.zone) + display.vvv(f"PUT {in_path} TO {out_path}", host=self.zone) out_path = shlex_quote(self._prefix_login_path(out_path)) try: @@ -157,27 +157,27 @@ class Connection(ConnectionBase): else: count = '' try: - p = self._buffered_exec_command('dd of=%s bs=%s%s' % (out_path, BUFSIZE, count), stdin=in_file) + p = self._buffered_exec_command(f'dd of={out_path} bs={BUFSIZE}{count}', stdin=in_file) except OSError: raise AnsibleError("jail connection requires dd command in the jail") try: stdout, stderr = p.communicate() except Exception: traceback.print_exc() - raise AnsibleError("failed to transfer file %s to %s" % (in_path, out_path)) + raise AnsibleError(f"failed to transfer file {in_path} to {out_path}") if p.returncode != 0: - raise AnsibleError("failed to transfer file %s to %s:\n%s\n%s" % (in_path, out_path, stdout, stderr)) + raise AnsibleError(f"failed to transfer file {in_path} to {out_path}:\n{stdout}\n{stderr}") except IOError: - raise AnsibleError("file or module does not exist at: %s" % in_path) + raise AnsibleError(f"file or module does not exist at: {in_path}") def fetch_file(self, in_path, out_path): """ fetch a file from zone to local """ super(Connection, self).fetch_file(in_path, out_path) - display.vvv("FETCH %s TO %s" % (in_path, out_path), host=self.zone) + display.vvv(f"FETCH {in_path} TO {out_path}", host=self.zone) in_path = 
shlex_quote(self._prefix_login_path(in_path)) try: - p = self._buffered_exec_command('dd if=%s bs=%s' % (in_path, BUFSIZE)) + p = self._buffered_exec_command(f'dd if={in_path} bs={BUFSIZE}') except OSError: raise AnsibleError("zone connection requires dd command in the zone") @@ -189,10 +189,10 @@ class Connection(ConnectionBase): chunk = p.stdout.read(BUFSIZE) except Exception: traceback.print_exc() - raise AnsibleError("failed to transfer file %s to %s" % (in_path, out_path)) + raise AnsibleError(f"failed to transfer file {in_path} to {out_path}") stdout, stderr = p.communicate() if p.returncode != 0: - raise AnsibleError("failed to transfer file %s to %s:\n%s\n%s" % (in_path, out_path, stdout, stderr)) + raise AnsibleError(f"failed to transfer file {in_path} to {out_path}:\n{stdout}\n{stderr}") def close(self): """ terminate the connection; nothing to do here """ diff --git a/plugins/doc_fragments/alicloud.py b/plugins/doc_fragments/alicloud.py index f9c9640b61..f0083c9946 100644 --- a/plugins/doc_fragments/alicloud.py +++ b/plugins/doc_fragments/alicloud.py @@ -1,108 +1,97 @@ -# -*- coding: utf-8 -*- # Copyright (c) 2017-present Alibaba Group Holding Limited. He Guimin -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations class ModuleDocFragment(object): # Alicloud only documentation fragment - DOCUMENTATION = r''' + DOCUMENTATION = r""" options: alicloud_access_key: description: - - Alibaba Cloud access key. If not set then the value of environment variable C(ALICLOUD_ACCESS_KEY), - C(ALICLOUD_ACCESS_KEY_ID) will be used instead. + - Alibaba Cloud access key. 
If not set then the value of environment variable E(ALICLOUD_ACCESS_KEY), E(ALICLOUD_ACCESS_KEY_ID) + is used instead. aliases: ['access_key_id', 'access_key'] type: str alicloud_secret_key: description: - - Alibaba Cloud secret key. If not set then the value of environment variable C(ALICLOUD_SECRET_KEY), - C(ALICLOUD_SECRET_ACCESS_KEY) will be used instead. + - Alibaba Cloud secret key. If not set then the value of environment variable E(ALICLOUD_SECRET_KEY), E(ALICLOUD_SECRET_ACCESS_KEY) + is used instead. aliases: ['secret_access_key', 'secret_key'] type: str alicloud_region: description: - - The Alibaba Cloud region to use. If not specified then the value of environment variable - C(ALICLOUD_REGION), C(ALICLOUD_REGION_ID) will be used instead. + - The Alibaba Cloud region to use. If not specified then the value of environment variable E(ALICLOUD_REGION), E(ALICLOUD_REGION_ID) + is used instead. aliases: ['region', 'region_id'] required: true type: str alicloud_security_token: description: - - The Alibaba Cloud security token. If not specified then the value of environment variable - C(ALICLOUD_SECURITY_TOKEN) will be used instead. + - The Alibaba Cloud security token. If not specified then the value of environment variable E(ALICLOUD_SECURITY_TOKEN) + is used instead. aliases: ['security_token'] type: str alicloud_assume_role: description: - - If provided with a role ARN, Ansible will attempt to assume this role using the supplied credentials. - - The nested assume_role block supports I(alicloud_assume_role_arn), I(alicloud_assume_role_session_name), - I(alicloud_assume_role_session_expiration) and I(alicloud_assume_role_policy) + - If provided with a role ARN, Ansible attempts to assume this role using the supplied credentials. + - The nested assume_role block supports C(alicloud_assume_role_arn), C(alicloud_assume_role_session_name), C(alicloud_assume_role_session_expiration) + and C(alicloud_assume_role_policy). 
type: dict aliases: ['assume_role'] alicloud_assume_role_arn: description: - - The Alibaba Cloud role_arn. The ARN of the role to assume. If ARN is set to an empty string, - it does not perform role switching. It supports environment variable ALICLOUD_ASSUME_ROLE_ARN. - ansible will execute with provided credentials. + - The Alibaba Cloud C(role_arn). The ARN of the role to assume. If ARN is set to an empty string, it does not perform + role switching. It supports environment variable E(ALICLOUD_ASSUME_ROLE_ARN). Ansible executes with provided credentials. aliases: ['assume_role_arn'] type: str alicloud_assume_role_session_name: description: - - The Alibaba Cloud session_name. The session name to use when assuming the role. If omitted, - 'ansible' is passed to the AssumeRole call as session name. It supports environment variable - ALICLOUD_ASSUME_ROLE_SESSION_NAME + - The Alibaba Cloud session_name. The session name to use when assuming the role. If omitted, 'ansible' is passed to + the AssumeRole call as session name. It supports environment variable E(ALICLOUD_ASSUME_ROLE_SESSION_NAME). aliases: ['assume_role_session_name'] type: str alicloud_assume_role_session_expiration: description: - - The Alibaba Cloud session_expiration. The time after which the established session for assuming - role expires. Valid value range 900-3600 seconds. Default to 3600 (in this case Alicloud use own default - value). It supports environment variable ALICLOUD_ASSUME_ROLE_SESSION_EXPIRATION + - The Alibaba Cloud C(session_expiration). The time after which the established session for assuming role expires. Valid + value range 900-3600 seconds. Default to 3600 (in this case Alicloud use own default value). It supports environment + variable E(ALICLOUD_ASSUME_ROLE_SESSION_EXPIRATION). aliases: ['assume_role_session_expiration'] type: int ecs_role_name: description: - - The RAM Role Name attached on a ECS instance for API operations. 
You can retrieve this from the 'Access Control' - section of the Alibaba Cloud console. - - If you're running Ansible from an ECS instance with RAM Instance using RAM Role, Ansible will just access the - metadata U(http://100.100.100.200/latest/meta-data/ram/security-credentials/) to obtain the STS - credential. This is a preferred approach over any other when running in ECS as you can avoid hard coding - credentials. Instead these are leased on-the-fly by Ansible which reduces the chance of leakage. + - The RAM Role Name attached on a ECS instance for API operations. You can retrieve this from the 'Access Control' section + of the Alibaba Cloud console. + - If you are running Ansible from an ECS instance with RAM Instance using RAM Role, Ansible just accesses the metadata + U(http://100.100.100.200/latest/meta-data/ram/security-credentials/) to obtain the STS credential. + This is a preferred approach over any other when running in ECS as you can avoid hard coding credentials. Instead + these are leased on-the-fly by Ansible which reduces the chance of leakage. aliases: ['role_name'] type: str profile: description: - - This is the Alicloud profile name as set in the shared credentials file. It can also be sourced from the - ALICLOUD_PROFILE environment variable. + - This is the Alicloud profile name as set in the shared credentials file. It can also be sourced from the E(ALICLOUD_PROFILE) + environment variable. type: str shared_credentials_file: description: - - This is the path to the shared credentials file. It can also be sourced from the ALICLOUD_SHARED_CREDENTIALS_FILE + - This is the path to the shared credentials file. It can also be sourced from the E(ALICLOUD_SHARED_CREDENTIALS_FILE) environment variable. - - If this is not set and a profile is specified, ~/.aliyun/config.json will be used. + - If this is not set and a profile is specified, C(~/.aliyun/config.json) is used. 
type: str author: - - "He Guimin (@xiaozhu36)" + - "He Guimin (@xiaozhu36)" requirements: - - "python >= 3.6" + - "Python >= 3.6" notes: - - If parameters are not set within the module, the following - environment variables can be used in decreasing order of precedence - C(ALICLOUD_ACCESS_KEY) or C(ALICLOUD_ACCESS_KEY_ID), - C(ALICLOUD_SECRET_KEY) or C(ALICLOUD_SECRET_ACCESS_KEY), - C(ALICLOUD_REGION) or C(ALICLOUD_REGION_ID), - C(ALICLOUD_SECURITY_TOKEN), - C(ALICLOUD_ECS_ROLE_NAME), - C(ALICLOUD_SHARED_CREDENTIALS_FILE), - C(ALICLOUD_PROFILE), - C(ALICLOUD_ASSUME_ROLE_ARN), - C(ALICLOUD_ASSUME_ROLE_SESSION_NAME), - C(ALICLOUD_ASSUME_ROLE_SESSION_EXPIRATION), - - C(ALICLOUD_REGION) or C(ALICLOUD_REGION_ID) can be typically be used to specify the - ALICLOUD region, when required, but this can also be configured in the footmark config file -''' + - If parameters are not set within the module, the following environment variables can be used in decreasing order of precedence + E(ALICLOUD_ACCESS_KEY) or E(ALICLOUD_ACCESS_KEY_ID), E(ALICLOUD_SECRET_KEY) or E(ALICLOUD_SECRET_ACCESS_KEY), E(ALICLOUD_REGION) + or E(ALICLOUD_REGION_ID), E(ALICLOUD_SECURITY_TOKEN), E(ALICLOUD_ECS_ROLE_NAME), E(ALICLOUD_SHARED_CREDENTIALS_FILE), + E(ALICLOUD_PROFILE), E(ALICLOUD_ASSUME_ROLE_ARN), E(ALICLOUD_ASSUME_ROLE_SESSION_NAME), E(ALICLOUD_ASSUME_ROLE_SESSION_EXPIRATION). + - E(ALICLOUD_REGION) or E(ALICLOUD_REGION_ID) can be typically be used to specify the Alicloud region, when required, but + this can also be configured in the footmark config file. 
+""" diff --git a/plugins/doc_fragments/attributes.py b/plugins/doc_fragments/attributes.py new file mode 100644 index 0000000000..fdafe1aeaa --- /dev/null +++ b/plugins/doc_fragments/attributes.py @@ -0,0 +1,91 @@ + +# Copyright (c) Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + + +class ModuleDocFragment(object): + + # Standard documentation fragment + DOCUMENTATION = r""" +options: {} +attributes: + check_mode: + description: Can run in C(check_mode) and return changed status prediction without modifying target. + diff_mode: + description: Returns details on what has changed (or possibly needs changing in C(check_mode)), when in diff mode. +""" + + PLATFORM = r""" +options: {} +attributes: + platform: + description: Target OS/families that can be operated against. + support: N/A +""" + + # Should be used together with the standard fragment + INFO_MODULE = r''' +options: {} +attributes: + check_mode: + support: full + details: + - This action does not modify state. + diff_mode: + support: N/A + details: + - This action does not modify state. +''' + + CONN = r""" +options: {} +attributes: + become: + description: Is usable alongside C(become) keywords. + connection: + description: Uses the target's configured connection information to execute code on it. + delegation: + description: Can be used in conjunction with C(delegate_to) and related keywords. +""" + + FACTS = r""" +options: {} +attributes: + facts: + description: Action returns an C(ansible_facts) dictionary that updates existing host facts. +""" + + # Should be used together with the standard fragment and the FACTS fragment + FACTS_MODULE = r''' +options: {} +attributes: + check_mode: + support: full + details: + - This action does not modify state. + diff_mode: + support: N/A + details: + - This action does not modify state. 
+ facts: + support: full +''' + + FILES = r""" +options: {} +attributes: + safe_file_operations: + description: Uses Ansible's strict file operation functions to ensure proper permissions and avoid data corruption. +""" + + FLOW = r""" +options: {} +attributes: + action: + description: Indicates this has a corresponding action plugin so some parts of the options can be executed on the controller. + async: + description: Supports being used with the C(async) keyword. +""" diff --git a/plugins/doc_fragments/auth_basic.py b/plugins/doc_fragments/auth_basic.py index 6f590611d9..3d99466165 100644 --- a/plugins/doc_fragments/auth_basic.py +++ b/plugins/doc_fragments/auth_basic.py @@ -1,31 +1,30 @@ -# -*- coding: utf-8 -*- +# Copyright (c) Ansible project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations class ModuleDocFragment(object): # Standard files documentation fragment - DOCUMENTATION = r''' + DOCUMENTATION = r""" options: api_url: description: - - The resolvable endpoint for the API + - The resolvable endpoint for the API. type: str api_username: description: - - The username to use for authentication against the API + - The username to use for authentication against the API. type: str api_password: description: - - The password to use for authentication against the API + - The password to use for authentication against the API. type: str validate_certs: description: - - Whether or not to validate SSL certs when supplying a https endpoint. + - Whether or not to validate SSL certs when supplying a HTTPS endpoint. 
type: bool - default: yes -''' + default: true +""" diff --git a/plugins/doc_fragments/bitbucket.py b/plugins/doc_fragments/bitbucket.py index 28489356b1..c96a010e71 100644 --- a/plugins/doc_fragments/bitbucket.py +++ b/plugins/doc_fragments/bitbucket.py @@ -1,41 +1,42 @@ -# -*- coding: utf-8 -*- -# Copyright: (c) 2019, Evgeniy Krysanov -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# Copyright (c) 2019, Evgeniy Krysanov +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations class ModuleDocFragment(object): # Standard documentation fragment - DOCUMENTATION = r''' + DOCUMENTATION = r""" options: client_id: description: - The OAuth consumer key. - - If not set the environment variable C(BITBUCKET_CLIENT_ID) will be used. + - If not set the environment variable E(BITBUCKET_CLIENT_ID) is used. type: str client_secret: description: - The OAuth consumer secret. - - If not set the environment variable C(BITBUCKET_CLIENT_SECRET) will be used. + - If not set the environment variable E(BITBUCKET_CLIENT_SECRET) is used. type: str user: description: - The username. - - If not set the environment variable C(BITBUCKET_USERNAME) will be used. + - If not set the environment variable E(BITBUCKET_USERNAME) is used. + - O(ignore:username) is an alias of O(user) since community.general 6.0.0. It was an alias of O(workspace) before. type: str version_added: 4.0.0 + aliases: [username] password: description: - The App password. - - If not set the environment variable C(BITBUCKET_PASSWORD) will be used. + - If not set the environment variable E(BITBUCKET_PASSWORD) is used. 
type: str version_added: 4.0.0 notes: - Bitbucket OAuth consumer key and secret can be obtained from Bitbucket profile -> Settings -> Access Management -> OAuth. - Bitbucket App password can be created from Bitbucket profile -> Personal Settings -> App passwords. - If both OAuth and Basic Auth credentials are passed, OAuth credentials take precedence. -''' +""" diff --git a/plugins/doc_fragments/consul.py b/plugins/doc_fragments/consul.py new file mode 100644 index 0000000000..fd9c1a6e6c --- /dev/null +++ b/plugins/doc_fragments/consul.py @@ -0,0 +1,55 @@ +# Copyright (c) Ansible project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + + +class ModuleDocFragment: + # Common parameters for Consul modules + DOCUMENTATION = r""" +options: + host: + description: + - Host of the Consul agent. + default: localhost + type: str + port: + type: int + description: + - The port on which the consul agent is running. + default: 8500 + scheme: + description: + - The protocol scheme on which the Consul agent is running. Defaults to V(http) and can be set to V(https) for secure + connections. + default: http + type: str + validate_certs: + type: bool + description: + - Whether to verify the TLS certificate of the Consul agent. + default: true + ca_path: + description: + - The CA bundle to use for https connections. + type: str +""" + + TOKEN = r""" +options: + token: + description: + - The token to use for authorization. + type: str +""" + + ACTIONGROUP_CONSUL = r""" +options: {} +attributes: + action_group: + description: Use C(group/community.general.consul) in C(module_defaults) to set defaults for this module. 
+ support: full + membership: + - community.general.consul +""" diff --git a/plugins/doc_fragments/dimensiondata.py b/plugins/doc_fragments/dimensiondata.py index 02435e25cc..1804c3c7ba 100644 --- a/plugins/doc_fragments/dimensiondata.py +++ b/plugins/doc_fragments/dimensiondata.py @@ -1,10 +1,9 @@ -# -*- coding: utf-8 -*- # -# Copyright: (c) 2016, Dimension Data -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# Copyright (c) 2016, Dimension Data +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations # Authors: # - Adam Friedman @@ -13,28 +12,27 @@ __metaclass__ = type class ModuleDocFragment(object): # Dimension Data doc fragment - DOCUMENTATION = r''' - + DOCUMENTATION = r""" options: region: description: - The target region. - - Regions are defined in Apache libcloud project [libcloud/common/dimensiondata.py] - - They are also listed in U(https://libcloud.readthedocs.io/en/latest/compute/drivers/dimensiondata.html) - - Note that the default value "na" stands for "North America". - - The module prepends 'dd-' to the region choice. + - Regions are defined in Apache libcloud project [libcloud/common/dimensiondata.py]. + - They are also listed in U(https://libcloud.readthedocs.io/en/latest/compute/drivers/dimensiondata.html). + - Note that the default value C(na) stands for "North America". + - The module prepends C(dd-) to the region choice. type: str default: na mcp_user: description: - The username used to authenticate to the CloudControl API. - - If not specified, will fall back to C(MCP_USER) from environment variable or C(~/.dimensiondata). + - If not specified, falls back to E(MCP_USER) from environment variable or C(~/.dimensiondata). 
type: str mcp_password: description: - The password used to authenticate to the CloudControl API. - - If not specified, will fall back to C(MCP_PASSWORD) from environment variable or C(~/.dimensiondata). - - Required if I(mcp_user) is specified. + - If not specified, falls back to E(MCP_PASSWORD) from environment variable or C(~/.dimensiondata). + - Required if O(mcp_user) is specified. type: str location: description: @@ -43,8 +41,8 @@ options: required: true validate_certs: description: - - If C(false), SSL certificates will not be validated. + - If V(false), SSL certificates are not validated. - This should only be used on private instances of the CloudControl API that use self-signed certificates. type: bool - default: yes -''' + default: true +""" diff --git a/plugins/doc_fragments/dimensiondata_wait.py b/plugins/doc_fragments/dimensiondata_wait.py index ac3deab154..40b3a1d6e8 100644 --- a/plugins/doc_fragments/dimensiondata_wait.py +++ b/plugins/doc_fragments/dimensiondata_wait.py @@ -1,10 +1,9 @@ -# -*- coding: utf-8 -*- # -# Copyright: (c) 2016, Dimension Data -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# Copyright (c) 2016, Dimension Data +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations # Authors: # - Adam Friedman @@ -13,24 +12,23 @@ __metaclass__ = type class ModuleDocFragment(object): # Dimension Data ("wait-for-completion" parameters) doc fragment - DOCUMENTATION = r''' - + DOCUMENTATION = r""" options: wait: description: - Should we wait for the task to complete before moving onto the next. type: bool - default: no + default: false wait_time: description: - The maximum amount of time (in seconds) to wait for the task to complete. - - Only applicable if I(wait=true). 
+ - Only applicable if O(wait=true). type: int default: 600 wait_poll_interval: description: - The amount of time (in seconds) to wait between checks for task completion. - - Only applicable if I(wait=true). + - Only applicable if O(wait=true). type: int default: 2 - ''' +""" diff --git a/plugins/doc_fragments/django.py b/plugins/doc_fragments/django.py new file mode 100644 index 0000000000..f62e2224d8 --- /dev/null +++ b/plugins/doc_fragments/django.py @@ -0,0 +1,80 @@ +# Copyright (c) 2024, Alexei Znamensky +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + + +class ModuleDocFragment(object): + DOCUMENTATION = r""" +options: + venv: + description: + - Use the Python interpreter from this virtual environment. + - Pass the path to the root of the virtualenv, not the C(bin/) directory nor the C(python) executable. + type: path + settings: + description: + - Specifies the settings module to use. + - The value is passed as is to the C(--settings) argument in C(django-admin). + type: str + required: true + pythonpath: + description: + - Adds the given filesystem path to the Python import search path. + - The value is passed as is to the C(--pythonpath) argument in C(django-admin). + type: path + traceback: + description: + - Provides a full stack trace in the output when a C(CommandError) is raised. + type: bool + verbosity: + description: + - Specifies the amount of notification and debug information in the output of C(django-admin). + type: int + choices: [0, 1, 2, 3] + skip_checks: + description: + - Skips running system checks prior to running the command. + type: bool + + +notes: + - The C(django-admin) command is always executed using the C(C) locale, and the option C(--no-color) is always passed. 
+seealso: + - name: django-admin and manage.py in official Django documentation + description: >- + Refer to this documentation for the builtin commands and options of C(django-admin). Please make sure that you select + the right version of Django in the version selector on that page. + link: https://docs.djangoproject.com/en/5.0/ref/django-admin/ +""" + + DATABASE = r""" +options: + database: + description: + - Specify the database to be used. + type: str + default: default +""" + + DATA = r""" +options: + excludes: + description: + - Applications or models to be excluded. + - Format must be either V(app_label) or V(app_label.ModelName). + type: list + elements: str + format: + description: + - Serialization format of the output data. + type: str + default: json + choices: [xml, json, jsonl, yaml] +notes: + - As it is now, the module is B(not idempotent). Ensuring idempotency for this case can be a bit tricky, because it would + amount to ensuring beforehand that all the data in the fixture file is already in the database, which is not a trivial feat. + Unfortunately, neither C(django loaddata) nor C(django dumpdata) have a C(--dry-run) option, so the only way to know whether + there is a change or not is to actually load or dump the data. 
+""" diff --git a/plugins/doc_fragments/emc.py b/plugins/doc_fragments/emc.py index cce76823fe..9268b7fc42 100644 --- a/plugins/doc_fragments/emc.py +++ b/plugins/doc_fragments/emc.py @@ -1,45 +1,34 @@ -# -*- coding: utf-8 -*- -# Copyright: (c) 2018, Luca Lorenzetto (@remix_tj) -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# Copyright (c) 2018, Luca Lorenzetto (@remix_tj) +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations class ModuleDocFragment(object): - DOCUMENTATION = r''' -options: - - See respective platform section for more details -requirements: - - See respective platform section for more details -notes: - - Ansible modules are available for EMC VNX. -''' - # Documentation fragment for VNX (emc_vnx) EMC_VNX = r''' options: - sp_address: - description: - - Address of the SP of target/secondary storage. - type: str - required: true - sp_user: - description: - - Username for accessing SP. - type: str - default: sysadmin - sp_password: - description: - - password for accessing SP. - type: str - default: sysadmin + sp_address: + description: + - Address of the SP of target/secondary storage. + type: str + required: true + sp_user: + description: + - Username for accessing SP. + type: str + default: sysadmin + sp_password: + description: + - password for accessing SP. + type: str + default: sysadmin requirements: - An EMC VNX Storage device. - - Ansible 2.7. - - storops (0.5.10 or greater). Install using 'pip install storops'. + - storops (0.5.10 or greater). Install using C(pip install storops). notes: - - The modules prefixed with emc_vnx are built to support the EMC VNX storage platform. + - The modules prefixed with C(emc_vnx) are built to support the EMC VNX storage platform. 
''' diff --git a/plugins/doc_fragments/gitlab.py b/plugins/doc_fragments/gitlab.py index 21e4584fe1..af7a527a81 100644 --- a/plugins/doc_fragments/gitlab.py +++ b/plugins/doc_fragments/gitlab.py @@ -1,15 +1,14 @@ -# -*- coding: utf-8 -*- +# Copyright (c) Ansible project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations class ModuleDocFragment(object): # Standard files documentation fragment - DOCUMENTATION = r''' + DOCUMENTATION = r""" requirements: - requests (Python library U(https://pypi.org/project/requests/)) @@ -28,4 +27,9 @@ options: - GitLab CI job token for logging in. type: str version_added: 4.2.0 -''' + ca_path: + description: + - The CA certificates bundle to use to verify GitLab server certificate. 
+ type: str + version_added: 8.1.0 +""" diff --git a/plugins/doc_fragments/hpe3par.py b/plugins/doc_fragments/hpe3par.py index ad445205d8..e126c63c56 100644 --- a/plugins/doc_fragments/hpe3par.py +++ b/plugins/doc_fragments/hpe3par.py @@ -1,36 +1,33 @@ -# -*- coding: utf-8 -*- -# Copyright: (c) 2018, Hewlett Packard Enterprise Development LP -# GNU General Public License v3.0+ -# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# Copyright (c) 2018, Hewlett Packard Enterprise Development LP +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations class ModuleDocFragment(object): # HPE 3PAR doc fragment - DOCUMENTATION = ''' + DOCUMENTATION = r""" options: - storage_system_ip: - description: - - The storage system IP address. - type: str - required: true - storage_system_password: - description: - - The storage system password. - type: str - required: true - storage_system_username: - description: - - The storage system user name. - type: str - required: true + storage_system_ip: + description: + - The storage system IP address. + type: str + required: true + storage_system_password: + description: + - The storage system password. + type: str + required: true + storage_system_username: + description: + - The storage system user name. + type: str + required: true requirements: - - hpe3par_sdk >= 1.0.2. Install using 'pip install hpe3par_sdk' + - hpe3par_sdk >= 1.0.2. Install using C(pip install hpe3par_sdk). - WSAPI service should be enabled on the 3PAR storage array. 
notes: - - check_mode not supported - ''' +""" diff --git a/plugins/doc_fragments/hwc.py b/plugins/doc_fragments/hwc.py index ecba2adde8..99362243ec 100644 --- a/plugins/doc_fragments/hwc.py +++ b/plugins/doc_fragments/hwc.py @@ -1,66 +1,57 @@ -# -*- coding: utf-8 -*- -# Copyright: (c) 2018, Huawei Inc. -# GNU General Public License v3.0+ -# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# Copyright (c) 2018, Huawei Inc. +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations class ModuleDocFragment(object): # HWC doc fragment. - DOCUMENTATION = ''' + DOCUMENTATION = r""" options: - identity_endpoint: - description: - - The Identity authentication URL. - type: str - required: true - user: - description: - - The user name to login with (currently only user names are - supported, and not user IDs). - type: str - required: true - password: - description: - - The password to login with. - type: str - required: true - domain: - description: - - The name of the Domain to scope to (Identity v3). - (currently only domain names are supported, and not domain IDs). - type: str - required: true - project: - description: - - The name of the Tenant (Identity v2) or Project (Identity v3). - (currently only project names are supported, and not - project IDs). - type: str - required: true - region: - description: - - The region to which the project belongs. - type: str - id: - description: - - The id of resource to be managed. - type: str + identity_endpoint: + description: + - The Identity authentication URL. + type: str + required: true + user: + description: + - The user name to login with. + - Currently only user names are supported, and not user IDs. + type: str + required: true + password: + description: + - The password to login with. 
+ type: str + required: true + domain: + description: + - The name of the Domain to scope to (Identity v3). + - Currently only domain names are supported, and not domain IDs. + type: str + required: true + project: + description: + - The name of the Tenant (Identity v2) or Project (Identity v3). + - Currently only project names are supported, and not project IDs. + type: str + required: true + region: + description: + - The region to which the project belongs. + type: str + id: + description: + - The ID of resource to be managed. + type: str notes: - - For authentication, you can set identity_endpoint using the - C(ANSIBLE_HWC_IDENTITY_ENDPOINT) env variable. - - For authentication, you can set user using the - C(ANSIBLE_HWC_USER) env variable. - - For authentication, you can set password using the C(ANSIBLE_HWC_PASSWORD) env - variable. - - For authentication, you can set domain using the C(ANSIBLE_HWC_DOMAIN) env - variable. - - For authentication, you can set project using the C(ANSIBLE_HWC_PROJECT) env - variable. - - For authentication, you can set region using the C(ANSIBLE_HWC_REGION) env variable. - - Environment variables values will only be used if the playbook values are - not set. -''' + - For authentication, you can set identity_endpoint using the E(ANSIBLE_HWC_IDENTITY_ENDPOINT) environment variable. + - For authentication, you can set user using the E(ANSIBLE_HWC_USER) environment variable. + - For authentication, you can set password using the E(ANSIBLE_HWC_PASSWORD) environment variable. + - For authentication, you can set domain using the E(ANSIBLE_HWC_DOMAIN) environment variable. + - For authentication, you can set project using the E(ANSIBLE_HWC_PROJECT) environment variable. + - For authentication, you can set region using the E(ANSIBLE_HWC_REGION) environment variable. + - Environment variables values are only used when the playbook values are not set. 
+""" diff --git a/plugins/doc_fragments/ibm_storage.py b/plugins/doc_fragments/ibm_storage.py index 0d8eb5fe22..ab61cd51c1 100644 --- a/plugins/doc_fragments/ibm_storage.py +++ b/plugins/doc_fragments/ibm_storage.py @@ -1,37 +1,34 @@ -# -*- coding: utf-8 -*- -# Copyright: (c) 2018, IBM CORPORATION +# Copyright (c) 2018, IBM CORPORATION # Author(s): Tzur Eliyahu -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations class ModuleDocFragment(object): # ibm_storage documentation fragment - DOCUMENTATION = r''' + DOCUMENTATION = r""" options: - username: - description: - - Management user on the spectrum accelerate storage system. - type: str - required: True - password: - description: - - Password for username on the spectrum accelerate storage system. - type: str - required: True - endpoints: - description: - - The hostname or management IP of Spectrum Accelerate storage system. - type: str - required: True + username: + description: + - Management user on the Spectrum Accelerate storage system. + type: str + required: true + password: + description: + - Password for username on the Spectrum Accelerate storage system. + type: str + required: true + endpoints: + description: + - The hostname or management IP of Spectrum Accelerate storage system. + type: str + required: true notes: - - This module requires pyxcli python library. - Use 'pip install pyxcli' in order to get pyxcli. + - This module requires pyxcli python library. Use C(pip install pyxcli) in order to get pyxcli. 
requirements: - - python >= 2.7 - pyxcli -''' +""" diff --git a/plugins/doc_fragments/influxdb.py b/plugins/doc_fragments/influxdb.py index a31c84cbb1..7f0688b868 100644 --- a/plugins/doc_fragments/influxdb.py +++ b/plugins/doc_fragments/influxdb.py @@ -1,82 +1,80 @@ -# -*- coding: utf-8 -*- -# Copyright: (c) 2017, Ansible Project -# Copyright: (c) 2017, Abhijeet Kasurde (akasurde@redhat.com) -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# Copyright (c) 2017, Ansible Project +# Copyright (c) 2017, Abhijeet Kasurde (akasurde@redhat.com) +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations class ModuleDocFragment(object): # Parameters for influxdb modules - DOCUMENTATION = r''' + DOCUMENTATION = r""" options: hostname: description: - - The hostname or IP address on which InfluxDB server is listening. - - Since Ansible 2.5, defaulted to localhost. + - The hostname or IP address on which InfluxDB server is listening. type: str default: localhost username: description: - - Username that will be used to authenticate against InfluxDB server. - - Alias C(login_username) added in Ansible 2.5. + - Username that is used to authenticate against InfluxDB server. type: str default: root - aliases: [ login_username ] + aliases: [login_username] password: description: - - Password that will be used to authenticate against InfluxDB server. - - Alias C(login_password) added in Ansible 2.5. + - Password that is used to authenticate against InfluxDB server. type: str default: root - aliases: [ login_password ] + aliases: [login_password] port: description: - - The port on which InfluxDB server is listening + - The port on which InfluxDB server is listening. 
type: int default: 8086 path: description: - - The path on which InfluxDB server is accessible - - Only available when using python-influxdb >= 5.1.0 + - The path on which InfluxDB server is accessible. + - Only available when using python-influxdb >= 5.1.0. type: str + default: '' version_added: '0.2.0' validate_certs: description: - - If set to C(no), the SSL certificates will not be validated. - - This should only set to C(no) used on personally controlled sites using self-signed certificates. + - If set to V(false), the SSL certificates are not validated. + - This should only set to V(false) used on personally controlled sites using self-signed certificates. type: bool - default: yes + default: true ssl: description: - - Use https instead of http to connect to InfluxDB server. + - Use https instead of http to connect to InfluxDB server. type: bool default: false timeout: description: - - Number of seconds Requests will wait for client to establish a connection. + - Number of seconds Requests waits for client to establish a connection. type: int retries: description: - - Number of retries client will try before aborting. - - C(0) indicates try until success. - - Only available when using python-influxdb >= 4.1.0 + - Number of retries client performs before aborting. + - V(0) indicates try until success. + - Only available when using C(python-influxdb) >= 4.1.0. type: int default: 3 use_udp: description: - - Use UDP to connect to InfluxDB server. + - Use UDP to connect to InfluxDB server. type: bool default: false udp_port: description: - - UDP port to connect to InfluxDB server. + - UDP port to connect to InfluxDB server. type: int default: 4444 proxies: description: - - HTTP(S) proxy to use for Requests to connect to InfluxDB server. + - HTTP(S) proxy to use for Requests to connect to InfluxDB server. 
type: dict -''' + default: {} +""" diff --git a/plugins/doc_fragments/ipa.py b/plugins/doc_fragments/ipa.py index 47bcee60ba..0b740ae8ed 100644 --- a/plugins/doc_fragments/ipa.py +++ b/plugins/doc_fragments/ipa.py @@ -1,75 +1,83 @@ -# -*- coding: utf-8 -*- -# Copyright: (c) 2017-18, Ansible Project -# Copyright: (c) 2017-18, Abhijeet Kasurde (akasurde@redhat.com) -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# Copyright (c) 2017-18, Ansible Project +# Copyright (c) 2017-18, Abhijeet Kasurde (akasurde@redhat.com) +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations class ModuleDocFragment(object): # Parameters for FreeIPA/IPA modules - DOCUMENTATION = r''' + DOCUMENTATION = r""" options: ipa_port: description: - - Port of FreeIPA / IPA server. - - If the value is not specified in the task, the value of environment variable C(IPA_PORT) will be used instead. - - If both the environment variable C(IPA_PORT) and the value are not specified in the task, then default value is set. - - Environment variable fallback mechanism is added in Ansible 2.5. + - Port of FreeIPA / IPA server. + - If the value is not specified in the task, the value of environment variable E(IPA_PORT) is used instead. + - If both the environment variable E(IPA_PORT) and the value are not specified in the task, then default value is set. type: int default: 443 ipa_host: description: - - IP or hostname of IPA server. - - If the value is not specified in the task, the value of environment variable C(IPA_HOST) will be used instead. - - If both the environment variable C(IPA_HOST) and the value are not specified in the task, then DNS will be used to try to discover the FreeIPA server. 
- - The relevant entry needed in FreeIPA is the 'ipa-ca' entry. - - If neither the DNS entry, nor the environment C(IPA_HOST), nor the value are available in the task, then the default value will be used. - - Environment variable fallback mechanism is added in Ansible 2.5. + - IP or hostname of IPA server. + - If the value is not specified in the task, the value of environment variable E(IPA_HOST) is used instead. + - If both the environment variable E(IPA_HOST) and the value are not specified in the task, then DNS is used to try + to discover the FreeIPA server. + - The relevant entry needed in FreeIPA is the C(ipa-ca) entry. + - If neither the DNS entry, nor the environment E(IPA_HOST), nor the value are available in the task, then the default + value is used. type: str default: ipa.example.com ipa_user: description: - - Administrative account used on IPA server. - - If the value is not specified in the task, the value of environment variable C(IPA_USER) will be used instead. - - If both the environment variable C(IPA_USER) and the value are not specified in the task, then default value is set. - - Environment variable fallback mechanism is added in Ansible 2.5. + - Administrative account used on IPA server. + - If the value is not specified in the task, the value of environment variable E(IPA_USER) is used instead. + - If both the environment variable E(IPA_USER) and the value are not specified in the task, then default value is set. type: str default: admin ipa_pass: description: - - Password of administrative user. - - If the value is not specified in the task, the value of environment variable C(IPA_PASS) will be used instead. - - Note that if the 'urllib_gssapi' library is available, it is possible to use GSSAPI to authenticate to FreeIPA. - - If the environment variable C(KRB5CCNAME) is available, the module will use this kerberos credentials cache to authenticate to the FreeIPA server. 
- - If the environment variable C(KRB5_CLIENT_KTNAME) is available, and C(KRB5CCNAME) is not; the module will use this kerberos keytab to authenticate. - - If GSSAPI is not available, the usage of 'ipa_pass' is required. - - Environment variable fallback mechanism is added in Ansible 2.5. + - Password of administrative user. + - If the value is not specified in the task, the value of environment variable E(IPA_PASS) is used instead. + - Note that if the C(urllib_gssapi) library is available, it is possible to use GSSAPI to authenticate to FreeIPA. + - If the environment variable E(KRB5CCNAME) is available, the module uses this Kerberos credentials cache to authenticate + to the FreeIPA server. + - If the environment variable E(KRB5_CLIENT_KTNAME) is available, and E(KRB5CCNAME) is not; the module uses this Kerberos + keytab to authenticate. + - If GSSAPI is not available, the usage of O(ipa_pass) is required. type: str ipa_prot: description: - - Protocol used by IPA server. - - If the value is not specified in the task, the value of environment variable C(IPA_PROT) will be used instead. - - If both the environment variable C(IPA_PROT) and the value are not specified in the task, then default value is set. - - Environment variable fallback mechanism is added in Ansible 2.5. + - Protocol used by IPA server. + - If the value is not specified in the task, the value of environment variable E(IPA_PROT) is used instead. + - If both the environment variable E(IPA_PROT) and the value are not specified in the task, then default value is set. type: str - choices: [ http, https ] + choices: [http, https] default: https validate_certs: description: - - This only applies if C(ipa_prot) is I(https). - - If set to C(no), the SSL certificates will not be validated. - - This should only set to C(no) used on personally controlled sites using self-signed certificates. + - This only applies if O(ipa_prot) is V(https). + - If set to V(false), the SSL certificates are not validated. 
+ - This should only set to V(false) used on personally controlled sites using self-signed certificates. type: bool - default: yes + default: true ipa_timeout: description: - - Specifies idle timeout (in seconds) for the connection. - - For bulk operations, you may want to increase this in order to avoid timeout from IPA server. - - If the value is not specified in the task, the value of environment variable C(IPA_TIMEOUT) will be used instead. - - If both the environment variable C(IPA_TIMEOUT) and the value are not specified in the task, then default value is set. + - Specifies idle timeout (in seconds) for the connection. + - For bulk operations, you may want to increase this in order to avoid timeout from IPA server. + - If the value is not specified in the task, the value of environment variable E(IPA_TIMEOUT) is used instead. + - If both the environment variable E(IPA_TIMEOUT) and the value are not specified in the task, then default value is + set. type: int default: 10 -''' +""" + + CONNECTION_NOTES = r""" +options: {} +notes: + - This module uses JSON-RPC over HTTP(S) to communicate with the FreeIPA server. + If you need to enroll the managed node into FreeIPA realm, you might want to consider using the collection + L(freeipa.ansible_freeipa, https://galaxy.ansible.com/ui/repo/published/freeipa/ansible_freeipa/), but shell access to one + node from the realm is required to manage the deployment. 
+""" diff --git a/plugins/doc_fragments/keycloak.py b/plugins/doc_fragments/keycloak.py index fab9a6e894..2ec693eb99 100644 --- a/plugins/doc_fragments/keycloak.py +++ b/plugins/doc_fragments/keycloak.py @@ -1,71 +1,93 @@ -# -*- coding: utf-8 -*- -# Copyright: (c) 2017, Eike Frost -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# Copyright (c) 2017, Eike Frost +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations class ModuleDocFragment(object): # Standard documentation fragment - DOCUMENTATION = r''' + DOCUMENTATION = r""" options: - auth_keycloak_url: - description: - - URL to the Keycloak instance. - type: str - required: true - aliases: - - url + auth_keycloak_url: + description: + - URL to the Keycloak instance. + type: str + required: true + aliases: + - url - auth_client_id: - description: - - OpenID Connect I(client_id) to authenticate to the API with. - type: str - default: admin-cli + auth_client_id: + description: + - OpenID Connect C(client_id) to authenticate to the API with. + type: str + default: admin-cli - auth_realm: - description: - - Keycloak realm name to authenticate to for API access. - type: str + auth_realm: + description: + - Keycloak realm name to authenticate to for API access. + type: str - auth_client_secret: - description: - - Client Secret to use in conjunction with I(auth_client_id) (if required). - type: str + auth_client_secret: + description: + - Client Secret to use in conjunction with O(auth_client_id) (if required). + type: str - auth_username: - description: - - Username to authenticate for API access with. - type: str - aliases: - - username + auth_username: + description: + - Username to authenticate for API access with. 
+ type: str + aliases: + - username - auth_password: - description: - - Password to authenticate for API access with. - type: str - aliases: - - password + auth_password: + description: + - Password to authenticate for API access with. + type: str + aliases: + - password - token: - description: - - Authentication token for Keycloak API. - type: str - version_added: 3.0.0 + token: + description: + - Authentication token for Keycloak API. + type: str + version_added: 3.0.0 - validate_certs: - description: - - Verify TLS certificates (do not disable this in production). - type: bool - default: yes + refresh_token: + description: + - Authentication refresh token for Keycloak API. + type: str + version_added: 10.3.0 - connection_timeout: - description: - - Controls the HTTP connections timeout period (in seconds) to Keycloak API. - type: int - default: 10 - version_added: 4.5.0 -''' + validate_certs: + description: + - Verify TLS certificates (do not disable this in production). + type: bool + default: true + + connection_timeout: + description: + - Controls the HTTP connections timeout period (in seconds) to Keycloak API. + type: int + default: 10 + version_added: 4.5.0 + + http_agent: + description: + - Configures the HTTP User-Agent header. + type: str + default: Ansible + version_added: 5.4.0 +""" + + ACTIONGROUP_KEYCLOAK = r""" +options: {} +attributes: + action_group: + description: Use C(group/community.general.keycloak) in C(module_defaults) to set defaults for this module. 
+ support: full + membership: + - community.general.keycloak +""" diff --git a/plugins/doc_fragments/ldap.py b/plugins/doc_fragments/ldap.py index e79fe3a681..d787bfd65d 100644 --- a/plugins/doc_fragments/ldap.py +++ b/plugins/doc_fragments/ldap.py @@ -1,27 +1,49 @@ -# -*- coding: utf-8 -*- -# Copyright: (c) 2016, Peter Sagerson -# Copyright: (c) 2016, Jiri Tyr -# Copyright: (c) 2017-2018 Keller Fuchs (@KellerFuchs) -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# Copyright (c) 2016, Peter Sagerson +# Copyright (c) 2016, Jiri Tyr +# Copyright (c) 2017-2018 Keller Fuchs (@KellerFuchs) +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations class ModuleDocFragment(object): # Standard LDAP documentation fragment - DOCUMENTATION = r''' + DOCUMENTATION = r""" +notes: + - The default authentication settings attempts to use a SASL EXTERNAL bind over a UNIX domain socket. This works well with + the default Ubuntu install for example, which includes a C(cn=peercred,cn=external,cn=auth) ACL rule allowing root to + modify the server configuration. If you need to use a simple bind to access your server, pass the credentials in O(bind_dn) + and O(bind_pw). options: bind_dn: description: - - A DN to bind with. If this is omitted, we'll try a SASL bind with the EXTERNAL mechanism as default. - - If this is blank, we'll use an anonymous bind. + - A DN to bind with. Try to use a SASL bind with the EXTERNAL mechanism as default when this parameter is omitted. + - Use an anonymous bind if the parameter is blank. type: str bind_pw: description: - - The password to use with I(bind_dn). + - The password to use with O(bind_dn). 
type: str + default: '' + ca_path: + description: + - Set the path to PEM file with CA certs. + type: path + version_added: "6.5.0" + client_cert: + type: path + description: + - PEM formatted certificate chain file to be used for SSL client authentication. + - Required if O(client_key) is defined. + version_added: "7.1.0" + client_key: + type: path + description: + - PEM formatted file that contains your private key to be used for SSL client authentication. + - Required if O(client_cert) is defined. + version_added: "7.1.0" dn: required: true description: @@ -33,12 +55,13 @@ options: type: str description: - Set the referrals chasing behavior. - - C(anonymous) follow referrals anonymously. This is the default behavior. - - C(disabled) disable referrals chasing. This sets C(OPT_REFERRALS) to off. + - V(anonymous) follow referrals anonymously. This is the default behavior. + - V(disabled) disable referrals chasing. This sets C(OPT_REFERRALS) to off. version_added: 2.0.0 server_uri: description: - - The I(server_uri) parameter may be a comma- or whitespace-separated list of URIs containing only the schema, the host, and the port fields. + - The O(server_uri) parameter may be a comma- or whitespace-separated list of URIs containing only the schema, the host, + and the port fields. - The default value lets the underlying LDAP client library look for a UNIX domain socket in its default location. - Note that when using multiple URIs you cannot determine to which URI your client gets connected. - For URIs containing additional fields, particularly when using commas, behavior is undefined. @@ -46,21 +69,30 @@ options: default: ldapi:/// start_tls: description: - - If true, we'll use the START_TLS LDAP extension. + - Use the START_TLS LDAP extension if set to V(true). type: bool - default: no + default: false validate_certs: description: - - If set to C(no), SSL certificates will not be validated. + - If set to V(false), SSL certificates are not validated. 
- This should only be used on sites using self-signed certificates. type: bool - default: yes + default: true sasl_class: description: - The class to use for SASL authentication. - - possible choices are C(external), C(gssapi). type: str choices: ['external', 'gssapi'] default: external version_added: "2.0.0" -''' + xorder_discovery: + description: + - Set the behavior on how to process Xordered DNs. + - V(enable) performs a C(ONELEVEL) search below the superior RDN to find the matching DN. + - V(disable) always uses the DN unmodified (as passed by the O(dn) parameter). + - V(auto) only performs a search if the first RDN does not contain an index number (C({x})). + type: str + choices: ['enable', 'auto', 'disable'] + default: auto + version_added: "6.4.0" +""" diff --git a/plugins/doc_fragments/lxca_common.py b/plugins/doc_fragments/lxca_common.py index c55eca16ac..72bc3b7054 100644 --- a/plugins/doc_fragments/lxca_common.py +++ b/plugins/doc_fragments/lxca_common.py @@ -1,15 +1,14 @@ -# -*- coding: utf-8 -*- # Copyright (C) 2017 Lenovo, Inc. -# Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause) +# Simplified BSD License (see LICENSES/BSD-2-Clause.txt or https://opensource.org/licenses/BSD-2-Clause) +# SPDX-License-Identifier: BSD-2-Clause -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations class ModuleDocFragment(object): # Standard Pylxca documentation fragment - DOCUMENTATION = r''' + DOCUMENTATION = r""" author: - Naval Patel (@navalkp) - Prashant Bhosale (@prabhosa) @@ -17,19 +16,19 @@ author: options: login_user: description: - - The username for use in HTTP basic authentication. + - The username for use in HTTP basic authentication. type: str required: true login_password: description: - - The password for use in HTTP basic authentication. + - The password for use in HTTP basic authentication. 
type: str required: true auth_url: description: - - lxca https full web address + - Lxca HTTPS full web address. type: str required: true @@ -37,7 +36,6 @@ requirements: - pylxca notes: - - Additional detail about pylxca can be found at U(https://github.com/lenovo/pylxca) - - Playbooks using these modules can be found at U(https://github.com/lenovo/ansible.lenovo-lxca) - - Check mode is not supported. -''' + - Additional detail about pylxca can be found at U(https://github.com/lenovo/pylxca). + - Playbooks using these modules can be found at U(https://github.com/lenovo/ansible.lenovo-lxca). +""" diff --git a/plugins/doc_fragments/manageiq.py b/plugins/doc_fragments/manageiq.py index b610b512b7..e7351e4f5e 100644 --- a/plugins/doc_fragments/manageiq.py +++ b/plugins/doc_fragments/manageiq.py @@ -1,16 +1,15 @@ -# -*- coding: utf-8 -*- -# Copyright: (c) 2017, Daniel Korn -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# Copyright (c) 2017, Daniel Korn +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations class ModuleDocFragment(object): # Standard ManageIQ documentation fragment - DOCUMENTATION = r''' + DOCUMENTATION = r""" options: manageiq_connection: description: @@ -20,33 +19,34 @@ options: suboptions: url: description: - - ManageIQ environment url. C(MIQ_URL) env var if set. otherwise, it is required to pass it. + - ManageIQ environment URL. E(MIQ_URL) environment variable if set. Otherwise, it is required to pass it. type: str required: false username: description: - - ManageIQ username. C(MIQ_USERNAME) env var if set. otherwise, required if no token is passed in. + - ManageIQ username. E(MIQ_USERNAME) environment variable if set. Otherwise, required if no token is passed in. 
type: str password: description: - - ManageIQ password. C(MIQ_PASSWORD) env var if set. otherwise, required if no token is passed in. + - ManageIQ password. E(MIQ_PASSWORD) environment variable if set. Otherwise, required if no token is passed in. type: str token: description: - - ManageIQ token. C(MIQ_TOKEN) env var if set. otherwise, required if no username or password is passed in. + - ManageIQ token. E(MIQ_TOKEN) environment variable if set. Otherwise, required if no username or password is passed + in. type: str validate_certs: description: - - Whether SSL certificates should be verified for HTTPS requests. defaults to True. + - Whether SSL certificates should be verified for HTTPS requests. type: bool - default: yes - aliases: [ verify_ssl ] + default: true + aliases: [verify_ssl] ca_cert: description: - - The path to a CA bundle file or directory with certificates. defaults to None. + - The path to a CA bundle file or directory with certificates. type: str - aliases: [ ca_bundle_path ] + aliases: [ca_bundle_path] requirements: - 'manageiq-client U(https://github.com/ManageIQ/manageiq-api-client-python/)' -''' +""" diff --git a/plugins/doc_fragments/nomad.py b/plugins/doc_fragments/nomad.py index 3845c54120..37485ef9a7 100644 --- a/plugins/doc_fragments/nomad.py +++ b/plugins/doc_fragments/nomad.py @@ -1,51 +1,56 @@ -# -*- coding: utf-8 -*- -# Copyright: (c) 2020 FERREIRA Christophe -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# Copyright (c) 2020 FERREIRA Christophe +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations class ModuleDocFragment(object): # Standard files documentation fragment - DOCUMENTATION = r''' + DOCUMENTATION = r""" options: - host: - description: - - FQDN of 
Nomad server. - required: true - type: str - use_ssl: - description: - - Use TLS/SSL connection. - type: bool - default: true - timeout: - description: - - Timeout (in seconds) for the request to Nomad. - type: int - default: 5 - validate_certs: - description: - - Enable TLS/SSL certificate validation. - type: bool - default: true - client_cert: - description: - - Path of certificate for TLS/SSL. - type: path - client_key: - description: - - Path of certificate's private key for TLS/SSL. - type: path - namespace: - description: - - Namespace for Nomad. - type: str - token: - description: - - ACL token for authentification. - type: str -''' + host: + description: + - FQDN of Nomad server. + required: true + type: str + port: + description: + - Port of Nomad server. + type: int + default: 4646 + version_added: 8.0.0 + use_ssl: + description: + - Use TLS/SSL connection. + type: bool + default: true + timeout: + description: + - Timeout (in seconds) for the request to Nomad. + type: int + default: 5 + validate_certs: + description: + - Enable TLS/SSL certificate validation. + type: bool + default: true + client_cert: + description: + - Path of certificate for TLS/SSL. + type: path + client_key: + description: + - Path of certificate's private key for TLS/SSL. + type: path + namespace: + description: + - Namespace for Nomad. + type: str + token: + description: + - ACL token for authentication. 
+ type: str +""" diff --git a/plugins/doc_fragments/onepassword.py b/plugins/doc_fragments/onepassword.py new file mode 100644 index 0000000000..7a2c7566c3 --- /dev/null +++ b/plugins/doc_fragments/onepassword.py @@ -0,0 +1,77 @@ + +# Copyright (c) 2023, Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + + +class ModuleDocFragment(object): + DOCUMENTATION = r""" +requirements: + - See U(https://support.1password.com/command-line/) +options: + master_password: + description: The password used to unlock the specified vault. + aliases: ['vault_password'] + type: str + section: + description: Item section containing the field to retrieve (case-insensitive). If absent, returns first match from any + section. + domain: + description: Domain of 1Password. + default: '1password.com' + type: str + subdomain: + description: The 1Password subdomain to authenticate against. + type: str + account_id: + description: The account ID to target. + type: str + username: + description: The username used to sign in. + type: str + secret_key: + description: The secret key used when performing an initial sign in. + type: str + service_account_token: + description: + - The access key for a service account. + - Only works with 1Password CLI version 2 or later. + type: str + vault: + description: Vault containing the item to retrieve (case-insensitive). If absent, searches all vaults. + type: str + connect_host: + description: The host for 1Password Connect. Must be used in combination with O(connect_token). + type: str + env: + - name: OP_CONNECT_HOST + version_added: 8.1.0 + connect_token: + description: The token for 1Password Connect. Must be used in combination with O(connect_host). 
+ type: str + env: + - name: OP_CONNECT_TOKEN + version_added: 8.1.0 +""" + + LOOKUP = r""" +options: + service_account_token: + env: + - name: OP_SERVICE_ACCOUNT_TOKEN + version_added: 8.2.0 +notes: + - This lookup uses an existing 1Password session if one exists. If not, and you have already performed an initial sign in + (meaning C(~/.op/config), C(~/.config/op/config) or C(~/.config/.op/config) exists), then only the O(master_password) + is required. You may optionally specify O(subdomain) in this scenario, otherwise the last used subdomain is used by C(op). + - This lookup can perform an initial login by providing O(subdomain), O(username), O(secret_key), and O(master_password). + - Can target a specific account by providing the O(account_id). + - Due to the B(very) sensitive nature of these credentials, it is B(highly) recommended that you only pass in the minimal + credentials needed at any given time. Also, store these credentials in an Ansible Vault using a key that is equal to or + greater in strength to the 1Password master password. + - This lookup stores potentially sensitive data from 1Password as Ansible facts. Facts are subject to caching if enabled, + which means this data could be stored in clear text on disk or in a database. + - Tested with C(op) version 2.7.2. 
+""" diff --git a/plugins/doc_fragments/oneview.py b/plugins/doc_fragments/oneview.py index 0d385e99aa..9e64f02e1a 100644 --- a/plugins/doc_fragments/oneview.py +++ b/plugins/doc_fragments/oneview.py @@ -1,79 +1,75 @@ -# -*- coding: utf-8 -*- # -# Copyright: (c) 2016-2017, Hewlett Packard Enterprise Development LP -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# Copyright (c) 2016-2017, Hewlett Packard Enterprise Development LP +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations class ModuleDocFragment(object): # OneView doc fragment - DOCUMENTATION = r''' + DOCUMENTATION = r""" options: - config: - description: - - Path to a .json configuration file containing the OneView client configuration. - The configuration file is optional and when used should be present in the host running the ansible commands. - If the file path is not provided, the configuration will be loaded from environment variables. - For links to example configuration files or how to use the environment variables verify the notes section. - type: path - api_version: - description: - - OneView API Version. - type: int - image_streamer_hostname: - description: - - IP address or hostname for the HPE Image Streamer REST API. - type: str - hostname: - description: - - IP address or hostname for the appliance. - type: str - username: - description: - - Username for API authentication. - type: str - password: - description: - - Password for API authentication. - type: str + config: + description: + - Path to a JSON configuration file containing the OneView client configuration. The configuration file is optional + and when used should be present in the host running the ansible commands. 
If the file path is not provided, the configuration + is loaded from environment variables. For links to example configuration files or how to use the environment variables + verify the notes section. + type: path + api_version: + description: + - OneView API Version. + type: int + image_streamer_hostname: + description: + - IP address or hostname for the HPE Image Streamer REST API. + type: str + hostname: + description: + - IP address or hostname for the appliance. + type: str + username: + description: + - Username for API authentication. + type: str + password: + description: + - Password for API authentication. + type: str requirements: - - python >= 2.7.9 + - Python >= 2.7.9 notes: - - "A sample configuration file for the config parameter can be found at: - U(https://github.com/HewlettPackard/oneview-ansible/blob/master/examples/oneview_config-rename.json)" - - "Check how to use environment variables for configuration at: - U(https://github.com/HewlettPackard/oneview-ansible#environment-variables)" - - "Additional Playbooks for the HPE OneView Ansible modules can be found at: - U(https://github.com/HewlettPackard/oneview-ansible/tree/master/examples)" - - "The OneView API version used will directly affect returned and expected fields in resources. - Information on setting the desired API version and can be found at: - U(https://github.com/HewlettPackard/oneview-ansible#setting-your-oneview-version)" - ''' + - 'A sample configuration file for the config parameter can be found at: + U(https://github.com/HewlettPackard/oneview-ansible/blob/master/examples/oneview_config-rename.json).' + - 'Check how to use environment variables for configuration at: U(https://github.com/HewlettPackard/oneview-ansible#environment-variables).' + - 'Additional Playbooks for the HPE OneView Ansible modules can be found at: U(https://github.com/HewlettPackard/oneview-ansible/tree/master/examples).' 
+ - 'The OneView API version used directly affects returned and expected fields in resources. Information on setting the desired + API version and can be found at: U(https://github.com/HewlettPackard/oneview-ansible#setting-your-oneview-version).' +""" - VALIDATEETAG = r''' + VALIDATEETAG = r""" options: - validate_etag: - description: - - When the ETag Validation is enabled, the request will be conditionally processed only if the current ETag - for the resource matches the ETag provided in the data. - type: bool - default: yes -''' + validate_etag: + description: + - When the ETag Validation is enabled, the request is conditionally processed only if the current ETag for the resource + matches the ETag provided in the data. + type: bool + default: true +""" - FACTSPARAMS = r''' + FACTSPARAMS = r""" options: - params: - description: - - List of params to delimit, filter and sort the list of resources. - - "params allowed: - - C(start): The first item to return, using 0-based indexing. - - C(count): The number of resources to return. - - C(filter): A general filter/query string to narrow the list of items returned. - - C(sort): The sort order of the returned data set." - type: dict -''' + params: + description: + - List of parameters to delimit, filter and sort the list of resources. + - 'Parameter keys allowed are:' + - 'V(start): The first item to return, using 0-based indexing.' + - 'V(count): The number of resources to return.' + - 'V(filter): A general filter/query string to narrow the list of items returned.' + - 'V(sort): The sort order of the returned data set.' 
+ type: dict +""" diff --git a/plugins/doc_fragments/online.py b/plugins/doc_fragments/online.py index 4ad35bab20..c2b130e7a0 100644 --- a/plugins/doc_fragments/online.py +++ b/plugins/doc_fragments/online.py @@ -1,44 +1,41 @@ -# -*- coding: utf-8 -*- +# Copyright (c) Ansible project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations class ModuleDocFragment(object): # Standard documentation fragment - DOCUMENTATION = r''' + DOCUMENTATION = r""" options: api_token: description: - Online OAuth token. type: str required: true - aliases: [ oauth_token ] + aliases: [oauth_token] api_url: description: - - Online API URL + - Online API URL. type: str default: 'https://api.online.net' - aliases: [ base_url ] + aliases: [base_url] api_timeout: description: - HTTP timeout to Online API in seconds. type: int default: 30 - aliases: [ timeout ] + aliases: [timeout] validate_certs: description: - Validate SSL certs of the Online API. type: bool - default: yes + default: true notes: - - Also see the API documentation on U(https://console.online.net/en/api/) - - If C(api_token) is not set within the module, the following - environment variables can be used in decreasing order of precedence - C(ONLINE_TOKEN), C(ONLINE_API_KEY), C(ONLINE_OAUTH_TOKEN), C(ONLINE_API_TOKEN) - - If one wants to use a different C(api_url) one can also set the C(ONLINE_API_URL) - environment variable. -''' + - Also see the API documentation on U(https://console.online.net/en/api/). 
+ - If O(api_token) is not set within the module, the following environment variables can be used in decreasing order of precedence + E(ONLINE_TOKEN), E(ONLINE_API_KEY), E(ONLINE_OAUTH_TOKEN), E(ONLINE_API_TOKEN). + - If one wants to use a different O(api_url) one can also set the E(ONLINE_API_URL) environment variable. +""" diff --git a/plugins/doc_fragments/opennebula.py b/plugins/doc_fragments/opennebula.py index 08b614a6fc..72ccf7d70d 100644 --- a/plugins/doc_fragments/opennebula.py +++ b/plugins/doc_fragments/opennebula.py @@ -1,44 +1,43 @@ -# -*- coding: utf-8 -*- -# Copyright: (c) 2018, www.privaz.io Valletech AB -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# Copyright (c) 2018, www.privaz.io Valletech AB +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations class ModuleDocFragment(object): # OpenNebula common documentation - DOCUMENTATION = r''' + DOCUMENTATION = r""" options: - api_url: - description: - - The ENDPOINT URL of the XMLRPC server. - - If not specified then the value of the ONE_URL environment variable, if any, is used. - type: str - aliases: - - api_endpoint - api_username: - description: - - The name of the user for XMLRPC authentication. - - If not specified then the value of the ONE_USERNAME environment variable, if any, is used. - type: str - api_password: - description: - - The password or token for XMLRPC authentication. - - If not specified then the value of the ONE_PASSWORD environment variable, if any, is used. - type: str - aliases: - - api_token - validate_certs: - description: - - Whether to validate the SSL certificates or not. - - This parameter is ignored if PYTHONHTTPSVERIFY environment variable is used. 
- type: bool - default: yes - wait_timeout: - description: - - Time to wait for the desired state to be reached before timeout, in seconds. - type: int - default: 300 -''' + api_url: + description: + - The ENDPOINT URL of the XMLRPC server. + - If not specified then the value of the E(ONE_URL) environment variable, if any, is used. + type: str + aliases: + - api_endpoint + api_username: + description: + - The name of the user for XMLRPC authentication. + - If not specified then the value of the E(ONE_USERNAME) environment variable, if any, is used. + type: str + api_password: + description: + - The password or token for XMLRPC authentication. + - If not specified then the value of the E(ONE_PASSWORD) environment variable, if any, is used. + type: str + aliases: + - api_token + validate_certs: + description: + - Whether to validate the TLS/SSL certificates or not. + - This parameter is ignored if E(PYTHONHTTPSVERIFY) environment variable is used. + type: bool + default: true + wait_timeout: + description: + - Time to wait for the desired state to be reached before timeout, in seconds. 
+ type: int + default: 300 +""" diff --git a/plugins/doc_fragments/openswitch.py b/plugins/doc_fragments/openswitch.py index 7ab7c15540..aac90e020f 100644 --- a/plugins/doc_fragments/openswitch.py +++ b/plugins/doc_fragments/openswitch.py @@ -1,84 +1,69 @@ -# -*- coding: utf-8 -*- -# Copyright: (c) 2015, Peter Sprygada -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# Copyright (c) 2015, Peter Sprygada +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations class ModuleDocFragment(object): # Standard files documentation fragment - DOCUMENTATION = r''' + DOCUMENTATION = r""" options: host: description: - - Specifies the DNS host name or address for connecting to the remote - device over the specified transport. The value of host is used as - the destination address for the transport. Note this argument - does not affect the SSH argument. + - Specifies the DNS host name or address for connecting to the remote device over the specified transport. The value + of host is used as the destination address for the transport. Note this argument does not affect the SSH argument. type: str port: description: - - Specifies the port to use when building the connection to the remote - device. This value applies to either I(cli) or I(rest). The port - value will default to the appropriate transport common port if - none is provided in the task. (cli=22, http=80, https=443). Note - this argument does not affect the SSH transport. + - Specifies the port to use when building the connection to the remote device. This value applies to either O(transport=cli) + or O(transport=rest). The port value defaults to the appropriate transport common port if none is provided in the + task. (cli=22, http=80, https=443). 
Note this argument does not affect the SSH transport. type: int default: 0 (use common port) username: description: - - Configures the username to use to authenticate the connection to - the remote device. This value is used to authenticate - either the CLI login or the eAPI authentication depending on which - transport is used. Note this argument does not affect the SSH - transport. If the value is not specified in the task, the value of - environment variable C(ANSIBLE_NET_USERNAME) will be used instead. + - Configures the username to use to authenticate the connection to the remote device. This value is used to authenticate + either the CLI login or the eAPI authentication depending on which transport is used. Note this argument does not + affect the SSH transport. If the value is not specified in the task, the value of environment variable E(ANSIBLE_NET_USERNAME) + is used instead. type: str password: description: - - Specifies the password to use to authenticate the connection to - the remote device. This is a common argument used for either I(cli) - or I(rest) transports. Note this argument does not affect the SSH - transport. If the value is not specified in the task, the value of - environment variable C(ANSIBLE_NET_PASSWORD) will be used instead. + - Specifies the password to use to authenticate the connection to the remote device. This is a common argument used + for either O(transport=cli) or O(transport=rest). Note this argument does not affect the SSH transport. If the value + is not specified in the task, the value of environment variable E(ANSIBLE_NET_PASSWORD) is used instead. type: str timeout: description: - - Specifies the timeout in seconds for communicating with the network device - for either connecting or sending commands. If the timeout is - exceeded before the operation is completed, the module will error. + - Specifies the timeout in seconds for communicating with the network device for either connecting or sending commands. 
+ If the timeout is exceeded before the operation is completed, the module fails. type: int default: 10 ssh_keyfile: description: - - Specifies the SSH key to use to authenticate the connection to - the remote device. This argument is only used for the I(cli) - transports. If the value is not specified in the task, the value of - environment variable C(ANSIBLE_NET_SSH_KEYFILE) will be used instead. + - Specifies the SSH key to use to authenticate the connection to the remote device. This argument is only used for O(transport=cli). + If the value is not specified in the task, the value of environment variable E(ANSIBLE_NET_SSH_KEYFILE) is used instead. type: path transport: description: - - Configures the transport connection to use when connecting to the - remote device. The transport argument supports connectivity to the - device over ssh, cli or REST. + - Configures the transport connection to use when connecting to the remote device. The transport argument supports connectivity + to the device over SSH (V(ssh)), CLI (V(cli)), or REST (V(rest)). required: true type: str - choices: [ cli, rest, ssh ] + choices: [cli, rest, ssh] default: ssh use_ssl: description: - - Configures the I(transport) to use SSL if set to C(yes) only when the - I(transport) argument is configured as rest. If the transport - argument is not I(rest), this value is ignored. + - Configures the O(transport) to use SSL if set to V(true) only when the O(transport) argument is configured as rest. + If the transport argument is not V(rest), this value is ignored. type: bool - default: yes + default: true provider: description: - - Convenience method that allows all I(openswitch) arguments to be passed as - a dict object. All constraints (required, choices, etc) must be - met either by individual arguments or values in this dict. + - Convenience method that allows all C(openswitch) arguments to be passed as a dict object. 
All constraints (required, + choices, and so on) must be met either by individual arguments or values in this dict. type: dict -''' +""" diff --git a/plugins/doc_fragments/oracle.py b/plugins/doc_fragments/oracle.py index 94999c04ec..05120f7aa3 100644 --- a/plugins/doc_fragments/oracle.py +++ b/plugins/doc_fragments/oracle.py @@ -1,83 +1,80 @@ -# -*- coding: utf-8 -*- # Copyright (c) 2018, Oracle and/or its affiliates. -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations + +# +# DEPRECATED +# +# This fragment is deprecated and will be removed in community.general 13.0.0 +# class ModuleDocFragment(object): - DOCUMENTATION = """ - requirements: - - "python >= 2.7" - - Python SDK for Oracle Cloud Infrastructure U(https://oracle-cloud-infrastructure-python-sdk.readthedocs.io) - notes: - - For OCI python sdk configuration, please refer to - U(https://oracle-cloud-infrastructure-python-sdk.readthedocs.io/en/latest/configuration.html) - options: - config_file_location: - description: - - Path to configuration file. If not set then the value of the OCI_CONFIG_FILE environment variable, - if any, is used. Otherwise, defaults to ~/.oci/config. - type: str - config_profile_name: - description: - - The profile to load from the config file referenced by C(config_file_location). If not set, then the - value of the OCI_CONFIG_PROFILE environment variable, if any, is used. Otherwise, defaults to the - "DEFAULT" profile in C(config_file_location). - default: "DEFAULT" - type: str - api_user: - description: - - The OCID of the user, on whose behalf, OCI APIs are invoked. If not set, then the - value of the OCI_USER_OCID environment variable, if any, is used. 
This option is required if the user - is not specified through a configuration file (See C(config_file_location)). To get the user's OCID, - please refer U(https://docs.us-phoenix-1.oraclecloud.com/Content/API/Concepts/apisigningkey.htm). - type: str - api_user_fingerprint: - description: - - Fingerprint for the key pair being used. If not set, then the value of the OCI_USER_FINGERPRINT - environment variable, if any, is used. This option is required if the key fingerprint is not - specified through a configuration file (See C(config_file_location)). To get the key pair's - fingerprint value please refer - U(https://docs.us-phoenix-1.oraclecloud.com/Content/API/Concepts/apisigningkey.htm). - type: str - api_user_key_file: - description: - - Full path and filename of the private key (in PEM format). If not set, then the value of the - OCI_USER_KEY_FILE variable, if any, is used. This option is required if the private key is - not specified through a configuration file (See C(config_file_location)). If the key is encrypted - with a pass-phrase, the C(api_user_key_pass_phrase) option must also be provided. - type: path - api_user_key_pass_phrase: - description: - - Passphrase used by the key referenced in C(api_user_key_file), if it is encrypted. If not set, then - the value of the OCI_USER_KEY_PASS_PHRASE variable, if any, is used. This option is required if the - key passphrase is not specified through a configuration file (See C(config_file_location)). - type: str - auth_type: - description: - - The type of authentication to use for making API requests. By default C(auth_type="api_key") based - authentication is performed and the API key (see I(api_user_key_file)) in your config file will be - used. If this 'auth_type' module option is not specified, the value of the OCI_ANSIBLE_AUTH_TYPE, - if any, is used. Use C(auth_type="instance_principal") to use instance principal based authentication - when running ansible playbooks within an OCI compute instance. 
- choices: ['api_key', 'instance_principal'] - default: 'api_key' - type: str - tenancy: - description: - - OCID of your tenancy. If not set, then the value of the OCI_TENANCY variable, if any, is - used. This option is required if the tenancy OCID is not specified through a configuration file - (See C(config_file_location)). To get the tenancy OCID, please refer - U(https://docs.us-phoenix-1.oraclecloud.com/Content/API/Concepts/apisigningkey.htm) - type: str - region: - description: - - The Oracle Cloud Infrastructure region to use for all OCI API requests. If not set, then the - value of the OCI_REGION variable, if any, is used. This option is required if the region is - not specified through a configuration file (See C(config_file_location)). Please refer to - U(https://docs.us-phoenix-1.oraclecloud.com/Content/General/Concepts/regions.htm) for more information - on OCI regions. - type: str - """ + DOCUMENTATION = r""" +requirements: + - Python SDK for Oracle Cloud Infrastructure U(https://oracle-cloud-infrastructure-python-sdk.readthedocs.io) +notes: + - For OCI Python SDK configuration, please refer to U(https://oracle-cloud-infrastructure-python-sdk.readthedocs.io/en/latest/configuration.html). +options: + config_file_location: + description: + - Path to configuration file. If not set then the value of the E(OCI_CONFIG_FILE) environment variable, if any, is used. + Otherwise, defaults to C(~/.oci/config). + type: str + config_profile_name: + description: + - The profile to load from the config file referenced by O(config_file_location). If not set, then the value of the + E(OCI_CONFIG_PROFILE) environment variable, if any, is used. Otherwise, defaults to the C(DEFAULT) profile in O(config_file_location). + default: "DEFAULT" + type: str + api_user: + description: + - The OCID of the user, on whose behalf, OCI APIs are invoked. If not set, then the value of the E(OCI_USER_OCID) environment + variable, if any, is used. 
This option is required if the user is not specified through a configuration file (See + O(config_file_location)). To get the user's OCID, please refer to U(https://docs.us-phoenix-1.oraclecloud.com/Content/API/Concepts/apisigningkey.htm). + type: str + api_user_fingerprint: + description: + - Fingerprint for the key pair being used. If not set, then the value of the E(OCI_USER_FINGERPRINT) environment variable, + if any, is used. This option is required if the key fingerprint is not specified through a configuration file (See + O(config_file_location)). To get the key pair's fingerprint value please refer to + U(https://docs.us-phoenix-1.oraclecloud.com/Content/API/Concepts/apisigningkey.htm). + type: str + api_user_key_file: + description: + - Full path and filename of the private key (in PEM format). If not set, then the value of the E(OCI_USER_KEY_FILE) + variable, if any, is used. This option is required if the private key is not specified through a configuration file + (See O(config_file_location)). If the key is encrypted with a pass-phrase, the O(api_user_key_pass_phrase) option + must also be provided. + type: path + api_user_key_pass_phrase: + description: + - Passphrase used by the key referenced in O(api_user_key_file), if it is encrypted. If not set, then the value of the + E(OCI_USER_KEY_PASS_PHRASE) variable, if any, is used. This option is required if the key passphrase is not specified + through a configuration file (See O(config_file_location)). + type: str + auth_type: + description: + - The type of authentication to use for making API requests. By default O(auth_type=api_key) based authentication is + performed and the API key (see O(api_user_key_file)) in your config file is used. If O(auth_type) is not specified, + the value of the E(OCI_ANSIBLE_AUTH_TYPE), if any, is used. Use O(auth_type=instance_principal) to use instance principal + based authentication when running ansible playbooks within an OCI compute instance. 
+ choices: ['api_key', 'instance_principal'] + default: 'api_key' + type: str + tenancy: + description: + - OCID of your tenancy. If not set, then the value of the E(OCI_TENANCY) variable, if any, is used. This option is required + if the tenancy OCID is not specified through a configuration file (See O(config_file_location)). To get the tenancy + OCID, please refer to U(https://docs.us-phoenix-1.oraclecloud.com/Content/API/Concepts/apisigningkey.htm). + type: str + region: + description: + - The Oracle Cloud Infrastructure region to use for all OCI API requests. If not set, then the value of the E(OCI_REGION) + variable, if any, is used. This option is required if the region is not specified through a configuration file (See + O(config_file_location)). Please refer to U(https://docs.us-phoenix-1.oraclecloud.com/Content/General/Concepts/regions.htm) + for more information on OCI regions. + type: str +""" diff --git a/plugins/doc_fragments/oracle_creatable_resource.py b/plugins/doc_fragments/oracle_creatable_resource.py index 211ca6f9c1..1728e56d81 100644 --- a/plugins/doc_fragments/oracle_creatable_resource.py +++ b/plugins/doc_fragments/oracle_creatable_resource.py @@ -1,25 +1,29 @@ -# -*- coding: utf-8 -*- # Copyright (c) 2018, Oracle and/or its affiliates. -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations + +# +# DEPRECATED +# +# This fragment is deprecated and will be removed in community.general 13.0.0 +# class ModuleDocFragment(object): - DOCUMENTATION = """ - options: - force_create: - description: Whether to attempt non-idempotent creation of a resource. 
By default, create resource is an - idempotent operation, and doesn't create the resource if it already exists. Setting this option - to true, forcefully creates a copy of the resource, even if it already exists.This option is - mutually exclusive with I(key_by). - default: False - type: bool - key_by: - description: The list of comma-separated attributes of this resource which should be used to uniquely - identify an instance of the resource. By default, all the attributes of a resource except - I(freeform_tags) are used to uniquely identify a resource. - type: list - elements: str - """ + DOCUMENTATION = r""" +options: + force_create: + description: Whether to attempt non-idempotent creation of a resource. By default, create resource is an idempotent operation, + and does not create the resource if it already exists. Setting this option to V(true), forcefully creates a copy of + the resource, even if it already exists. This option is mutually exclusive with O(key_by). + default: false + type: bool + key_by: + description: The list of comma-separated attributes of this resource which should be used to uniquely identify an instance + of the resource. By default, all the attributes of a resource except O(freeform_tags) are used to uniquely identify + a resource. + type: list + elements: str +""" diff --git a/plugins/doc_fragments/oracle_display_name_option.py b/plugins/doc_fragments/oracle_display_name_option.py index ff70d45dd9..1ac210bbd4 100644 --- a/plugins/doc_fragments/oracle_display_name_option.py +++ b/plugins/doc_fragments/oracle_display_name_option.py @@ -1,16 +1,21 @@ -# -*- coding: utf-8 -*- # Copyright (c) 2018, Oracle and/or its affiliates. 
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations + +# +# DEPRECATED +# +# This fragment is deprecated and will be removed in community.general 13.0.0 +# class ModuleDocFragment(object): - DOCUMENTATION = """ - options: - display_name: - description: Use I(display_name) along with the other options to return only resources that match the given - display name exactly. - type: str - """ + DOCUMENTATION = r""" +options: + display_name: + description: Use O(display_name) along with the other options to return only resources that match the given display name + exactly. + type: str +""" diff --git a/plugins/doc_fragments/oracle_name_option.py b/plugins/doc_fragments/oracle_name_option.py index 8c4f9c1e39..a281bc5e68 100644 --- a/plugins/doc_fragments/oracle_name_option.py +++ b/plugins/doc_fragments/oracle_name_option.py @@ -1,16 +1,20 @@ -# -*- coding: utf-8 -*- # Copyright (c) 2018, Oracle and/or its affiliates. -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations + +# +# DEPRECATED +# +# This fragment is deprecated and will be removed in community.general 13.0.0 +# class ModuleDocFragment(object): - DOCUMENTATION = """ - options: - name: - description: Use I(name) along with the other options to return only resources that match the given name - exactly. 
- type: str - """ + DOCUMENTATION = r""" +options: + name: + description: Use O(name) along with the other options to return only resources that match the given name exactly. + type: str +""" diff --git a/plugins/doc_fragments/oracle_tags.py b/plugins/doc_fragments/oracle_tags.py index f95b22c8ed..ec0096ba33 100644 --- a/plugins/doc_fragments/oracle_tags.py +++ b/plugins/doc_fragments/oracle_tags.py @@ -1,22 +1,25 @@ -# -*- coding: utf-8 -*- # Copyright (c) 2018, Oracle and/or its affiliates. -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations + +# +# DEPRECATED +# +# This fragment is deprecated and will be removed in community.general 13.0.0 +# class ModuleDocFragment(object): - DOCUMENTATION = """ - options: - defined_tags: - description: Defined tags for this resource. Each key is predefined and scoped to a namespace. For more - information, see - U(https://docs.us-phoenix-1.oraclecloud.com/Content/General/Concepts/resourcetags.htm). - type: dict - freeform_tags: - description: Free-form tags for this resource. Each tag is a simple key-value pair with no predefined name, - type, or namespace. For more information, see - U(https://docs.us-phoenix-1.oraclecloud.com/Content/General/Concepts/resourcetags.htm). - type: dict - """ + DOCUMENTATION = r""" +options: + defined_tags: + description: Defined tags for this resource. Each key is predefined and scoped to a namespace. For more information, see + U(https://docs.us-phoenix-1.oraclecloud.com/Content/General/Concepts/resourcetags.htm). + type: dict + freeform_tags: + description: Free-form tags for this resource. Each tag is a simple key-value pair with no predefined name, type, or namespace. 
+ For more information, see U(https://docs.us-phoenix-1.oraclecloud.com/Content/General/Concepts/resourcetags.htm). + type: dict +""" diff --git a/plugins/doc_fragments/oracle_wait_options.py b/plugins/doc_fragments/oracle_wait_options.py index 0312755ffa..868fb3cb04 100644 --- a/plugins/doc_fragments/oracle_wait_options.py +++ b/plugins/doc_fragments/oracle_wait_options.py @@ -1,26 +1,30 @@ -# -*- coding: utf-8 -*- # Copyright (c) 2018, Oracle and/or its affiliates. -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations + +# +# DEPRECATED +# +# This fragment is deprecated and will be removed in community.general 13.0.0 +# class ModuleDocFragment(object): - DOCUMENTATION = """ - options: - wait: - description: Whether to wait for create or delete operation to complete. - default: yes - type: bool - wait_timeout: - description: Time, in seconds, to wait when I(wait=yes). - default: 1200 - type: int - wait_until: - description: The lifecycle state to wait for the resource to transition into when I(wait=yes). By default, - when I(wait=yes), we wait for the resource to get into ACTIVE/ATTACHED/AVAILABLE/PROVISIONED/ - RUNNING applicable lifecycle state during create operation & to get into DELETED/DETACHED/ - TERMINATED lifecycle state during delete operation. - type: str - """ + DOCUMENTATION = r""" +options: + wait: + description: Whether to wait for create or delete operation to complete. + default: true + type: bool + wait_timeout: + description: Time, in seconds, to wait when O(wait=true). + default: 1200 + type: int + wait_until: + description: The lifecycle state to wait for the resource to transition into when O(wait=true). 
By default, when O(wait=true), + we wait for the resource to get into ACTIVE/ATTACHED/AVAILABLE/PROVISIONED/ RUNNING applicable lifecycle state during + create operation and to get into DELETED/DETACHED/ TERMINATED lifecycle state during delete operation. + type: str +""" diff --git a/plugins/doc_fragments/pipx.py b/plugins/doc_fragments/pipx.py new file mode 100644 index 0000000000..70a502ddda --- /dev/null +++ b/plugins/doc_fragments/pipx.py @@ -0,0 +1,40 @@ + +# Copyright (c) 2024, Alexei Znamensky +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + + +class ModuleDocFragment(object): + DOCUMENTATION = r""" +options: + global: + description: + - The module passes the C(--global) argument to C(pipx), to execute actions in global scope. + type: bool + default: false + executable: + description: + - Path to the C(pipx) installed in the system. + - If not specified, the module uses C(python -m pipx) to run the tool, using the same Python interpreter as ansible + itself. + type: path +requirements: + - This module requires C(pipx) version 1.7.0 or above. + - Please note that C(pipx) 1.7.0 requires Python 3.8 or above. + - Please note that C(pipx) 1.8.0 requires Python 3.9 or above. +notes: + - This module does not install the C(pipx) python package, however that can be easily done with the module M(ansible.builtin.pip). + - This module does not require C(pipx) to be in the shell C(PATH), but it must be loadable by Python as a module, meaning + that C(python -m pipx) must work. + - This module honors C(pipx) environment variables such as but not limited to E(PIPX_HOME) and E(PIPX_BIN_DIR) passed using + the R(environment Ansible keyword, playbooks_environment). + - This module disabled emojis in the output of C(pipx) commands to reduce clutter. 
In C(pipx) 1.8.0, the environment variable + E(USE_EMOJI) was renamed to E(PIPX_USE_EMOJI) and for compatibility with both versions, starting in community.general + 11.4.0, this module sets them both to C(0) to disable emojis. +seealso: + - name: C(pipx) command manual page + description: Manual page for the command. + link: https://pipx.pypa.io/latest/docs/ +""" diff --git a/plugins/doc_fragments/pritunl.py b/plugins/doc_fragments/pritunl.py index e2eaff2889..17e03fc716 100644 --- a/plugins/doc_fragments/pritunl.py +++ b/plugins/doc_fragments/pritunl.py @@ -1,43 +1,37 @@ -# -*- coding: utf-8 -*- -# Copyright: (c) 2021, Florian Dambrine -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# Copyright (c) 2021, Florian Dambrine +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import absolute_import, division, print_function - -__metaclass__ = type +from __future__ import annotations class ModuleDocFragment(object): DOCUMENTATION = r""" options: - pritunl_url: - type: str - required: true - description: - - URL and port of the Pritunl server on which the API is enabled. - - pritunl_api_token: - type: str - required: true - description: - - API Token of a Pritunl admin user. - - It needs to be enabled in Administrators > USERNAME > Enable Token Authentication. - - pritunl_api_secret: - type: str - required: true - description: - - API Secret found in Administrators > USERNAME > API Secret. - - validate_certs: - type: bool - required: false - default: true - description: - - If certificates should be validated or not. - - This should never be set to C(false), except if you are very sure that - your connection to the server can not be subject to a Man In The Middle - attack. + pritunl_url: + type: str + required: true + description: + - URL and port of the Pritunl server on which the API is enabled. 
+ pritunl_api_token: + type: str + required: true + description: + - API Token of a Pritunl admin user. + - It needs to be enabled in Administrators > USERNAME > Enable Token Authentication. + pritunl_api_secret: + type: str + required: true + description: + - API Secret found in Administrators > USERNAME > API Secret. + validate_certs: + type: bool + required: false + default: true + description: + - If certificates should be validated or not. + - This should never be set to V(false), except if you are very sure that your connection to the server can not be subject + to a Man In The Middle attack. """ diff --git a/plugins/doc_fragments/proxmox.py b/plugins/doc_fragments/proxmox.py deleted file mode 100644 index 165a78527a..0000000000 --- a/plugins/doc_fragments/proxmox.py +++ /dev/null @@ -1,64 +0,0 @@ -# -*- coding: utf-8 -*- - -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - - -class ModuleDocFragment(object): - # Common parameters for Proxmox VE modules - DOCUMENTATION = r''' -options: - api_host: - description: - - Specify the target host of the Proxmox VE cluster. - type: str - required: true - api_user: - description: - - Specify the user to authenticate with. - type: str - required: true - api_password: - description: - - Specify the password to authenticate with. - - You can use C(PROXMOX_PASSWORD) environment variable. - type: str - api_token_id: - description: - - Specify the token ID. - type: str - version_added: 1.3.0 - api_token_secret: - description: - - Specify the token secret. - type: str - version_added: 1.3.0 - validate_certs: - description: - - If C(no), SSL certificates will not be validated. - - This should only be used on personally controlled sites using self-signed certificates. 
- type: bool - default: no -requirements: [ "proxmoxer", "requests" ] -''' - - SELECTION = r''' -options: - vmid: - description: - - Specifies the instance ID. - - If not set the next available ID will be fetched from ProxmoxAPI. - type: int - node: - description: - - Proxmox VE node on which to operate. - - Only required for I(state=present). - - For every other states it will be autodiscovered. - type: str - pool: - description: - - Add the new VM to the specified pool. - type: str -''' diff --git a/plugins/doc_fragments/purestorage.py b/plugins/doc_fragments/purestorage.py deleted file mode 100644 index f35f026711..0000000000 --- a/plugins/doc_fragments/purestorage.py +++ /dev/null @@ -1,62 +0,0 @@ -# -*- coding: utf-8 -*- - -# Copyright: (c) 2017, Simon Dodsley -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - - -class ModuleDocFragment(object): - - # Standard Pure Storage documentation fragment - DOCUMENTATION = r''' -options: - - See separate platform section for more details -requirements: - - See separate platform section for more details -notes: - - Ansible modules are available for the following Pure Storage products: FlashArray, FlashBlade -''' - - # Documentation fragment for FlashBlade - FB = r''' -options: - fb_url: - description: - - FlashBlade management IP address or Hostname. - type: str - api_token: - description: - - FlashBlade API token for admin privileged user. - type: str -notes: - - This module requires the C(purity_fb) Python library - - You must set C(PUREFB_URL) and C(PUREFB_API) environment variables - if I(fb_url) and I(api_token) arguments are not passed to the module directly -requirements: - - python >= 2.7 - - purity_fb >= 1.1 -''' - - # Documentation fragment for FlashArray - FA = r''' -options: - fa_url: - description: - - FlashArray management IPv4 address or Hostname. 
- type: str - required: true - api_token: - description: - - FlashArray API token for admin privileged user. - type: str - required: true -notes: - - This module requires the C(purestorage) Python library - - You must set C(PUREFA_URL) and C(PUREFA_API) environment variables - if I(fa_url) and I(api_token) arguments are not passed to the module directly -requirements: - - python >= 2.7 - - purestorage -''' diff --git a/plugins/doc_fragments/rackspace.py b/plugins/doc_fragments/rackspace.py deleted file mode 100644 index 0f57dd8899..0000000000 --- a/plugins/doc_fragments/rackspace.py +++ /dev/null @@ -1,117 +0,0 @@ -# -*- coding: utf-8 -*- - -# Copyright: (c) 2014, Matt Martz -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - - -class ModuleDocFragment(object): - - # Standard Rackspace only documentation fragment - DOCUMENTATION = r''' -options: - api_key: - description: - - Rackspace API key, overrides I(credentials). - type: str - aliases: [ password ] - credentials: - description: - - File to find the Rackspace credentials in. Ignored if I(api_key) and - I(username) are provided. - type: path - aliases: [ creds_file ] - env: - description: - - Environment as configured in I(~/.pyrax.cfg), - see U(https://github.com/rackspace/pyrax/blob/master/docs/getting_started.md#pyrax-configuration). - type: str - region: - description: - - Region to create an instance in. - type: str - username: - description: - - Rackspace username, overrides I(credentials). - type: str - validate_certs: - description: - - Whether or not to require SSL validation of API endpoints. - type: bool - aliases: [ verify_ssl ] -requirements: - - python >= 2.6 - - pyrax -notes: - - The following environment variables can be used, C(RAX_USERNAME), - C(RAX_API_KEY), C(RAX_CREDS_FILE), C(RAX_CREDENTIALS), C(RAX_REGION). 
- - C(RAX_CREDENTIALS) and C(RAX_CREDS_FILE) points to a credentials file - appropriate for pyrax. See U(https://github.com/rackspace/pyrax/blob/master/docs/getting_started.md#authenticating) - - C(RAX_USERNAME) and C(RAX_API_KEY) obviate the use of a credentials file - - C(RAX_REGION) defines a Rackspace Public Cloud region (DFW, ORD, LON, ...) -''' - - # Documentation fragment including attributes to enable communication - # of other OpenStack clouds. Not all rax modules support this. - OPENSTACK = r''' -options: - api_key: - type: str - description: - - Rackspace API key, overrides I(credentials). - aliases: [ password ] - auth_endpoint: - type: str - description: - - The URI of the authentication service. - - If not specified will be set to U(https://identity.api.rackspacecloud.com/v2.0/) - credentials: - type: path - description: - - File to find the Rackspace credentials in. Ignored if I(api_key) and - I(username) are provided. - aliases: [ creds_file ] - env: - type: str - description: - - Environment as configured in I(~/.pyrax.cfg), - see U(https://github.com/rackspace/pyrax/blob/master/docs/getting_started.md#pyrax-configuration). - identity_type: - type: str - description: - - Authentication mechanism to use, such as rackspace or keystone. - default: rackspace - region: - type: str - description: - - Region to create an instance in. - tenant_id: - type: str - description: - - The tenant ID used for authentication. - tenant_name: - type: str - description: - - The tenant name used for authentication. - username: - type: str - description: - - Rackspace username, overrides I(credentials). - validate_certs: - description: - - Whether or not to require SSL validation of API endpoints. - type: bool - aliases: [ verify_ssl ] -requirements: - - python >= 2.6 - - pyrax -notes: - - The following environment variables can be used, C(RAX_USERNAME), - C(RAX_API_KEY), C(RAX_CREDS_FILE), C(RAX_CREDENTIALS), C(RAX_REGION). 
- - C(RAX_CREDENTIALS) and C(RAX_CREDS_FILE) points to a credentials file - appropriate for pyrax. See U(https://github.com/rackspace/pyrax/blob/master/docs/getting_started.md#authenticating) - - C(RAX_USERNAME) and C(RAX_API_KEY) obviate the use of a credentials file - - C(RAX_REGION) defines a Rackspace Public Cloud region (DFW, ORD, LON, ...) -''' diff --git a/plugins/doc_fragments/redfish.py b/plugins/doc_fragments/redfish.py new file mode 100644 index 0000000000..ed95eeab83 --- /dev/null +++ b/plugins/doc_fragments/redfish.py @@ -0,0 +1,35 @@ + +# Copyright (c) 2025 Ansible community +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + + +class ModuleDocFragment(object): + + # Use together with the community.general.redfish module utils' REDFISH_COMMON_ARGUMENT_SPEC + DOCUMENTATION = r""" +options: + validate_certs: + description: + - If V(false), TLS/SSL certificates are not validated. + - Set this to V(true) to enable certificate checking. Should be used together with O(ca_path). + type: bool + default: false + ca_path: + description: + - PEM formatted file that contains a CA certificate to be used for validation. + - Only used if O(validate_certs=true). + type: path + ciphers: + required: false + description: + - TLS/SSL Ciphers to use for the request. + - When a list is provided, all ciphers are joined in order with V(:). + - See the L(OpenSSL Cipher List Format,https://www.openssl.org/docs/manmaster/man1/openssl-ciphers.html#CIPHER-LIST-FORMAT) + for more details. + - The available ciphers is dependent on the Python and OpenSSL/LibreSSL versions. 
+ type: list + elements: str +""" diff --git a/plugins/doc_fragments/redis.py b/plugins/doc_fragments/redis.py index e7af25ec8f..38889a3cbd 100644 --- a/plugins/doc_fragments/redis.py +++ b/plugins/doc_fragments/redis.py @@ -1,15 +1,14 @@ -# -*- coding: utf-8 -*- -# Copyright: (c) 2021, Andreas Botzner -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# Copyright (c) 2021, Andreas Botzner +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations class ModuleDocFragment(object): # Common parameters for Redis modules - DOCUMENTATION = r''' + DOCUMENTATION = r""" options: login_host: description: @@ -39,19 +38,26 @@ options: validate_certs: description: - Specify whether or not to validate TLS certificates. - - This should only be turned off for personally controlled sites or with - C(localhost) as target. + - This should only be turned off for personally controlled sites or with C(localhost) as target. type: bool default: true ca_certs: description: - - Path to root certificates file. If not set and I(tls) is - set to C(true), certifi ca-certificates will be used. + - Path to root certificates file. If not set and O(tls) is set to V(true), certifi's CA certificates are used. type: str -requirements: [ "redis", "certifi" ] + client_cert_file: + description: + - Path to the client certificate file. + type: str + version_added: 9.3.0 + client_key_file: + description: + - Path to the client private key file. + type: str + version_added: 9.3.0 +requirements: ["redis", "certifi"] notes: - - Requires the C(redis) Python package on the remote host. You can - install it with pip (C(pip install redis)) or with a package manager. 
- Information on the library can be found at U(https://github.com/andymccurdy/redis-py). -''' + - Requires the C(redis) Python package on the remote host. You can install it with pip (C(pip install redis)) or with a + package manager. Information on the library can be found at U(https://github.com/andymccurdy/redis-py). +""" diff --git a/plugins/doc_fragments/rundeck.py b/plugins/doc_fragments/rundeck.py index 056a54f37f..3e9d99aa7a 100644 --- a/plugins/doc_fragments/rundeck.py +++ b/plugins/doc_fragments/rundeck.py @@ -1,16 +1,15 @@ -# -*- coding: utf-8 -*- -# Copyright: (c) 2021, Phillipe Smith -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# Copyright (c) 2021, Phillipe Smith +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations class ModuleDocFragment(object): # Standard files documentation fragment - DOCUMENTATION = r''' + DOCUMENTATION = r""" options: url: type: str @@ -28,4 +27,4 @@ options: description: - Rundeck User API Token. 
required: true -''' +""" diff --git a/plugins/doc_fragments/scaleway.py b/plugins/doc_fragments/scaleway.py index c1e1b13d9d..7810deb901 100644 --- a/plugins/doc_fragments/scaleway.py +++ b/plugins/doc_fragments/scaleway.py @@ -1,50 +1,57 @@ -# -*- coding: utf-8 -*- -# Copyright: (c) 2018, Yanis Guenane -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# Copyright (c) 2018, Yanis Guenane +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations class ModuleDocFragment(object): # Standard documentation fragment - DOCUMENTATION = r''' + DOCUMENTATION = r""" options: api_token: description: - Scaleway OAuth token. type: str required: true - aliases: [ oauth_token ] + aliases: [oauth_token] api_url: description: - Scaleway API URL. type: str default: https://api.scaleway.com - aliases: [ base_url ] + aliases: [base_url] api_timeout: description: - HTTP timeout to Scaleway API in seconds. type: int default: 30 - aliases: [ timeout ] + aliases: [timeout] query_parameters: description: - - List of parameters passed to the query string. + - List of parameters passed to the query string. type: dict default: {} validate_certs: description: - Validate SSL certs of the Scaleway API. type: bool - default: yes + default: true notes: - - Also see the API documentation on U(https://developer.scaleway.com/) - - If C(api_token) is not set within the module, the following - environment variables can be used in decreasing order of precedence - C(SCW_TOKEN), C(SCW_API_KEY), C(SCW_OAUTH_TOKEN) or C(SCW_API_TOKEN). - - If one wants to use a different C(api_url) one can also set the C(SCW_API_URL) - environment variable. -''' + - Also see the API documentation on U(https://developer.scaleway.com/). 
+ - If O(api_token) is not set within the module, the following environment variables can be used in decreasing order of precedence + E(SCW_TOKEN), E(SCW_API_KEY), E(SCW_OAUTH_TOKEN) or E(SCW_API_TOKEN). + - If one wants to use a different O(api_url) one can also set the E(SCW_API_URL) environment variable. +""" + + ACTIONGROUP_SCALEWAY = r""" +options: {} +attributes: + action_group: + description: Use C(group/community.general.scaleway) in C(module_defaults) to set defaults for this module. + support: full + membership: + - community.general.scaleway +""" diff --git a/plugins/doc_fragments/scaleway_waitable_resource.py b/plugins/doc_fragments/scaleway_waitable_resource.py new file mode 100644 index 0000000000..2a14c7571e --- /dev/null +++ b/plugins/doc_fragments/scaleway_waitable_resource.py @@ -0,0 +1,31 @@ + +# Copyright (c) 2022, Guillaume MARTINEZ +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + + +class ModuleDocFragment(object): + + # Standard documentation fragment + DOCUMENTATION = r""" +options: + wait: + description: + - Wait for the resource to reach its desired state before returning. + type: bool + default: true + wait_timeout: + type: int + description: + - Time to wait for the resource to reach the expected state. + required: false + default: 300 + wait_sleep_time: + type: int + description: + - Time to wait before every attempt to check the state of the resource. 
+ required: false + default: 3 +""" diff --git a/plugins/doc_fragments/utm.py b/plugins/doc_fragments/utm.py index 413fb49675..831f4ccc96 100644 --- a/plugins/doc_fragments/utm.py +++ b/plugins/doc_fragments/utm.py @@ -1,54 +1,55 @@ -# -*- coding: utf-8 -*- -# Copyright: (c) 2018, Johannes Brunswicker -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# Copyright (c) 2018, Johannes Brunswicker +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations class ModuleDocFragment(object): - DOCUMENTATION = r''' + DOCUMENTATION = r""" options: - headers: - description: - - A dictionary of additional headers to be sent to POST and PUT requests. - - Is needed for some modules - type: dict - required: false - utm_host: - description: - - The REST Endpoint of the Sophos UTM. - type: str - required: true - utm_port: - description: - - The port of the REST interface. - type: int - default: 4444 - utm_token: - description: - - "The token used to identify at the REST-API. See U(https://www.sophos.com/en-us/medialibrary/\ - PDFs/documentation/UTMonAWS/Sophos-UTM-RESTful-API.pdf?la=en), Chapter 2.4.2." - type: str - required: true - utm_protocol: - description: - - The protocol of the REST Endpoint. - choices: [ http, https ] - type: str - default: https - validate_certs: - description: - - Whether the REST interface's ssl certificate should be verified or not. - type: bool - default: yes - state: - description: - - The desired state of the object. - - C(present) will create or update an object - - C(absent) will delete an object if it was present - type: str - choices: [ absent, present ] - default: present -''' + headers: + description: + - A dictionary of additional headers to be sent to POST and PUT requests. 
+ - Is needed for some modules. + type: dict + required: false + default: {} + utm_host: + description: + - The REST Endpoint of the Sophos UTM. + type: str + required: true + utm_port: + description: + - The port of the REST interface. + type: int + default: 4444 + utm_token: + description: + - The token used to identify at the REST-API. + - See U(https://www.sophos.com/en-us/medialibrary/PDFs/documentation/UTMonAWS/Sophos-UTM-RESTful-API.pdf?la=en), Chapter + 2.4.2. + type: str + required: true + utm_protocol: + description: + - The protocol of the REST Endpoint. + choices: [http, https] + type: str + default: https + validate_certs: + description: + - Whether the REST interface's SSL certificate should be verified or not. + type: bool + default: true + state: + description: + - The desired state of the object. + - V(present) creates or updates an object. + - V(absent) deletes an object if present. + type: str + choices: [absent, present] + default: present +""" diff --git a/plugins/doc_fragments/vexata.py b/plugins/doc_fragments/vexata.py index d541d5ad85..3ca6684469 100644 --- a/plugins/doc_fragments/vexata.py +++ b/plugins/doc_fragments/vexata.py @@ -1,23 +1,13 @@ -# -*- coding: utf-8 -*- # -# Copyright: (c) 2019, Sandeep Kasargod -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# Copyright (c) 2019, Sandeep Kasargod +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations class ModuleDocFragment(object): - DOCUMENTATION = r''' -options: - - See respective platform section for more details -requirements: - - See respective platform section for more details -notes: - - Ansible modules are available for Vexata VX100 arrays. 
-''' - # Documentation fragment for Vexata VX100 series VX100 = r''' options: @@ -29,25 +19,26 @@ options: user: description: - Vexata API user with administrative privileges. + - Uses the E(VEXATA_USER) environment variable as a fallback. required: false type: str password: description: - Vexata API user password. + - Uses the E(VEXATA_PASSWORD) environment variable as a fallback. required: false type: str validate_certs: description: - - Allows connection when SSL certificates are not valid. Set to C(false) when certificates are not trusted. - - If set to C(yes), please make sure Python >= 2.7.9 is installed on the given machine. + - Allows connection when SSL certificates are not valid. Set to V(false) when certificates are not trusted. + - If set to V(true), please make sure Python >= 2.7.9 is installed on the given machine. required: false type: bool - default: 'no' + default: false requirements: - Vexata VX100 storage array with VXOS >= v3.5.0 on storage array - vexatapi >= 0.0.1 - - python >= 2.7 - - VEXATA_USER and VEXATA_PASSWORD environment variables must be set if + - E(VEXATA_USER) and E(VEXATA_PASSWORD) environment variables must be set if user and password arguments are not passed to the module directly. 
''' diff --git a/plugins/doc_fragments/xenserver.py b/plugins/doc_fragments/xenserver.py index 747bf02f1b..7da1391420 100644 --- a/plugins/doc_fragments/xenserver.py +++ b/plugins/doc_fragments/xenserver.py @@ -1,40 +1,39 @@ -# -*- coding: utf-8 -*- -# Copyright: (c) 2018, Bojan Vitnik -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# Copyright (c) 2018, Bojan Vitnik +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations class ModuleDocFragment(object): # Common parameters for XenServer modules - DOCUMENTATION = r''' + DOCUMENTATION = r""" options: hostname: description: - - The hostname or IP address of the XenServer host or XenServer pool master. - - If the value is not specified in the task, the value of environment variable C(XENSERVER_HOST) will be used instead. + - The hostname or IP address of the XenServer host or XenServer pool master. + - If the value is not specified in the task, the value of environment variable E(XENSERVER_HOST) is used instead. type: str default: localhost - aliases: [ host, pool ] + aliases: [host, pool] username: description: - - The username to use for connecting to XenServer. - - If the value is not specified in the task, the value of environment variable C(XENSERVER_USER) will be used instead. + - The username to use for connecting to XenServer. + - If the value is not specified in the task, the value of environment variable E(XENSERVER_USER) is used instead. type: str default: root - aliases: [ admin, user ] + aliases: [admin, user] password: description: - - The password to use for connecting to XenServer. - - If the value is not specified in the task, the value of environment variable C(XENSERVER_PASSWORD) will be used instead. 
+ - The password to use for connecting to XenServer. + - If the value is not specified in the task, the value of environment variable E(XENSERVER_PASSWORD) is used instead. type: str - aliases: [ pass, pwd ] + aliases: [pass, pwd] validate_certs: description: - - Allows connection when SSL certificates are not valid. Set to C(false) when certificates are not trusted. - - If the value is not specified in the task, the value of environment variable C(XENSERVER_VALIDATE_CERTS) will be used instead. + - Allows connection when SSL certificates are not valid. Set to V(false) when certificates are not trusted. + - If the value is not specified in the task, the value of environment variable E(XENSERVER_VALIDATE_CERTS) is used instead. type: bool - default: yes -''' + default: true +""" diff --git a/plugins/filter/accumulate.py b/plugins/filter/accumulate.py new file mode 100644 index 0000000000..da784ab12b --- /dev/null +++ b/plugins/filter/accumulate.py @@ -0,0 +1,62 @@ +# Copyright (c) Max Gautier +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +DOCUMENTATION = r""" +name: accumulate +short_description: Produce a list of accumulated sums of the input list contents +version_added: 10.1.0 +author: Max Gautier (@VannTen) +description: + - Passthrough to the L(Python itertools.accumulate function,https://docs.python.org/3/library/itertools.html#itertools.accumulate). + - Transforms an input list into the cumulative list of results from applying addition to the elements of the input list. + - Addition means the default Python implementation of C(+) for input list elements type. +options: + _input: + description: A list. + type: list + elements: any + required: true +""" + +RETURN = r""" +_value: + description: A list of cumulated sums of the elements of the input list. 
+ type: list + elements: any +""" + +EXAMPLES = r""" +- name: Enumerate parent directories of some path + ansible.builtin.debug: + var: > + "/some/path/to/my/file" + | split('/') | map('split', '/') + | community.general.accumulate | map('join', '/') + # Produces: ['', '/some', '/some/path', '/some/path/to', '/some/path/to/my', '/some/path/to/my/file'] + +- name: Growing string + ansible.builtin.debug: + var: "'abc' | community.general.accumulate" + # Produces ['a', 'ab', 'abc'] +""" + +from itertools import accumulate +from collections.abc import Sequence + +from ansible.errors import AnsibleFilterError + + +def list_accumulate(sequence): + if not isinstance(sequence, Sequence): + raise AnsibleFilterError(f'Invalid value type ({type(sequence)}) for accumulate ({sequence!r})') + + return accumulate(sequence) + + +class FilterModule(object): + + def filters(self): + return { + 'accumulate': list_accumulate, + } diff --git a/plugins/filter/counter.py b/plugins/filter/counter.py index 5d7f365f94..f89bfd6d1a 100644 --- a/plugins/filter/counter.py +++ b/plugins/filter/counter.py @@ -1,55 +1,54 @@ -# -*- coding: utf-8 -*- # Copyright (c) 2021, Remy Keil -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = ''' - name: counter - short_description: Counts hashable elements in a sequence - version_added: 4.3.0 - author: Rémy Keil (@keilr) - description: - - Counts hashable elements in a sequence. - options: - _input: - description: A sequence. 
- type: list - elements: any - required: true -''' +DOCUMENTATION = r""" +name: counter +short_description: Counts hashable elements in a sequence +version_added: 4.3.0 +author: Rémy Keil (@keilr) +description: + - Counts hashable elements in a sequence. +options: + _input: + description: A sequence. + type: list + elements: any + required: true +""" -EXAMPLES = ''' -- name: Count occurences +EXAMPLES = r""" +- name: Count occurrences ansible.builtin.debug: msg: >- {{ [1, 'a', 2, 2, 'a', 'b', 'a'] | community.general.counter }} # Produces: {1: 1, 'a': 3, 2: 2, 'b': 1} -''' +""" -RETURN = ''' - _value: - description: A dictionary with the elements of the sequence as keys, and their number of occurance in the sequence as values. - type: dictionary -''' +RETURN = r""" +_value: + description: A dictionary with the elements of the sequence as keys, and their number of occurrences in the sequence as + values. + type: dictionary +""" from ansible.errors import AnsibleFilterError -from ansible.module_utils.common._collections_compat import Sequence +from collections.abc import Sequence from collections import Counter def counter(sequence): ''' Count elements in a sequence. Returns dict with count result. ''' if not isinstance(sequence, Sequence): - raise AnsibleFilterError('Argument for community.general.counter must be a sequence (string or list). %s is %s' % - (sequence, type(sequence))) + raise AnsibleFilterError(f'Argument for community.general.counter must be a sequence (string or list). 
{sequence} is {type(sequence)}') try: result = dict(Counter(sequence)) except TypeError as e: raise AnsibleFilterError( - "community.general.counter needs a sequence with hashable elements (int, float or str) - %s" % (e) + f"community.general.counter needs a sequence with hashable elements (int, float or str) - {e}" ) return result diff --git a/plugins/filter/crc32.py b/plugins/filter/crc32.py new file mode 100644 index 0000000000..11a6e77495 --- /dev/null +++ b/plugins/filter/crc32.py @@ -0,0 +1,61 @@ +# Copyright (c) 2022, Julien Riou +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later +from __future__ import annotations + +from ansible.errors import AnsibleFilterError +from ansible.module_utils.common.text.converters import to_bytes +from ansible.module_utils.common.collections import is_string + +try: + from zlib import crc32 + HAS_ZLIB = True +except ImportError: + HAS_ZLIB = False + + +DOCUMENTATION = r""" +name: crc32 +short_description: Generate a CRC32 checksum +version_added: 5.4.0 +description: + - Checksum a string using CRC32 algorithm and return its hexadecimal representation. +options: + _input: + description: + - The string to checksum. + type: string + required: true +author: + - Julien Riou +""" + +EXAMPLES = r""" +- name: Checksum a test string + ansible.builtin.debug: + msg: "{{ 'test' | community.general.crc32 }}" +""" + +RETURN = r""" +_value: + description: CRC32 checksum. 
+ type: string +""" + + +def crc32s(value): + if not is_string(value): + raise AnsibleFilterError(f'Invalid value type ({type(value)}) for crc32 ({value!r})') + + if not HAS_ZLIB: + raise AnsibleFilterError('Failed to import zlib module') + + data = to_bytes(value, errors='surrogate_or_strict') + return f"{crc32(data) & 0xffffffff:x}" + + +class FilterModule: + def filters(self): + return { + 'crc32': crc32s, + } diff --git a/plugins/filter/dict.py b/plugins/filter/dict.py index 866e8f8dc2..d2d8bb952c 100644 --- a/plugins/filter/dict.py +++ b/plugins/filter/dict.py @@ -1,27 +1,26 @@ -# -*- coding: utf-8 -*- -# Copyright: (c) 2021, Felix Fontein -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# Copyright (c) 2021, Felix Fontein +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = ''' - name: dict - short_description: Convert a list of tuples into a dictionary - version_added: 3.0.0 - author: Felix Fontein (@felixfontein) - description: - - Convert a list of tuples into a dictionary. This is a filter version of the C(dict) function. - options: - _input: - description: A list of tuples (with exactly two elements). - type: list - elements: tuple - required: true -''' +DOCUMENTATION = r""" +name: dict +short_description: Convert a list of tuples into a dictionary +version_added: 3.0.0 +author: Felix Fontein (@felixfontein) +description: + - Convert a list of tuples into a dictionary. This is a filter version of the C(dict) function. +options: + _input: + description: A list of tuples (with exactly two elements). 
+ type: list + elements: tuple + required: true +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: Convert list of tuples into dictionary ansible.builtin.set_fact: dictionary: "{{ [[1, 2], ['a', 'b']] | community.general.dict }}" @@ -52,13 +51,13 @@ EXAMPLES = ''' # "k2": 42, # "k3": "b" # } -''' +""" -RETURN = ''' - _value: - description: The dictionary having the provided key-value pairs. - type: boolean -''' +RETURN = r""" +_value: + description: A dictionary with the provided key-value pairs. + type: dictionary +""" def dict_filter(sequence): diff --git a/plugins/filter/dict_kv.py b/plugins/filter/dict_kv.py index 1a0957819e..79c8dd0fe6 100644 --- a/plugins/filter/dict_kv.py +++ b/plugins/filter/dict_kv.py @@ -1,41 +1,40 @@ -# -*- coding: utf-8 -*- # Copyright (C) 2020 Stanislav German-Evtushenko (@giner) -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = ''' - name: dict_kv - short_description: Convert a value to a dictionary with a single key-value pair - version_added: 1.3.0 - author: Stanislav German-Evtushenko (@giner) - description: - - Convert a value to a dictionary with a single key-value pair. - positional: key - options: - _input: - description: The value for the single key-value pair. - type: any - required: true - key: - description: The key for the single key-value pair. - type: any - required: true -''' +DOCUMENTATION = r""" +name: dict_kv +short_description: Convert a value to a dictionary with a single key-value pair +version_added: 1.3.0 +author: Stanislav German-Evtushenko (@giner) +description: + - Convert a value to a dictionary with a single key-value pair. 
+positional: key +options: + _input: + description: The value for the single key-value pair. + type: any + required: true + key: + description: The key for the single key-value pair. + type: any + required: true +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: Create a one-element dictionary from a value ansible.builtin.debug: msg: "{{ 'myvalue' | dict_kv('mykey') }}" # Produces the dictionary {'mykey': 'myvalue'} -''' +""" -RETURN = ''' - _value: - description: A dictionary with a single key-value pair. - type: dictionary -''' +RETURN = r""" +_value: + description: A dictionary with a single key-value pair. + type: dictionary +""" def dict_kv(value, key): diff --git a/plugins/filter/from_csv.py b/plugins/filter/from_csv.py index 269cba046f..160eed959e 100644 --- a/plugins/filter/from_csv.py +++ b/plugins/filter/from_csv.py @@ -1,61 +1,60 @@ -# -*- coding: utf-8 -*- -# Copyright: (c) 2021, Andrew Pantuso (@ajpantuso) -# Copyright: (c) 2018, Dag Wieers (@dagwieers) -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# Copyright (c) 2021, Andrew Pantuso (@ajpantuso) +# Copyright (c) 2018, Dag Wieers (@dagwieers) +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = ''' - name: from_csv - short_description: Converts CSV text input into list of dicts - version_added: 2.3.0 - author: Andrew Pantuso (@Ajpantuso) - description: - - Converts CSV text input into list of dictionaries. - options: - _input: - description: A string containing a CSV document. - type: string - required: true - dialect: - description: - - The CSV dialect to use when parsing the CSV file. - - Possible values include C(excel), C(excel-tab) or C(unix). 
- type: str - default: excel - fieldnames: - description: - - A list of field names for every column. - - This is needed if the CSV does not have a header. - type: list - elements: str - delimiter: - description: - - A one-character string used to separate fields. - - When using this parameter, you change the default value used by I(dialect). - - The default value depends on the dialect used. - type: str - skipinitialspace: - description: - - Whether to ignore any whitespaces immediately following the delimiter. - - When using this parameter, you change the default value used by I(dialect). - - The default value depends on the dialect used. - type: bool - strict: - description: - - Whether to raise an exception on bad CSV input. - - When using this parameter, you change the default value used by I(dialect). - - The default value depends on the dialect used. - type: bool -''' +DOCUMENTATION = r""" +name: from_csv +short_description: Converts CSV text input into list of dicts +version_added: 2.3.0 +author: Andrew Pantuso (@Ajpantuso) +description: + - Converts CSV text input into list of dictionaries. +options: + _input: + description: A string containing a CSV document. + type: string + required: true + dialect: + description: + - The CSV dialect to use when parsing the CSV file. + - Possible values include V(excel), V(excel-tab) or V(unix). + type: str + default: excel + fieldnames: + description: + - A list of field names for every column. + - This is needed if the CSV does not have a header. + type: list + elements: str + delimiter: + description: + - A one-character string used to separate fields. + - When using this parameter, you change the default value used by O(dialect). + - The default value depends on the dialect used. + type: str + skipinitialspace: + description: + - Whether to ignore any whitespaces immediately following the delimiter. + - When using this parameter, you change the default value used by O(dialect). 
+ - The default value depends on the dialect used. + type: bool + strict: + description: + - Whether to raise an exception on bad CSV input. + - When using this parameter, you change the default value used by O(dialect). + - The default value depends on the dialect used. + type: bool +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: Parse a CSV file's contents ansible.builtin.debug: msg: >- - {{ csv_data | community.genera.from_csv(dialect='unix') }} + {{ csv_data | community.general.from_csv(dialect='unix') }} vars: csv_data: | Column 1,Value @@ -70,17 +69,16 @@ EXAMPLES = ''' # "Column 1": "bar", # "Value": "42", # } -''' +""" -RETURN = ''' - _value: - description: A list with one dictionary per row. - type: list - elements: dictionary -''' +RETURN = r""" +_value: + description: A list with one dictionary per row. + type: list + elements: dictionary +""" from ansible.errors import AnsibleFilterError -from ansible.module_utils.common.text.converters import to_native from ansible_collections.community.general.plugins.module_utils.csv import (initialize_dialect, read_csv, CSVError, DialectNotAvailableError, @@ -98,7 +96,7 @@ def from_csv(data, dialect='excel', fieldnames=None, delimiter=None, skipinitial try: dialect = initialize_dialect(dialect, **dialect_params) except (CustomDialectFailureError, DialectNotAvailableError) as e: - raise AnsibleFilterError(to_native(e)) + raise AnsibleFilterError(str(e)) reader = read_csv(data, dialect, fieldnames) @@ -108,7 +106,7 @@ def from_csv(data, dialect='excel', fieldnames=None, delimiter=None, skipinitial for row in reader: data_list.append(row) except CSVError as e: - raise AnsibleFilterError("Unable to process file: %s" % to_native(e)) + raise AnsibleFilterError(f"Unable to process file: {e}") return data_list diff --git a/plugins/filter/from_ini.py b/plugins/filter/from_ini.py new file mode 100644 index 0000000000..07b16d4ac2 --- /dev/null +++ b/plugins/filter/from_ini.py @@ -0,0 +1,95 @@ + +# Copyright (c) 2023, Steffen 
Scheib +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + +DOCUMENTATION = r""" +name: from_ini +short_description: Converts INI text input into a dictionary +version_added: 8.2.0 +author: Steffen Scheib (@sscheib) +description: + - Converts INI text input into a dictionary. +options: + _input: + description: A string containing an INI document. + type: string + required: true +""" + +EXAMPLES = r""" +- name: Slurp an INI file + ansible.builtin.slurp: + src: /etc/rhsm/rhsm.conf + register: rhsm_conf + +- name: Display the INI file as dictionary + ansible.builtin.debug: + var: rhsm_conf.content | b64decode | community.general.from_ini + +- name: Set a new dictionary fact with the contents of the INI file + ansible.builtin.set_fact: + rhsm_dict: >- + {{ + rhsm_conf.content | b64decode | community.general.from_ini + }} +""" + +RETURN = r""" +_value: + description: A dictionary representing the INI file. 
+ type: dictionary +""" + + +from io import StringIO +from configparser import ConfigParser + +from ansible.errors import AnsibleFilterError + + +class IniParser(ConfigParser): + ''' Implements a configparser which is able to return a dict ''' + + def __init__(self): + super().__init__(interpolation=None) + self.optionxform = str + + def as_dict(self): + d = dict(self._sections) + for k in d: + d[k] = dict(self._defaults, **d[k]) + d[k].pop('__name__', None) + + if self._defaults: + d['DEFAULT'] = dict(self._defaults) + + return d + + +def from_ini(obj): + ''' Read the given string as INI file and return a dict ''' + + if not isinstance(obj, str): + raise AnsibleFilterError(f'from_ini requires a str, got {type(obj)}') + + parser = IniParser() + + try: + parser.read_file(StringIO(obj)) + except Exception as ex: + raise AnsibleFilterError(f'from_ini failed to parse given string: {ex}', orig_exc=ex) + + return parser.as_dict() + + +class FilterModule(object): + ''' Query filter ''' + + def filters(self): + + return { + 'from_ini': from_ini + } diff --git a/plugins/filter/groupby_as_dict.py b/plugins/filter/groupby_as_dict.py index 386a8b44cf..766d365575 100644 --- a/plugins/filter/groupby_as_dict.py +++ b/plugins/filter/groupby_as_dict.py @@ -1,31 +1,32 @@ -# -*- coding: utf-8 -*- # Copyright (c) 2021, Felix Fontein -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = ''' - name: groupby_as_dict - short_description: Transform a sequence of dictionaries to a dictionary where the dictionaries are indexed by an attribute - version_added: 3.1.0 - author: Felix Fontein (@felixfontein) - description: - - Transform a sequence of 
dictionaries to a dictionary where the dictionaries are indexed by an attribute. - positional: attribute - options: - _input: - description: A list of dictionaries - type: list - elements: dictionary - required: true - attribute: - description: The attribute to use as the key. - type: str - required: true -''' +DOCUMENTATION = r""" +name: groupby_as_dict +short_description: Transform a sequence of dictionaries to a dictionary where the dictionaries are indexed by an attribute +version_added: 3.1.0 +author: Felix Fontein (@felixfontein) +description: + - Transform a sequence of dictionaries to a dictionary where the dictionaries are indexed by an attribute. + - This filter is similar to the Jinja2 C(groupby) filter. Use the Jinja2 C(groupby) filter if you have multiple entries + with the same value, or when you need a dictionary with list values, or when you need to use deeply nested attributes. +positional: attribute +options: + _input: + description: A list of dictionaries. + type: list + elements: dictionary + required: true + attribute: + description: The attribute to use as the key. + type: str + required: true +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: Arrange a list of dictionaries as a dictionary of dictionaries ansible.builtin.debug: msg: "{{ sequence | community.general.groupby_as_dict('key') }}" @@ -43,16 +44,16 @@ EXAMPLES = ''' # other_value: # key: other_value # baz: bar -''' +""" -RETURN = ''' - _value: - description: A dictionary containing the dictionaries from the list as values. - type: dictionary -''' +RETURN = r""" +_value: + description: A dictionary containing the dictionaries from the list as values. 
+ type: dictionary +""" from ansible.errors import AnsibleFilterError -from ansible.module_utils.common._collections_compat import Mapping, Sequence +from collections.abc import Mapping, Sequence def groupby_as_dict(sequence, attribute): @@ -69,12 +70,12 @@ def groupby_as_dict(sequence, attribute): result = dict() for list_index, element in enumerate(sequence): if not isinstance(element, Mapping): - raise AnsibleFilterError('Sequence element #{0} is not a mapping'.format(list_index)) + raise AnsibleFilterError(f'Sequence element #{list_index} is not a mapping') if attribute not in element: - raise AnsibleFilterError('Attribute not contained in element #{0} of sequence'.format(list_index)) + raise AnsibleFilterError(f'Attribute not contained in element #{list_index} of sequence') result_index = element[attribute] if result_index in result: - raise AnsibleFilterError('Multiple sequence entries have attribute value {0!r}'.format(result_index)) + raise AnsibleFilterError(f'Multiple sequence entries have attribute value {result_index!r}') result[result_index] = element return result diff --git a/plugins/filter/hashids.py b/plugins/filter/hashids.py index c4735afeae..c58ae4d70b 100644 --- a/plugins/filter/hashids.py +++ b/plugins/filter/hashids.py @@ -1,20 +1,23 @@ -# -*- coding: utf-8 -*- -# Copyright: (c) 2021, Andrew Pantuso (@ajpantuso) -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# Copyright (c) 2021, Andrew Pantuso (@ajpantuso) +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations from ansible.errors import ( AnsibleError, AnsibleFilterError, - AnsibleFilterTypeError, ) from ansible.module_utils.common.text.converters import to_native from ansible.module_utils.common.collections import 
is_sequence +try: + from ansible.errors import AnsibleTypeError +except ImportError: + from ansible.errors import AnsibleFilterTypeError as AnsibleTypeError + try: from hashids import Hashids HAS_HASHIDS = True @@ -26,7 +29,7 @@ def initialize_hashids(**kwargs): if not HAS_HASHIDS: raise AnsibleError("The hashids library must be installed in order to use this plugin") - params = dict((k, v) for k, v in kwargs.items() if v) + params = {k: v for k, v in kwargs.items() if v} try: return Hashids(**params) @@ -63,9 +66,7 @@ def hashids_encode(nums, salt=None, alphabet=None, min_length=None): try: hashid = hashids.encode(*nums) except TypeError as e: - raise AnsibleFilterTypeError( - "Data to encode must by a tuple or list of ints: %s" % to_native(e) - ) + raise AnsibleTypeError(f"Data to encode must be a tuple or list of ints: {e}") return hashid diff --git a/plugins/filter/hashids_decode.yml b/plugins/filter/hashids_decode.yml index 50e07abc8c..3d2144f725 100644 --- a/plugins/filter/hashids_decode.yml +++ b/plugins/filter/hashids_decode.yml @@ -1,3 +1,8 @@ +--- +# Copyright (c) Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + DOCUMENTATION: name: hashids_decode short_description: Decodes a sequence of numbers from a YouTube-like hash diff --git a/plugins/filter/hashids_encode.yml b/plugins/filter/hashids_encode.yml index 69816aac30..af19522d0a 100644 --- a/plugins/filter/hashids_encode.yml +++ b/plugins/filter/hashids_encode.yml @@ -1,3 +1,8 @@ +--- +# Copyright (c) Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + DOCUMENTATION: name: hashids_encode short_description: Encodes YouTube-like hashes from a sequence of integers diff --git a/plugins/filter/jc.py b/plugins/filter/jc.py index 094da26632..92996e812c 100644 
--- a/plugins/filter/jc.py +++ b/plugins/filter/jc.py @@ -1,60 +1,52 @@ -# -*- coding: utf-8 -*- -# (c) 2015, Filipe Niero Felisbino -# -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . +# Copyright (c) 2015, Filipe Niero Felisbino +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later # # contributed by Kelly Brazil -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = ''' - name: jc - short_description: Convert output of many shell commands and file-types to JSON - version_added: 1.1.0 - author: Kelly Brazil (@kellyjonbrazil) - description: - - Convert output of many shell commands and file-types to JSON. - - Uses the L(jc library,https://github.com/kellyjonbrazil/jc). - positional: parser - options: - _input: - description: The data to convert. - type: string - required: true - parser: - description: - - The correct parser for the input data. - - For exmaple C(ifconfig). - - See U(https://github.com/kellyjonbrazil/jc#parsers) for the latest list of parsers. - type: string - required: true - quiet: - description: Set to C(false) to not suppress warnings. - type: boolean - default: true - raw: - description: Set to C(true) to return pre-processed JSON. 
- type: boolean - default: false - requirements: - - jc (https://github.com/kellyjonbrazil/jc) -''' +DOCUMENTATION = r""" +name: jc +short_description: Convert output of many shell commands and file-types to JSON +version_added: 1.1.0 +author: Kelly Brazil (@kellyjonbrazil) +description: + - Convert output of many shell commands and file-types to JSON. + - Uses the L(jc library,https://github.com/kellyjonbrazil/jc). +positional: parser +options: + _input: + description: The data to convert. + type: string + required: true + parser: + description: + - The correct parser for the input data. + - For example V(ifconfig). + - 'Note: use underscores instead of dashes (if any) in the parser module name.' + - See U(https://github.com/kellyjonbrazil/jc#parsers) for the latest list of parsers. + type: string + required: true + quiet: + description: Set to V(false) to not suppress warnings. + type: boolean + default: true + raw: + description: Set to V(true) to return pre-processed JSON. + type: boolean + default: false +requirements: + - jc installed as a Python library (U(https://pypi.org/project/jc/)) +""" + +EXAMPLES = r""" +- name: Install the prereqs of the jc filter (jc Python package) on the Ansible controller + delegate_to: localhost + ansible.builtin.pip: + name: jc + state: present -EXAMPLES = ''' - name: Run command ansible.builtin.command: uname -a register: result @@ -74,13 +66,13 @@ EXAMPLES = ''' # "operating_system": "GNU/Linux", # "processor": "x86_64" # } -''' +""" -RETURN = ''' - _value: - description: The processed output. - type: any -''' +RETURN = r""" +_value: + description: The processed output. 
+ type: any +""" from ansible.errors import AnsibleError, AnsibleFilterError import importlib @@ -92,7 +84,7 @@ except ImportError: HAS_LIB = False -def jc(data, parser, quiet=True, raw=False): +def jc_filter(data, parser, quiet=True, raw=False): """Convert returned command output to JSON using the JC library Arguments: @@ -107,15 +99,19 @@ def jc(data, parser, quiet=True, raw=False): dictionary or list of dictionaries Example: - - name: run date command hosts: ubuntu tasks: - - shell: date + - name: install the prereqs of the jc filter (jc Python package) on the Ansible controller + delegate_to: localhost + ansible.builtin.pip: + name: jc + state: present + - ansible.builtin.shell: date register: result - - set_fact: + - ansible.builtin.set_fact: myvar: "{{ result.stdout | community.general.jc('date') }}" - - debug: + - ansible.builtin.debug: msg: "{{ myvar }}" produces: @@ -137,14 +133,20 @@ def jc(data, parser, quiet=True, raw=False): """ if not HAS_LIB: - raise AnsibleError('You need to install "jc" prior to running jc filter') + raise AnsibleError('You need to install "jc" as a Python library on the Ansible controller prior to running jc filter') try: - jc_parser = importlib.import_module('jc.parsers.' 
+ parser) - return jc_parser.parse(data, quiet=quiet, raw=raw) + # new API (jc v1.18.0 and higher) allows use of plugin parsers + if hasattr(jc, 'parse'): + return jc.parse(parser, data, quiet=quiet, raw=raw) + + # old API (jc v1.17.7 and lower) + else: + jc_parser = importlib.import_module(f'jc.parsers.{parser}') + return jc_parser.parse(data, quiet=quiet, raw=raw) except Exception as e: - raise AnsibleFilterError('Error in jc filter plugin: %s' % e) + raise AnsibleFilterError(f'Error in jc filter plugin: {e}') class FilterModule(object): @@ -152,5 +154,5 @@ class FilterModule(object): def filters(self): return { - 'jc': jc + 'jc': jc_filter, } diff --git a/plugins/filter/json_diff.yml b/plugins/filter/json_diff.yml new file mode 100644 index 0000000000..a370564d7a --- /dev/null +++ b/plugins/filter/json_diff.yml @@ -0,0 +1,56 @@ +--- +# Copyright (c) Stanislav Meduna (@numo68) +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +DOCUMENTATION: + name: json_diff + short_description: Create a JSON patch by comparing two JSON files + description: + - This filter compares the input with the argument and computes a list of operations + that can be consumed by the P(community.general.json_patch_recipe#filter) to change the input + to the argument. + requirements: + - jsonpatch + version_added: 10.3.0 + author: + - Stanislav Meduna (@numo68) + positional: target + options: + _input: + description: A list or a dictionary representing a source JSON object, or a string containing a JSON object. + type: raw + required: true + target: + description: A list or a dictionary representing a target JSON object, or a string containing a JSON object. 
+ type: raw + required: true + seealso: + - name: RFC 6902 + description: JavaScript Object Notation (JSON) Patch + link: https://datatracker.ietf.org/doc/html/rfc6902 + - name: RFC 6901 + description: JavaScript Object Notation (JSON) Pointer + link: https://datatracker.ietf.org/doc/html/rfc6901 + - name: jsonpatch Python Package + description: A Python library for applying JSON patches + link: https://pypi.org/project/jsonpatch/ + +RETURN: + _value: + description: A list of JSON patch operations to apply. + type: list + elements: dict + +EXAMPLES: | + - name: Compute a difference + ansible.builtin.debug: + msg: "{{ input | community.general.json_diff(target) }}" + vars: + input: {"foo": 1, "bar":{"baz": 2}, "baw": [1, 2, 3], "hello": "day"} + target: {"foo": 1, "bar": {"baz": 2}, "baw": [1, 3], "baq": {"baz": 2}, "hello": "night"} + # => [ + # {"op": "add", "path": "/baq", "value": {"baz": 2}}, + # {"op": "remove", "path": "/baw/1"}, + # {"op": "replace", "path": "/hello", "value": "night"} + # ] diff --git a/plugins/filter/json_patch.py b/plugins/filter/json_patch.py new file mode 100644 index 0000000000..8cd6bd08b0 --- /dev/null +++ b/plugins/filter/json_patch.py @@ -0,0 +1,193 @@ +# Copyright (c) Stanislav Meduna (@numo68) +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations +from json import loads +from typing import TYPE_CHECKING +from ansible.errors import AnsibleFilterError + + +if TYPE_CHECKING: + from typing import Any, Callable, Union + +try: + import jsonpatch + +except ImportError as exc: + HAS_LIB = False + JSONPATCH_IMPORT_ERROR = exc +else: + HAS_LIB = True + JSONPATCH_IMPORT_ERROR = None + +OPERATIONS_AVAILABLE = ["add", "copy", "move", "remove", "replace", "test"] +OPERATIONS_NEEDING_FROM = ["copy", "move"] +OPERATIONS_NEEDING_VALUE = ["add", "replace", "test"] + + +class FilterModule: + """Filter 
plugin.""" + + def check_json_object(self, filter_name: str, object_name: str, inp: Any): + if isinstance(inp, (str, bytes, bytearray)): + try: + return loads(inp) + except Exception as e: + raise AnsibleFilterError( + f"{filter_name}: could not decode JSON from {object_name}: {e}" + ) from e + + if not isinstance(inp, (list, dict)): + raise AnsibleFilterError( + f"{filter_name}: {object_name} is not dictionary, list or string" + ) + + return inp + + def check_patch_arguments(self, filter_name: str, args: dict): + + if "op" not in args or not isinstance(args["op"], str): + raise AnsibleFilterError(f"{filter_name}: 'op' argument is not a string") + + if args["op"] not in OPERATIONS_AVAILABLE: + raise AnsibleFilterError( + f"{filter_name}: unsupported 'op' argument: {args['op']}" + ) + + if "path" not in args or not isinstance(args["path"], str): + raise AnsibleFilterError(f"{filter_name}: 'path' argument is not a string") + + if args["op"] in OPERATIONS_NEEDING_FROM: + if "from" not in args: + raise AnsibleFilterError( + f"{filter_name}: 'from' argument missing for '{args['op']}' operation" + ) + if not isinstance(args["from"], str): + raise AnsibleFilterError( + f"{filter_name}: 'from' argument is not a string" + ) + + def json_patch( + self, + inp: Union[str, list, dict, bytes, bytearray], + op: str, + path: str, + value: Any = None, + **kwargs: dict, + ) -> Any: + + if not HAS_LIB: + raise AnsibleFilterError( + "You need to install 'jsonpatch' package prior to running 'json_patch' filter" + ) from JSONPATCH_IMPORT_ERROR + + args = {"op": op, "path": path} + from_arg = kwargs.pop("from", None) + fail_test = kwargs.pop("fail_test", False) + + if kwargs: + raise AnsibleFilterError( + f"json_patch: unexpected keywords arguments: {', '.join(sorted(kwargs))}" + ) + + if not isinstance(fail_test, bool): + raise AnsibleFilterError("json_patch: 'fail_test' argument is not a bool") + + if op in OPERATIONS_NEEDING_VALUE: + args["value"] = value + if op in 
OPERATIONS_NEEDING_FROM and from_arg is not None: + args["from"] = from_arg + + inp = self.check_json_object("json_patch", "input", inp) + self.check_patch_arguments("json_patch", args) + + result = None + + try: + result = jsonpatch.apply_patch(inp, [args]) + except jsonpatch.JsonPatchTestFailed as e: + if fail_test: + raise AnsibleFilterError( + f"json_patch: test operation failed: {e}" + ) from e + else: + pass + except Exception as e: + raise AnsibleFilterError(f"json_patch: patch failed: {e}") from e + + return result + + def json_patch_recipe( + self, + inp: Union[str, list, dict, bytes, bytearray], + operations: list, + /, + fail_test: bool = False, + ) -> Any: + + if not HAS_LIB: + raise AnsibleFilterError( + "You need to install 'jsonpatch' package prior to running 'json_patch_recipe' filter" + ) from JSONPATCH_IMPORT_ERROR + + if not isinstance(operations, list): + raise AnsibleFilterError( + "json_patch_recipe: 'operations' needs to be a list" + ) + + if not isinstance(fail_test, bool): + raise AnsibleFilterError("json_patch: 'fail_test' argument is not a bool") + + result = None + + inp = self.check_json_object("json_patch_recipe", "input", inp) + for args in operations: + self.check_patch_arguments("json_patch_recipe", args) + + try: + result = jsonpatch.apply_patch(inp, operations) + except jsonpatch.JsonPatchTestFailed as e: + if fail_test: + raise AnsibleFilterError( + f"json_patch_recipe: test operation failed: {e}" + ) from e + else: + pass + except Exception as e: + raise AnsibleFilterError(f"json_patch_recipe: patch failed: {e}") from e + + return result + + def json_diff( + self, + inp: Union[str, list, dict, bytes, bytearray], + target: Union[str, list, dict, bytes, bytearray], + ) -> list: + + if not HAS_LIB: + raise AnsibleFilterError( + "You need to install 'jsonpatch' package prior to running 'json_diff' filter" + ) from JSONPATCH_IMPORT_ERROR + + inp = self.check_json_object("json_diff", "input", inp) + target = 
self.check_json_object("json_diff", "target", target) + + try: + result = list(jsonpatch.make_patch(inp, target)) + except Exception as e: + raise AnsibleFilterError(f"JSON diff failed: {e}") from e + + return result + + def filters(self) -> dict[str, Callable[..., Any]]: + """Map filter plugin names to their functions. + + Returns: + dict: The filter plugin functions. + """ + return { + "json_patch": self.json_patch, + "json_patch_recipe": self.json_patch_recipe, + "json_diff": self.json_diff, + } diff --git a/plugins/filter/json_patch.yml b/plugins/filter/json_patch.yml new file mode 100644 index 0000000000..42a0309202 --- /dev/null +++ b/plugins/filter/json_patch.yml @@ -0,0 +1,145 @@ +--- +# Copyright (c) Stanislav Meduna (@numo68) +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +DOCUMENTATION: + name: json_patch + short_description: Apply a JSON-Patch (RFC 6902) operation to an object + description: + - This filter applies a single JSON patch operation and returns a modified object. + - If the operation is a test, the filter returns an ummodified object if the test + succeeded and a V(none) value otherwise. + requirements: + - jsonpatch + version_added: 10.3.0 + author: + - Stanislav Meduna (@numo68) + positional: op, path, value + options: + _input: + description: A list or a dictionary representing a JSON object, or a string containing a JSON object. + type: raw + required: true + op: + description: Operation to perform (see L(RFC 6902, https://datatracker.ietf.org/doc/html/rfc6902)). + type: str + choices: [add, copy, move, remove, replace, test] + required: true + path: + description: JSON Pointer path to the target location (see L(RFC 6901, https://datatracker.ietf.org/doc/html/rfc6901)). + type: str + required: true + value: + description: Value to use in the operation. Ignored for O(op=copy), O(op=move), and O(op=remove). 
+ type: raw + from: + description: The source location for the copy and move operation. Mandatory + for O(op=copy) and O(op=move), ignored otherwise. + type: str + fail_test: + description: If V(false), a failed O(op=test) will return V(none). If V(true), the filter + invocation will fail with an error. + type: bool + default: false + seealso: + - name: RFC 6902 + description: JavaScript Object Notation (JSON) Patch + link: https://datatracker.ietf.org/doc/html/rfc6902 + - name: RFC 6901 + description: JavaScript Object Notation (JSON) Pointer + link: https://datatracker.ietf.org/doc/html/rfc6901 + - name: jsonpatch Python Package + description: A Python library for applying JSON patches + link: https://pypi.org/project/jsonpatch/ + +RETURN: + _value: + description: A modified object or V(none) if O(op=test), O(fail_test=false) and the test failed. + type: any + returned: always + +EXAMPLES: | + - name: Insert a new element into an array at a specified index + ansible.builtin.debug: + msg: "{{ input | community.general.json_patch('add', '/1', {'baz': 'qux'}) }}" + vars: + input: ["foo": { "one": 1 }, "bar": { "two": 2 }] + # => [{"foo": {"one": 1}}, {"baz": "qux"}, {"bar": {"two": 2}}] + + - name: Insert a new key into a dictionary + ansible.builtin.debug: + msg: "{{ input | community.general.json_patch('add', '/bar/baz', 'qux') }}" + vars: + input: { "foo": { "one": 1 }, "bar": { "two": 2 } } + # => {"foo": {"one": 1}, "bar": {"baz": "qux", "two": 2}} + + - name: Input is a string + ansible.builtin.debug: + msg: "{{ input | community.general.json_patch('add', '/baz', 3) }}" + vars: + input: '{ "foo": { "one": 1 }, "bar": { "two": 2 } }' + # => {"foo": {"one": 1}, "bar": { "two": 2 }, "baz": 3} + + - name: Existing key is replaced + ansible.builtin.debug: + msg: "{{ input | community.general.json_patch('add', '/bar', 'qux') }}" + vars: + input: { "foo": { "one": 1 }, "bar": { "two": 2 } } + # => {"foo": {"one": 1}, "bar": "qux"} + + - name: Escaping tilde as ~0 and 
slash as ~1 in the path + ansible.builtin.debug: + msg: "{{ input | community.general.json_patch('add', '/~0~1', 'qux') }}" + vars: + input: {} + # => {"~/": "qux"} + + - name: Add at the end of the array + ansible.builtin.debug: + msg: "{{ input | community.general.json_patch('add', '/-', 4) }}" + vars: + input: [1, 2, 3] + # => [1, 2, 3, 4] + + - name: Remove a key + ansible.builtin.debug: + msg: "{{ input | community.general.json_patch('remove', '/bar') }}" + vars: + input: { "foo": { "one": 1 }, "bar": { "two": 2 } } + # => {"foo": {"one": 1} } + + - name: Replace a value + ansible.builtin.debug: + msg: "{{ input | community.general.json_patch('replace', '/bar', 2) }}" + vars: + input: { "foo": { "one": 1 }, "bar": { "two": 2 } } + # => {"foo": {"one": 1}, "bar": 2} + + - name: Copy a value + ansible.builtin.debug: + msg: "{{ input | community.general.json_patch('copy', '/baz', from='/bar') }}" + vars: + input: { "foo": { "one": 1 }, "bar": { "two": 2 } } + # => {"foo": {"one": 1}, "bar": { "two": 2 }, "baz": { "two": 2 }} + + - name: Move a value + ansible.builtin.debug: + msg: "{{ input | community.general.json_patch('move', '/baz', from='/bar') }}" + vars: + input: { "foo": { "one": 1 }, "bar": { "two": 2 } } + # => {"foo": {"one": 1}, "baz": { "two": 2 }} + + - name: Successful test + ansible.builtin.debug: + msg: "{{ input | community.general.json_patch('test', '/bar/two', 2) | ternary('OK', 'Failed') }}" + vars: + input: { "foo": { "one": 1 }, "bar": { "two": 2 } } + # => OK + + - name: Unuccessful test + ansible.builtin.debug: + msg: "{{ input | community.general.json_patch('test', '/bar/two', 9) | ternary('OK', 'Failed') }}" + vars: + input: { "foo": { "one": 1 }, "bar": { "two": 2 } } + # => Failed diff --git a/plugins/filter/json_patch_recipe.yml b/plugins/filter/json_patch_recipe.yml new file mode 100644 index 0000000000..671600b941 --- /dev/null +++ b/plugins/filter/json_patch_recipe.yml @@ -0,0 +1,102 @@ +--- +# Copyright (c) Stanislav Meduna 
(@numo68) +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +DOCUMENTATION: + name: json_patch_recipe + short_description: Apply JSON-Patch (RFC 6902) operations to an object + description: + - This filter sequentially applies JSON patch operations and returns a modified object. + - If there is a test operation in the list, the filter continues if the test + succeeded and returns a V(none) value otherwise. + requirements: + - jsonpatch + version_added: 10.3.0 + author: + - Stanislav Meduna (@numo68) + positional: operations, fail_test + options: + _input: + description: A list or a dictionary representing a JSON object, or a string containing a JSON object. + type: raw + required: true + operations: + description: A list of JSON patch operations to apply. + type: list + elements: dict + required: true + suboptions: + op: + description: Operation to perform (see L(RFC 6902, https://datatracker.ietf.org/doc/html/rfc6902)). + type: str + choices: [add, copy, move, remove, replace, test] + required: true + path: + description: JSON Pointer path to the target location (see L(RFC 6901, https://datatracker.ietf.org/doc/html/rfc6901)). + type: str + required: true + value: + description: Value to use in the operation. Ignored for O(operations[].op=copy), O(operations[].op=move), and O(operations[].op=remove). + type: raw + from: + description: The source location for the copy and move operation. Mandatory + for O(operations[].op=copy) and O(operations[].op=move), ignored otherwise. + type: str + fail_test: + description: If V(false), a failed O(operations[].op=test) will return V(none). If V(true), the filter + invocation will fail with an error. 
+ type: bool + default: false + seealso: + - name: RFC 6902 + description: JavaScript Object Notation (JSON) Patch + link: https://datatracker.ietf.org/doc/html/rfc6902 + - name: RFC 6901 + description: JavaScript Object Notation (JSON) Pointer + link: https://datatracker.ietf.org/doc/html/rfc6901 + - name: jsonpatch Python Package + description: A Python library for applying JSON patches + link: https://pypi.org/project/jsonpatch/ + +RETURN: + _value: + description: A modified object or V(none) if O(operations[].op=test), O(fail_test=false) + and the test failed. + type: any + returned: always + +EXAMPLES: | + - name: Apply a series of operations + ansible.builtin.debug: + msg: "{{ input | community.general.json_patch_recipe(operations) }}" + vars: + input: {} + operations: + - op: 'add' + path: '/foo' + value: 1 + - op: 'add' + path: '/bar' + value: [] + - op: 'add' + path: '/bar/-' + value: 2 + - op: 'add' + path: '/bar/0' + value: 1 + - op: 'remove' + path: '/bar/0' + - op: 'move' + from: '/foo' + path: '/baz' + - op: 'copy' + from: '/baz' + path: '/bax' + - op: 'copy' + from: '/baz' + path: '/bay' + - op: 'replace' + path: '/baz' + value: [10, 20, 30] + # => {"bar":[2],"bax":1,"bay":1,"baz":[10,20,30]} diff --git a/plugins/filter/json_query.py b/plugins/filter/json_query.py index 7b04455181..e040a4aca2 100644 --- a/plugins/filter/json_query.py +++ b/plugins/filter/json_query.py @@ -1,47 +1,32 @@ -# -*- coding: utf-8 -*- -# (c) 2015, Filipe Niero Felisbino -# -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . +# Copyright (c) 2015, Filipe Niero Felisbino +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = ''' - name: json_query - short_description: Select a single element or a data subset from a complex data structure - description: - - This filter lets you query a complex JSON structure and iterate over it using a loop structure. - positional: expr - options: - _input: - description: - - The JSON data to query. - type: any - required: true - expr: - description: - - The query expression. - - See U(http://jmespath.org/examples.html) for examples. - type: string - required: true - requirements: - - jmespath -''' +DOCUMENTATION = r""" +name: json_query +short_description: Select a single element or a data subset from a complex data structure +description: + - This filter lets you query a complex JSON structure and iterate over it using a loop structure. +positional: expr +options: + _input: + description: + - The JSON data to query. + type: any + required: true + expr: + description: + - The query expression. + - See U(http://jmespath.org/examples.html) for examples. + type: string + required: true +requirements: + - jmespath +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: Define data to work on in the examples below ansible.builtin.set_fact: domain_definition: @@ -112,13 +97,13 @@ EXAMPLES = ''' msg: "{{ domain_definition | to_json | from_json | community.general.json_query(server_name_query) }}" vars: server_name_query: "domain.server[?contains(name,'server1')].port" -''' +""" -RETURN = ''' - _value: - description: The result of the query. 
- type: any -''' +RETURN = r""" +_value: + description: The result of the query. + type: any +""" from ansible.errors import AnsibleError, AnsibleFilterError @@ -138,17 +123,24 @@ def json_query(data, expr): 'json_query filter') # Hack to handle Ansible Unsafe text, AnsibleMapping and AnsibleSequence - # See issue: https://github.com/ansible-collections/community.general/issues/320 - jmespath.functions.REVERSE_TYPES_MAP['string'] = jmespath.functions.REVERSE_TYPES_MAP['string'] + ('AnsibleUnicode', 'AnsibleUnsafeText', ) - jmespath.functions.REVERSE_TYPES_MAP['array'] = jmespath.functions.REVERSE_TYPES_MAP['array'] + ('AnsibleSequence', ) - jmespath.functions.REVERSE_TYPES_MAP['object'] = jmespath.functions.REVERSE_TYPES_MAP['object'] + ('AnsibleMapping', ) + # See issues https://github.com/ansible-collections/community.general/issues/320 + # and https://github.com/ansible/ansible/issues/85600. + jmespath.functions.REVERSE_TYPES_MAP['string'] = jmespath.functions.REVERSE_TYPES_MAP['string'] + ( + 'AnsibleUnicode', 'AnsibleUnsafeText', '_AnsibleTaggedStr', + ) + jmespath.functions.REVERSE_TYPES_MAP['array'] = jmespath.functions.REVERSE_TYPES_MAP['array'] + ( + 'AnsibleSequence', '_AnsibleLazyTemplateList', + ) + jmespath.functions.REVERSE_TYPES_MAP['object'] = jmespath.functions.REVERSE_TYPES_MAP['object'] + ( + 'AnsibleMapping', '_AnsibleLazyTemplateDict', + ) try: return jmespath.search(expr, data) except jmespath.exceptions.JMESPathError as e: - raise AnsibleFilterError('JMESPathError in json_query filter plugin:\n%s' % e) + raise AnsibleFilterError(f'JMESPathError in json_query filter plugin:\n{e}') except Exception as e: # For older jmespath, we can get ValueError and TypeError without much info. 
- raise AnsibleFilterError('Error in jmespath.search in json_query filter plugin:\n%s' % e) + raise AnsibleFilterError(f'Error in jmespath.search in json_query filter plugin:\n{e}') class FilterModule(object): diff --git a/plugins/filter/keep_keys.py b/plugins/filter/keep_keys.py new file mode 100644 index 0000000000..18876789d6 --- /dev/null +++ b/plugins/filter/keep_keys.py @@ -0,0 +1,136 @@ +# Copyright (c) 2024 Vladimir Botka +# Copyright (c) 2024 Felix Fontein +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + +DOCUMENTATION = r""" +name: keep_keys +short_description: Keep specific keys from dictionaries in a list +version_added: "9.1.0" +author: + - Vladimir Botka (@vbotka) + - Felix Fontein (@felixfontein) +description: This filter keeps only specified keys from a provided list of dictionaries. +options: + _input: + description: + - A list of dictionaries. + - Top level keys must be strings. + type: list + elements: dictionary + required: true + target: + description: + - A single key or key pattern to keep, or a list of keys or keys patterns to keep. + - If O(matching_parameter=regex) there must be exactly one pattern provided. + type: raw + required: true + matching_parameter: + description: Specify the matching option of target keys. + type: str + default: equal + choices: + equal: Matches keys of exactly one of the O(target) items. + starts_with: Matches keys that start with one of the O(target) items. + ends_with: Matches keys that end with one of the O(target) items. + regex: + - Matches keys that match the regular expresion provided in O(target). + - In this case, O(target) must be a regex string or a list with single regex string. 
+""" + +EXAMPLES = r""" +- l: + - {k0_x0: A0, k1_x1: B0, k2_x2: [C0], k3_x3: foo} + - {k0_x0: A1, k1_x1: B1, k2_x2: [C1], k3_x3: bar} + + # 1) By default match keys that equal any of the items in the target. +- t: [k0_x0, k1_x1] + r: "{{ l | community.general.keep_keys(target=t) }}" + + # 2) Match keys that start with any of the items in the target. +- t: [k0, k1] + r: "{{ l | community.general.keep_keys(target=t, matching_parameter='starts_with') }}" + + # 3) Match keys that end with any of the items in target. +- t: [x0, x1] + r: "{{ l | community.general.keep_keys(target=t, matching_parameter='ends_with') }}" + + # 4) Match keys by the regex. +- t: ['^.*[01]_x.*$'] + r: "{{ l | community.general.keep_keys(target=t, matching_parameter='regex') }}" + + # 5) Match keys by the regex. +- t: '^.*[01]_x.*$' + r: "{{ l | community.general.keep_keys(target=t, matching_parameter='regex') }}" + + # The results of above examples 1-5 are all the same. +- r: + - {k0_x0: A0, k1_x1: B0} + - {k0_x0: A1, k1_x1: B1} + + # 6) By default match keys that equal the target. +- t: k0_x0 + r: "{{ l | community.general.keep_keys(target=t) }}" + + # 7) Match keys that start with the target. +- t: k0 + r: "{{ l | community.general.keep_keys(target=t, matching_parameter='starts_with') }}" + + # 8) Match keys that end with the target. +- t: x0 + r: "{{ l | community.general.keep_keys(target=t, matching_parameter='ends_with') }}" + + # 9) Match keys by the regex. +- t: '^.*0_x.*$' + r: "{{ l | community.general.keep_keys(target=t, matching_parameter='regex') }}" + + # The results of above examples 6-9 are all the same. +- r: + - {k0_x0: A0} + - {k0_x0: A1} +""" + +RETURN = r""" +_value: + description: The list of dictionaries with selected keys. 
+ type: list + elements: dictionary +""" + +from ansible_collections.community.general.plugins.plugin_utils.keys_filter import ( + _keys_filter_params, + _keys_filter_target_str) + + +def keep_keys(data, target=None, matching_parameter='equal'): + """keep specific keys from dictionaries in a list""" + + # test parameters + _keys_filter_params(data, matching_parameter) + # test and transform target + tt = _keys_filter_target_str(target, matching_parameter) + + if matching_parameter == 'equal': + def keep_key(key): + return key in tt + elif matching_parameter == 'starts_with': + def keep_key(key): + return key.startswith(tt) + elif matching_parameter == 'ends_with': + def keep_key(key): + return key.endswith(tt) + elif matching_parameter == 'regex': + def keep_key(key): + return tt.match(key) is not None + + return [{k: v for k, v in d.items() if keep_key(k)} for d in data] + + +class FilterModule(object): + + def filters(self): + return { + 'keep_keys': keep_keys, + } diff --git a/plugins/filter/lists.py b/plugins/filter/lists.py new file mode 100644 index 0000000000..0bae08f24c --- /dev/null +++ b/plugins/filter/lists.py @@ -0,0 +1,200 @@ +# Copyright (c) Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + +from ansible.errors import AnsibleFilterError +from ansible.module_utils.common.collections import is_sequence + + +def remove_duplicates(lst): + seen = set() + seen_add = seen.add + result = [] + for item in lst: + try: + if item not in seen: + seen_add(item) + result.append(item) + except TypeError: + # This happens for unhashable values `item`. If this happens, + # convert `seen` to a list and continue. 
+ seen = list(seen) + seen_add = seen.append + if item not in seen: + seen_add(item) + result.append(item) + return result + + +def flatten_list(lst): + result = [] + for sublist in lst: + if not is_sequence(sublist): + msg = ("All arguments must be lists. %s is %s") + raise AnsibleFilterError(msg % (sublist, type(sublist))) + if len(sublist) > 0: + if all(is_sequence(sub) for sub in sublist): + for item in sublist: + result.append(item) + else: + result.append(sublist) + return result + + +def lists_union(*args, **kwargs): + lists = args + flatten = kwargs.pop('flatten', False) + + if kwargs: + # Some unused kwargs remain + raise AnsibleFilterError( + f"lists_union() got unexpected keywords arguments: {', '.join(kwargs.keys())}" + ) + + if flatten: + lists = flatten_list(args) + + if not lists: + return [] + + if len(lists) == 1: + return lists[0] + + a = lists[0] + for b in lists[1:]: + a = do_union(a, b) + return remove_duplicates(a) + + +def do_union(a, b): + return a + b + + +def lists_intersect(*args, **kwargs): + lists = args + flatten = kwargs.pop('flatten', False) + + if kwargs: + # Some unused kwargs remain + raise AnsibleFilterError( + f"lists_intersect() got unexpected keywords arguments: {', '.join(kwargs.keys())}" + ) + + if flatten: + lists = flatten_list(args) + + if not lists: + return [] + + if len(lists) == 1: + return lists[0] + + a = remove_duplicates(lists[0]) + for b in lists[1:]: + a = do_intersect(a, b) + return a + + +def do_intersect(a, b): + isect = [] + try: + other = set(b) + isect = [item for item in a if item in other] + except TypeError: + # This happens for unhashable values, + # use a list instead and redo. 
+ other = list(b) + isect = [item for item in a if item in other] + return isect + + +def lists_difference(*args, **kwargs): + lists = args + flatten = kwargs.pop('flatten', False) + + if kwargs: + # Some unused kwargs remain + raise AnsibleFilterError( + f"lists_difference() got unexpected keywords arguments: {', '.join(kwargs.keys())}" + ) + + if flatten: + lists = flatten_list(args) + + if not lists: + return [] + + if len(lists) == 1: + return lists[0] + + a = remove_duplicates(lists[0]) + for b in lists[1:]: + a = do_difference(a, b) + return a + + +def do_difference(a, b): + diff = [] + try: + other = set(b) + diff = [item for item in a if item not in other] + except TypeError: + # This happens for unhashable values, + # use a list instead and redo. + other = list(b) + diff = [item for item in a if item not in other] + return diff + + +def lists_symmetric_difference(*args, **kwargs): + lists = args + flatten = kwargs.pop('flatten', False) + + if kwargs: + # Some unused kwargs remain + raise AnsibleFilterError( + f"lists_difference() got unexpected keywords arguments: {', '.join(kwargs.keys())}" + ) + + if flatten: + lists = flatten_list(args) + + if not lists: + return [] + + if len(lists) == 1: + return lists[0] + + a = lists[0] + for b in lists[1:]: + a = do_symmetric_difference(a, b) + return a + + +def do_symmetric_difference(a, b): + sym_diff = [] + union = lists_union(a, b) + try: + isect = set(a) & set(b) + sym_diff = [item for item in union if item not in isect] + except TypeError: + # This happens for unhashable values, + # build the intersection of `a` and `b` backed + # by a list instead of a set and redo. 
+ isect = lists_intersect(a, b) + sym_diff = [item for item in union if item not in isect] + return sym_diff + + +class FilterModule(object): + ''' Ansible lists jinja2 filters ''' + + def filters(self): + return { + 'lists_union': lists_union, + 'lists_intersect': lists_intersect, + 'lists_difference': lists_difference, + 'lists_symmetric_difference': lists_symmetric_difference, + } diff --git a/plugins/filter/lists_difference.yml b/plugins/filter/lists_difference.yml new file mode 100644 index 0000000000..630e77cf0a --- /dev/null +++ b/plugins/filter/lists_difference.yml @@ -0,0 +1,48 @@ +--- +# Copyright (c) Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +DOCUMENTATION: + name: lists_difference + short_description: Difference of lists with a predictive order + version_added: 8.4.0 + description: + - Provide a unique list of all the elements from the first which do not appear in the other lists. + - The order of the items in the resulting list is preserved. + options: + _input: + description: A list. + type: list + elements: any + required: true + flatten: + description: Whether to remove one hierarchy level from the input list. + type: boolean + default: false + author: + - Christoph Fiehe (@cfiehe) + +EXAMPLES: | + - name: Return the difference of list1 and list2. + ansible.builtin.debug: + msg: "{{ list1 | community.general.lists_difference(list2) }}" + vars: + list1: [1, 2, 5, 3, 4, 10] + list2: [1, 2, 3, 4, 5, 11, 99] + # => [10] + + - name: Return the difference of list1, list2 and list3. 
+ ansible.builtin.debug: + msg: "{{ [list1, list2, list3] | community.general.lists_difference(flatten=true) }}" + vars: + list1: [1, 2, 5, 3, 4, 10] + list2: [1, 2, 3, 4, 5, 11, 99] + list3: [1, 2, 3, 4, 5, 10, 99, 101] + # => [] + +RETURN: + _value: + description: A unique list of all the elements from the first list that do not appear in the other lists. + type: list + elements: any diff --git a/plugins/filter/lists_intersect.yml b/plugins/filter/lists_intersect.yml new file mode 100644 index 0000000000..d2ea9483b1 --- /dev/null +++ b/plugins/filter/lists_intersect.yml @@ -0,0 +1,48 @@ +--- +# Copyright (c) Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +DOCUMENTATION: + name: lists_intersect + short_description: Intersection of lists with a predictive order + version_added: 8.4.0 + description: + - Provide a unique list of all the common elements of two or more lists. + - The order of the items in the resulting list is preserved. + options: + _input: + description: A list. + type: list + elements: any + required: true + flatten: + description: Whether to remove one hierarchy level from the input list. + type: boolean + default: false + author: + - Christoph Fiehe (@cfiehe) + +EXAMPLES: | + - name: Return the intersection of list1 and list2. + ansible.builtin.debug: + msg: "{{ list1 | community.general.lists_intersect(list2) }}" + vars: + list1: [1, 2, 5, 3, 4, 10] + list2: [1, 2, 3, 4, 5, 11, 99] + # => [1, 2, 5, 3, 4] + + - name: Return the intersection of list1, list2 and list3. + ansible.builtin.debug: + msg: "{{ [list1, list2, list3] | community.general.lists_intersect(flatten=true) }}" + vars: + list1: [1, 2, 5, 3, 4, 10] + list2: [1, 2, 3, 4, 5, 11, 99] + list3: [1, 2, 3, 4, 5, 10, 99, 101] + # => [1, 2, 5, 3, 4] + +RETURN: + _value: + description: A unique list of all the common elements from the provided lists. 
+ type: list + elements: any diff --git a/plugins/filter/lists_mergeby.py b/plugins/filter/lists_mergeby.py index 4848cc8785..4b8bf971f4 100644 --- a/plugins/filter/lists_mergeby.py +++ b/plugins/filter/lists_mergeby.py @@ -1,121 +1,216 @@ -# -*- coding: utf-8 -*- -# Copyright (c) 2020-2022, Vladimir Botka -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# Copyright (c) 2020-2024, Vladimir Botka +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = ''' - name: lists_mergeby - short_description: Merge two or more lists of dictionaries by a given attribute - version_added: 2.0.0 - author: Vladimir Botka (@vbotka) - description: - - Merge two or more lists by attribute I(index). Optional parameters 'recursive' and 'list_merge' - control the merging of the lists in values. The function merge_hash from ansible.utils.vars - is used. To learn details on how to use the parameters 'recursive' and 'list_merge' see - Ansible User's Guide chapter "Using filters to manipulate data" section "Combining - hashes/dictionaries". - positional: another_list, index - options: - _input: - description: A list of dictionaries. - type: list - elements: dictionary - required: true - another_list: - description: Another list of dictionaries. This parameter can be specified multiple times. - type: list - elements: dictionary - index: - description: - - The dictionary key that must be present in every dictionary in every list that is used to - merge the lists. - type: string - required: true - recursive: - description: - - Should the combine recursively merge nested dictionaries (hashes). - - "B(Note:) It does not depend on the value of the C(hash_behaviour) setting in C(ansible.cfg)." 
- type: boolean - default: false - list_merge: - description: - - Modifies the behaviour when the dictionaries (hashes) to merge contain arrays/lists. - type: string - default: replace - choices: - - replace - - keep - - append - - prepend - - append_rp - - prepend_rp -''' +DOCUMENTATION = r""" +name: lists_mergeby +short_description: Merge two or more lists of dictionaries by a given attribute +version_added: 2.0.0 +author: Vladimir Botka (@vbotka) +description: + - Merge two or more lists by attribute O(index). Optional parameters O(recursive) and O(list_merge) control the merging + of the nested dictionaries and lists. + - The function C(merge_hash) from C(ansible.utils.vars) is used. + - To learn details on how to use the parameters O(recursive) and O(list_merge) see Ansible User's Guide chapter "Using filters + to manipulate data" section R(Combining hashes/dictionaries, combine_filter) or the filter P(ansible.builtin.combine#filter). +positional: another_list, index +options: + _input: + description: + - A list of dictionaries, or a list of lists of dictionaries. + - The required type of the C(elements) is set to C(raw) because all elements of O(_input) can be either dictionaries + or lists. + type: list + elements: raw + required: true + another_list: + description: + - Another list of dictionaries, or a list of lists of dictionaries. + - This parameter can be specified multiple times. + type: list + elements: raw + index: + description: + - The dictionary key that must be present in every dictionary in every list that is used to merge the lists. + type: string + required: true + recursive: + description: + - Should the combine recursively merge nested dictionaries (hashes). + - B(Note:) It does not depend on the value of the C(hash_behaviour) setting in C(ansible.cfg). + type: boolean + default: false + list_merge: + description: + - Modifies the behaviour when the dictionaries (hashes) to merge contain arrays/lists. 
+ type: string + default: replace + choices: + - replace + - keep + - append + - prepend + - append_rp + - prepend_rp +""" -EXAMPLES = ''' -- name: Merge two lists +EXAMPLES = r""" +# Some results below are manually formatted for better readability. The +# dictionaries' keys will be sorted alphabetically in real output. + +- name: Example 1. Merge two lists. The results r1 and r2 are the same. ansible.builtin.debug: - msg: >- - {{ list1 | community.general.lists_mergeby( - list2, - 'index', - recursive=True, - list_merge='append' - ) }}" + msg: | + r1: {{ r1 }} + r2: {{ r2 }} vars: list1: - - index: a - value: 123 - - index: b - value: 42 + - {index: a, value: 123} + - {index: b, value: 4} list2: - - index: a - foo: bar - - index: c - foo: baz - # Produces the following list of dictionaries: - # { - # "index": "a", - # "foo": "bar", - # "value": 123 - # }, - # { - # "index": "b", - # "value": 42 - # }, - # { - # "index": "c", - # "foo": "baz" - # } -''' + - {index: a, foo: bar} + - {index: c, foo: baz} + r1: "{{ list1 | community.general.lists_mergeby(list2, 'index') }}" + r2: "{{ [list1, list2] | community.general.lists_mergeby('index') }}" -RETURN = ''' - _value: - description: The merged list. - type: list - elements: dictionary -''' +# r1: +# - {index: a, foo: bar, value: 123} +# - {index: b, value: 4} +# - {index: c, foo: baz} +# r2: +# - {index: a, foo: bar, value: 123} +# - {index: b, value: 4} +# - {index: c, foo: baz} + +- name: Example 2. Merge three lists + ansible.builtin.debug: + var: r + vars: + list1: + - {index: a, value: 123} + - {index: b, value: 4} + list2: + - {index: a, foo: bar} + - {index: c, foo: baz} + list3: + - {index: d, foo: qux} + r: "{{ [list1, list2, list3] | community.general.lists_mergeby('index') }}" + +# r: +# - {index: a, foo: bar, value: 123} +# - {index: b, value: 4} +# - {index: c, foo: baz} +# - {index: d, foo: qux} + +- name: Example 3. Merge single list. The result is the same as 2. 
+ ansible.builtin.debug: + var: r + vars: + list1: + - {index: a, value: 123} + - {index: b, value: 4} + - {index: a, foo: bar} + - {index: c, foo: baz} + - {index: d, foo: qux} + r: "{{ [list1, []] | community.general.lists_mergeby('index') }}" + +# r: +# - {index: a, foo: bar, value: 123} +# - {index: b, value: 4} +# - {index: c, foo: baz} +# - {index: d, foo: qux} + +- name: Example 4. Merge two lists. By default, replace nested lists. + ansible.builtin.debug: + var: r + vars: + list1: + - {index: a, foo: [X1, X2]} + - {index: b, foo: [X1, X2]} + list2: + - {index: a, foo: [Y1, Y2]} + - {index: b, foo: [Y1, Y2]} + r: "{{ [list1, list2] | community.general.lists_mergeby('index') }}" + +# r: +# - {index: a, foo: [Y1, Y2]} +# - {index: b, foo: [Y1, Y2]} + +- name: Example 5. Merge two lists. Append nested lists. + ansible.builtin.debug: + var: r + vars: + list1: + - {index: a, foo: [X1, X2]} + - {index: b, foo: [X1, X2]} + list2: + - {index: a, foo: [Y1, Y2]} + - {index: b, foo: [Y1, Y2]} + r: "{{ [list1, list2] | community.general.lists_mergeby('index', list_merge='append') }}" + +# r: +# - {index: a, foo: [X1, X2, Y1, Y2]} +# - {index: b, foo: [X1, X2, Y1, Y2]} + +- name: Example 6. Merge two lists. By default, do not merge nested dictionaries. + ansible.builtin.debug: + var: r + vars: + list1: + - {index: a, foo: {x: 1, y: 2}} + - {index: b, foo: [X1, X2]} + list2: + - {index: a, foo: {y: 3, z: 4}} + - {index: b, foo: [Y1, Y2]} + r: "{{ [list1, list2] | community.general.lists_mergeby('index') }}" + +# r: +# - {index: a, foo: {y: 3, z: 4}} +# - {index: b, foo: [Y1, Y2]} + +- name: Example 7. Merge two lists. Merge nested dictionaries too. 
+ ansible.builtin.debug: + var: r + vars: + list1: + - {index: a, foo: {x: 1, y: 2}} + - {index: b, foo: [X1, X2]} + list2: + - {index: a, foo: {y: 3, z: 4}} + - {index: b, foo: [Y1, Y2]} + r: "{{ [list1, list2] | community.general.lists_mergeby('index', recursive=true) }}" + +# r: +# - {index: a, foo: {x:1, y: 3, z: 4}} +# - {index: b, foo: [Y1, Y2]} +""" + +RETURN = r""" +_value: + description: The merged list. + type: list + elements: dictionary +""" from ansible.errors import AnsibleFilterError -from ansible.module_utils.six import string_types -from ansible.module_utils.common._collections_compat import Mapping, Sequence +from collections.abc import Mapping, Sequence from ansible.utils.vars import merge_hash -from ansible.release import __version__ as ansible_version -from ansible_collections.community.general.plugins.module_utils.version import LooseVersion from collections import defaultdict from operator import itemgetter def list_mergeby(x, y, index, recursive=False, list_merge='replace'): - ''' Merge 2 lists by attribute 'index'. The function merge_hash from ansible.utils.vars is used. - This function is used by the function lists_mergeby. + '''Merge 2 lists by attribute 'index'. The function 'merge_hash' + from ansible.utils.vars is used. This function is used by the + function lists_mergeby. ''' d = defaultdict(dict) - for l in (x, y): - for elem in l: + for lst in (x, y): + for elem in lst: if not isinstance(elem, Mapping): msg = "Elements of list arguments for lists_mergeby must be dictionaries. %s is %s" raise AnsibleFilterError(msg % (elem, type(elem))) @@ -125,20 +220,9 @@ def list_mergeby(x, y, index, recursive=False, list_merge='replace'): def lists_mergeby(*terms, **kwargs): - ''' Merge 2 or more lists by attribute 'index'. Optional parameters 'recursive' and 'list_merge' - control the merging of the lists in values. The function merge_hash from ansible.utils.vars - is used. 
To learn details on how to use the parameters 'recursive' and 'list_merge' see - Ansible User's Guide chapter "Using filters to manipulate data" section "Combining - hashes/dictionaries". - - Example: - - debug: - msg: "{{ list1| - community.general.lists_mergeby(list2, - 'index', - recursive=True, - list_merge='append')| - list }}" + '''Merge 2 or more lists by attribute 'index'. To learn details + on how to use the parameters 'recursive' and 'list_merge' see + the filter ansible.builtin.combine. ''' recursive = kwargs.pop('recursive', False) @@ -156,7 +240,7 @@ def lists_mergeby(*terms, **kwargs): "must be lists. %s is %s") raise AnsibleFilterError(msg % (sublist, type(sublist))) if len(sublist) > 0: - if all(isinstance(l, Sequence) for l in sublist): + if all(isinstance(lst, Sequence) for lst in sublist): for item in sublist: flat_list.append(item) else: @@ -171,7 +255,7 @@ def lists_mergeby(*terms, **kwargs): index = terms[-1] - if not isinstance(index, string_types): + if not isinstance(index, str): msg = ("First argument after the lists for community.general.lists_mergeby must be string. " "%s is %s") raise AnsibleFilterError(msg % (index, type(index))) diff --git a/plugins/filter/lists_symmetric_difference.yml b/plugins/filter/lists_symmetric_difference.yml new file mode 100644 index 0000000000..abd8caab8a --- /dev/null +++ b/plugins/filter/lists_symmetric_difference.yml @@ -0,0 +1,48 @@ +--- +# Copyright (c) Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +DOCUMENTATION: + name: lists_symmetric_difference + short_description: Symmetric Difference of lists with a predictive order + version_added: 8.4.0 + description: + - Provide a unique list containing the symmetric difference of two or more lists. + - The order of the items in the resulting list is preserved. + options: + _input: + description: A list. 
+ type: list + elements: any + required: true + flatten: + description: Whether to remove one hierarchy level from the input list. + type: boolean + default: false + author: + - Christoph Fiehe (@cfiehe) + +EXAMPLES: | + - name: Return the symmetric difference of list1 and list2. + ansible.builtin.debug: + msg: "{{ list1 | community.general.lists_symmetric_difference(list2) }}" + vars: + list1: [1, 2, 5, 3, 4, 10] + list2: [1, 2, 3, 4, 5, 11, 99] + # => [10, 11, 99] + + - name: Return the symmetric difference of list1, list2 and list3. + ansible.builtin.debug: + msg: "{{ [list1, list2, list3] | community.general.lists_symmetric_difference(flatten=true) }}" + vars: + list1: [1, 2, 5, 3, 4, 10] + list2: [1, 2, 3, 4, 5, 11, 99] + list3: [1, 2, 3, 4, 5, 10, 99, 101] + # => [11, 1, 2, 3, 4, 5, 101] + +RETURN: + _value: + description: A unique list containing the symmetric difference of two or more lists. + type: list + elements: any diff --git a/plugins/filter/lists_union.yml b/plugins/filter/lists_union.yml new file mode 100644 index 0000000000..8c1ffb4f87 --- /dev/null +++ b/plugins/filter/lists_union.yml @@ -0,0 +1,48 @@ +--- +# Copyright (c) Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +DOCUMENTATION: + name: lists_union + short_description: Union of lists with a predictive order + version_added: 8.4.0 + description: + - Provide a unique list of all the elements of two or more lists. + - The order of the items in the resulting list is preserved. + options: + _input: + description: A list. + type: list + elements: any + required: true + flatten: + description: Whether to remove one hierarchy level from the input list. + type: boolean + default: false + author: + - Christoph Fiehe (@cfiehe) + +EXAMPLES: | + - name: Return the union of list1, list2 and list3. 
+ ansible.builtin.debug: + msg: "{{ list1 | community.general.lists_union(list2, list3) }}" + vars: + list1: [1, 2, 5, 3, 4, 10] + list2: [1, 2, 3, 4, 5, 11, 99] + list3: [1, 2, 3, 4, 5, 10, 99, 101] + # => [1, 2, 5, 3, 4, 10, 11, 99, 101] + + - name: Return the union of list1 and list2. + ansible.builtin.debug: + msg: "{{ [list1, list2] | community.general.lists_union(flatten=true) }}" + vars: + list1: [1, 2, 5, 3, 4, 10] + list2: [1, 2, 3, 4, 5, 11, 99] + # => [1, 2, 5, 3, 4, 10, 11, 99] + +RETURN: + _value: + description: A unique list of all the elements from the provided lists. + type: list + elements: any diff --git a/plugins/filter/random_mac.py b/plugins/filter/random_mac.py index 544cd0aa0b..e5e6201f1c 100644 --- a/plugins/filter/random_mac.py +++ b/plugins/filter/random_mac.py @@ -1,44 +1,29 @@ -# -*- coding: utf-8 -*- -# (c) 2020 Ansible Project -# -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . 
+# Copyright (c) 2020 Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later # Make coding more python3-ish -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = ''' - name: random_mac - short_description: Generate a random MAC address - description: - - Generates random networking interfaces MAC addresses for a given prefix. - options: - _input: - description: A string prefix to use as a basis for the random MAC generated. - type: string - required: true - seed: - description: - - A randomization seed to initialize the process, used to get repeatable results. - - If no seed is provided, a system random source such as C(/dev/urandom) is used. - required: false - type: string -''' +DOCUMENTATION = r""" +name: random_mac +short_description: Generate a random MAC address +description: + - Generates random networking interfaces MAC addresses for a given prefix. +options: + _input: + description: A string prefix to use as a basis for the random MAC generated. + type: string + required: true + seed: + description: + - A randomization seed to initialize the process, used to get repeatable results. + - If no seed is provided, a system random source such as C(/dev/urandom) is used. + required: false + type: string +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: Random MAC given a prefix ansible.builtin.debug: msg: "{{ '52:54:00' | community.general.random_mac }}" @@ -47,35 +32,32 @@ EXAMPLES = ''' - name: With a seed ansible.builtin.debug: msg: "{{ '52:54:00' | community.general.random_mac(seed=inventory_hostname) }}" -''' +""" -RETURN = ''' - _value: - description: The generated MAC. - type: string -''' +RETURN = r""" +_value: + description: The generated MAC. 
+ type: string +""" import re from random import Random, SystemRandom from ansible.errors import AnsibleFilterError -from ansible.module_utils.six import string_types def random_mac(value, seed=None): ''' takes string prefix, and return it completed with random bytes to get a complete 6 bytes MAC address ''' - if not isinstance(value, string_types): - raise AnsibleFilterError('Invalid value type (%s) for random_mac (%s)' % - (type(value), value)) + if not isinstance(value, str): + raise AnsibleFilterError(f'Invalid value type ({type(value)}) for random_mac ({value})') value = value.lower() mac_items = value.split(':') if len(mac_items) > 5: - raise AnsibleFilterError('Invalid value (%s) for random_mac: 5 colon(:) separated' - ' items max' % value) + raise AnsibleFilterError(f'Invalid value ({value}) for random_mac: 5 colon(:) separated items max') err = "" for mac in mac_items: @@ -83,11 +65,11 @@ def random_mac(value, seed=None): err += ",empty item" continue if not re.match('[a-f0-9]{2}', mac): - err += ",%s not hexa byte" % mac + err += f",{mac} not hexa byte" err = err.strip(',') if err: - raise AnsibleFilterError('Invalid value (%s) for random_mac: %s' % (value, err)) + raise AnsibleFilterError(f'Invalid value ({value}) for random_mac: {err}') if seed is None: r = SystemRandom() @@ -97,7 +79,7 @@ def random_mac(value, seed=None): v = r.randint(68719476736, 1099511627775) # Select first n chars to complement input prefix remain = 2 * (6 - len(mac_items)) - rnd = ('%x' % v)[:remain] + rnd = f'{v:x}'[:remain] return value + re.sub(r'(..)', r':\1', rnd) diff --git a/plugins/filter/remove_keys.py b/plugins/filter/remove_keys.py new file mode 100644 index 0000000000..fc134b41d0 --- /dev/null +++ b/plugins/filter/remove_keys.py @@ -0,0 +1,136 @@ +# Copyright (c) 2024 Vladimir Botka +# Copyright (c) 2024 Felix Fontein +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: 
GPL-3.0-or-later + +from __future__ import annotations + +DOCUMENTATION = r""" +name: remove_keys +short_description: Remove specific keys from dictionaries in a list +version_added: "9.1.0" +author: + - Vladimir Botka (@vbotka) + - Felix Fontein (@felixfontein) +description: This filter removes only specified keys from a provided list of dictionaries. +options: + _input: + description: + - A list of dictionaries. + - Top level keys must be strings. + type: list + elements: dictionary + required: true + target: + description: + - A single key or key pattern to remove, or a list of keys or key patterns to remove. + - If O(matching_parameter=regex) there must be exactly one pattern provided. + type: raw + required: true + matching_parameter: + description: Specify the matching option of target keys. + type: str + default: equal + choices: + equal: Matches keys of exactly one of the O(target) items. + starts_with: Matches keys that start with one of the O(target) items. + ends_with: Matches keys that end with one of the O(target) items. + regex: + - Matches keys that match the regular expression provided in O(target). + - In this case, O(target) must be a regex string or a list with a single regex string. +""" + +EXAMPLES = r""" +- l: + - {k0_x0: A0, k1_x1: B0, k2_x2: [C0], k3_x3: foo} + - {k0_x0: A1, k1_x1: B1, k2_x2: [C1], k3_x3: bar} + + # 1) By default match keys that equal any of the items in the target. +- t: [k0_x0, k1_x1] + r: "{{ l | community.general.remove_keys(target=t) }}" + + # 2) Match keys that start with any of the items in the target. +- t: [k0, k1] + r: "{{ l | community.general.remove_keys(target=t, matching_parameter='starts_with') }}" + + # 3) Match keys that end with any of the items in target. +- t: [x0, x1] + r: "{{ l | community.general.remove_keys(target=t, matching_parameter='ends_with') }}" + + # 4) Match keys by the regex. 
+- t: ['^.*[01]_x.*$'] + r: "{{ l | community.general.remove_keys(target=t, matching_parameter='regex') }}" + + # 5) Match keys by the regex. +- t: '^.*[01]_x.*$' + r: "{{ l | community.general.remove_keys(target=t, matching_parameter='regex') }}" + + # The results of above examples 1-5 are all the same. +- r: + - {k2_x2: [C0], k3_x3: foo} + - {k2_x2: [C1], k3_x3: bar} + + # 6) By default match keys that equal the target. +- t: k0_x0 + r: "{{ l | community.general.remove_keys(target=t) }}" + + # 7) Match keys that start with the target. +- t: k0 + r: "{{ l | community.general.remove_keys(target=t, matching_parameter='starts_with') }}" + + # 8) Match keys that end with the target. +- t: x0 + r: "{{ l | community.general.remove_keys(target=t, matching_parameter='ends_with') }}" + + # 9) Match keys by the regex. +- t: '^.*0_x.*$' + r: "{{ l | community.general.remove_keys(target=t, matching_parameter='regex') }}" + + # The results of above examples 6-9 are all the same. +- r: + - {k1_x1: B0, k2_x2: [C0], k3_x3: foo} + - {k1_x1: B1, k2_x2: [C1], k3_x3: bar} +""" + +RETURN = r""" +_value: + description: The list of dictionaries with selected keys removed. 
+ type: list + elements: dictionary +""" + +from ansible_collections.community.general.plugins.plugin_utils.keys_filter import ( + _keys_filter_params, + _keys_filter_target_str) + + +def remove_keys(data, target=None, matching_parameter='equal'): + """remove specific keys from dictionaries in a list""" + + # test parameters + _keys_filter_params(data, matching_parameter) + # test and transform target + tt = _keys_filter_target_str(target, matching_parameter) + + if matching_parameter == 'equal': + def keep_key(key): + return key not in tt + elif matching_parameter == 'starts_with': + def keep_key(key): + return not key.startswith(tt) + elif matching_parameter == 'ends_with': + def keep_key(key): + return not key.endswith(tt) + elif matching_parameter == 'regex': + def keep_key(key): + return tt.match(key) is None + + return [{k: v for k, v in d.items() if keep_key(k)} for d in data] + + +class FilterModule(object): + + def filters(self): + return { + 'remove_keys': remove_keys, + } diff --git a/plugins/filter/replace_keys.py b/plugins/filter/replace_keys.py new file mode 100644 index 0000000000..5af0b22f62 --- /dev/null +++ b/plugins/filter/replace_keys.py @@ -0,0 +1,178 @@ +# Copyright (c) 2024 Vladimir Botka +# Copyright (c) 2024 Felix Fontein +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + +DOCUMENTATION = r""" +name: replace_keys +short_description: Replace specific keys in a list of dictionaries +version_added: "9.1.0" +author: + - Vladimir Botka (@vbotka) + - Felix Fontein (@felixfontein) +description: This filter replaces specified keys in a provided list of dictionaries. +options: + _input: + description: + - A list of dictionaries. + - Top level keys must be strings. 
+ type: list + elements: dictionary + required: true + target: + description: + - A list of dictionaries with attributes C(before) and C(after). + - The value of O(target[].after) replaces key matching O(target[].before). + type: list + elements: dictionary + required: true + suboptions: + before: + description: + - A key or key pattern to change. + - The interpretation of O(target[].before) depends on O(matching_parameter). + - For a key that matches multiple O(target[].before)s, the B(first) matching O(target[].after) is used. + type: str + after: + description: A matching key change to. + type: str + matching_parameter: + description: Specify the matching option of target keys. + type: str + default: equal + choices: + equal: Matches keys of exactly one of the O(target[].before) items. + starts_with: Matches keys that start with one of the O(target[].before) items. + ends_with: Matches keys that end with one of the O(target[].before) items. + regex: Matches keys that match one of the regular expressions provided in O(target[].before). +""" + +EXAMPLES = r""" +- l: + - {k0_x0: A0, k1_x1: B0, k2_x2: [C0], k3_x3: foo} + - {k0_x0: A1, k1_x1: B1, k2_x2: [C1], k3_x3: bar} + + # 1) By default, replace keys that are equal any of the attributes before. +- t: + - {before: k0_x0, after: a0} + - {before: k1_x1, after: a1} + r: "{{ l | community.general.replace_keys(target=t) }}" + + # 2) Replace keys that starts with any of the attributes before. +- t: + - {before: k0, after: a0} + - {before: k1, after: a1} + r: "{{ l | community.general.replace_keys(target=t, matching_parameter='starts_with') }}" + + # 3) Replace keys that ends with any of the attributes before. +- t: + - {before: x0, after: a0} + - {before: x1, after: a1} + r: "{{ l | community.general.replace_keys(target=t, matching_parameter='ends_with') }}" + + # 4) Replace keys that match any regex of the attributes before. 
+- t: + - {before: "^.*0_x.*$", after: a0} + - {before: "^.*1_x.*$", after: a1} + r: "{{ l | community.general.replace_keys(target=t, matching_parameter='regex') }}" + + # The results of above examples 1-4 are all the same. +- r: + - {a0: A0, a1: B0, k2_x2: [C0], k3_x3: foo} + - {a0: A1, a1: B1, k2_x2: [C1], k3_x3: bar} + + # 5) If more keys match the same attribute before the last one will be used. +- t: + - {before: "^.*_x.*$", after: X} + r: "{{ l | community.general.replace_keys(target=t, matching_parameter='regex') }}" + + # gives + +- r: + - X: foo + - X: bar + + # 6) If there are items with equal attribute before the first one will be used. +- t: + - {before: "^.*_x.*$", after: X} + - {before: "^.*_x.*$", after: Y} + r: "{{ l | community.general.replace_keys(target=t, matching_parameter='regex') }}" + + # gives + +- r: + - X: foo + - X: bar + + # 7) If there are more matches for a key the first one will be used. +- l: + - {aaa1: A, bbb1: B, ccc1: C} + - {aaa2: D, bbb2: E, ccc2: F} +- t: + - {before: a, after: X} + - {before: aa, after: Y} + r: "{{ l | community.general.replace_keys(target=t, matching_parameter='starts_with') }}" + + # gives + +- r: + - {X: A, bbb1: B, ccc1: C} + - {X: D, bbb2: E, ccc2: F} +""" + +RETURN = r""" +_value: + description: The list of dictionaries with replaced keys. 
+ type: list + elements: dictionary +""" + +from ansible_collections.community.general.plugins.plugin_utils.keys_filter import ( + _keys_filter_params, + _keys_filter_target_dict) + + +def replace_keys(data, target=None, matching_parameter='equal'): + """replace specific keys in a list of dictionaries""" + + # test parameters + _keys_filter_params(data, matching_parameter) + # test and transform target + tz = _keys_filter_target_dict(target, matching_parameter) + + if matching_parameter == 'equal': + def replace_key(key): + for b, a in tz: + if key == b: + return a + return key + elif matching_parameter == 'starts_with': + def replace_key(key): + for b, a in tz: + if key.startswith(b): + return a + return key + elif matching_parameter == 'ends_with': + def replace_key(key): + for b, a in tz: + if key.endswith(b): + return a + return key + elif matching_parameter == 'regex': + def replace_key(key): + for b, a in tz: + if b.match(key): + return a + return key + + return [{replace_key(k): v for k, v in d.items()} for d in data] + + +class FilterModule(object): + + def filters(self): + return { + 'replace_keys': replace_keys, + } diff --git a/plugins/filter/reveal_ansible_type.py b/plugins/filter/reveal_ansible_type.py new file mode 100644 index 0000000000..e068702355 --- /dev/null +++ b/plugins/filter/reveal_ansible_type.py @@ -0,0 +1,147 @@ +# Copyright (c) 2024 Vladimir Botka +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + +DOCUMENTATION = r""" +name: reveal_ansible_type +short_description: Return input type +version_added: "9.2.0" +author: Vladimir Botka (@vbotka) +description: This filter returns input type. +options: + _input: + description: Input data. + type: raw + required: true + alias: + description: Data type aliases. 
+ default: {} + type: dictionary +""" + +EXAMPLES = r""" +# Substitution converts str to AnsibleUnicode or _AnsibleTaggedStr +# ---------------------------------------------------------------- + +# String. AnsibleUnicode or _AnsibleTaggedStr. +- data: "abc" + result: '{{ data | community.general.reveal_ansible_type }}' +# result => AnsibleUnicode (or _AnsibleTaggedStr) + +# String. AnsibleUnicode/_AnsibleTaggedStr alias str. +- alias: {"AnsibleUnicode": "str", "_AnsibleTaggedStr": "str"} + data: "abc" + result: '{{ data | community.general.reveal_ansible_type(alias) }}' +# result => str + +# List. All items are AnsibleUnicode/_AnsibleTaggedStr. +- data: ["a", "b", "c"] + result: '{{ data | community.general.reveal_ansible_type }}' +# result => list[AnsibleUnicode] or list[_AnsibleTaggedStr] + +# Dictionary. All keys and values are AnsibleUnicode/_AnsibleTaggedStr. +- data: {"a": "foo", "b": "bar", "c": "baz"} + result: '{{ data | community.general.reveal_ansible_type }}' +# result => dict[AnsibleUnicode, AnsibleUnicode] or dict[_AnsibleTaggedStr, _AnsibleTaggedStr] + +# No substitution and no alias. Type of strings is str +# ---------------------------------------------------- + +# String +- result: '{{ "abc" | community.general.reveal_ansible_type }}' +# result => str + +# Integer +- result: '{{ 123 | community.general.reveal_ansible_type }}' +# result => int + +# Float +- result: '{{ 123.45 | community.general.reveal_ansible_type }}' +# result => float + +# Boolean +- result: '{{ true | community.general.reveal_ansible_type }}' +# result => bool + +# List. All items are strings. +- result: '{{ ["a", "b", "c"] | community.general.reveal_ansible_type }}' +# result => list[str] + +# List of dictionaries. +- result: '{{ [{"a": 1}, {"b": 2}] | community.general.reveal_ansible_type }}' +# result => list[dict] + +# Dictionary. All keys are strings. All values are integers. 
+- result: '{{ {"a": 1} | community.general.reveal_ansible_type }}' +# result => dict[str, int] + +# Dictionary. All keys are strings. All values are integers. +- result: '{{ {"a": 1, "b": 2} | community.general.reveal_ansible_type }}' +# result => dict[str, int] + +# Type of strings is AnsibleUnicode, _AnsibleTaggedStr, or str +# ------------------------------------------------------------ + +# Dictionary. The keys are integers or strings. All values are strings. +- alias: + AnsibleUnicode: str + _AnsibleTaggedStr: str + _AnsibleTaggedInt: int + data: {1: 'a', 'b': 'b'} + result: '{{ data | community.general.reveal_ansible_type(alias) }}' +# result => dict[int|str, str] + +# Dictionary. All keys are integers. All values are keys. +- alias: + AnsibleUnicode: str + _AnsibleTaggedStr: str + _AnsibleTaggedInt: int + data: {1: 'a', 2: 'b'} + result: '{{ data | community.general.reveal_ansible_type(alias) }}' +# result => dict[int, str] + +# Dictionary. All keys are strings. Multiple types values. +- alias: + AnsibleUnicode: str + _AnsibleTaggedStr: str + _AnsibleTaggedInt: int + _AnsibleTaggedFloat: float + data: {'a': 1, 'b': 1.1, 'c': 'abc', 'd': true, 'e': ['x', 'y', 'z'], 'f': {'x': 1, 'y': 2}} + result: '{{ data | community.general.reveal_ansible_type(alias) }}' +# result => dict[str, bool|dict|float|int|list|str] + +# List. Multiple types items. +- alias: + AnsibleUnicode: str + _AnsibleTaggedStr: str + _AnsibleTaggedInt: int + _AnsibleTaggedFloat: float + data: [1, 2, 1.1, 'abc', true, ['x', 'y', 'z'], {'x': 1, 'y': 2}] + result: '{{ data | community.general.reveal_ansible_type(alias) }}' +# result => list[bool|dict|float|int|list|str] +""" + +RETURN = r""" +_value: + description: Type of the data. 
+ type: str +""" + +from ansible_collections.community.general.plugins.plugin_utils.ansible_type import _ansible_type + + +def reveal_ansible_type(data, alias=None): + """Returns data type""" + + # TODO: expose use_native_type parameter + return _ansible_type(data, alias) + + +class FilterModule(object): + + def filters(self): + return { + 'reveal_ansible_type': reveal_ansible_type + } diff --git a/plugins/filter/time.py b/plugins/filter/time.py index f069780fe7..e48e24216a 100644 --- a/plugins/filter/time.py +++ b/plugins/filter/time.py @@ -1,9 +1,8 @@ -# -*- coding: utf-8 -*- # Copyright (c) 2020, René Moser -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations import re from ansible.errors import AnsibleFilterError @@ -56,10 +55,10 @@ def to_time_unit(human_time, unit='ms', **kwargs): unit = unit_to_short_form.get(unit.rstrip('s'), unit) if unit not in unit_factors: - raise AnsibleFilterError("to_time_unit() can not convert to the following unit: %s. " - "Available units (singular or plural): %s. " - "Available short units: %s" - % (unit, ', '.join(unit_to_short_form.keys()), ', '.join(unit_factors.keys()))) + raise AnsibleFilterError(( + f"to_time_unit() can not convert to the following unit: {unit}. Available units (singular or plural):" + f"{', '.join(unit_to_short_form.keys())}. 
Available short units: {', '.join(unit_factors.keys())}" + )) if 'year' in kwargs: unit_factors['y'] = unit_factors['y'][:-1] + [kwargs.pop('year')] @@ -67,14 +66,14 @@ def to_time_unit(human_time, unit='ms', **kwargs): unit_factors['mo'] = unit_factors['mo'][:-1] + [kwargs.pop('month')] if kwargs: - raise AnsibleFilterError('to_time_unit() got unknown keyword arguments: %s' % ', '.join(kwargs.keys())) + raise AnsibleFilterError(f"to_time_unit() got unknown keyword arguments: {', '.join(kwargs.keys())}") result = 0 for h_time_string in human_time.split(): res = re.match(r'(-?\d+)(\w+)', h_time_string) if not res: raise AnsibleFilterError( - "to_time_unit() can not interpret following string: %s" % human_time) + f"to_time_unit() can not interpret following string: {human_time}") h_time_int = int(res.group(1)) h_time_unit = res.group(2) @@ -82,7 +81,7 @@ def to_time_unit(human_time, unit='ms', **kwargs): h_time_unit = unit_to_short_form.get(h_time_unit.rstrip('s'), h_time_unit) if h_time_unit not in unit_factors: raise AnsibleFilterError( - "to_time_unit() can not interpret following string: %s" % human_time) + f"to_time_unit() can not interpret following string: {human_time}") time_in_milliseconds = h_time_int * multiply(unit_factors[h_time_unit]) result += time_in_milliseconds diff --git a/plugins/filter/to_days.yml b/plugins/filter/to_days.yml index e06c9463dc..c76697f1ee 100644 --- a/plugins/filter/to_days.yml +++ b/plugins/filter/to_days.yml @@ -1,6 +1,11 @@ +--- +# Copyright (c) Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + DOCUMENTATION: name: to_days - short_description: Converte a duration string to days + short_description: Converts a duration string to days version_added: 0.2.0 description: - Parse a human readable time duration string and convert to days. 
@@ -8,12 +13,12 @@ DOCUMENTATION: _input: description: - The time string to convert. - - Can use the units C(y) and C(year) for a year, C(mo) and C(month) for a month, C(w) and C(week) for a week, - C(d) and C(day) for a day, C(h) and C(hour) for a hour, C(m), C(min) and C(minute) for minutes, C(s), C(sec) - and C(second) for seconds, C(ms), C(msec), C(msecond) and C(millisecond) for milliseconds. The suffix C(s) - can be added to a unit as well, so C(seconds) is the same as C(second). + - Can use the units V(y) and V(year) for a year, V(mo) and V(month) for a month, V(w) and V(week) for a week, + V(d) and V(day) for a day, V(h) and V(hour) for a hour, V(m), V(min) and V(minute) for minutes, V(s), V(sec) + and V(second) for seconds, V(ms), V(msec), V(msecond) and V(millisecond) for milliseconds. The suffix V(s) + can be added to a unit as well, so V(seconds) is the same as V(second). - Valid strings are space separated combinations of an integer with an optional minus sign and a unit. - - Examples are C(1h), C(-5m), and C(3h -5m 6s). + - Examples are V(1h), V(-5m), and V(3h -5m 6s). type: string required: true year: diff --git a/plugins/filter/to_hours.yml b/plugins/filter/to_hours.yml index 976c3a6adf..520740897b 100644 --- a/plugins/filter/to_hours.yml +++ b/plugins/filter/to_hours.yml @@ -1,6 +1,11 @@ +--- +# Copyright (c) Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + DOCUMENTATION: name: to_hours - short_description: Converte a duration string to hours + short_description: Converts a duration string to hours version_added: 0.2.0 description: - Parse a human readable time duration string and convert to hours. @@ -8,12 +13,12 @@ DOCUMENTATION: _input: description: - The time string to convert. 
- - Can use the units C(y) and C(year) for a year, C(mo) and C(month) for a month, C(w) and C(week) for a week, - C(d) and C(day) for a day, C(h) and C(hour) for a hour, C(m), C(min) and C(minute) for minutes, C(s), C(sec) - and C(second) for seconds, C(ms), C(msec), C(msecond) and C(millisecond) for milliseconds. The suffix C(s) - can be added to a unit as well, so C(seconds) is the same as C(second). + - Can use the units V(y) and V(year) for a year, V(mo) and V(month) for a month, V(w) and V(week) for a week, + V(d) and V(day) for a day, V(h) and V(hour) for a hour, V(m), V(min) and V(minute) for minutes, V(s), V(sec) + and V(second) for seconds, V(ms), V(msec), V(msecond) and V(millisecond) for milliseconds. The suffix V(s) + can be added to a unit as well, so V(seconds) is the same as V(second). - Valid strings are space separated combinations of an integer with an optional minus sign and a unit. - - Examples are C(1h), C(-5m), and C(3h -5m 6s). + - Examples are V(1h), V(-5m), and V(3h -5m 6s). type: string required: true year: diff --git a/plugins/filter/to_ini.py b/plugins/filter/to_ini.py new file mode 100644 index 0000000000..a70740b8aa --- /dev/null +++ b/plugins/filter/to_ini.py @@ -0,0 +1,100 @@ + +# Copyright (c) 2023, Steffen Scheib +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + +DOCUMENTATION = r""" +name: to_ini +short_description: Converts a dictionary to the INI file format +version_added: 8.2.0 +author: Steffen Scheib (@sscheib) +description: + - Converts a dictionary to the INI file format. +options: + _input: + description: The dictionary that should be converted to the INI format. 
+ type: dictionary + required: true +""" + +EXAMPLES = r""" +- name: Define a dictionary + ansible.builtin.set_fact: + my_dict: + section_name: + key_name: 'key value' + + another_section: + connection: 'ssh' + +- name: Write dictionary to INI file + ansible.builtin.copy: + dest: /tmp/test.ini + content: '{{ my_dict | community.general.to_ini }}' + + # /tmp/test.ini will look like this: + # [section_name] + # key_name = key value + # + # [another_section] + # connection = ssh +""" + +RETURN = r""" +_value: + description: A string formatted as INI file. + type: string +""" + +from collections.abc import Mapping +from configparser import ConfigParser +from io import StringIO +from ansible.errors import AnsibleFilterError + + +class IniParser(ConfigParser): + ''' Implements a configparser which sets the correct optionxform ''' + + def __init__(self): + super().__init__(interpolation=None) + self.optionxform = str + + +def to_ini(obj): + ''' Read the given dict and return an INI formatted string ''' + + if not isinstance(obj, Mapping): + raise AnsibleFilterError(f'to_ini requires a dict, got {type(obj)}') + + ini_parser = IniParser() + + try: + ini_parser.read_dict(obj) + except Exception as ex: + raise AnsibleFilterError('to_ini failed to parse given dict:' + f'{ex}', orig_exc=ex) + + # catching empty dicts + if obj == dict(): + raise AnsibleFilterError('to_ini received an empty dict. 
' + 'An empty dict cannot be converted.') + + config = StringIO() + ini_parser.write(config) + + # config.getvalue() returns two \n at the end + # with the below insanity, we remove the very last character of + # the resulting string + return ''.join(config.getvalue().rsplit(config.getvalue()[-1], 1)) + + +class FilterModule(object): + ''' Query filter ''' + + def filters(self): + + return { + 'to_ini': to_ini + } diff --git a/plugins/filter/to_milliseconds.yml b/plugins/filter/to_milliseconds.yml index a4c59ce958..f25bd86623 100644 --- a/plugins/filter/to_milliseconds.yml +++ b/plugins/filter/to_milliseconds.yml @@ -1,6 +1,11 @@ +--- +# Copyright (c) Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + DOCUMENTATION: name: to_milliseconds - short_description: Converte a duration string to milliseconds + short_description: Converts a duration string to milliseconds version_added: 0.2.0 description: - Parse a human readable time duration string and convert to milliseconds. @@ -8,12 +13,12 @@ DOCUMENTATION: _input: description: - The time string to convert. - - Can use the units C(y) and C(year) for a year, C(mo) and C(month) for a month, C(w) and C(week) for a week, - C(d) and C(day) for a day, C(h) and C(hour) for a hour, C(m), C(min) and C(minute) for minutes, C(s), C(sec) - and C(second) for seconds, C(ms), C(msec), C(msecond) and C(millisecond) for milliseconds. The suffix C(s) - can be added to a unit as well, so C(seconds) is the same as C(second). + - Can use the units V(y) and V(year) for a year, V(mo) and V(month) for a month, V(w) and V(week) for a week, + V(d) and V(day) for a day, V(h) and V(hour) for a hour, V(m), V(min) and V(minute) for minutes, V(s), V(sec) + and V(second) for seconds, V(ms), V(msec), V(msecond) and V(millisecond) for milliseconds. 
The suffix V(s) + can be added to a unit as well, so V(seconds) is the same as V(second). - Valid strings are space separated combinations of an integer with an optional minus sign and a unit. - - Examples are C(1h), C(-5m), and C(3h -5m 6s). + - Examples are V(1h), V(-5m), and V(3h -5m 6s). type: string required: true year: diff --git a/plugins/filter/to_minutes.yml b/plugins/filter/to_minutes.yml index 7dfeada29f..924fb6feb3 100644 --- a/plugins/filter/to_minutes.yml +++ b/plugins/filter/to_minutes.yml @@ -1,6 +1,11 @@ +--- +# Copyright (c) Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + DOCUMENTATION: name: to_minutes - short_description: Converte a duration string to minutes + short_description: Converts a duration string to minutes version_added: 0.2.0 description: - Parse a human readable time duration string and convert to minutes. @@ -8,12 +13,12 @@ DOCUMENTATION: _input: description: - The time string to convert. - - Can use the units C(y) and C(year) for a year, C(mo) and C(month) for a month, C(w) and C(week) for a week, - C(d) and C(day) for a day, C(h) and C(hour) for a hour, C(m), C(min) and C(minute) for minutes, C(s), C(sec) - and C(second) for seconds, C(ms), C(msec), C(msecond) and C(millisecond) for milliseconds. The suffix C(s) - can be added to a unit as well, so C(seconds) is the same as C(second). + - Can use the units V(y) and V(year) for a year, V(mo) and V(month) for a month, V(w) and V(week) for a week, + V(d) and V(day) for a day, V(h) and V(hour) for a hour, V(m), V(min) and V(minute) for minutes, V(s), V(sec) + and V(second) for seconds, V(ms), V(msec), V(msecond) and V(millisecond) for milliseconds. The suffix V(s) + can be added to a unit as well, so V(seconds) is the same as V(second). - Valid strings are space separated combinations of an integer with an optional minus sign and a unit. 
-        - Examples are C(1h), C(-5m), and C(3h -5m 6s).
+        - Examples are V(1h), V(-5m), and V(3h -5m 6s).
       type: string
       required: true
     year:
diff --git a/plugins/filter/to_months.yml b/plugins/filter/to_months.yml
index 84a94d2526..09e9c38b5d 100644
--- a/plugins/filter/to_months.yml
+++ b/plugins/filter/to_months.yml
@@ -1,6 +1,11 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
 DOCUMENTATION:
   name: to_months
-  short_description: Converte a duration string to months
+  short_description: Converts a duration string to months
   version_added: 0.2.0
   description:
     - Parse a human readable time duration string and convert to months.
@@ -8,12 +13,12 @@ DOCUMENTATION:
     _input:
       description:
         - The time string to convert.
-        - Can use the units C(y) and C(year) for a year, C(mo) and C(month) for a month, C(w) and C(week) for a week,
-          C(d) and C(day) for a day, C(h) and C(hour) for a hour, C(m), C(min) and C(minute) for minutes, C(s), C(sec)
-          and C(second) for seconds, C(ms), C(msec), C(msecond) and C(millisecond) for milliseconds. The suffix C(s)
-          can be added to a unit as well, so C(seconds) is the same as C(second).
+        - Can use the units V(y) and V(year) for a year, V(mo) and V(month) for a month, V(w) and V(week) for a week,
+          V(d) and V(day) for a day, V(h) and V(hour) for a hour, V(m), V(min) and V(minute) for minutes, V(s), V(sec)
+          and V(second) for seconds, V(ms), V(msec), V(msecond) and V(millisecond) for milliseconds. The suffix V(s)
+          can be added to a unit as well, so V(seconds) is the same as V(second).
         - Valid strings are space separated combinations of an integer with an optional minus sign and a unit.
-        - Examples are C(1h), C(-5m), and C(3h -5m 6s).
+        - Examples are V(1h), V(-5m), and V(3h -5m 6s).
type: string required: true year: diff --git a/plugins/filter/to_nice_yaml.yml b/plugins/filter/to_nice_yaml.yml new file mode 100644 index 0000000000..fe7a316f46 --- /dev/null +++ b/plugins/filter/to_nice_yaml.yml @@ -0,0 +1,89 @@ +# Copyright (c) Contributors to the Ansible project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +DOCUMENTATION: + name: to_nice_yaml + author: + - Ansible Core Team + - Felix Fontein (@felixfontein) + version_added: 11.3.0 + short_description: Convert variable to YAML string + description: + - Converts an Ansible variable into a YAML string representation, without preserving vaulted strings as P(ansible.builtin.to_yaml#filter). + - This filter functions as a wrapper to the L(Python PyYAML library, https://pypi.org/project/PyYAML/)'s C(yaml.dump) function. + positional: _input + options: + _input: + description: + - A variable or expression that returns a data structure. + type: raw + required: true + indent: + description: + - Number of spaces to indent Python structures, mainly used for display to humans. + type: integer + default: 2 + sort_keys: + description: + - Affects sorting of dictionary keys. + default: true + type: bool + default_style: + description: + - Indicates the style of the scalar. + choices: + - '' + - "'" + - '"' + - '|' + - '>' + type: string + canonical: + description: + - If set to V(true), export tag type to the output. + type: bool + width: + description: + - Set the preferred line width. + type: integer + line_break: + description: + - Specify the line break. + type: string + encoding: + description: + - Specify the output encoding. + type: string + explicit_start: + description: + - If set to V(true), adds an explicit start using C(---). + type: bool + explicit_end: + description: + - If set to V(true), adds an explicit end using C(...). 
+ type: bool + redact_sensitive_values: + description: + - If set to V(true), vaulted strings are replaced by V() instead of being decrypted. + - With future ansible-core versions, this can extend to other strings tagged as sensitive. + - B(Note) that with ansible-core 2.18 and before this might not yield the expected result + since these versions of ansible-core strip the vault information away from strings that are + part of more complex data structures specified in C(vars). + type: bool + default: false + notes: + - More options may be available, see L(PyYAML documentation, https://pyyaml.org/wiki/PyYAMLDocumentation) for details. + - >- + These parameters to C(yaml.dump) are not accepted, as they are overridden internally: O(ignore:allow_unicode). + +EXAMPLES: | + --- + # Dump variable in a template to create a YAML document + value: "{{ github_workflow | community.general.to_nice_yaml }}" + +RETURN: + _value: + description: + - The YAML serialized string representing the variable structure inputted. + type: string diff --git a/plugins/filter/to_prettytable.py b/plugins/filter/to_prettytable.py new file mode 100644 index 0000000000..266a426cf2 --- /dev/null +++ b/plugins/filter/to_prettytable.py @@ -0,0 +1,409 @@ +# Copyright (c) 2025, Timur Gadiev +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + +DOCUMENTATION = r""" +name: to_prettytable +short_description: Format a list of dictionaries as an ASCII table +version_added: "10.7.0" +author: Timur Gadiev (@tgadiev) +description: + - This filter takes a list of dictionaries and formats it as an ASCII table using the I(prettytable) Python library. +requirements: + - prettytable +options: + _input: + description: A list of dictionaries to format. 
+ type: list + elements: dictionary + required: true + column_order: + description: List of column names to specify the order of columns in the table. + type: list + elements: string + header_names: + description: List of custom header names to use instead of dictionary keys. + type: list + elements: string + column_alignments: + description: + - Dictionary where keys are column names and values are alignment settings. Valid alignment values are C(left), C(center), + C(right), C(l), C(c), or C(r). + - "For example, V({'name': 'left', 'id': 'right'}) aligns the C(name) column to the left and the C(id) column to the + right." + type: dictionary +""" + +EXAMPLES = r""" +- name: Set a list of users + ansible.builtin.set_fact: + users: + - name: Alice + age: 25 + role: admin + - name: Bob + age: 30 + role: user + +- name: Display a list of users as a table + ansible.builtin.debug: + msg: >- + {{ + users | community.general.to_prettytable + }} + +- name: Display a table with custom column ordering + ansible.builtin.debug: + msg: >- + {{ + users | community.general.to_prettytable( + column_order=['role', 'name', 'age'] + ) + }} + +- name: Display a table with selective column output (only show name and role fields) + ansible.builtin.debug: + msg: >- + {{ + users | community.general.to_prettytable( + column_order=['name', 'role'] + ) + }} + +- name: Display a table with custom headers + ansible.builtin.debug: + msg: >- + {{ + users | community.general.to_prettytable( + header_names=['User Name', 'User Age', 'User Role'] + ) + }} + +- name: Display a table with custom alignments + ansible.builtin.debug: + msg: >- + {{ + users | community.general.to_prettytable( + column_alignments={'name': 'center', 'age': 'right', 'role': 'left'} + ) + }} + +- name: Combine multiple options + ansible.builtin.debug: + msg: >- + {{ + users | community.general.to_prettytable( + column_order=['role', 'name', 'age'], + header_names=['Position', 'Full Name', 'Years'], + 
column_alignments={'name': 'center', 'age': 'right', 'role': 'left'} + ) + }} +""" + +RETURN = r""" +_value: + description: The formatted ASCII table. + type: string +""" + +try: + import prettytable + HAS_PRETTYTABLE = True +except ImportError: + HAS_PRETTYTABLE = False + +from ansible.errors import AnsibleFilterError +from ansible.module_utils.common.text.converters import to_text + + +class TypeValidationError(AnsibleFilterError): + """Custom exception for type validation errors. + + Args: + obj: The object with incorrect type + expected: Description of expected type + """ + def __init__(self, obj, expected): + type_name = "string" if isinstance(obj, str) else type(obj).__name__ + super().__init__(f"Expected {expected}, got a {type_name}") + + +def _validate_list_param(param, param_name, ensure_strings=True): + """Validate a parameter is a list and optionally ensure all elements are strings. + + Args: + param: The parameter to validate + param_name: The name of the parameter for error messages + ensure_strings: Whether to check that all elements are strings + + Raises: + AnsibleFilterError: If validation fails + """ + # Map parameter names to their original error message format + error_messages = { + "column_order": "a list of column names", + "header_names": "a list of header names" + } + + # Use the specific error message if available, otherwise use a generic one + error_msg = error_messages.get(param_name, f"a list for {param_name}") + + if not isinstance(param, list): + raise TypeValidationError(param, error_msg) + + if ensure_strings: + for item in param: + if not isinstance(item, str): + # Maintain original error message format + if param_name == "column_order": + error_msg = "a string for column name" + elif param_name == "header_names": + error_msg = "a string for header name" + else: + error_msg = f"a string for {param_name} element" + raise TypeValidationError(item, error_msg) + + +def _match_key(item_dict, lookup_key): + """Find a matching key in a 
dictionary, handling type conversion. + + Args: + item_dict: Dictionary to search in + lookup_key: Key to look for, possibly needing type conversion + + Returns: + The matching key or None if no match found + """ + # Direct key match + if lookup_key in item_dict: + return lookup_key + + # Try boolean conversion for 'true'/'false' strings + if isinstance(lookup_key, str): + if lookup_key.lower() == 'true' and True in item_dict: + return True + if lookup_key.lower() == 'false' and False in item_dict: + return False + + # Try numeric conversion for string numbers + if lookup_key.isdigit() and int(lookup_key) in item_dict: + return int(lookup_key) + + # No match found + return None + + +def _build_key_maps(data): + """Build mappings between string keys and original keys. + + Args: + data: List of dictionaries with keys to map + + Returns: + Tuple of (key_map, reverse_key_map) + """ + key_map = {} + reverse_key_map = {} + + # Check if the data list is not empty + if not data: + return key_map, reverse_key_map + + first_dict = data[0] + for orig_key in first_dict.keys(): + # Store string version of the key + str_key = to_text(orig_key) + key_map[str_key] = orig_key + # Also store lowercase version for case-insensitive lookups + reverse_key_map[str_key.lower()] = orig_key + + return key_map, reverse_key_map + + +def _configure_alignments(table, field_names, column_alignments): + """Configure column alignments for the table. 
+ + Args: + table: The PrettyTable instance to configure + field_names: List of field names to align + column_alignments: Dict of column alignments + """ + valid_alignments = {"left", "center", "right", "l", "c", "r"} + + if not isinstance(column_alignments, dict): + return + + for col_name, alignment in column_alignments.items(): + if col_name in field_names: + # We already validated alignment is a string and a valid value in the main function + # Just apply it here + alignment = alignment.lower() + table.align[col_name] = alignment[0] + + +def to_prettytable(data, *args, **kwargs): + """Convert a list of dictionaries to an ASCII table. + + Args: + data: List of dictionaries to format + *args: Optional list of column names to specify column order + **kwargs: Optional keyword arguments: + - column_order: List of column names to specify the order + - header_names: List of custom header names + - column_alignments: Dict of column alignments (left, center, right) + + Returns: + String containing the ASCII table + """ + if not HAS_PRETTYTABLE: + raise AnsibleFilterError( + 'You need to install "prettytable" Python module to use this filter' + ) + + # === Input validation === + # Validate list type + if not isinstance(data, list): + raise TypeValidationError(data, "a list of dictionaries") + + # Validate dictionary items if list is not empty + if data and not all(isinstance(item, dict) for item in data): + invalid_item = next((item for item in data if not isinstance(item, dict)), None) + raise TypeValidationError(invalid_item, "all items in the list to be dictionaries") + + # Get sample dictionary to determine fields - empty if no data + sample_dict = data[0] if data else {} + max_fields = len(sample_dict) + + # === Process column order === + # Handle both positional and keyword column_order + column_order = kwargs.pop('column_order', None) + + # Check for conflict between args and column_order + if args and column_order is not None: + raise AnsibleFilterError("Cannot 
use both positional arguments and the 'column_order' keyword argument") + + # Use positional args if provided + if args: + column_order = list(args) + + # Validate column_order + if column_order is not None: + _validate_list_param(column_order, "column_order") + + # Validate column_order doesn't exceed the number of fields (skip if data is empty) + if data and len(column_order) > max_fields: + raise AnsibleFilterError( + f"'column_order' has more elements ({len(column_order)}) than available fields in data ({max_fields})") + + # === Process headers === + # Determine field names and ensure they are strings + if column_order: + field_names = column_order + else: + # Use field names from first dictionary, ensuring all are strings + field_names = [to_text(k) for k in sample_dict] + + # Process custom headers + header_names = kwargs.pop('header_names', None) + if header_names is not None: + _validate_list_param(header_names, "header_names") + + # Validate header_names doesn't exceed the number of fields (skip if data is empty) + if data and len(header_names) > max_fields: + raise AnsibleFilterError( + f"'header_names' has more elements ({len(header_names)}) than available fields in data ({max_fields})") + + # Validate that column_order and header_names have the same size if both provided + if column_order is not None and len(column_order) != len(header_names): + raise AnsibleFilterError( + f"'column_order' and 'header_names' must have the same number of elements. 
" + f"Got {len(column_order)} columns and {len(header_names)} headers.") + + # === Process alignments === + # Get column alignments and validate + column_alignments = kwargs.pop('column_alignments', {}) + valid_alignments = {"left", "center", "right", "l", "c", "r"} + + # Validate column_alignments is a dictionary + if not isinstance(column_alignments, dict): + raise TypeValidationError(column_alignments, "a dictionary for column_alignments") + + # Validate column_alignments keys and values + for key, value in column_alignments.items(): + # Check that keys are strings + if not isinstance(key, str): + raise TypeValidationError(key, "a string for column_alignments key") + + # Check that values are strings + if not isinstance(value, str): + raise TypeValidationError(value, "a string for column_alignments value") + + # Check that values are valid alignments + if value.lower() not in valid_alignments: + raise AnsibleFilterError( + f"Invalid alignment '{value}' in 'column_alignments'. " + f"Valid alignments are: {', '.join(sorted(valid_alignments))}") + + # Validate column_alignments doesn't have more keys than fields (skip if data is empty) + if data and len(column_alignments) > max_fields: + raise AnsibleFilterError( + f"'column_alignments' has more elements ({len(column_alignments)}) than available fields in data ({max_fields})") + + # Check for unknown parameters + if kwargs: + raise AnsibleFilterError(f"Unknown parameter(s) for to_prettytable filter: {', '.join(sorted(kwargs))}") + + # === Build the table === + table = prettytable.PrettyTable() + + # Set the field names for display + display_names = header_names if header_names is not None else field_names + table.field_names = [to_text(name) for name in display_names] + + # Configure alignments after setting field_names + _configure_alignments(table, display_names, column_alignments) + + # Build key maps only if not using explicit column_order and we have data + key_map = {} + reverse_key_map = {} + if not 
column_order and data: # Only needed when using original dictionary keys and we have data + key_map, reverse_key_map = _build_key_maps(data) + + # If we have an empty list with no custom parameters, return a simple empty table + if not data and not column_order and not header_names and not column_alignments: + return "++\n++" + + # Process each row if we have data + for item in data: + row = [] + for col in field_names: + # Try direct mapping first + if col in key_map: + row.append(item.get(key_map[col], "")) + else: + # Try to find a matching key in the item + matched_key = _match_key(item, col) + if matched_key is not None: + row.append(item.get(matched_key, "")) + else: + # Try case-insensitive lookup as last resort + lower_col = col.lower() if isinstance(col, str) else str(col).lower() + if lower_col in reverse_key_map: + row.append(item.get(reverse_key_map[lower_col], "")) + else: + # No match found + row.append("") + table.add_row(row) + + return to_text(table) + + +class FilterModule(object): + """Ansible core jinja2 filters.""" + + def filters(self): + return { + 'to_prettytable': to_prettytable + } diff --git a/plugins/filter/to_seconds.yml b/plugins/filter/to_seconds.yml index 0b09e98456..49b69d6d69 100644 --- a/plugins/filter/to_seconds.yml +++ b/plugins/filter/to_seconds.yml @@ -1,6 +1,11 @@ +--- +# Copyright (c) Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + DOCUMENTATION: name: to_seconds - short_description: Converte a duration string to seconds + short_description: Converts a duration string to seconds version_added: 0.2.0 description: - Parse a human readable time duration string and convert to seconds. @@ -8,12 +13,12 @@ DOCUMENTATION: _input: description: - The time string to convert. 
- - Can use the units C(y) and C(year) for a year, C(mo) and C(month) for a month, C(w) and C(week) for a week, - C(d) and C(day) for a day, C(h) and C(hour) for a hour, C(m), C(min) and C(minute) for minutes, C(s), C(sec) - and C(second) for seconds, C(ms), C(msec), C(msecond) and C(millisecond) for milliseconds. The suffix C(s) - can be added to a unit as well, so C(seconds) is the same as C(second). + - Can use the units V(y) and V(year) for a year, V(mo) and V(month) for a month, V(w) and V(week) for a week, + V(d) and V(day) for a day, V(h) and V(hour) for a hour, V(m), V(min) and V(minute) for minutes, V(s), V(sec) + and V(second) for seconds, V(ms), V(msec), V(msecond) and V(millisecond) for milliseconds. The suffix V(s) + can be added to a unit as well, so V(seconds) is the same as V(second). - Valid strings are space separated combinations of an integer with an optional minus sign and a unit. - - Examples are C(1h), C(-5m), and C(3h -5m 6s). + - Examples are V(1h), V(-5m), and V(3h -5m 6s). type: string required: true year: diff --git a/plugins/filter/to_time_unit.yml b/plugins/filter/to_time_unit.yml index 436a4d6a80..256ca573f4 100644 --- a/plugins/filter/to_time_unit.yml +++ b/plugins/filter/to_time_unit.yml @@ -1,6 +1,11 @@ +--- +# Copyright (c) Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + DOCUMENTATION: name: to_time_unit - short_description: Converte a duration string to the given time unit + short_description: Converts a duration string to the given time unit version_added: 0.2.0 description: - Parse a human readable time duration string and convert to the given time unit. @@ -9,12 +14,12 @@ DOCUMENTATION: _input: description: - The time string to convert. 
- - Can use the units C(y) and C(year) for a year, C(mo) and C(month) for a month, C(w) and C(week) for a week, - C(d) and C(day) for a day, C(h) and C(hour) for a hour, C(m), C(min) and C(minute) for minutes, C(s), C(sec) - and C(second) for seconds, C(ms), C(msec), C(msecond) and C(millisecond) for milliseconds. The suffix C(s) - can be added to a unit as well, so C(seconds) is the same as C(second). + - Can use the units V(y) and V(year) for a year, V(mo) and V(month) for a month, V(w) and V(week) for a week, + V(d) and V(day) for a day, V(h) and V(hour) for a hour, V(m), V(min) and V(minute) for minutes, V(s), V(sec) + and V(second) for seconds, V(ms), V(msec), V(msecond) and V(millisecond) for milliseconds. The suffix V(s) + can be added to a unit as well, so V(seconds) is the same as V(second). - Valid strings are space separated combinations of an integer with an optional minus sign and a unit. - - Examples are C(1h), C(-5m), and C(3h -5m 6s). + - Examples are V(1h), V(-5m), and V(3h -5m 6s). type: string required: true unit: diff --git a/plugins/filter/to_weeks.yml b/plugins/filter/to_weeks.yml index 4626e35662..750e77c378 100644 --- a/plugins/filter/to_weeks.yml +++ b/plugins/filter/to_weeks.yml @@ -1,6 +1,11 @@ +--- +# Copyright (c) Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + DOCUMENTATION: name: to_weeks - short_description: Converte a duration string to weeks + short_description: Converts a duration string to weeks version_added: 0.2.0 description: - Parse a human readable time duration string and convert to weeks. @@ -8,12 +13,12 @@ DOCUMENTATION: _input: description: - The time string to convert. 
- - Can use the units C(y) and C(year) for a year, C(mo) and C(month) for a month, C(w) and C(week) for a week, - C(d) and C(day) for a day, C(h) and C(hour) for a hour, C(m), C(min) and C(minute) for minutes, C(s), C(sec) - and C(second) for seconds, C(ms), C(msec), C(msecond) and C(millisecond) for milliseconds. The suffix C(s) - can be added to a unit as well, so C(seconds) is the same as C(second). + - Can use the units V(y) and V(year) for a year, V(mo) and V(month) for a month, V(w) and V(week) for a week, + V(d) and V(day) for a day, V(h) and V(hour) for a hour, V(m), V(min) and V(minute) for minutes, V(s), V(sec) + and V(second) for seconds, V(ms), V(msec), V(msecond) and V(millisecond) for milliseconds. The suffix V(s) + can be added to a unit as well, so V(seconds) is the same as V(second). - Valid strings are space separated combinations of an integer with an optional minus sign and a unit. - - Examples are C(1h), C(-5m), and C(3h -5m 6s). + - Examples are V(1h), V(-5m), and V(3h -5m 6s). 
type: string required: true year: diff --git a/plugins/filter/to_yaml.py b/plugins/filter/to_yaml.py new file mode 100644 index 0000000000..905b04271c --- /dev/null +++ b/plugins/filter/to_yaml.py @@ -0,0 +1,113 @@ +# Copyright (c) Contributors to the Ansible project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + +import typing as t +from collections.abc import Mapping, Set + +from yaml import dump +try: + from yaml.cyaml import CSafeDumper as SafeDumper +except ImportError: + from yaml import SafeDumper + +from ansible.module_utils.common.collections import is_sequence +try: + # This is ansible-core 2.19+ + from ansible.utils.vars import transform_to_native_types + from ansible.parsing.vault import VaultHelper, VaultLib +except ImportError: + transform_to_native_types = None + +from ansible.parsing.yaml.objects import AnsibleVaultEncryptedUnicode +from ansible.utils.unsafe_proxy import AnsibleUnsafe + + +def _to_native_types_compat(value: t.Any, *, redact_value: str | None) -> t.Any: + """Compatibility function for ansible-core 2.18 and before.""" + if value is None: + return value + if isinstance(value, AnsibleUnsafe): + # This only works up to ansible-core 2.18: + return _to_native_types_compat(value._strip_unsafe(), redact_value=redact_value) + # But that's fine, since this code path isn't taken on ansible-core 2.19+ anyway. 
+ if isinstance(value, Mapping): + return { + _to_native_types_compat(key, redact_value=redact_value): _to_native_types_compat(val, redact_value=redact_value) + for key, val in value.items() + } + if isinstance(value, Set): + return {_to_native_types_compat(elt, redact_value=redact_value) for elt in value} + if is_sequence(value): + return [_to_native_types_compat(elt, redact_value=redact_value) for elt in value] + if isinstance(value, AnsibleVaultEncryptedUnicode): + if redact_value is not None: + return redact_value + # This only works up to ansible-core 2.18: + return value.data + # But that's fine, since this code path isn't taken on ansible-core 2.19+ anyway. + if isinstance(value, bytes): + return bytes(value) + if isinstance(value, str): + return str(value) + + return value + + +def _to_native_types(value: t.Any, *, redact: bool) -> t.Any: + if isinstance(value, Mapping): + return {_to_native_types(k, redact=redact): _to_native_types(v, redact=redact) for k, v in value.items()} + if is_sequence(value): + return [_to_native_types(e, redact=redact) for e in value] + if redact: + ciphertext = VaultHelper.get_ciphertext(value, with_tags=False) + if ciphertext and VaultLib.is_encrypted(ciphertext): + return "" + return transform_to_native_types(value, redact=redact) + + +def remove_all_tags(value: t.Any, *, redact_sensitive_values: bool = False) -> t.Any: + """ + Remove all tags from all values in the input. + + If ``redact_sensitive_values`` is ``True``, all sensitive values will be redacted. 
+ """ + if transform_to_native_types is not None: + return _to_native_types(value, redact=redact_sensitive_values) + + return _to_native_types_compat( + value, + redact_value="" if redact_sensitive_values else None, # same string as in ansible-core 2.19 by transform_to_native_types() + ) + + +def to_yaml(value: t.Any, *, redact_sensitive_values: bool = False, default_flow_style: bool | None = None, **kwargs) -> str: + """Serialize input as terse flow-style YAML.""" + return dump( + remove_all_tags(value, redact_sensitive_values=redact_sensitive_values), + Dumper=SafeDumper, + allow_unicode=True, + default_flow_style=default_flow_style, + **kwargs, + ) + + +def to_nice_yaml(value: t.Any, *, redact_sensitive_values: bool = False, indent: int = 2, default_flow_style: bool = False, **kwargs) -> str: + """Serialize input as verbose multi-line YAML.""" + return to_yaml( + value, + redact_sensitive_values=redact_sensitive_values, + default_flow_style=default_flow_style, + indent=indent, + **kwargs, + ) + + +class FilterModule(object): + def filters(self): + return { + 'to_yaml': to_yaml, + 'to_nice_yaml': to_nice_yaml, + } diff --git a/plugins/filter/to_yaml.yml b/plugins/filter/to_yaml.yml new file mode 100644 index 0000000000..066f8d990d --- /dev/null +++ b/plugins/filter/to_yaml.yml @@ -0,0 +1,92 @@ +# Copyright (c) Contributors to the Ansible project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +DOCUMENTATION: + name: to_yaml + author: + - Ansible Core Team + - Felix Fontein (@felixfontein) + version_added: 11.3.0 + short_description: Convert variable to YAML string + description: + - Converts an Ansible variable into a YAML string representation, without preserving vaulted strings as P(ansible.builtin.to_yaml#filter). + - This filter functions as a wrapper to the L(Python PyYAML library, https://pypi.org/project/PyYAML/)'s C(yaml.dump) function. 
+ positional: _input + options: + _input: + description: + - A variable or expression that returns a data structure. + type: raw + required: true + indent: + description: + - Number of spaces to indent Python structures, mainly used for display to humans. + type: integer + sort_keys: + description: + - Affects sorting of dictionary keys. + default: true + type: bool + default_style: + description: + - Indicates the style of the scalar. + choices: + - '' + - "'" + - '"' + - '|' + - '>' + type: string + canonical: + description: + - If set to V(true), export tag type to the output. + type: bool + width: + description: + - Set the preferred line width. + type: integer + line_break: + description: + - Specify the line break. + type: string + encoding: + description: + - Specify the output encoding. + type: string + explicit_start: + description: + - If set to V(true), adds an explicit start using C(---). + type: bool + explicit_end: + description: + - If set to V(true), adds an explicit end using C(...). + type: bool + redact_sensitive_values: + description: + - If set to V(true), vaulted strings are replaced by V() instead of being decrypted. + - With future ansible-core versions, this can extend to other strings tagged as sensitive. + - B(Note) that with ansible-core 2.18 and before this might not yield the expected result + since these versions of ansible-core strip the vault information away from strings that are + part of more complex data structures specified in C(vars). + type: bool + default: false + notes: + - More options may be available, see L(PyYAML documentation, https://pyyaml.org/wiki/PyYAMLDocumentation) for details. + - >- + These parameters to C(yaml.dump) are not accepted, as they are overridden internally: O(ignore:allow_unicode). 
+ +EXAMPLES: | + --- + # Dump variable in a template to create a YAML document + value: "{{ github_workflow | community.general.to_yaml }}" + + --- + # Same as above but 'prettier' (equivalent to community.general.to_nice_yaml filter) + value: "{{ docker_config | community.general.to_yaml(indent=2) }}" + +RETURN: + _value: + description: + - The YAML serialized string representing the variable structure inputted. + type: string diff --git a/plugins/filter/to_years.yml b/plugins/filter/to_years.yml index 4fb54b8753..62f282a8b6 100644 --- a/plugins/filter/to_years.yml +++ b/plugins/filter/to_years.yml @@ -1,6 +1,11 @@ +--- +# Copyright (c) Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + DOCUMENTATION: name: to_years - short_description: Converte a duration string to years + short_description: Converts a duration string to years version_added: 0.2.0 description: - Parse a human readable time duration string and convert to years. @@ -8,12 +13,12 @@ DOCUMENTATION: _input: description: - The time string to convert. - - Can use the units C(y) and C(year) for a year, C(mo) and C(month) for a month, C(w) and C(week) for a week, - C(d) and C(day) for a day, C(h) and C(hour) for a hour, C(m), C(min) and C(minute) for minutes, C(s), C(sec) - and C(second) for seconds, C(ms), C(msec), C(msecond) and C(millisecond) for milliseconds. The suffix C(s) - can be added to a unit as well, so C(seconds) is the same as C(second). + - Can use the units V(y) and V(year) for a year, V(mo) and V(month) for a month, V(w) and V(week) for a week, + V(d) and V(day) for a day, V(h) and V(hour) for a hour, V(m), V(min) and V(minute) for minutes, V(s), V(sec) + and V(second) for seconds, V(ms), V(msec), V(msecond) and V(millisecond) for milliseconds. The suffix V(s) + can be added to a unit as well, so V(seconds) is the same as V(second). 
- Valid strings are space separated combinations of an integer with an optional minus sign and a unit. - - Examples are C(1h), C(-5m), and C(3h -5m 6s). + - Examples are V(1h), V(-5m), and V(3h -5m 6s). type: string required: true year: diff --git a/plugins/filter/unicode_normalize.py b/plugins/filter/unicode_normalize.py index 30aed2005a..f1fe18402b 100644 --- a/plugins/filter/unicode_normalize.py +++ b/plugins/filter/unicode_normalize.py @@ -1,55 +1,58 @@ -# -*- coding: utf-8 -*- -# Copyright: (c) 2021, Andrew Pantuso (@ajpantuso) -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# Copyright (c) 2021, Andrew Pantuso (@ajpantuso) +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = ''' - name: unicode_normalize - short_description: Normalizes unicode strings to facilitate comparison of characters with normalized forms - version_added: 3.7.0 - author: Andrew Pantuso (@Ajpantuso) - description: - - Normalizes unicode strings to facilitate comparison of characters with normalized forms. - positional: form - options: - _input: - description: A unicode string. - type: string - required: true - form: - description: - - The normal form to use. - - See U(https://docs.python.org/3/library/unicodedata.html#unicodedata.normalize) for details. - type: string - default: NFC - choices: - - NFC - - NFD - - NFKC - - NFKD -''' +DOCUMENTATION = r""" +name: unicode_normalize +short_description: Normalizes unicode strings to facilitate comparison of characters with normalized forms +version_added: 3.7.0 +author: Andrew Pantuso (@Ajpantuso) +description: + - Normalizes unicode strings to facilitate comparison of characters with normalized forms. 
+positional: form +options: + _input: + description: A unicode string. + type: string + required: true + form: + description: + - The normal form to use. + - See U(https://docs.python.org/3/library/unicodedata.html#unicodedata.normalize) for details. + type: string + default: NFC + choices: + - NFC + - NFD + - NFKC + - NFKD +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: Normalize unicode string ansible.builtin.set_fact: dictionary: "{{ 'ä' | community.general.unicode_normalize('NFKD') }}" # The resulting string has length 2: one letter is 'a', the other # the diacritic combiner. -''' +""" -RETURN = ''' - _value: - description: The normalized unicode string of the specified normal form. - type: string -''' +RETURN = r""" +_value: + description: The normalized unicode string of the specified normal form. + type: string +""" from unicodedata import normalize -from ansible.errors import AnsibleFilterError, AnsibleFilterTypeError -from ansible.module_utils.six import text_type +from ansible.errors import AnsibleFilterError + +try: + from ansible.errors import AnsibleTypeError +except ImportError: + from ansible.errors import AnsibleFilterTypeError as AnsibleTypeError def unicode_normalize(data, form='NFC'): @@ -64,11 +67,11 @@ def unicode_normalize(data, form='NFC'): A normalized unicode string of the specified 'form'. 
""" - if not isinstance(data, text_type): - raise AnsibleFilterTypeError("%s is not a valid input type" % type(data)) + if not isinstance(data, str): + raise AnsibleTypeError(f"{type(data)} is not a valid input type") if form not in ('NFC', 'NFD', 'NFKC', 'NFKD'): - raise AnsibleFilterError("%s is not a valid form" % form) + raise AnsibleFilterError(f"{form!r} is not a valid form") return normalize(form, data) diff --git a/plugins/filter/version_sort.py b/plugins/filter/version_sort.py index 9d21691085..893c7e5bd3 100644 --- a/plugins/filter/version_sort.py +++ b/plugins/filter/version_sort.py @@ -1,38 +1,37 @@ -# -*- coding: utf-8 -*- # Copyright (C) 2021 Eric Lavarde -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = ''' - name: version_sort - short_description: Sort a list according to version order instead of pure alphabetical one - version_added: 2.2.0 - author: Eric L. (@ericzolf) - description: - - Sort a list according to version order instead of pure alphabetical one. - options: - _input: - description: A list of strings to sort. - type: list - elements: string - required: true -''' +DOCUMENTATION = r""" +name: version_sort +short_description: Sort a list according to version order instead of pure alphabetical one +version_added: 2.2.0 +author: Eric L. (@ericzolf) +description: + - Sort a list according to version order instead of pure alphabetical one. +options: + _input: + description: A list of strings to sort. 
+ type: list + elements: string + required: true +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: Convert list of tuples into dictionary ansible.builtin.set_fact: dictionary: "{{ ['2.1', '2.10', '2.9'] | community.general.version_sort }}" # Result is ['2.1', '2.9', '2.10'] -''' +""" -RETURN = ''' - _value: - description: The list of strings sorted by version. - type: list - elements: string -''' +RETURN = r""" +_value: + description: The list of strings sorted by version. + type: list + elements: string +""" from ansible_collections.community.general.plugins.module_utils.version import LooseVersion diff --git a/plugins/inventory/cobbler.py b/plugins/inventory/cobbler.py index d50acd0c55..7374193a74 100644 --- a/plugins/inventory/cobbler.py +++ b/plugins/inventory/cobbler.py @@ -1,102 +1,160 @@ -# -*- coding: utf-8 -*- # Copyright (C) 2020 Orion Poplawski # Copyright (c) 2020 Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later +from __future__ import annotations -DOCUMENTATION = ''' - author: Orion Poplawski (@opoplawski) - name: cobbler - short_description: Cobbler inventory source - version_added: 1.0.0 +DOCUMENTATION = r""" +author: Orion Poplawski (@opoplawski) +name: cobbler +short_description: Cobbler inventory source +version_added: 1.0.0 +description: + - Get inventory hosts from the cobbler service. + - 'Uses a configuration file as an inventory source, it must end in C(.cobbler.yml) or C(.cobbler.yaml) and have a C(plugin: + cobbler) entry.' + - Adds the primary IP addresses to C(cobbler_ipv4_address) and C(cobbler_ipv6_address) host variables if defined in Cobbler. 
+ The primary IP address is defined as the management interface if defined, or the interface who's DNS name matches the + hostname of the system, or else the first interface found. +extends_documentation_fragment: + - inventory_cache +options: + plugin: + description: The name of this plugin, it should always be set to V(community.general.cobbler) for this plugin to recognize + it as its own. + type: string + required: true + choices: ['cobbler', 'community.general.cobbler'] + url: + description: URL to cobbler. + type: string + default: 'http://cobbler/cobbler_api' + env: + - name: COBBLER_SERVER + user: + description: Cobbler authentication user. + type: string + required: false + env: + - name: COBBLER_USER + password: + description: Cobbler authentication password. + type: string + required: false + env: + - name: COBBLER_PASSWORD + cache_fallback: + description: Fallback to cached results if connection to cobbler fails. + type: boolean + default: false + connection_timeout: + description: Timeout to connect to cobbler server. + type: int + required: false + version_added: 10.7.0 + exclude_mgmt_classes: + description: Management classes to exclude from inventory. + type: list + default: [] + elements: str + version_added: 7.4.0 + exclude_profiles: description: - - Get inventory hosts from the cobbler service. - - "Uses a configuration file as an inventory source, it must end in C(.cobbler.yml) or C(.cobbler.yaml) and has a C(plugin: cobbler) entry." - extends_documentation_fragment: - - inventory_cache - options: - plugin: - description: The name of this plugin, it should always be set to C(community.general.cobbler) for this plugin to recognize it as it's own. - required: yes - choices: [ 'cobbler', 'community.general.cobbler' ] - url: - description: URL to cobbler. - default: 'http://cobbler/cobbler_api' - env: - - name: COBBLER_SERVER - user: - description: Cobbler authentication user. 
- required: no - env: - - name: COBBLER_USER - password: - description: Cobbler authentication password - required: no - env: - - name: COBBLER_PASSWORD - cache_fallback: - description: Fallback to cached results if connection to cobbler fails - type: boolean - default: no - exclude_profiles: - description: - - Profiles to exclude from inventory. - - Ignored if I(include_profiles) is specified. - type: list - default: [] - elements: str - include_profiles: - description: - - Profiles to include from inventory. - - If specified, all other profiles will be excluded. - - I(exclude_profiles) is ignored if I(include_profiles) is specified. - type: list - default: [] - elements: str - version_added: 4.4.0 - group_by: - description: Keys to group hosts by - type: list - elements: string - default: [ 'mgmt_classes', 'owners', 'status' ] - group: - description: Group to place all hosts into - default: cobbler - group_prefix: - description: Prefix to apply to cobbler groups - default: cobbler_ - want_facts: - description: Toggle, if C(true) the plugin will retrieve host facts from the server - type: boolean - default: yes -''' + - Profiles to exclude from inventory. + - Ignored if O(include_profiles) is specified. + type: list + default: [] + elements: str + include_mgmt_classes: + description: Management classes to include from inventory. + type: list + default: [] + elements: str + version_added: 7.4.0 + include_profiles: + description: + - Profiles to include from inventory. + - If specified, all other profiles are excluded. + - O(exclude_profiles) is ignored if O(include_profiles) is specified. + type: list + default: [] + elements: str + version_added: 4.4.0 + inventory_hostname: + description: + - What to use for the ansible inventory hostname. + - By default the networking hostname is used if defined, otherwise the DNS name of the management or first non-static + interface. + - If set to V(system), the cobbler system name is used. 
+ type: str + choices: ['hostname', 'system'] + default: hostname + version_added: 7.1.0 + group_by: + description: Keys to group hosts by. + type: list + elements: string + default: ['mgmt_classes', 'owners', 'status'] + group: + description: Group to place all hosts into. + default: cobbler + group_prefix: + description: Prefix to apply to cobbler groups. + default: cobbler_ + want_facts: + description: Toggle, if V(true) the plugin retrieves all host facts from the server. + type: boolean + default: true + want_ip_addresses: + description: + - Toggle, if V(true) the plugin adds a C(cobbler_ipv4_addresses) and C(cobbler_ipv6_addresses) dictionary to the + defined O(group) mapping interface DNS names to IP addresses. + type: boolean + default: true + version_added: 7.1.0 + facts_level: + description: + - Set to V(normal) to gather only system-level variables. + - Set to V(as_rendered) to gather all variables as rolled up by Cobbler. + type: string + choices: ['normal', 'as_rendered'] + default: normal + version_added: 10.7.0 +""" -EXAMPLES = ''' +EXAMPLES = r""" # my.cobbler.yml plugin: community.general.cobbler url: http://cobbler/cobbler_api user: ansible-tester password: secure -''' +""" import socket from ansible.errors import AnsibleError -from ansible.module_utils.common.text.converters import to_text -from ansible.module_utils.six import iteritems from ansible.plugins.inventory import BaseInventoryPlugin, Cacheable, to_safe_group_name +from ansible_collections.community.general.plugins.plugin_utils.unsafe import make_unsafe + # xmlrpc try: - import xmlrpclib as xmlrpc_client + import xmlrpc.client as xmlrpc_client HAS_XMLRPC_CLIENT = True except ImportError: - try: - import xmlrpc.client as xmlrpc_client - HAS_XMLRPC_CLIENT = True - except ImportError: - HAS_XMLRPC_CLIENT = False + HAS_XMLRPC_CLIENT = False + + +class TimeoutTransport (xmlrpc_client.SafeTransport): + def __init__(self, timeout=socket._GLOBAL_DEFAULT_TIMEOUT): + super(TimeoutTransport, 
self).__init__() + self._timeout = timeout + self.context = None + + def make_connection(self, host): + conn = xmlrpc_client.SafeTransport.make_connection(self, host) + conn.timeout = self._timeout + return conn class InventoryModule(BaseInventoryPlugin, Cacheable): @@ -107,7 +165,9 @@ class InventoryModule(BaseInventoryPlugin, Cacheable): def __init__(self): super(InventoryModule, self).__init__() self.cache_key = None - self.connection = None + + if not HAS_XMLRPC_CLIENT: + raise AnsibleError('Could not import xmlrpc client library') def verify_file(self, path): valid = False @@ -118,18 +178,6 @@ class InventoryModule(BaseInventoryPlugin, Cacheable): self.display.vvv('Skipping due to inventory source not ending in "cobbler.yaml" nor "cobbler.yml"') return valid - def _get_connection(self): - if not HAS_XMLRPC_CLIENT: - raise AnsibleError('Could not import xmlrpc client library') - - if self.connection is None: - self.display.vvvv('Connecting to %s\n' % self.cobbler_url) - self.connection = xmlrpc_client.Server(self.cobbler_url, allow_none=True) - self.token = None - if self.get_option('user') is not None: - self.token = self.connection.login(self.get_option('user'), self.get_option('password')) - return self.connection - def _init_cache(self): if self.cache_key not in self._cache: self._cache[self.cache_key] = {} @@ -143,12 +191,11 @@ class InventoryModule(BaseInventoryPlugin, Cacheable): def _get_profiles(self): if not self.use_cache or 'profiles' not in self._cache.get(self.cache_key, {}): - c = self._get_connection() try: if self.token is not None: - data = c.get_profiles(self.token) + data = self.cobbler.get_profiles(self.token) else: - data = c.get_profiles() + data = self.cobbler.get_profiles() except (socket.gaierror, socket.error, xmlrpc_client.ProtocolError): self._reload_cache() else: @@ -159,12 +206,20 @@ class InventoryModule(BaseInventoryPlugin, Cacheable): def _get_systems(self): if not self.use_cache or 'systems' not in 
self._cache.get(self.cache_key, {}): - c = self._get_connection() try: if self.token is not None: - data = c.get_systems(self.token) + data = self.cobbler.get_systems(self.token) else: - data = c.get_systems() + data = self.cobbler.get_systems() + + # If more facts are requested, gather them all from Cobbler + if self.facts_level == "as_rendered": + for i, host in enumerate(data): + self.display.vvvv(f"Gathering all facts for {host['name']}\n") + if self.token is not None: + data[i] = self.cobbler.get_system_as_rendered(host['name'], self.token) + else: + data[i] = self.cobbler.get_system_as_rendered(host['name']) except (socket.gaierror, socket.error, xmlrpc_client.ProtocolError): self._reload_cache() else: @@ -174,7 +229,7 @@ class InventoryModule(BaseInventoryPlugin, Cacheable): return self._cache[self.cache_key]['systems'] def _add_safe_group_name(self, group, child=None): - group_name = self.inventory.add_group(to_safe_group_name('%s%s' % (self.get_option('group_prefix'), group.lower().replace(" ", "")))) + group_name = self.inventory.add_group(to_safe_group_name(f"{self.get_option('group_prefix')}{group.lower().replace(' ', '')}")) if child is not None: self.inventory.add_child(group_name, child) return group_name @@ -194,38 +249,53 @@ class InventoryModule(BaseInventoryPlugin, Cacheable): # get connection host self.cobbler_url = self.get_option('url') + self.display.vvvv(f'Connecting to {self.cobbler_url}\n') + + if 'connection_timeout' in self._options: + self.cobbler = xmlrpc_client.Server(self.cobbler_url, allow_none=True, + transport=TimeoutTransport(timeout=self.get_option('connection_timeout'))) + else: + self.cobbler = xmlrpc_client.Server(self.cobbler_url, allow_none=True) + self.token = None + if self.get_option('user') is not None: + self.token = self.cobbler.login(str(self.get_option('user')), str(self.get_option('password'))) + self.cache_key = self.get_cache_key(path) self.use_cache = cache and self.get_option('cache') + 
self.exclude_mgmt_classes = self.get_option('exclude_mgmt_classes') + self.include_mgmt_classes = self.get_option('include_mgmt_classes') self.exclude_profiles = self.get_option('exclude_profiles') self.include_profiles = self.get_option('include_profiles') self.group_by = self.get_option('group_by') + self.inventory_hostname = self.get_option('inventory_hostname') + self.facts_level = self.get_option('facts_level') for profile in self._get_profiles(): if profile['parent']: - self.display.vvvv('Processing profile %s with parent %s\n' % (profile['name'], profile['parent'])) + self.display.vvvv(f"Processing profile {profile['name']} with parent {profile['parent']}\n") if not self._exclude_profile(profile['parent']): parent_group_name = self._add_safe_group_name(profile['parent']) - self.display.vvvv('Added profile parent group %s\n' % parent_group_name) + self.display.vvvv(f'Added profile parent group {parent_group_name}\n') if not self._exclude_profile(profile['name']): group_name = self._add_safe_group_name(profile['name']) - self.display.vvvv('Added profile group %s\n' % group_name) + self.display.vvvv(f'Added profile group {group_name}\n') self.inventory.add_child(parent_group_name, group_name) else: - self.display.vvvv('Processing profile %s without parent\n' % profile['name']) - # Create a heirarchy of profile names + self.display.vvvv(f"Processing profile {profile['name']} without parent\n") + # Create a hierarchy of profile names profile_elements = profile['name'].split('-') i = 0 while i < len(profile_elements) - 1: profile_group = '-'.join(profile_elements[0:i + 1]) profile_group_child = '-'.join(profile_elements[0:i + 2]) if self._exclude_profile(profile_group): - self.display.vvvv('Excluding profile %s\n' % profile_group) + self.display.vvvv(f'Excluding profile {profile_group}\n') break group_name = self._add_safe_group_name(profile_group) - self.display.vvvv('Added profile group %s\n' % group_name) + self.display.vvvv(f'Added profile group 
{group_name}\n') child_group_name = self._add_safe_group_name(profile_group_child) - self.display.vvvv('Added profile child group %s to %s\n' % (child_group_name, group_name)) + self.display.vvvv(f'Added profile child group {child_group_name} to {group_name}\n') self.inventory.add_child(group_name, child_group_name) i = i + 1 @@ -233,54 +303,112 @@ class InventoryModule(BaseInventoryPlugin, Cacheable): self.group = to_safe_group_name(self.get_option('group')) if self.group is not None and self.group != '': self.inventory.add_group(self.group) - self.display.vvvv('Added site group %s\n' % self.group) + self.display.vvvv(f'Added site group {self.group}\n') + ip_addresses = {} + ipv6_addresses = {} for host in self._get_systems(): # Get the FQDN for the host and add it to the right groups - hostname = host['hostname'] # None + if self.inventory_hostname == 'system': + hostname = make_unsafe(host['name']) # None + else: + hostname = make_unsafe(host['hostname']) # None interfaces = host['interfaces'] - if self._exclude_profile(host['profile']): - self.display.vvvv('Excluding host %s in profile %s\n' % (host['name'], host['profile'])) - continue + if set(host['mgmt_classes']) & set(self.include_mgmt_classes): + self.display.vvvv(f"Including host {host['name']} in mgmt_classes {host['mgmt_classes']}\n") + else: + if self._exclude_profile(host['profile']): + self.display.vvvv(f"Excluding host {host['name']} in profile {host['profile']}\n") + continue + + if set(host['mgmt_classes']) & set(self.exclude_mgmt_classes): + self.display.vvvv(f"Excluding host {host['name']} in mgmt_classes {host['mgmt_classes']}\n") + continue # hostname is often empty for non-static IP hosts if hostname == '': - for (iname, ivalue) in iteritems(interfaces): + for iname, ivalue in interfaces.items(): if ivalue['management'] or not ivalue['static']: this_dns_name = ivalue.get('dns_name', None) if this_dns_name is not None and this_dns_name != "": - hostname = this_dns_name - 
self.display.vvvv('Set hostname to %s from %s\n' % (hostname, iname)) + hostname = make_unsafe(this_dns_name) + self.display.vvvv(f'Set hostname to {hostname} from {iname}\n') if hostname == '': - self.display.vvvv('Cannot determine hostname for host %s, skipping\n' % host['name']) + self.display.vvvv(f"Cannot determine hostname for host {host['name']}, skipping\n") continue self.inventory.add_host(hostname) - self.display.vvvv('Added host %s hostname %s\n' % (host['name'], hostname)) + self.display.vvvv(f"Added host {host['name']} hostname {hostname}\n") # Add host to profile group - group_name = self._add_safe_group_name(host['profile'], child=hostname) - self.display.vvvv('Added host %s to profile group %s\n' % (hostname, group_name)) + if host['profile'] != '': + group_name = self._add_safe_group_name(host['profile'], child=hostname) + self.display.vvvv(f'Added host {hostname} to profile group {group_name}\n') + else: + self.display.warning(f'Host {hostname} has an empty profile\n') # Add host to groups specified by group_by fields for group_by in self.group_by: - if host[group_by] == '<>': + if host[group_by] == '<>' or host[group_by] == '': groups = [] else: groups = [host[group_by]] if isinstance(host[group_by], str) else host[group_by] for group in groups: group_name = self._add_safe_group_name(group, child=hostname) - self.display.vvvv('Added host %s to group_by %s group %s\n' % (hostname, group_by, group_name)) + self.display.vvvv(f'Added host {hostname} to group_by {group_by} group {group_name}\n') # Add to group for this inventory if self.group is not None: self.inventory.add_child(self.group, hostname) # Add host variables + ip_address = None + ip_address_first = None + ipv6_address = None + ipv6_address_first = None + for iname, ivalue in interfaces.items(): + # Set to first interface or management interface if defined or hostname matches dns_name + if ivalue['ip_address'] != "": + if ip_address_first is None: + ip_address_first = ivalue['ip_address'] 
+ if ivalue['management']: + ip_address = ivalue['ip_address'] + elif ivalue['dns_name'] == hostname and ip_address is None: + ip_address = ivalue['ip_address'] + if ivalue['ipv6_address'] != "": + if ipv6_address_first is None: + ipv6_address_first = ivalue['ipv6_address'] + if ivalue['management']: + ipv6_address = ivalue['ipv6_address'] + elif ivalue['dns_name'] == hostname and ipv6_address is None: + ipv6_address = ivalue['ipv6_address'] + + # Collect all interface name mappings for adding to group vars + if self.get_option('want_ip_addresses'): + if ivalue['dns_name'] != "": + if ivalue['ip_address'] != "": + ip_addresses[ivalue['dns_name']] = ivalue['ip_address'] + if ivalue['ipv6_address'] != "": + ipv6_addresses[ivalue['dns_name']] = ivalue['ipv6_address'] + + # Add ip_address to host if defined, use first if no management or matched dns_name + if ip_address is None and ip_address_first is not None: + ip_address = ip_address_first + if ip_address is not None: + self.inventory.set_variable(hostname, 'cobbler_ipv4_address', make_unsafe(ip_address)) + if ipv6_address is None and ipv6_address_first is not None: + ipv6_address = ipv6_address_first + if ipv6_address is not None: + self.inventory.set_variable(hostname, 'cobbler_ipv6_address', make_unsafe(ipv6_address)) + if self.get_option('want_facts'): try: - self.inventory.set_variable(hostname, 'cobbler', host) + self.inventory.set_variable(hostname, 'cobbler', make_unsafe(host)) except ValueError as e: - self.display.warning("Could not set host info for %s: %s" % (hostname, to_text(e))) + self.display.warning(f"Could not set host info for {hostname}: {e}") + + if self.get_option('want_ip_addresses'): + self.inventory.set_variable(self.group, 'cobbler_ipv4_addresses', make_unsafe(ip_addresses)) + self.inventory.set_variable(self.group, 'cobbler_ipv6_addresses', make_unsafe(ipv6_addresses)) diff --git a/plugins/inventory/gitlab_runners.py b/plugins/inventory/gitlab_runners.py index ac0fa21ad9..4a2b32680e 100644
--- a/plugins/inventory/gitlab_runners.py +++ b/plugins/inventory/gitlab_runners.py @@ -1,71 +1,71 @@ -# -*- coding: utf-8 -*- # Copyright (c) 2018, Stefan Heitmueller # Copyright (c) 2018 Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import (absolute_import, division, print_function) +from __future__ import annotations -__metaclass__ = type -DOCUMENTATION = ''' - name: gitlab_runners - author: - - Stefan Heitmüller (@morph027) - short_description: Ansible dynamic inventory plugin for GitLab runners. - requirements: - - python >= 2.7 - - python-gitlab > 1.8.0 - extends_documentation_fragment: - - constructed - description: - - Reads inventories from the GitLab API. - - Uses a YAML configuration file gitlab_runners.[yml|yaml]. - options: - plugin: - description: The name of this plugin, it should always be set to 'gitlab_runners' for this plugin to recognize it as it's own. - type: str - required: true - choices: - - gitlab_runners - - community.general.gitlab_runners - server_url: - description: The URL of the GitLab server, with protocol (i.e. http or https). - env: - - name: GITLAB_SERVER_URL - version_added: 1.0.0 - type: str - required: true - api_token: - description: GitLab token for logging in. 
- env: - - name: GITLAB_API_TOKEN - version_added: 1.0.0 - type: str - aliases: - - private_token - - access_token - filter: - description: filter runners from GitLab API - env: - - name: GITLAB_FILTER - version_added: 1.0.0 - type: str - choices: ['active', 'paused', 'online', 'specific', 'shared'] - verbose_output: - description: Toggle to (not) include all available nodes metadata - type: bool - default: yes -''' +DOCUMENTATION = r""" +name: gitlab_runners +author: + - Stefan Heitmüller (@morph027) +short_description: Ansible dynamic inventory plugin for GitLab runners +requirements: + - python-gitlab > 1.8.0 +extends_documentation_fragment: + - constructed +description: + - Reads inventories from the GitLab API. + - Uses a YAML configuration file gitlab_runners.[yml|yaml]. +options: + plugin: + description: The name of this plugin, it should always be set to V(gitlab_runners) for this plugin to recognize it as its own. + type: str + required: true + choices: + - gitlab_runners + - community.general.gitlab_runners + server_url: + description: The URL of the GitLab server, with protocol (i.e. http or https). + env: + - name: GITLAB_SERVER_URL + version_added: 1.0.0 + type: str + required: true + api_token: + description: GitLab token for logging in. + env: + - name: GITLAB_API_TOKEN + version_added: 1.0.0 + type: str + aliases: + - private_token + - access_token + filter: + description: Filter runners from GitLab API. + env: + - name: GITLAB_FILTER + version_added: 1.0.0 + type: str + choices: ['active', 'paused', 'online', 'specific', 'shared'] + verbose_output: + description: Toggle to (not) include all available nodes metadata. 
+ type: bool + default: true +""" -EXAMPLES = ''' +EXAMPLES = r""" +--- # gitlab_runners.yml plugin: community.general.gitlab_runners host: https://gitlab.com +--- # Example using constructed features to create groups and set ansible_host plugin: community.general.gitlab_runners host: https://gitlab.com -strict: False +strict: false keyed_groups: # add e.g. amd64 hosts to an arch_amd64 group - prefix: arch @@ -78,12 +78,13 @@ keyed_groups: # hint: labels containing special characters will be converted to safe names - key: 'tag_list' prefix: tag -''' +""" from ansible.errors import AnsibleError, AnsibleParserError -from ansible.module_utils.common.text.converters import to_native from ansible.plugins.inventory import BaseInventoryPlugin, Constructable +from ansible_collections.community.general.plugins.plugin_utils.unsafe import make_unsafe + try: import gitlab HAS_GITLAB = True @@ -105,11 +106,11 @@ class InventoryModule(BaseInventoryPlugin, Constructable): else: runners = gl.runners.all() for runner in runners: - host = str(runner['id']) + host = make_unsafe(str(runner['id'])) ip_address = runner['ip_address'] - host_attrs = vars(gl.runners.get(runner['id']))['_attrs'] + host_attrs = make_unsafe(vars(gl.runners.get(runner['id']))['_attrs']) self.inventory.add_host(host, group='gitlab_runners') - self.inventory.set_variable(host, 'ansible_host', ip_address) + self.inventory.set_variable(host, 'ansible_host', make_unsafe(ip_address)) if self.get_option('verbose_output', True): self.inventory.set_variable(host, 'gitlab_runner_attributes', host_attrs) @@ -122,7 +123,7 @@ class InventoryModule(BaseInventoryPlugin, Constructable): # Create groups based on variable values and add the corresponding hosts to it self._add_host_to_keyed_groups(self.get_option('keyed_groups'), host_attrs, host, strict=strict) except Exception as e: - raise AnsibleParserError('Unable to fetch hosts from GitLab API, this was the original exception: %s' % to_native(e)) + raise 
AnsibleParserError(f'Unable to fetch hosts from GitLab API, this was the original exception: {e}') def verify_file(self, path): """Return the possibly of a file being consumable by this plugin.""" diff --git a/plugins/inventory/icinga2.py b/plugins/inventory/icinga2.py index 5ae565beb9..017959f403 100644 --- a/plugins/inventory/icinga2.py +++ b/plugins/inventory/icinga2.py @@ -1,77 +1,81 @@ -# -*- coding: utf-8 -*- # Copyright (c) 2021, Cliff Hults # Copyright (c) 2021 Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import (absolute_import, division, print_function) +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later +from __future__ import annotations -__metaclass__ = type -DOCUMENTATION = ''' - name: icinga2 - short_description: Icinga2 inventory source - version_added: 3.7.0 - author: - - Cliff Hults (@BongoEADGC6) +DOCUMENTATION = r""" +name: icinga2 +short_description: Icinga2 inventory source +version_added: 3.7.0 +author: + - Cliff Hults (@BongoEADGC6) +description: + - Get inventory hosts from the Icinga2 API. + - Uses a configuration file as an inventory source, it must end in C(.icinga2.yml) or C(.icinga2.yaml). +extends_documentation_fragment: + - constructed +options: + strict: + version_added: 4.4.0 + compose: + version_added: 4.4.0 + groups: + version_added: 4.4.0 + keyed_groups: + version_added: 4.4.0 + plugin: + description: Name of the plugin. + required: true + type: string + choices: ['community.general.icinga2'] + url: + description: Root URL of Icinga2 API. + type: string + required: true + user: + description: Username to query the API. + type: string + required: true + password: + description: Password to query the API. + type: string + required: true + host_filter: description: - - Get inventory hosts from the Icinga2 API. 
- - "Uses a configuration file as an inventory source, it must end in - C(.icinga2.yml) or C(.icinga2.yaml)." - extends_documentation_fragment: - - constructed - options: - strict: - version_added: 4.4.0 - compose: - version_added: 4.4.0 - groups: - version_added: 4.4.0 - keyed_groups: - version_added: 4.4.0 - plugin: - description: Name of the plugin. - required: true - type: string - choices: ['community.general.icinga2'] - url: - description: Root URL of Icinga2 API. - type: string - required: true - user: - description: Username to query the API. - type: string - required: true - password: - description: Password to query the API. - type: string - required: true - host_filter: - description: - - An Icinga2 API valid host filter. Leave blank for no filtering - type: string - required: false - validate_certs: - description: Enables or disables SSL certificate verification. - type: boolean - default: true - inventory_attr: - description: - - Allows the override of the inventory name based on different attributes. - - This allows for changing the way limits are used. - - The current default, C(address), is sometimes not unique or present. We recommend to use C(name) instead. - type: string - default: address - choices: ['name', 'display_name', 'address'] - version_added: 4.2.0 -''' + - An Icinga2 API valid host filter. Leave blank for no filtering. + type: string + required: false + validate_certs: + description: Enables or disables SSL certificate verification. + type: boolean + default: true + inventory_attr: + description: + - Allows the override of the inventory name based on different attributes. + - This allows for changing the way limits are used. + - The current default, V(address), is sometimes not unique or present. We recommend to use V(name) instead. + type: string + default: address + choices: ['name', 'display_name', 'address'] + version_added: 4.2.0 + group_by_hostgroups: + description: + - Uses Icinga2 hostgroups as groups. 
+ type: boolean + default: true + version_added: 8.4.0 +""" -EXAMPLES = r''' +EXAMPLES = r""" # my.icinga2.yml plugin: community.general.icinga2 url: http://localhost:5665 user: ansible password: secure host_filter: \"linux-servers\" in host.groups -validate_certs: false +validate_certs: false # only do this when connecting to localhost! inventory_attr: name groups: # simple name matching @@ -87,14 +91,16 @@ compose: # set 'ansible_user' and 'ansible_port' from icinga2 host vars ansible_user: icinga2_attributes.vars.ansible_user ansible_port: icinga2_attributes.vars.ansible_port | default(22) -''' +""" import json +from urllib.error import HTTPError from ansible.errors import AnsibleParserError from ansible.plugins.inventory import BaseInventoryPlugin, Constructable from ansible.module_utils.urls import open_url -from ansible.module_utils.six.moves.urllib.error import HTTPError + +from ansible_collections.community.general.plugins.plugin_utils.unsafe import make_unsafe class InventoryModule(BaseInventoryPlugin, Constructable): @@ -113,6 +119,7 @@ class InventoryModule(BaseInventoryPlugin, Constructable): self.ssl_verify = None self.host_filter = None self.inventory_attr = None + self.group_by_hostgroups = None self.cache_key = None self.use_cache = None @@ -131,7 +138,7 @@ class InventoryModule(BaseInventoryPlugin, Constructable): 'User-Agent': "ansible-icinga2-inv", 'Accept': "application/json", } - api_status_url = self.icinga2_url + "/status" + api_status_url = f"{self.icinga2_url}/status" request_args = { 'headers': self.headers, 'url_username': self.icinga2_user, @@ -141,7 +148,7 @@ class InventoryModule(BaseInventoryPlugin, Constructable): open_url(api_status_url, **request_args) def _post_request(self, request_url, data=None): - self.display.vvv("Requested URL: %s" % request_url) + self.display.vvv(f"Requested URL: {request_url}") request_args = { 'headers': self.headers, 'url_username': self.icinga2_user, @@ -150,42 +157,38 @@ class 
InventoryModule(BaseInventoryPlugin, Constructable): } if data is not None: request_args['data'] = json.dumps(data) - self.display.vvv("Request Args: %s" % request_args) + self.display.vvv(f"Request Args: {request_args}") try: response = open_url(request_url, **request_args) except HTTPError as e: try: error_body = json.loads(e.read().decode()) - self.display.vvv("Error returned: {0}".format(error_body)) + self.display.vvv(f"Error returned: {error_body}") except Exception: error_body = {"status": None} if e.code == 404 and error_body.get('status') == "No objects found.": raise AnsibleParserError("Host filter returned no data. Please confirm your host_filter value is valid") - raise AnsibleParserError("Unexpected data returned: {0} -- {1}".format(e, error_body)) + raise AnsibleParserError(f"Unexpected data returned: {e} -- {error_body}") response_body = response.read() json_data = json.loads(response_body.decode('utf-8')) - self.display.vvv("Returned Data: %s" % json.dumps(json_data, indent=4, sort_keys=True)) + self.display.vvv(f"Returned Data: {json.dumps(json_data, indent=4, sort_keys=True)}") if 200 <= response.status <= 299: return json_data if response.status == 404 and json_data['status'] == "No objects found.": raise AnsibleParserError( - "API returned no data -- Response: %s - %s" - % (response.status, json_data['status'])) + f"API returned no data -- Response: {response.status} - {json_data['status']}") if response.status == 401: raise AnsibleParserError( - "API was unable to complete query -- Response: %s - %s" - % (response.status, json_data['status'])) + f"API was unable to complete query -- Response: {response.status} - {json_data['status']}") if response.status == 500: raise AnsibleParserError( - "API Response - %s - %s" - % (json_data['status'], json_data['errors'])) + f"API Response - {json_data['status']} - {json_data['errors']}") raise AnsibleParserError( - "Unexpected data returned - %s - %s" - % (json_data['status'], json_data['errors'])) + 
f"Unexpected data returned - {json_data['status']} - {json_data['errors']}") def _query_hosts(self, hosts=None, attrs=None, joins=None, host_filter=None): - query_hosts_url = "{0}/objects/hosts".format(self.icinga2_url) + query_hosts_url = f"{self.icinga2_url}/objects/hosts" self.headers['X-HTTP-Method-Override'] = 'GET' data_dict = dict() if hosts: @@ -232,31 +235,32 @@ class InventoryModule(BaseInventoryPlugin, Constructable): """Convert Icinga2 API data to JSON format for Ansible""" groups_dict = {"_meta": {"hostvars": {}}} for entry in json_data: - host_attrs = entry['attrs'] + host_attrs = make_unsafe(entry['attrs']) if self.inventory_attr == "name": - host_name = entry.get('name') + host_name = make_unsafe(entry.get('name')) if self.inventory_attr == "address": # When looking for address for inventory, if missing fallback to object name if host_attrs.get('address', '') != '': - host_name = host_attrs.get('address') + host_name = make_unsafe(host_attrs.get('address')) else: - host_name = entry.get('name') + host_name = make_unsafe(entry.get('name')) if self.inventory_attr == "display_name": host_name = host_attrs.get('display_name') if host_attrs['state'] == 0: host_attrs['state'] = 'on' else: host_attrs['state'] = 'off' - host_groups = host_attrs.get('groups') self.inventory.add_host(host_name) - for group in host_groups: - if group not in self.inventory.groups.keys(): - self.inventory.add_group(group) - self.inventory.add_child(group, host_name) + if self.group_by_hostgroups: + host_groups = host_attrs.get('groups') + for group in host_groups: + if group not in self.inventory.groups.keys(): + self.inventory.add_group(group) + self.inventory.add_child(group, host_name) # If the address attribute is populated, override ansible_host with the value if host_attrs.get('address') != '': self.inventory.set_variable(host_name, 'ansible_host', host_attrs.get('address')) - self.inventory.set_variable(host_name, 'hostname', entry.get('name')) + 
self.inventory.set_variable(host_name, 'hostname', make_unsafe(entry.get('name'))) self.inventory.set_variable(host_name, 'display_name', host_attrs.get('display_name')) self.inventory.set_variable(host_name, 'state', host_attrs['state']) @@ -276,12 +280,23 @@ class InventoryModule(BaseInventoryPlugin, Constructable): self._read_config_data(path) # Store the options from the YAML file - self.icinga2_url = self.get_option('url').rstrip('/') + '/v1' + self.icinga2_url = self.get_option('url') self.icinga2_user = self.get_option('user') self.icinga2_password = self.get_option('password') self.ssl_verify = self.get_option('validate_certs') self.host_filter = self.get_option('host_filter') self.inventory_attr = self.get_option('inventory_attr') + self.group_by_hostgroups = self.get_option('group_by_hostgroups') + + if self.templar.is_template(self.icinga2_url): + self.icinga2_url = self.templar.template(variable=self.icinga2_url) + if self.templar.is_template(self.icinga2_user): + self.icinga2_user = self.templar.template(variable=self.icinga2_user) + if self.templar.is_template(self.icinga2_password): + self.icinga2_password = self.templar.template(variable=self.icinga2_password) + + self.icinga2_url = f"{self.icinga2_url.rstrip('/')}/v1" + # Not currently enabled # self.cache_key = self.get_cache_key(path) # self.use_cache = cache and self.get_option('cache') diff --git a/plugins/inventory/iocage.py b/plugins/inventory/iocage.py new file mode 100644 index 0000000000..9d4cef4a03 --- /dev/null +++ b/plugins/inventory/iocage.py @@ -0,0 +1,418 @@ + +# Copyright (c) 2024 Vladimir Botka +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + +DOCUMENTATION = r""" +name: iocage +short_description: C(iocage) inventory source +version_added: 10.2.0 +author: + - Vladimir Botka (@vbotka) +requirements: + - iocage >= 1.8 +description: + - Get inventory hosts 
from the C(iocage) jail manager running on O(host). + - By default, O(host) is V(localhost). If O(host) is not V(localhost) it is expected that the user running Ansible on the + controller can connect to the O(host) account O(user) with SSH non-interactively and execute the command C(iocage list). + - Uses a configuration file as an inventory source, it must end in C(.iocage.yml) or C(.iocage.yaml). +extends_documentation_fragment: + - ansible.builtin.constructed + - ansible.builtin.inventory_cache +options: + plugin: + description: + - The name of this plugin, it should always be set to V(community.general.iocage) for this plugin to recognize it as + its own. + required: true + choices: ['community.general.iocage'] + type: str + host: + description: The IP/hostname of the C(iocage) host. + type: str + default: localhost + user: + description: + - C(iocage) user. It is expected that the O(user) is able to connect to the O(host) with SSH and execute the command + C(iocage list). This option is not required if O(host=localhost). + type: str + sudo: + description: + - Enable execution as root. + - This requires passwordless sudo of the command C(iocage list*). + type: bool + default: false + version_added: 10.3.0 + sudo_preserve_env: + description: + - Preserve environment if O(sudo) is enabled. + - This requires C(SETENV) sudoers tag. + type: bool + default: false + version_added: 10.3.0 + get_properties: + description: + - Get jails' properties. Creates dictionary C(iocage_properties) for each added host. + type: bool + default: false + env: + description: + - O(user)'s environment on O(host). + - Enable O(sudo_preserve_env) if O(sudo) is enabled. + type: dict + default: {} + hooks_results: + description: + - List of paths to the files in a jail. + - Content of the files is stored in the items of the list C(iocage_hooks). + - If a file is not available the item keeps the dash character C(-). 
+ - The variable C(iocage_hooks) is not created if O(hooks_results) is empty. + type: list + elements: path + version_added: 10.4.0 + inventory_hostname_tag: + description: + - The name of the tag in the C(iocage properties notes) that contains the jails alias. + - By default, the C(iocage list -l) column C(NAME) is used to name the jail. + - This option requires the notes format C("t1=v1 t2=v2 ..."). + - The option O(get_properties) must be enabled. + type: str + version_added: 11.0.0 + inventory_hostname_required: + description: + - If enabled, the tag declared in O(inventory_hostname_tag) is required. + type: bool + default: false + version_added: 11.0.0 +notes: + - You might want to test the command C(ssh user@host iocage list -l) on the controller before using this inventory plugin + with O(user) specified and with O(host) other than V(localhost). + - If you run this inventory plugin on V(localhost) C(ssh) is not used. In this case, test the command C(iocage list -l). + - This inventory plugin creates variables C(iocage_*) for each added host. + - The values of these variables are collected from the output of the command C(iocage list -l). + - The names of these variables correspond to the output columns. + - The column C(NAME) is used to name the added host. + - The option O(hooks_results) expects the C(poolname) of a jail is mounted to C(/poolname). For example, if you activate + the pool C(iocage) this plugin expects to find the O(hooks_results) items in the path C(/iocage/iocage/jails//root). + If you mount the C(poolname) to a different path the easiest remedy is to create a symlink. 
+""" + +EXAMPLES = r""" +--- +# file name must end with iocage.yaml or iocage.yml +plugin: community.general.iocage +host: 10.1.0.73 +user: admin + +--- +# user is not required if iocage is running on localhost (default) +plugin: community.general.iocage + +--- +# run cryptography without legacy algorithms +plugin: community.general.iocage +host: 10.1.0.73 +user: admin +env: + CRYPTOGRAPHY_OPENSSL_NO_LEGACY: 1 + +--- +# execute as root +# sudoers example 'admin ALL=(ALL) NOPASSWD:SETENV: /usr/local/bin/iocage list*' +plugin: community.general.iocage +host: 10.1.0.73 +user: admin +sudo: true +sudo_preserve_env: true +env: + CRYPTOGRAPHY_OPENSSL_NO_LEGACY: 1 + +--- +# enable cache +plugin: community.general.iocage +host: 10.1.0.73 +user: admin +env: + CRYPTOGRAPHY_OPENSSL_NO_LEGACY: 1 +cache: true + +--- +# see inventory plugin ansible.builtin.constructed +plugin: community.general.iocage +host: 10.1.0.73 +user: admin +env: + CRYPTOGRAPHY_OPENSSL_NO_LEGACY: 1 +cache: true +strict: false +compose: + ansible_host: iocage_ip4 + release: iocage_release | split('-') | first +groups: + test: inventory_hostname.startswith('test') +keyed_groups: + - prefix: distro + key: iocage_release + - prefix: state + key: iocage_state + +--- +# Read the file /var/db/dhclient-hook.address.epair0b in the jails and use it as ansible_host +plugin: community.general.iocage +host: 10.1.0.73 +user: admin +hooks_results: + - /var/db/dhclient-hook.address.epair0b +compose: + ansible_host: iocage_hooks.0 +groups: + test: inventory_hostname.startswith('test') +""" + +import re +import os +from subprocess import Popen, PIPE + +from ansible.errors import AnsibleError, AnsibleParserError +from ansible.module_utils.common.text.converters import to_native, to_text +from ansible.plugins.inventory import BaseInventoryPlugin, Constructable, Cacheable +from ansible.utils.display import Display + +display = Display() + + +def _parse_ip4(ip4): + ''' Return dictionary iocage_ip4_dict. 
default = {ip4: [], msg: ''}. + If item matches ifc|IP or ifc|CIDR parse ifc, ip, and mask. + Otherwise, append item to msg. + ''' + + iocage_ip4_dict = {} + iocage_ip4_dict['ip4'] = [] + iocage_ip4_dict['msg'] = '' + + items = ip4.split(',') + for item in items: + if re.match('^\\w+\\|(?:\\d{1,3}\\.){3}\\d{1,3}.*$', item): + i = re.split('\\||/', item) + if len(i) == 3: + iocage_ip4_dict['ip4'].append({'ifc': i[0], 'ip': i[1], 'mask': i[2]}) + else: + iocage_ip4_dict['ip4'].append({'ifc': i[0], 'ip': i[1], 'mask': '-'}) + else: + iocage_ip4_dict['msg'] += item + + return iocage_ip4_dict + + +class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable): + ''' Host inventory parser for ansible using iocage as source. ''' + + NAME = 'community.general.iocage' + IOCAGE = '/usr/local/bin/iocage' + + def __init__(self): + super(InventoryModule, self).__init__() + + def verify_file(self, path): + valid = False + if super(InventoryModule, self).verify_file(path): + if path.endswith(('iocage.yaml', 'iocage.yml')): + valid = True + else: + self.display.vvv('Skipping due to inventory source not ending in "iocage.yaml" nor "iocage.yml"') + return valid + + def parse(self, inventory, loader, path, cache=True): + super(InventoryModule, self).parse(inventory, loader, path) + + self._read_config_data(path) + cache_key = self.get_cache_key(path) + + user_cache_setting = self.get_option('cache') + attempt_to_read_cache = user_cache_setting and cache + cache_needs_update = user_cache_setting and not cache + + if attempt_to_read_cache: + try: + results = self._cache[cache_key] + except KeyError: + cache_needs_update = True + if not attempt_to_read_cache or cache_needs_update: + results = self.get_inventory(path) + if cache_needs_update: + self._cache[cache_key] = results + + self.populate(results) + + def get_inventory(self, path): + host = self.get_option('host') + sudo = self.get_option('sudo') + sudo_preserve_env = self.get_option('sudo_preserve_env') + env = 
self.get_option('env') + get_properties = self.get_option('get_properties') + hooks_results = self.get_option('hooks_results') + inventory_hostname_tag = self.get_option('inventory_hostname_tag') + inventory_hostname_required = self.get_option('inventory_hostname_required') + + cmd = [] + my_env = os.environ.copy() + if host == 'localhost': + my_env.update({str(k): str(v) for k, v in env.items()}) + else: + user = self.get_option('user') + cmd.append("ssh") + cmd.append(f"{user}@{host}") + cmd.extend([f"{k}={v}" for k, v in env.items()]) + + cmd_list = cmd.copy() + if sudo: + cmd_list.append('sudo') + if sudo_preserve_env: + cmd_list.append('--preserve-env') + cmd_list.append(self.IOCAGE) + cmd_list.append('list') + cmd_list.append('--long') + try: + p = Popen(cmd_list, stdout=PIPE, stderr=PIPE, env=my_env) + stdout, stderr = p.communicate() + if p.returncode != 0: + raise AnsibleError(f'Failed to run cmd={cmd_list}, rc={p.returncode}, stderr={to_native(stderr)}') + + try: + t_stdout = to_text(stdout, errors='surrogate_or_strict') + except UnicodeError as e: + raise AnsibleError(f'Invalid (non unicode) input returned: {e}') from e + + except Exception as e: + raise AnsibleParserError(f'Failed to parse {to_native(path)}: {e}') from e + + results = {'_meta': {'hostvars': {}}} + self.get_jails(t_stdout, results) + + if get_properties: + for hostname, host_vars in results['_meta']['hostvars'].items(): + cmd_get_properties = cmd.copy() + cmd_get_properties.append(self.IOCAGE) + cmd_get_properties.append("get") + cmd_get_properties.append("--all") + cmd_get_properties.append(f"{hostname}") + try: + p = Popen(cmd_get_properties, stdout=PIPE, stderr=PIPE, env=my_env) + stdout, stderr = p.communicate() + if p.returncode != 0: + raise AnsibleError( + f'Failed to run cmd={cmd_get_properties}, rc={p.returncode}, stderr={to_native(stderr)}') + + try: + t_stdout = to_text(stdout, errors='surrogate_or_strict') + except UnicodeError as e: + raise AnsibleError(f'Invalid (non 
unicode) input returned: {e}') from e + + except Exception as e: + raise AnsibleError(f'Failed to get properties: {e}') from e + + self.get_properties(t_stdout, results, hostname) + + if hooks_results: + cmd_get_pool = cmd.copy() + cmd_get_pool.append(self.IOCAGE) + cmd_get_pool.append('get') + cmd_get_pool.append('--pool') + try: + p = Popen(cmd_get_pool, stdout=PIPE, stderr=PIPE, env=my_env) + stdout, stderr = p.communicate() + if p.returncode != 0: + raise AnsibleError( + f'Failed to run cmd={cmd_get_pool}, rc={p.returncode}, stderr={to_native(stderr)}') + try: + iocage_pool = to_text(stdout, errors='surrogate_or_strict').strip() + except UnicodeError as e: + raise AnsibleError(f'Invalid (non unicode) input returned: {e}') from e + except Exception as e: + raise AnsibleError(f'Failed to get pool: {e}') from e + + for hostname, host_vars in results['_meta']['hostvars'].items(): + iocage_hooks = [] + for hook in hooks_results: + path = f"/{iocage_pool}/iocage/jails/{hostname}/root{hook}" + cmd_cat_hook = cmd.copy() + cmd_cat_hook.append('cat') + cmd_cat_hook.append(path) + try: + p = Popen(cmd_cat_hook, stdout=PIPE, stderr=PIPE, env=my_env) + stdout, stderr = p.communicate() + if p.returncode != 0: + iocage_hooks.append('-') + continue + + try: + iocage_hook = to_text(stdout, errors='surrogate_or_strict').strip() + except UnicodeError as e: + raise AnsibleError(f'Invalid (non unicode) input returned: {e}') from e + + except Exception: + iocage_hooks.append('-') + else: + iocage_hooks.append(iocage_hook) + + results['_meta']['hostvars'][hostname]['iocage_hooks'] = iocage_hooks + + # Optionally, get the jails names from the properties notes. + # Requires the notes format "t1=v1 t2=v2 ..." + if inventory_hostname_tag: + if not get_properties: + raise AnsibleError('Jail properties are needed to use inventory_hostname_tag. 
Enable get_properties') + update = {} + for hostname, host_vars in results['_meta']['hostvars'].items(): + tags = dict(tag.split('=', 1) for tag in host_vars['iocage_properties']['notes'].split() if '=' in tag) + if inventory_hostname_tag in tags: + update[hostname] = tags[inventory_hostname_tag] + elif inventory_hostname_required: + raise AnsibleError(f'Mandatory tag {inventory_hostname_tag!r} is missing in the properties notes.') + for hostname, alias in update.items(): + results['_meta']['hostvars'][alias] = results['_meta']['hostvars'].pop(hostname) + + return results + + def get_jails(self, t_stdout, results): + lines = t_stdout.splitlines() + if len(lines) < 5: + return + indices = [i for i, val in enumerate(lines[1]) if val == '|'] + for line in lines[3::2]: + jail = [line[i + 1:j].strip() for i, j in zip(indices[:-1], indices[1:])] + iocage_name = jail[1] + iocage_ip4_dict = _parse_ip4(jail[6]) + if iocage_ip4_dict['ip4']: + iocage_ip4 = ','.join([d['ip'] for d in iocage_ip4_dict['ip4']]) + else: + iocage_ip4 = '-' + results['_meta']['hostvars'][iocage_name] = {} + results['_meta']['hostvars'][iocage_name]['iocage_jid'] = jail[0] + results['_meta']['hostvars'][iocage_name]['iocage_boot'] = jail[2] + results['_meta']['hostvars'][iocage_name]['iocage_state'] = jail[3] + results['_meta']['hostvars'][iocage_name]['iocage_type'] = jail[4] + results['_meta']['hostvars'][iocage_name]['iocage_release'] = jail[5] + results['_meta']['hostvars'][iocage_name]['iocage_ip4_dict'] = iocage_ip4_dict + results['_meta']['hostvars'][iocage_name]['iocage_ip4'] = iocage_ip4 + results['_meta']['hostvars'][iocage_name]['iocage_ip6'] = jail[7] + results['_meta']['hostvars'][iocage_name]['iocage_template'] = jail[8] + results['_meta']['hostvars'][iocage_name]['iocage_basejail'] = jail[9] + + def get_properties(self, t_stdout, results, hostname): + properties = dict(x.split(':', 1) for x in t_stdout.splitlines()) + results['_meta']['hostvars'][hostname]['iocage_properties'] = 
properties + + def populate(self, results): + strict = self.get_option('strict') + + for hostname, host_vars in results['_meta']['hostvars'].items(): + self.inventory.add_host(hostname, group='all') + for var, value in host_vars.items(): + self.inventory.set_variable(hostname, var, value) + self._set_composite_vars(self.get_option('compose'), host_vars, hostname, strict=True) + self._add_host_to_composed_groups(self.get_option('groups'), host_vars, hostname, strict=strict) + self._add_host_to_keyed_groups(self.get_option('keyed_groups'), host_vars, hostname, strict=strict) diff --git a/plugins/inventory/linode.py b/plugins/inventory/linode.py index 33ecc5135c..fc039b03b5 100644 --- a/plugins/inventory/linode.py +++ b/plugins/inventory/linode.py @@ -1,91 +1,93 @@ -# -*- coding: utf-8 -*- # Copyright (c) 2017 Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = r''' - name: linode - author: - - Luke Murphy (@decentral1se) - short_description: Ansible dynamic inventory plugin for Linode. - requirements: - - python >= 2.7 - - linode_api4 >= 2.0.0 - description: - - Reads inventories from the Linode API v4. - - Uses a YAML configuration file that ends with linode.(yml|yaml). - - Linode labels are used by default as the hostnames. - - The default inventory groups are built from groups (deprecated by - Linode) and not tags. 
- extends_documentation_fragment: - - constructed - - inventory_cache - options: - cache: - version_added: 4.5.0 - cache_plugin: - version_added: 4.5.0 - cache_timeout: - version_added: 4.5.0 - cache_connection: - version_added: 4.5.0 - cache_prefix: - version_added: 4.5.0 - plugin: - description: Marks this as an instance of the 'linode' plugin. - required: true - choices: ['linode', 'community.general.linode'] - ip_style: - description: Populate hostvars with all information available from the Linode APIv4. - type: string - default: plain - choices: - - plain - - api - version_added: 3.6.0 - access_token: - description: The Linode account personal access token. - required: true - env: - - name: LINODE_ACCESS_TOKEN - regions: - description: Populate inventory with instances in this region. - default: [] - type: list - elements: string - tags: - description: Populate inventory only with instances which have at least one of the tags listed here. - default: [] - type: list - elements: string - version_added: 2.0.0 - types: - description: Populate inventory with instances with this type. - default: [] - type: list - elements: string - strict: - version_added: 2.0.0 - compose: - version_added: 2.0.0 - groups: - version_added: 2.0.0 - keyed_groups: - version_added: 2.0.0 -''' +DOCUMENTATION = r""" +name: linode +author: + - Luke Murphy (@decentral1se) +short_description: Ansible dynamic inventory plugin for Linode +requirements: + - linode_api4 >= 2.0.0 +description: + - Reads inventories from the Linode API v4. + - Uses a YAML configuration file that ends with linode.(yml|yaml). + - Linode labels are used by default as the hostnames. + - The default inventory groups are built from groups (deprecated by Linode) and not tags. 
+extends_documentation_fragment: + - constructed + - inventory_cache +options: + cache: + version_added: 4.5.0 + cache_plugin: + version_added: 4.5.0 + cache_timeout: + version_added: 4.5.0 + cache_connection: + version_added: 4.5.0 + cache_prefix: + version_added: 4.5.0 + plugin: + description: Marks this as an instance of the 'linode' plugin. + type: string + required: true + choices: ['linode', 'community.general.linode'] + ip_style: + description: Populate hostvars with all information available from the Linode APIv4. + type: string + default: plain + choices: + - plain + - api + version_added: 3.6.0 + access_token: + description: The Linode account personal access token. + type: string + required: true + env: + - name: LINODE_ACCESS_TOKEN + regions: + description: Populate inventory with instances in this region. + default: [] + type: list + elements: string + tags: + description: Populate inventory only with instances which have at least one of the tags listed here. + default: [] + type: list + elements: string + version_added: 2.0.0 + types: + description: Populate inventory with instances with this type. + default: [] + type: list + elements: string + strict: + version_added: 2.0.0 + compose: + version_added: 2.0.0 + groups: + version_added: 2.0.0 + keyed_groups: + version_added: 2.0.0 +""" -EXAMPLES = r''' +EXAMPLES = r""" +--- # Minimal example. `LINODE_ACCESS_TOKEN` is exposed in environment. plugin: community.general.linode +--- # You can use Jinja to template the access token. 
plugin: community.general.linode access_token: "{{ lookup('ini', 'token', section='your_username', file='~/.config/linode-cli') }}" # For older Ansible versions, you need to write this as: # access_token: "{{ lookup('ini', 'token section=your_username file=~/.config/linode-cli') }}" +--- # Example with regions, types, groups and access token plugin: community.general.linode access_token: foobar @@ -94,6 +96,7 @@ regions: types: - g5-standard-2 +--- # Example with keyed_groups, groups, and compose plugin: community.general.linode access_token: foobar @@ -112,20 +115,19 @@ compose: ansible_ssh_host: ipv4[0] ansible_port: 2222 +--- # Example where control traffic limited to internal network plugin: community.general.linode access_token: foobar ip_style: api compose: ansible_host: "ipv4 | community.general.json_query('[?public==`false`].address') | first" -''' +""" -import os - -from ansible.errors import AnsibleError, AnsibleParserError -from ansible.module_utils.six import string_types +from ansible.errors import AnsibleError from ansible.plugins.inventory import BaseInventoryPlugin, Constructable, Cacheable -from ansible.template import Templar + +from ansible_collections.community.general.plugins.plugin_utils.unsafe import make_unsafe try: @@ -144,22 +146,14 @@ class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable): def _build_client(self, loader): """Build the Linode client.""" - t = Templar(loader=loader) - access_token = self.get_option('access_token') - if t.is_template(access_token): - access_token = t.template(variable=access_token, disable_lookups=False) - - if access_token is None: - try: - access_token = os.environ['LINODE_ACCESS_TOKEN'] - except KeyError: - pass + if self.templar.is_template(access_token): + access_token = self.templar.template(variable=access_token) if access_token is None: raise AnsibleError(( 'Could not retrieve Linode access token ' - 'from plugin configuration or environment' + 'from plugin configuration sources' )) 
self.client = LinodeClient(access_token) @@ -169,17 +163,11 @@ class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable): try: self.instances = self.client.linode.instances() except LinodeApiError as exception: - raise AnsibleError('Linode client raised: %s' % exception) + raise AnsibleError(f'Linode client raised: {exception}') def _add_groups(self): """Add Linode instance groups to the dynamic inventory.""" - self.linode_groups = set( - filter(None, [ - instance.group - for instance - in self.instances - ]) - ) + self.linode_groups = {instance.group for instance in self.instances if instance.group} for linode_group in self.linode_groups: self.inventory.add_group(linode_group) @@ -210,20 +198,21 @@ class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable): def _add_instances_to_groups(self): """Add instance names to their dynamic inventory groups.""" for instance in self.instances: - self.inventory.add_host(instance.label, group=instance.group) + self.inventory.add_host(make_unsafe(instance.label), group=instance.group) def _add_hostvars_for_instances(self): """Add hostvars for instances in the dynamic inventory.""" ip_style = self.get_option('ip_style') for instance in self.instances: hostvars = instance._raw_json + hostname = make_unsafe(instance.label) for hostvar_key in hostvars: if ip_style == 'api' and hostvar_key in ['ipv4', 'ipv6']: continue self.inventory.set_variable( - instance.label, + hostname, hostvar_key, - hostvars[hostvar_key] + make_unsafe(hostvars[hostvar_key]) ) if ip_style == 'api': ips = instance.ips.ipv4.public + instance.ips.ipv4.private @@ -232,9 +221,9 @@ class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable): for ip_type in set(ip.type for ip in ips): self.inventory.set_variable( - instance.label, + hostname, ip_type, - self._ip_data([ip for ip in ips if ip.type == ip_type]) + make_unsafe(self._ip_data([ip for ip in ips if ip.type == ip_type])) ) def _ip_data(self, ip_list): @@ -265,30 +254,44 @@ class 
InventoryModule(BaseInventoryPlugin, Constructable, Cacheable): self._add_instances_to_groups() self._add_hostvars_for_instances() for instance in self.instances: - variables = self.inventory.get_host(instance.label).get_vars() + hostname = make_unsafe(instance.label) + variables = self.inventory.get_host(hostname).get_vars() self._add_host_to_composed_groups( self.get_option('groups'), variables, - instance.label, + hostname, strict=strict) self._add_host_to_keyed_groups( self.get_option('keyed_groups'), variables, - instance.label, + hostname, strict=strict) self._set_composite_vars( self.get_option('compose'), variables, - instance.label, + hostname, strict=strict) def verify_file(self, path): - """Verify the Linode configuration file.""" + """Verify the Linode configuration file. + + Return true/false if the config-file is valid for this plugin + + Args: + str(path): path to the config + Kwargs: + None + Raises: + None + Returns: + bool(valid): is valid config file""" + valid = False if super(InventoryModule, self).verify_file(path): - endings = ('linode.yaml', 'linode.yml') - if any((path.endswith(ending) for ending in endings)): - return True - return False + if path.endswith(("linode.yaml", "linode.yml")): + valid = True + else: + self.display.vvv('Inventory source not ending in "linode.yaml" or "linode.yml"') + return valid def parse(self, inventory, loader, path, cache=True): """Dynamically parse Linode the cloud inventory.""" diff --git a/plugins/inventory/lxd.py b/plugins/inventory/lxd.py index 912638509d..492d12a21b 100644 --- a/plugins/inventory/lxd.py +++ b/plugins/inventory/lxd.py @@ -1,101 +1,123 @@ -# -*- coding: utf-8 -*- -# Copyright: (c) 2021, Frank Dornheim -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# Copyright (c) 2021, Frank Dornheim +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later 
-from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = r''' - name: lxd - short_description: Returns Ansible inventory from lxd host +DOCUMENTATION = r""" +name: lxd +short_description: Returns Ansible inventory from lxd host +description: + - Get inventory from the lxd. + - Uses a YAML configuration file that ends with 'lxd.(yml|yaml)'. +version_added: "3.0.0" +author: "Frank Dornheim (@conloos)" +requirements: + - ipaddress + - lxd >= 4.0 +options: + plugin: + description: Token that ensures this is a source file for the 'lxd' plugin. + type: string + required: true + choices: ['community.general.lxd'] + url: description: - - Get inventory from the lxd. - - Uses a YAML configuration file that ends with 'lxd.(yml|yaml)'. - version_added: "3.0.0" - author: "Frank Dornheim (@conloos)" - requirements: - - ipaddress - - lxd >= 4.0 - options: - plugin: - description: Token that ensures this is a source file for the 'lxd' plugin. - required: true - choices: [ 'community.general.lxd' ] - url: - description: - - The unix domain socket path or the https URL for the lxd server. - - Sockets in filesystem have to start with C(unix:). - - Mostly C(unix:/var/lib/lxd/unix.socket) or C(unix:/var/snap/lxd/common/lxd/unix.socket). - default: unix:/var/snap/lxd/common/lxd/unix.socket - type: str - client_key: - description: - - The client certificate key file path. - aliases: [ key_file ] - default: $HOME/.config/lxc/client.key - type: path - client_cert: - description: - - The client certificate file path. - aliases: [ cert_file ] - default: $HOME/.config/lxc/client.crt - type: path - trust_password: - description: - - The client trusted password. - - You need to set this password on the lxd server before - running this module using the following command - C(lxc config set core.trust_password ) - See U(https://www.stgraber.org/2016/04/18/lxd-api-direct-interaction/). 
- - If I(trust_password) is set, this module send a request for authentication before sending any requests. - type: str - state: - description: Filter the instance according to the current status. - type: str - default: none - choices: [ 'STOPPED', 'STARTING', 'RUNNING', 'none' ] - type_filter: - description: - - Filter the instances by type C(virtual-machine), C(container) or C(both). - - The first version of the inventory only supported containers. - type: str - default: container - choices: [ 'virtual-machine', 'container', 'both' ] - version_added: 4.2.0 - prefered_instance_network_interface: - description: - - If an instance has multiple network interfaces, select which one is the prefered as pattern. - - Combined with the first number that can be found e.g. 'eth' + 0. - - The option has been renamed from I(prefered_container_network_interface) to I(prefered_instance_network_interface) in community.general 3.8.0. - The old name still works as an alias. - type: str - default: eth - aliases: - - prefered_container_network_interface - prefered_instance_network_family: - description: - - If an instance has multiple network interfaces, which one is the prefered by family. - - Specify C(inet) for IPv4 and C(inet6) for IPv6. - type: str - default: inet - choices: [ 'inet', 'inet6' ] - groupby: - description: - - Create groups by the following keywords C(location), C(network_range), C(os), C(pattern), C(profile), C(release), C(type), C(vlanid). - - See example for syntax. - type: dict -''' + - The unix domain socket path or the https URL for the lxd server. + - Sockets in filesystem have to start with C(unix:). + - Mostly C(unix:/var/lib/lxd/unix.socket) or C(unix:/var/snap/lxd/common/lxd/unix.socket). + type: string + default: unix:/var/snap/lxd/common/lxd/unix.socket + client_key: + description: + - The client certificate key file path. 
+ aliases: [key_file] + default: $HOME/.config/lxc/client.key + type: path + client_cert: + description: + - The client certificate file path. + aliases: [cert_file] + default: $HOME/.config/lxc/client.crt + type: path + server_cert: + description: + - The server certificate file path. + type: path + version_added: 8.0.0 + server_check_hostname: + description: + - This option controls if the server's hostname is checked as part of the HTTPS connection verification. This can be + useful to disable, if for example, the server certificate provided (see O(server_cert) option) does not cover a name + matching the one used to communicate with the server. Such mismatch is common as LXD generates self-signed server + certificates by default. + type: bool + default: true + version_added: 8.0.0 + trust_password: + description: + - The client trusted password. + - You need to set this password on the lxd server before running this module using the following command C(lxc config + set core.trust_password ) See + U(https://documentation.ubuntu.com/lxd/en/latest/authentication/#adding-client-certificates-using-a-trust-password). + - If O(trust_password) is set, this module send a request for authentication before sending any requests. + type: str + state: + description: Filter the instance according to the current status. + type: str + default: none + choices: ['STOPPED', 'STARTING', 'RUNNING', 'none'] + project: + description: Filter the instance according to the given project. + type: str + default: default + version_added: 6.2.0 + type_filter: + description: + - Filter the instances by type V(virtual-machine), V(container) or V(both). + - The first version of the inventory only supported containers. + type: str + default: container + choices: ['virtual-machine', 'container', 'both'] + version_added: 4.2.0 + prefered_instance_network_interface: + description: + - If an instance has multiple network interfaces, select which one is the preferred as pattern. 
+ - Combined with the first number that can be found, for example C(eth) + C(0). + - The option has been renamed from O(prefered_container_network_interface) to O(prefered_instance_network_interface) + in community.general 3.8.0. The old name still works as an alias. + type: str + default: eth + aliases: + - prefered_container_network_interface + prefered_instance_network_family: + description: + - If an instance has multiple network interfaces, which one is the preferred by family. + - Specify V(inet) for IPv4 and V(inet6) for IPv6. + type: str + default: inet + choices: ['inet', 'inet6'] + groupby: + description: + - Create groups by the following keywords C(location), C(network_range), C(os), C(pattern), C(profile), C(release), + C(type), C(vlanid). + - See example for syntax. + type: dict +""" -EXAMPLES = ''' +EXAMPLES = r""" +--- # simple lxd.yml plugin: community.general.lxd url: unix:/var/snap/lxd/common/lxd/unix.socket +--- # simple lxd.yml including filter plugin: community.general.lxd url: unix:/var/snap/lxd/common/lxd/unix.socket state: RUNNING +--- # simple lxd.yml including virtual machines and containers plugin: community.general.lxd url: unix:/var/snap/lxd/common/lxd/unix.socket @@ -139,20 +161,23 @@ groupby: vlan666: type: vlanid attribute: 666 -''' + projectInternals: + type: project + attribute: internals +""" -import binascii import json import re import time import os -import socket +from urllib.parse import urlencode + from ansible.plugins.inventory import BaseInventoryPlugin from ansible.module_utils.common.text.converters import to_native, to_text from ansible.module_utils.common.dict_transformations import dict_merge -from ansible.module_utils.six import raise_from from ansible.errors import AnsibleError, AnsibleParserError from ansible_collections.community.general.plugins.module_utils.lxd import LXDClient, LXDClientException +from ansible_collections.community.general.plugins.plugin_utils.unsafe import make_unsafe try: import ipaddress @@ 
-187,7 +212,7 @@ class InventoryModule(BaseInventoryPlugin): with open(path, 'r') as json_file: return json.load(json_file) except (IOError, json.decoder.JSONDecodeError) as err: - raise AnsibleParserError('Could not load the test data from {0}: {1}'.format(to_native(path), to_native(err))) + raise AnsibleParserError(f'Could not load the test data from {to_native(path)}: {err}') def save_json_data(self, path, file_name=None): """save data as json @@ -217,7 +242,7 @@ class InventoryModule(BaseInventoryPlugin): with open(os.path.abspath(os.path.join(cwd, *path)), 'w') as json_file: json.dump(self.data, json_file) except IOError as err: - raise AnsibleParserError('Could not save data: {0}'.format(to_native(err))) + raise AnsibleParserError(f'Could not save data: {err}') def verify_file(self, path): """Check the config @@ -257,7 +282,7 @@ class InventoryModule(BaseInventoryPlugin): if not isinstance(url, str): return False if not url.startswith(('unix:', 'https:')): - raise AnsibleError('URL is malformed: {0}'.format(to_native(url))) + raise AnsibleError(f'URL is malformed: {url}') return True def _connect_to_socket(self): @@ -278,11 +303,11 @@ class InventoryModule(BaseInventoryPlugin): urls = (url for url in url_list if self.validate_url(url)) for url in urls: try: - socket_connection = LXDClient(url, self.client_key, self.client_cert, self.debug) + socket_connection = LXDClient(url, self.client_key, self.client_cert, self.debug, self.server_cert, self.server_check_hostname) return socket_connection except LXDClientException as err: error_storage[url] = err - raise AnsibleError('No connection to the socket: {0}'.format(to_native(error_storage))) + raise AnsibleError(f'No connection to the socket: {error_storage}') def _get_networks(self): """Get Networknames @@ -329,7 +354,15 @@ class InventoryModule(BaseInventoryPlugin): # "status_code": 200, # "type": "sync" # } - instances = self.socket.do('GET', '/1.0/instances') + url = '/1.0/instances' + if self.project: + url 
= f"{url}?{urlencode(dict(project=self.project))}" + + instances = self.socket.do('GET', url) + + if self.project: + return [m.split('/')[3].split('?')[0] for m in instances['metadata']] + return [m.split('/')[3] for m in instances['metadata']] def _get_config(self, branch, name): @@ -343,22 +376,24 @@ class InventoryModule(BaseInventoryPlugin): Kwargs: None Source: - https://github.com/lxc/lxd/blob/master/doc/rest-api.md + https://documentation.ubuntu.com/lxd/en/latest/rest-api/ Raises: None Returns: dict(config): Config of the instance""" config = {} if isinstance(branch, (tuple, list)): - config[name] = {branch[1]: self.socket.do('GET', '/1.0/{0}/{1}/{2}'.format(to_native(branch[0]), to_native(name), to_native(branch[1])))} + config[name] = {branch[1]: self.socket.do( + 'GET', f'/1.0/{to_native(branch[0])}/{to_native(name)}/{to_native(branch[1])}?{urlencode(dict(project=self.project))}')} else: - config[name] = {branch: self.socket.do('GET', '/1.0/{0}/{1}'.format(to_native(branch), to_native(name)))} + config[name] = {branch: self.socket.do( + 'GET', f'/1.0/{to_native(branch)}/{to_native(name)}?{urlencode(dict(project=self.project))}')} return config def get_instance_data(self, names): """Create Inventory of the instance - Iterate through the different branches of the instances and collect Informations. + Iterate through the different branches of the instances and collect Information. Args: list(names): List of instance names @@ -380,7 +415,7 @@ class InventoryModule(BaseInventoryPlugin): def get_network_data(self, names): """Create Inventory of the instance - Iterate through the different branches of the instances and collect Informations. + Iterate through the different branches of the instances and collect Information. 
Args: list(names): List of instance names @@ -415,7 +450,7 @@ class InventoryModule(BaseInventoryPlugin): None Returns: dict(network_configuration): network config""" - instance_network_interfaces = self._get_data_entry('instances/{0}/state/metadata/network'.format(instance_name)) + instance_network_interfaces = self._get_data_entry(f'instances/{instance_name}/state/metadata/network') network_configuration = None if instance_network_interfaces: network_configuration = {} @@ -428,24 +463,24 @@ class InventoryModule(BaseInventoryPlugin): address_set['family'] = address.get('family') address_set['address'] = address.get('address') address_set['netmask'] = address.get('netmask') - address_set['combined'] = address.get('address') + '/' + address.get('netmask') + address_set['combined'] = f"{address.get('address')}/{address.get('netmask')}" network_configuration[interface_name].append(address_set) return network_configuration def get_prefered_instance_network_interface(self, instance_name): - """Helper to get the prefered interface of thr instance + """Helper to get the preferred interface of thr instance - Helper to get the prefered interface provide by neme pattern from 'prefered_instance_network_interface'. + Helper to get the preferred interface provide by neme pattern from 'prefered_instance_network_interface'. 
Args: - str(containe_name): name of instance + str(instance_name): name of instance Kwargs: None Raises: None Returns: str(prefered_interface): None or interface name""" - instance_network_interfaces = self._get_data_entry('inventory/{0}/network_interfaces'.format(instance_name)) + instance_network_interfaces = self._get_data_entry(f'inventory/{instance_name}/network_interfaces') prefered_interface = None # init if instance_network_interfaces: # instance have network interfaces # generator if interfaces which start with the desired pattern @@ -463,7 +498,7 @@ class InventoryModule(BaseInventoryPlugin): Helper to get the VLAN_ID from the instance Args: - str(containe_name): name of instance + str(instance_name): name of instance Kwargs: None Raises: @@ -482,7 +517,7 @@ class InventoryModule(BaseInventoryPlugin): # "network":"lxdbr0", # "type":"nic"}, vlan_ids = {} - devices = self._get_data_entry('instances/{0}/instances/metadata/expanded_devices'.format(to_native(instance_name))) + devices = self._get_data_entry(f'instances/{to_native(instance_name)}/instances/metadata/expanded_devices') for device in devices: if 'network' in devices[device]: if devices[device]['network'] in network_vlans: @@ -522,7 +557,7 @@ class InventoryModule(BaseInventoryPlugin): """Helper to save data Helper to save the data in self.data - Detect if data is allready in branch and use dict_merge() to prevent that branch is overwritten. + Detect if data is already in branch and use dict_merge() to prevent that branch is overwritten. 
Args: str(instance_name): name of instance @@ -545,7 +580,7 @@ class InventoryModule(BaseInventoryPlugin): else: path[instance_name][key] = value except KeyError as err: - raise AnsibleParserError("Unable to store Informations: {0}".format(to_native(err))) + raise AnsibleParserError(f"Unable to store Information: {err}") def extract_information_from_instance_configs(self): """Process configuration information @@ -566,22 +601,24 @@ class InventoryModule(BaseInventoryPlugin): for instance_name in self.data['instances']: self._set_data_entry(instance_name, 'os', self._get_data_entry( - 'instances/{0}/instances/metadata/config/image.os'.format(instance_name))) + f'instances/{instance_name}/instances/metadata/config/image.os')) self._set_data_entry(instance_name, 'release', self._get_data_entry( - 'instances/{0}/instances/metadata/config/image.release'.format(instance_name))) + f'instances/{instance_name}/instances/metadata/config/image.release')) self._set_data_entry(instance_name, 'version', self._get_data_entry( - 'instances/{0}/instances/metadata/config/image.version'.format(instance_name))) + f'instances/{instance_name}/instances/metadata/config/image.version')) self._set_data_entry(instance_name, 'profile', self._get_data_entry( - 'instances/{0}/instances/metadata/profiles'.format(instance_name))) + f'instances/{instance_name}/instances/metadata/profiles')) self._set_data_entry(instance_name, 'location', self._get_data_entry( - 'instances/{0}/instances/metadata/location'.format(instance_name))) + f'instances/{instance_name}/instances/metadata/location')) self._set_data_entry(instance_name, 'state', self._get_data_entry( - 'instances/{0}/instances/metadata/config/volatile.last_state.power'.format(instance_name))) + f'instances/{instance_name}/instances/metadata/config/volatile.last_state.power')) self._set_data_entry(instance_name, 'type', self._get_data_entry( - 'instances/{0}/instances/metadata/type'.format(instance_name))) + 
f'instances/{instance_name}/instances/metadata/type')) self._set_data_entry(instance_name, 'network_interfaces', self.extract_network_information_from_instance_config(instance_name)) self._set_data_entry(instance_name, 'preferred_interface', self.get_prefered_instance_network_interface(instance_name)) self._set_data_entry(instance_name, 'vlan_ids', self.get_instance_vlans(instance_name)) + self._set_data_entry(instance_name, 'project', self._get_data_entry( + f'instances/{instance_name}/instances/metadata/project')) def build_inventory_network(self, instance_name): """Add the network interfaces of the instance to the inventory @@ -615,18 +652,18 @@ class InventoryModule(BaseInventoryPlugin): None Returns: dict(interface_name: ip)""" - prefered_interface = self._get_data_entry('inventory/{0}/preferred_interface'.format(instance_name)) # name or None + prefered_interface = self._get_data_entry(f'inventory/{instance_name}/preferred_interface') # name or None prefered_instance_network_family = self.prefered_instance_network_family ip_address = '' if prefered_interface: - interface = self._get_data_entry('inventory/{0}/network_interfaces/{1}'.format(instance_name, prefered_interface)) + interface = self._get_data_entry(f'inventory/{instance_name}/network_interfaces/{prefered_interface}') for config in interface: if config['family'] == prefered_instance_network_family: ip_address = config['address'] break else: - interfaces = self._get_data_entry('inventory/{0}/network_interfaces'.format(instance_name)) + interfaces = self._get_data_entry(f'inventory/{instance_name}/network_interfaces') for interface in interfaces.values(): for config in interface: if config['family'] == prefered_instance_network_family: @@ -634,9 +671,9 @@ class InventoryModule(BaseInventoryPlugin): break return ip_address - if self._get_data_entry('inventory/{0}/network_interfaces'.format(instance_name)): # instance have network interfaces + if 
self._get_data_entry(f'inventory/{instance_name}/network_interfaces'): # instance have network interfaces self.inventory.set_variable(instance_name, 'ansible_connection', 'ssh') - self.inventory.set_variable(instance_name, 'ansible_host', interface_selection(instance_name)) + self.inventory.set_variable(instance_name, 'ansible_host', make_unsafe(interface_selection(instance_name))) else: self.inventory.set_variable(instance_name, 'ansible_connection', 'local') @@ -655,36 +692,46 @@ class InventoryModule(BaseInventoryPlugin): Returns: None""" for instance_name in self.data['inventory']: - instance_state = str(self._get_data_entry('inventory/{0}/state'.format(instance_name)) or "STOPPED").lower() + instance_state = str(self._get_data_entry(f'inventory/{instance_name}/state') or "STOPPED").lower() # Only consider instances that match the "state" filter, if self.state is not None if self.filter: if self.filter.lower() != instance_state: continue # add instance + instance_name = make_unsafe(instance_name) self.inventory.add_host(instance_name) - # add network informations + # add network information self.build_inventory_network(instance_name) # add os - v = self._get_data_entry('inventory/{0}/os'.format(instance_name)) + v = self._get_data_entry(f'inventory/{instance_name}/os') if v: - self.inventory.set_variable(instance_name, 'ansible_lxd_os', v.lower()) + self.inventory.set_variable(instance_name, 'ansible_lxd_os', make_unsafe(v.lower())) # add release - v = self._get_data_entry('inventory/{0}/release'.format(instance_name)) + v = self._get_data_entry(f'inventory/{instance_name}/release') if v: - self.inventory.set_variable(instance_name, 'ansible_lxd_release', v.lower()) + self.inventory.set_variable( + instance_name, 'ansible_lxd_release', make_unsafe(v.lower())) # add profile - self.inventory.set_variable(instance_name, 'ansible_lxd_profile', self._get_data_entry('inventory/{0}/profile'.format(instance_name))) + self.inventory.set_variable( + instance_name, 
'ansible_lxd_profile', make_unsafe(self._get_data_entry(f'inventory/{instance_name}/profile'))) # add state - self.inventory.set_variable(instance_name, 'ansible_lxd_state', instance_state) + self.inventory.set_variable( + instance_name, 'ansible_lxd_state', make_unsafe(instance_state)) # add type - self.inventory.set_variable(instance_name, 'ansible_lxd_type', self._get_data_entry('inventory/{0}/type'.format(instance_name))) + self.inventory.set_variable( + instance_name, 'ansible_lxd_type', make_unsafe(self._get_data_entry(f'inventory/{instance_name}/type'))) # add location information - if self._get_data_entry('inventory/{0}/location'.format(instance_name)) != "none": # wrong type by lxd 'none' != 'None' - self.inventory.set_variable(instance_name, 'ansible_lxd_location', self._get_data_entry('inventory/{0}/location'.format(instance_name))) + if self._get_data_entry(f'inventory/{instance_name}/location') != "none": # wrong type by lxd 'none' != 'None' + self.inventory.set_variable( + instance_name, 'ansible_lxd_location', make_unsafe(self._get_data_entry(f'inventory/{instance_name}/location'))) # add VLAN_ID information - if self._get_data_entry('inventory/{0}/vlan_ids'.format(instance_name)): - self.inventory.set_variable(instance_name, 'ansible_lxd_vlan_ids', self._get_data_entry('inventory/{0}/vlan_ids'.format(instance_name))) + if self._get_data_entry(f'inventory/{instance_name}/vlan_ids'): + self.inventory.set_variable( + instance_name, 'ansible_lxd_vlan_ids', make_unsafe(self._get_data_entry(f'inventory/{instance_name}/vlan_ids'))) + # add project + self.inventory.set_variable( + instance_name, 'ansible_lxd_project', make_unsafe(self._get_data_entry(f'inventory/{instance_name}/project'))) def build_inventory_groups_location(self, group_name): """create group by attribute: location @@ -746,7 +793,7 @@ class InventoryModule(BaseInventoryPlugin): network = ipaddress.ip_network(to_text(self.groupby[group_name].get('attribute'))) except ValueError as err: raise 
AnsibleParserError( - 'Error while parsing network range {0}: {1}'.format(self.groupby[group_name].get('attribute'), to_native(err))) + f"Error while parsing network range {self.groupby[group_name].get('attribute')}: {err}") for instance_name in self.inventory.hosts: if self.data['inventory'][instance_name].get('network_interfaces') is not None: @@ -760,6 +807,28 @@ class InventoryModule(BaseInventoryPlugin): # Ignore invalid IP addresses returned by lxd pass + def build_inventory_groups_project(self, group_name): + """create group by attribute: project + + Args: + str(group_name): Group name + Kwargs: + None + Raises: + None + Returns: + None""" + # maybe we just want to expand one group + if group_name not in self.inventory.groups: + self.inventory.add_group(group_name) + + gen_instances = [ + instance_name for instance_name in self.inventory.hosts + if 'ansible_lxd_project' in self.inventory.get_host(instance_name).get_vars()] + for instance_name in gen_instances: + if self.groupby[group_name].get('attribute').lower() == self.inventory.get_host(instance_name).get_vars().get('ansible_lxd_project'): + self.inventory.add_child(group_name, instance_name) + def build_inventory_groups_os(self, group_name): """create group by attribute: os @@ -898,6 +967,7 @@ class InventoryModule(BaseInventoryPlugin): * 'profile' * 'vlanid' * 'type' + * 'project' Args: str(group_name): Group name @@ -925,14 +995,16 @@ class InventoryModule(BaseInventoryPlugin): self.build_inventory_groups_vlanid(group_name) elif self.groupby[group_name].get('type') == 'type': self.build_inventory_groups_type(group_name) + elif self.groupby[group_name].get('type') == 'project': + self.build_inventory_groups_project(group_name) else: - raise AnsibleParserError('Unknown group type: {0}'.format(to_native(group_name))) + raise AnsibleParserError(f'Unknown group type: {to_native(group_name)}') if self.groupby: for group_name in self.groupby: if not group_name.isalnum(): - raise AnsibleParserError('Invalid 
character(s) in groupname: {0}'.format(to_native(group_name))) - group_type(group_name) + raise AnsibleParserError(f'Invalid character(s) in groupname: {to_native(group_name)}') + group_type(make_unsafe(group_name)) def build_inventory(self): """Build dynamic inventory @@ -968,7 +1040,7 @@ class InventoryModule(BaseInventoryPlugin): None""" iter_keys = list(self.data['instances'].keys()) for instance_name in iter_keys: - if self._get_data_entry('instances/{0}/instances/metadata/type'.format(instance_name)) != self.type_filter: + if self._get_data_entry(f'instances/{instance_name}/instances/metadata/type') != self.type_filter: del self.data['instances'][instance_name] def _populate(self): @@ -1021,9 +1093,7 @@ class InventoryModule(BaseInventoryPlugin): Returns: None""" if IPADDRESS_IMPORT_ERROR: - raise_from( - AnsibleError('another_library must be installed to use this plugin'), - IPADDRESS_IMPORT_ERROR) + raise AnsibleError('another_library must be installed to use this plugin') from IPADDRESS_IMPORT_ERROR super(InventoryModule, self).parse(inventory, loader, path, cache=False) # Read the inventory YAML file @@ -1031,6 +1101,9 @@ class InventoryModule(BaseInventoryPlugin): try: self.client_key = self.get_option('client_key') self.client_cert = self.get_option('client_cert') + self.server_cert = self.get_option('server_cert') + self.server_check_hostname = self.get_option('server_check_hostname') + self.project = self.get_option('project') self.debug = self.DEBUG self.data = {} # store for inventory-data self.groupby = self.get_option('groupby') @@ -1046,6 +1119,6 @@ class InventoryModule(BaseInventoryPlugin): self.url = self.get_option('url') except Exception as err: raise AnsibleParserError( - 'All correct options required: {0}'.format(to_native(err))) + f'All correct options required: {err}') # Call our internal helper to populate the dynamic inventory self._populate() diff --git a/plugins/inventory/nmap.py b/plugins/inventory/nmap.py index 
6d1779bb48..ea0ce560fd 100644 --- a/plugins/inventory/nmap.py +++ b/plugins/inventory/nmap.py @@ -1,67 +1,127 @@ -# -*- coding: utf-8 -*- # Copyright (c) 2017 Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = ''' - author: Unknown (!UNKNOWN) - name: nmap - short_description: Uses nmap to find hosts to target +DOCUMENTATION = r""" +author: Unknown (!UNKNOWN) +name: nmap +short_description: Uses nmap to find hosts to target +description: + - Uses a YAML configuration file with a valid YAML extension. +extends_documentation_fragment: + - constructed + - inventory_cache +requirements: + - nmap CLI installed +options: + plugin: + description: Token that ensures this is a source file for the P(community.general.nmap#inventory) plugin. + type: string + required: true + choices: ['nmap', 'community.general.nmap'] + sudo: + description: Set to V(true) to execute a C(sudo nmap) plugin scan. + version_added: 4.8.0 + default: false + type: boolean + address: + description: Network IP or range of IPs to scan, you can use a simple range (10.2.2.15-25) or CIDR notation. + type: string + required: true + env: + - name: ANSIBLE_NMAP_ADDRESS + version_added: 6.6.0 + exclude: description: - - Uses a YAML configuration file with a valid YAML extension. - extends_documentation_fragment: - - constructed - - inventory_cache - requirements: - - nmap CLI installed - options: - plugin: - description: token that ensures this is a source file for the 'nmap' plugin. - required: True - choices: ['nmap', 'community.general.nmap'] - sudo: - description: Set to C(true) to execute a C(sudo nmap) plugin scan. 
- version_added: 4.8.0 - default: false - type: boolean - address: - description: Network IP or range of IPs to scan, you can use a simple range (10.2.2.15-25) or CIDR notation. - required: True - exclude: - description: list of addresses to exclude - type: list - elements: string - ports: - description: Enable/disable scanning for open ports - type: boolean - default: True - ipv4: - description: use IPv4 type addresses - type: boolean - default: True - ipv6: - description: use IPv6 type addresses - type: boolean - default: True - notes: - - At least one of ipv4 or ipv6 is required to be True, both can be True, but they cannot both be False. - - 'TODO: add OS fingerprinting' -''' -EXAMPLES = ''' + - List of addresses to exclude. + - For example V(10.2.2.15-25) or V(10.2.2.15,10.2.2.16). + type: list + elements: string + env: + - name: ANSIBLE_NMAP_EXCLUDE + version_added: 6.6.0 + port: + description: + - Only scan specific port or port range (C(-p)). + - For example, you could pass V(22) for a single port, V(1-65535) for a range of ports, or V(U:53,137,T:21-25,139,8080,S:9) + to check port 53 with UDP, ports 21-25 with TCP, port 9 with SCTP, and ports 137, 139, and 8080 with all. + type: string + version_added: 6.5.0 + ports: + description: Enable/disable scanning ports. + type: boolean + default: true + ipv4: + description: Use IPv4 type addresses. + type: boolean + default: true + ipv6: + description: Use IPv6 type addresses. + type: boolean + default: true + udp_scan: + description: + - Scan using UDP. + - Depending on your system you might need O(sudo=true) for this to work. + type: boolean + default: false + version_added: 6.1.0 + icmp_timestamp: + description: + - Scan using ICMP Timestamp (C(-PP)). + - Depending on your system you might need O(sudo=true) for this to work. + type: boolean + default: false + version_added: 6.1.0 + open: + description: Only scan for open (or possibly open) ports. 
+ type: boolean + default: false + version_added: 6.5.0 + dns_resolve: + description: Whether to always (V(true)) or never (V(false)) do DNS resolution. + type: boolean + default: false + version_added: 6.1.0 + dns_servers: + description: Specify which DNS servers to use for name resolution. + type: list + elements: string + version_added: 10.5.0 + use_arp_ping: + description: Whether to always (V(true)) use the quick ARP ping or (V(false)) a slower but more reliable method. + type: boolean + default: true + version_added: 7.4.0 +notes: + - At least one of O(ipv4) or O(ipv6) is required to be V(true); both can be V(true), but they cannot both be V(false). + - 'TODO: add OS fingerprinting.' +""" +EXAMPLES = r""" +--- # inventory.config file in YAML format plugin: community.general.nmap -strict: False +strict: false address: 192.168.0.0/24 - +--- # a sudo nmap scan to fully use nmap scan power. plugin: community.general.nmap sudo: true -strict: False +strict: false address: 192.168.0.0/24 -''' + +--- +# an nmap scan specifying ports and classifying results to an inventory group +plugin: community.general.nmap +address: 192.168.0.0/24 +exclude: 192.168.0.1, web.example.com +port: 22, 443 +groups: + web_servers: "ports | selectattr('port', 'equalto', '443')" +""" import os import re @@ -74,6 +134,8 @@ from ansible.module_utils.common.text.converters import to_native, to_text from ansible.plugins.inventory import BaseInventoryPlugin, Constructable, Cacheable from ansible.module_utils.common.process import get_bin_path +from ansible_collections.community.general.plugins.plugin_utils.unsafe import make_unsafe + class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable): @@ -90,6 +152,7 @@ class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable): strict = self.get_option('strict') for host in hosts: + host = make_unsafe(host) hostname = host['name'] self.inventory.add_host(hostname) for var, value in host.items(): @@ -120,7 +183,7 @@ class 
InventoryModule(BaseInventoryPlugin, Constructable, Cacheable): try: self._nmap = get_bin_path('nmap') except ValueError as e: - raise AnsibleParserError('nmap inventory plugin requires the nmap cli tool to work: {0}'.format(to_native(e))) + raise AnsibleParserError(f'nmap inventory plugin requires the nmap cli tool to work: {e}') super(InventoryModule, self).parse(inventory, loader, path, cache=cache) @@ -148,30 +211,53 @@ class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable): # setup command cmd = [self._nmap] - if self._options['sudo']: + if self.get_option('sudo'): cmd.insert(0, 'sudo') - if not self._options['ports']: + if self.get_option('port'): + cmd.append('-p') + cmd.append(self.get_option('port')) + + if not self.get_option('ports'): cmd.append('-sP') - if self._options['ipv4'] and not self._options['ipv6']: + if self.get_option('ipv4') and not self.get_option('ipv6'): cmd.append('-4') - elif self._options['ipv6'] and not self._options['ipv4']: + elif self.get_option('ipv6') and not self.get_option('ipv4'): cmd.append('-6') - elif not self._options['ipv6'] and not self._options['ipv4']: + elif not self.get_option('ipv6') and not self.get_option('ipv4'): raise AnsibleParserError('One of ipv4 or ipv6 must be enabled for this plugin') - if self._options['exclude']: + if self.get_option('exclude'): cmd.append('--exclude') - cmd.append(','.join(self._options['exclude'])) + cmd.append(','.join(self.get_option('exclude'))) - cmd.append(self._options['address']) + if self.get_option('dns_resolve'): + cmd.append('-n') + + if self.get_option('dns_servers'): + cmd.append('--dns-servers') + cmd.append(','.join(self.get_option('dns_servers'))) + + if self.get_option('udp_scan'): + cmd.append('-sU') + + if self.get_option('icmp_timestamp'): + cmd.append('-PP') + + if self.get_option('open'): + cmd.append('--open') + + if not self.get_option('use_arp_ping'): + cmd.append('--disable-arp-ping') + + cmd.append(self.get_option('address')) try: # execute p = 
Popen(cmd, stdout=PIPE, stderr=PIPE) stdout, stderr = p.communicate() if p.returncode != 0: - raise AnsibleParserError('Failed to run nmap, rc=%s: %s' % (p.returncode, to_native(stderr))) + raise AnsibleParserError(f'Failed to run nmap, rc={p.returncode}: {to_native(stderr)}') # parse results host = None @@ -182,7 +268,7 @@ class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable): try: t_stdout = to_text(stdout, errors='surrogate_or_strict') except UnicodeError as e: - raise AnsibleParserError('Invalid (non unicode) input returned: %s' % to_native(e)) + raise AnsibleParserError(f'Invalid (non unicode) input returned: {e}') for line in t_stdout.splitlines(): hits = self.find_host.match(line) @@ -223,7 +309,7 @@ class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable): results[-1]['ports'] = ports except Exception as e: - raise AnsibleParserError("failed to parse %s: %s " % (to_native(path), to_native(e))) + raise AnsibleParserError(f"failed to parse {to_native(path)}: {e} ") if cache_needs_update: self._cache[cache_key] = results diff --git a/plugins/inventory/online.py b/plugins/inventory/online.py index 00454f558c..cbc46a6723 100644 --- a/plugins/inventory/online.py +++ b/plugins/inventory/online.py @@ -1,51 +1,52 @@ -# -*- coding: utf-8 -*- # Copyright (c) 2018 Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = r''' - name: online - author: - - Remy Leone (@remyleone) - short_description: Scaleway (previously Online SAS or Online.net) inventory source - description: - - Get inventory hosts from Scaleway (previously Online SAS or Online.net). 
- options: - plugin: - description: token that ensures this is a source file for the 'online' plugin. - required: True - choices: ['online', 'community.general.online'] - oauth_token: - required: True - description: Online OAuth token. - env: - # in order of precedence - - name: ONLINE_TOKEN - - name: ONLINE_API_KEY - - name: ONLINE_OAUTH_TOKEN - hostnames: - description: List of preference about what to use as an hostname. - type: list - elements: string - default: - - public_ipv4 - choices: - - public_ipv4 - - private_ipv4 - - hostname - groups: - description: List of groups. - type: list - elements: string - choices: - - location - - offer - - rpn -''' +DOCUMENTATION = r""" +name: online +author: + - Remy Leone (@remyleone) +short_description: Scaleway (previously Online SAS or Online.net) inventory source +description: + - Get inventory hosts from Scaleway (previously Online SAS or Online.net). +options: + plugin: + description: Token that ensures this is a source file for the P(community.general.online#inventory) plugin. + type: string + required: true + choices: ['online', 'community.general.online'] + oauth_token: + required: true + description: Online OAuth token. + type: string + env: + # in order of precedence + - name: ONLINE_TOKEN + - name: ONLINE_API_KEY + - name: ONLINE_OAUTH_TOKEN + hostnames: + description: List of preference about what to use as an hostname. + type: list + elements: string + default: + - public_ipv4 + choices: + - public_ipv4 + - private_ipv4 + - hostname + groups: + description: List of groups. 
+ type: list + elements: string + choices: + - location + - offer + - rpn +""" -EXAMPLES = r''' +EXAMPLES = r""" # online_inventory.yml file in YAML format # Example command line: ansible-inventory --list -i online_inventory.yml @@ -56,17 +57,19 @@ groups: - location - offer - rpn -''' +""" import json from sys import version as python_version +from urllib.parse import urljoin from ansible.errors import AnsibleError from ansible.module_utils.urls import open_url from ansible.plugins.inventory import BaseInventoryPlugin -from ansible.module_utils.common.text.converters import to_native, to_text +from ansible.module_utils.common.text.converters import to_text from ansible.module_utils.ansible_release import __version__ as ansible_version -from ansible.module_utils.six.moves.urllib.parse import urljoin + +from ansible_collections.community.general.plugins.plugin_utils.unsafe import make_unsafe class InventoryModule(BaseInventoryPlugin): @@ -133,7 +136,7 @@ class InventoryModule(BaseInventoryPlugin): try: response = open_url(url, headers=self.headers) except Exception as e: - self.display.warning("An error happened while fetching: %s" % url) + self.display.warning(f"An error happened while fetching: {url}") return None try: @@ -168,20 +171,20 @@ class InventoryModule(BaseInventoryPlugin): "support" ) for attribute in targeted_attributes: - self.inventory.set_variable(hostname, attribute, host_infos[attribute]) + self.inventory.set_variable(hostname, attribute, make_unsafe(host_infos[attribute])) if self.extract_public_ipv4(host_infos=host_infos): - self.inventory.set_variable(hostname, "public_ipv4", self.extract_public_ipv4(host_infos=host_infos)) - self.inventory.set_variable(hostname, "ansible_host", self.extract_public_ipv4(host_infos=host_infos)) + self.inventory.set_variable(hostname, "public_ipv4", make_unsafe(self.extract_public_ipv4(host_infos=host_infos))) + self.inventory.set_variable(hostname, "ansible_host", 
make_unsafe(self.extract_public_ipv4(host_infos=host_infos))) if self.extract_private_ipv4(host_infos=host_infos): - self.inventory.set_variable(hostname, "public_ipv4", self.extract_private_ipv4(host_infos=host_infos)) + self.inventory.set_variable(hostname, "public_ipv4", make_unsafe(self.extract_private_ipv4(host_infos=host_infos))) if self.extract_os_name(host_infos=host_infos): - self.inventory.set_variable(hostname, "os_name", self.extract_os_name(host_infos=host_infos)) + self.inventory.set_variable(hostname, "os_name", make_unsafe(self.extract_os_name(host_infos=host_infos))) if self.extract_os_version(host_infos=host_infos): - self.inventory.set_variable(hostname, "os_version", self.extract_os_name(host_infos=host_infos)) + self.inventory.set_variable(hostname, "os_version", make_unsafe(self.extract_os_name(host_infos=host_infos))) def _filter_host(self, host_infos, hostname_preferences): @@ -200,6 +203,8 @@ class InventoryModule(BaseInventoryPlugin): if not hostname: return + hostname = make_unsafe(hostname) + self.inventory.add_host(host=hostname) self._fill_host_variables(hostname=hostname, host_infos=host_infos) @@ -209,6 +214,8 @@ class InventoryModule(BaseInventoryPlugin): if not group: return + group = make_unsafe(group) + self.inventory.add_group(group=group) self.inventory.add_host(group=group, host=hostname) @@ -236,8 +243,8 @@ class InventoryModule(BaseInventoryPlugin): } self.headers = { - 'Authorization': "Bearer %s" % token, - 'User-Agent': "ansible %s Python %s" % (ansible_version, python_version.split(' ', 1)[0]), + 'Authorization': f"Bearer {token}", + 'User-Agent': f"ansible {ansible_version} Python {python_version.split(' ', 1)[0]}", 'Content-type': 'application/json' } diff --git a/plugins/inventory/opennebula.py b/plugins/inventory/opennebula.py index 7822240627..26f7a21d88 100644 --- a/plugins/inventory/opennebula.py +++ b/plugins/inventory/opennebula.py @@ -1,82 +1,79 @@ -# -*- coding: utf-8 -*- # Copyright (c) 2020, FELDSAM s.r.o. 
- FeldHost™ -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import (absolute_import, division, print_function) +from __future__ import annotations -__metaclass__ = type -DOCUMENTATION = r''' - name: opennebula - author: - - Kristian Feldsam (@feldsam) - short_description: OpenNebula inventory source - version_added: "3.8.0" - extends_documentation_fragment: - - constructed +DOCUMENTATION = r""" +name: opennebula +author: + - Kristian Feldsam (@feldsam) +short_description: OpenNebula inventory source +version_added: "3.8.0" +extends_documentation_fragment: + - constructed +description: + - Get inventory hosts from OpenNebula cloud. + - Uses an YAML configuration file ending with either C(opennebula.yml) or C(opennebula.yaml) to set parameter values. + - Uses O(api_authfile), C(~/.one/one_auth), or E(ONE_AUTH) pointing to a OpenNebula credentials file. +options: + plugin: + description: Token that ensures this is a source file for the 'opennebula' plugin. + type: string + required: true + choices: [community.general.opennebula] + api_url: description: - - Get inventory hosts from OpenNebula cloud. - - Uses an YAML configuration file ending with either I(opennebula.yml) or I(opennebula.yaml) - to set parameter values. - - Uses I(api_authfile), C(~/.one/one_auth), or C(ONE_AUTH) pointing to a OpenNebula credentials file. - options: - plugin: - description: Token that ensures this is a source file for the 'opennebula' plugin. - type: string - required: true - choices: [ community.general.opennebula ] - api_url: - description: - - URL of the OpenNebula RPC server. - - It is recommended to use HTTPS so that the username/password are not - transferred over the network unencrypted. 
- - If not set then the value of the C(ONE_URL) environment variable is used. - env: - - name: ONE_URL - required: True - type: string - api_username: - description: - - Name of the user to login into the OpenNebula RPC server. If not set - then the value of the C(ONE_USERNAME) environment variable is used. - env: - - name: ONE_USERNAME - type: string - api_password: - description: - - Password or a token of the user to login into OpenNebula RPC server. - - If not set, the value of the C(ONE_PASSWORD) environment variable is used. - env: - - name: ONE_PASSWORD - required: False - type: string - api_authfile: - description: - - If both I(api_username) or I(api_password) are not set, then it will try - authenticate with ONE auth file. Default path is C(~/.one/one_auth). - - Set environment variable C(ONE_AUTH) to override this path. - env: - - name: ONE_AUTH - required: False - type: string - hostname: - description: Field to match the hostname. Note C(v4_first_ip) corresponds to the first IPv4 found on VM. - type: string - default: v4_first_ip - choices: - - v4_first_ip - - v6_first_ip - - name - filter_by_label: - description: Only return servers filtered by this label. - type: string - group_by_labels: - description: Create host groups by vm labels - type: bool - default: True -''' + - URL of the OpenNebula RPC server. + - It is recommended to use HTTPS so that the username/password are not transferred over the network unencrypted. + - If not set then the value of the E(ONE_URL) environment variable is used. + env: + - name: ONE_URL + required: true + type: string + api_username: + description: + - Name of the user to login into the OpenNebula RPC server. If not set then the value of the E(ONE_USERNAME) environment + variable is used. + env: + - name: ONE_USERNAME + type: string + api_password: + description: + - Password or a token of the user to login into OpenNebula RPC server. + - If not set, the value of the E(ONE_PASSWORD) environment variable is used. 
+ env: + - name: ONE_PASSWORD + required: false + type: string + api_authfile: + description: + - If both O(api_username) or O(api_password) are not set, then it tries to authenticate with ONE auth file. Default + path is C(~/.one/one_auth). + - Set environment variable E(ONE_AUTH) to override this path. + env: + - name: ONE_AUTH + required: false + type: string + hostname: + description: Field to match the hostname. Note V(v4_first_ip) corresponds to the first IPv4 found on VM. + type: string + default: v4_first_ip + choices: + - v4_first_ip + - v6_first_ip + - name + filter_by_label: + description: Only return servers filtered by this label. + type: string + group_by_labels: + description: Create host groups by VM labels. + type: bool + default: true +""" -EXAMPLES = r''' +EXAMPLES = r""" # inventory_opennebula.yml file in YAML format # Example command line: ansible-inventory --list -i inventory_opennebula.yml @@ -84,7 +81,7 @@ EXAMPLES = r''' plugin: community.general.opennebula api_url: https://opennebula:2633/RPC2 filter_by_label: Cache -''' +""" try: import pyone @@ -95,7 +92,8 @@ except ImportError: from ansible.errors import AnsibleError from ansible.plugins.inventory import BaseInventoryPlugin, Constructable -from ansible.module_utils.common.text.converters import to_native + +from ansible_collections.community.general.plugins.plugin_utils.unsafe import make_unsafe from collections import namedtuple import os @@ -125,9 +123,9 @@ class InventoryModule(BaseInventoryPlugin, Constructable): authstring = fp.read().rstrip() username, password = authstring.split(":") except (OSError, IOError): - raise AnsibleError("Could not find or read ONE_AUTH file at '{e}'".format(e=authfile)) + raise AnsibleError(f"Could not find or read ONE_AUTH file at '{authfile}'") except Exception: - raise AnsibleError("Error occurs when reading ONE_AUTH file at '{e}'".format(e=authfile)) + raise AnsibleError(f"Error occurs when reading ONE_AUTH file at '{authfile}'") auth_params = 
namedtuple('auth', ('url', 'username', 'password')) @@ -140,7 +138,8 @@ class InventoryModule(BaseInventoryPlugin, Constructable): nic = [nic] for net in nic: - return net['IP'] + if net.get('IP'): + return net['IP'] return False @@ -162,13 +161,13 @@ class InventoryModule(BaseInventoryPlugin, Constructable): if not (auth.username and auth.password): raise AnsibleError('API Credentials missing. Check OpenNebula inventory file.') else: - one_client = pyone.OneServer(auth.url, session=auth.username + ':' + auth.password) + one_client = pyone.OneServer(auth.url, session=f"{auth.username}:{auth.password}") # get hosts (VMs) try: vm_pool = one_client.vmpool.infoextended(-2, -1, -1, 3) except Exception as e: - raise AnsibleError("Something happened during XML-RPC call: {e}".format(e=to_native(e))) + raise AnsibleError(f"Something happened during XML-RPC call: {e}") return vm_pool @@ -195,6 +194,9 @@ class InventoryModule(BaseInventoryPlugin, Constructable): continue server['name'] = vm.NAME + server['id'] = vm.ID + if hasattr(vm.HISTORY_RECORDS, 'HISTORY') and vm.HISTORY_RECORDS.HISTORY: + server['host'] = vm.HISTORY_RECORDS.HISTORY[-1].HOSTNAME server['LABELS'] = labels server['v4_first_ip'] = self._get_vm_ipv4(vm) server['v6_first_ip'] = self._get_vm_ipv6(vm) @@ -214,6 +216,7 @@ class InventoryModule(BaseInventoryPlugin, Constructable): filter_by_label = self.get_option('filter_by_label') servers = self._retrieve_servers(filter_by_label) for server in servers: + server = make_unsafe(server) hostname = server['name'] # check for labels if group_by_labels and server['LABELS']: diff --git a/plugins/inventory/proxmox.py b/plugins/inventory/proxmox.py deleted file mode 100644 index e0734b33e3..0000000000 --- a/plugins/inventory/proxmox.py +++ /dev/null @@ -1,632 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright (C) 2016 Guido Günther , Daniel Lobato Garcia -# Copyright (c) 2018 Ansible Project -# GNU General Public License v3.0+ (see COPYING or 
https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import (absolute_import, division, print_function) - -__metaclass__ = type - -DOCUMENTATION = ''' - name: proxmox - short_description: Proxmox inventory source - version_added: "1.2.0" - author: - - Jeffrey van Pelt (@Thulium-Drake) - requirements: - - requests >= 1.1 - description: - - Get inventory hosts from a Proxmox PVE cluster. - - "Uses a configuration file as an inventory source, it must end in C(.proxmox.yml) or C(.proxmox.yaml)" - - Will retrieve the first network interface with an IP for Proxmox nodes. - - Can retrieve LXC/QEMU configuration as facts. - extends_documentation_fragment: - - constructed - - inventory_cache - options: - plugin: - description: The name of this plugin, it should always be set to C(community.general.proxmox) for this plugin to recognize it as it's own. - required: yes - choices: ['community.general.proxmox'] - type: str - url: - description: - - URL to Proxmox cluster. - - If the value is not specified in the inventory configuration, the value of environment variable C(PROXMOX_URL) will be used instead. - - Since community.general 4.7.0 you can also use templating to specify the value of the I(url). - default: 'http://localhost:8006' - type: str - env: - - name: PROXMOX_URL - version_added: 2.0.0 - user: - description: - - Proxmox authentication user. - - If the value is not specified in the inventory configuration, the value of environment variable C(PROXMOX_USER) will be used instead. - - Since community.general 4.7.0 you can also use templating to specify the value of the I(user). - required: yes - type: str - env: - - name: PROXMOX_USER - version_added: 2.0.0 - password: - description: - - Proxmox authentication password. - - If the value is not specified in the inventory configuration, the value of environment variable C(PROXMOX_PASSWORD) will be used instead. - - Since community.general 4.7.0 you can also use templating to specify the value of the I(password). 
- - If you do not specify a password, you must set I(token_id) and I(token_secret) instead. - type: str - env: - - name: PROXMOX_PASSWORD - version_added: 2.0.0 - token_id: - description: - - Proxmox authentication token ID. - - If the value is not specified in the inventory configuration, the value of environment variable C(PROXMOX_TOKEN_ID) will be used instead. - - To use token authentication, you must also specify I(token_secret). If you do not specify I(token_id) and I(token_secret), - you must set a password instead. - - Make sure to grant explicit pve permissions to the token or disable 'privilege separation' to use the users' privileges instead. - version_added: 4.8.0 - type: str - env: - - name: PROXMOX_TOKEN_ID - token_secret: - description: - - Proxmox authentication token secret. - - If the value is not specified in the inventory configuration, the value of environment variable C(PROXMOX_TOKEN_SECRET) will be used instead. - - To use token authentication, you must also specify I(token_id). If you do not specify I(token_id) and I(token_secret), - you must set a password instead. - version_added: 4.8.0 - type: str - env: - - name: PROXMOX_TOKEN_SECRET - validate_certs: - description: Verify SSL certificate if using HTTPS. - type: boolean - default: yes - group_prefix: - description: Prefix to apply to Proxmox groups. - default: proxmox_ - type: str - facts_prefix: - description: Prefix to apply to LXC/QEMU config facts. - default: proxmox_ - type: str - want_facts: - description: Gather LXC/QEMU configuration facts. - default: no - type: bool - want_proxmox_nodes_ansible_host: - version_added: 3.0.0 - description: - - Whether to set C(ansbile_host) for proxmox nodes. - - When set to C(true) (default), will use the first available interface. This can be different from what you expect. - - This currently defaults to C(true), but the default is deprecated since community.general 4.8.0. - The default will change to C(false) in community.general 6.0.0. 
To avoid a deprecation warning, please - set this parameter explicitly. - type: bool - filters: - version_added: 4.6.0 - description: A list of Jinja templates that allow filtering hosts. - type: list - elements: str - default: [] - strict: - version_added: 2.5.0 - compose: - version_added: 2.5.0 - groups: - version_added: 2.5.0 - keyed_groups: - version_added: 2.5.0 -''' - -EXAMPLES = ''' -# Minimal example which will not gather additional facts for QEMU/LXC guests -# By not specifying a URL the plugin will attempt to connect to the controller host on port 8006 -# my.proxmox.yml -plugin: community.general.proxmox -user: ansible@pve -password: secure -# Note that this can easily give you wrong values as ansible_host. See further below for -# an example where this is set to `false` and where ansible_host is set with `compose`. -want_proxmox_nodes_ansible_host: true - -# Instead of login with password, proxmox supports api token authentication since release 6.2. -plugin: community.general.proxmox -user: ci@pve -token_id: gitlab-1 -token_secret: fa256e9c-26ab-41ec-82da-707a2c079829 - -# The secret can also be a vault string or passed via the environment variable TOKEN_SECRET. 
-token_secret: !vault | - $ANSIBLE_VAULT;1.1;AES256 - 62353634333163633336343265623632626339313032653563653165313262343931643431656138 - 6134333736323265656466646539663134306166666237630a653363623262636663333762316136 - 34616361326263383766366663393837626437316462313332663736623066656237386531663731 - 3037646432383064630a663165303564623338666131353366373630656661333437393937343331 - 32643131386134396336623736393634373936356332623632306561356361323737313663633633 - 6231313333666361656537343562333337323030623732323833 - -# More complete example demonstrating the use of 'want_facts' and the constructed options -# Note that using facts returned by 'want_facts' in constructed options requires 'want_facts=true' -# my.proxmox.yml -plugin: community.general.proxmox -url: http://pve.domain.com:8006 -user: ansible@pve -password: secure -validate_certs: false -want_facts: true -keyed_groups: - # proxmox_tags_parsed is an example of a fact only returned when 'want_facts=true' - - key: proxmox_tags_parsed - separator: "" - prefix: group -groups: - webservers: "'web' in (proxmox_tags_parsed|list)" - mailservers: "'mail' in (proxmox_tags_parsed|list)" -compose: - ansible_port: 2222 -# Note that this can easily give you wrong values as ansible_host. See further below for -# an example where this is set to `false` and where ansible_host is set with `compose`. -want_proxmox_nodes_ansible_host: true - -# Using the inventory to allow ansible to connect via the first IP address of the VM / Container -# (Default is connection by name of QEMU/LXC guests) -# Note: my_inv_var demonstrates how to add a string variable to every host used by the inventory. 
-# my.proxmox.yml -plugin: community.general.proxmox -url: http://pve.domain.com:8006 -user: ansible@pve -password: secure -validate_certs: false -want_facts: true -want_proxmox_nodes_ansible_host: false -compose: - ansible_host: proxmox_ipconfig0.ip | default(proxmox_net0.ip) | ipaddr('address') - my_inv_var_1: "'my_var1_value'" - my_inv_var_2: > - "my_var_2_value" - -# Specify the url, user and password using templating -# my.proxmox.yml -plugin: community.general.proxmox -url: "{{ lookup('ansible.builtin.ini', 'url', section='proxmox', file='file.ini') }}" -user: "{{ lookup('ansible.builtin.env','PM_USER') | default('ansible@pve') }}" -password: "{{ lookup('community.general.random_string', base64=True) }}" -# Note that this can easily give you wrong values as ansible_host. See further up for -# an example where this is set to `false` and where ansible_host is set with `compose`. -want_proxmox_nodes_ansible_host: true - -''' - -import itertools -import re - -from ansible.module_utils.common._collections_compat import MutableMapping - -from ansible.errors import AnsibleError -from ansible.plugins.inventory import BaseInventoryPlugin, Constructable, Cacheable -from ansible.module_utils.common.text.converters import to_native -from ansible.module_utils.six import string_types -from ansible.module_utils.six.moves.urllib.parse import urlencode -from ansible.utils.display import Display -from ansible.template import Templar - -from ansible_collections.community.general.plugins.module_utils.version import LooseVersion - -# 3rd party imports -try: - import requests - if LooseVersion(requests.__version__) < LooseVersion('1.1.0'): - raise ImportError - HAS_REQUESTS = True -except ImportError: - HAS_REQUESTS = False - -display = Display() - - -class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable): - ''' Host inventory parser for ansible using Proxmox as source. 
''' - - NAME = 'community.general.proxmox' - - def __init__(self): - - super(InventoryModule, self).__init__() - - # from config - self.proxmox_url = None - - self.session = None - self.cache_key = None - self.use_cache = None - - def verify_file(self, path): - - valid = False - if super(InventoryModule, self).verify_file(path): - if path.endswith(('proxmox.yaml', 'proxmox.yml')): - valid = True - else: - self.display.vvv('Skipping due to inventory source not ending in "proxmox.yaml" nor "proxmox.yml"') - return valid - - def _get_session(self): - if not self.session: - self.session = requests.session() - self.session.verify = self.get_option('validate_certs') - return self.session - - def _get_auth(self): - credentials = urlencode({'username': self.proxmox_user, 'password': self.proxmox_password, }) - - if self.proxmox_password: - - credentials = urlencode({'username': self.proxmox_user, 'password': self.proxmox_password, }) - - a = self._get_session() - ret = a.post('%s/api2/json/access/ticket' % self.proxmox_url, data=credentials) - - json = ret.json() - - self.headers = { - # only required for POST/PUT/DELETE methods, which we are not using currently - # 'CSRFPreventionToken': json['data']['CSRFPreventionToken'], - 'Cookie': 'PVEAuthCookie={0}'.format(json['data']['ticket']) - } - - else: - - self.headers = {'Authorization': 'PVEAPIToken={0}!{1}={2}'.format(self.proxmox_user, self.proxmox_token_id, self.proxmox_token_secret)} - - def _get_json(self, url, ignore_errors=None): - - if not self.use_cache or url not in self._cache.get(self.cache_key, {}): - - if self.cache_key not in self._cache: - self._cache[self.cache_key] = {'url': ''} - - data = [] - s = self._get_session() - while True: - ret = s.get(url, headers=self.headers) - if ignore_errors and ret.status_code in ignore_errors: - break - ret.raise_for_status() - json = ret.json() - - # process results - # FIXME: This assumes 'return type' matches a specific query, - # it will break if we expand the 
queries and they dont have different types - if 'data' not in json: - # /hosts/:id does not have a 'data' key - data = json - break - elif isinstance(json['data'], MutableMapping): - # /facts are returned as dict in 'data' - data = json['data'] - break - else: - # /hosts 's 'results' is a list of all hosts, returned is paginated - data = data + json['data'] - break - - self._cache[self.cache_key][url] = data - - return self._cache[self.cache_key][url] - - def _get_nodes(self): - return self._get_json("%s/api2/json/nodes" % self.proxmox_url) - - def _get_pools(self): - return self._get_json("%s/api2/json/pools" % self.proxmox_url) - - def _get_lxc_per_node(self, node): - return self._get_json("%s/api2/json/nodes/%s/lxc" % (self.proxmox_url, node)) - - def _get_qemu_per_node(self, node): - return self._get_json("%s/api2/json/nodes/%s/qemu" % (self.proxmox_url, node)) - - def _get_members_per_pool(self, pool): - ret = self._get_json("%s/api2/json/pools/%s" % (self.proxmox_url, pool)) - return ret['members'] - - def _get_node_ip(self, node): - ret = self._get_json("%s/api2/json/nodes/%s/network" % (self.proxmox_url, node)) - - for iface in ret: - try: - return iface['address'] - except Exception: - return None - - def _get_agent_network_interfaces(self, node, vmid, vmtype): - result = [] - - try: - ifaces = self._get_json( - "%s/api2/json/nodes/%s/%s/%s/agent/network-get-interfaces" % ( - self.proxmox_url, node, vmtype, vmid - ) - )['result'] - - if "error" in ifaces: - if "class" in ifaces["error"]: - # This happens on Windows, even though qemu agent is running, the IP address - # cannot be fetched, as it's unsupported, also a command disabled can happen. 
- errorClass = ifaces["error"]["class"] - if errorClass in ["Unsupported"]: - self.display.v("Retrieving network interfaces from guest agents on windows with older qemu-guest-agents is not supported") - elif errorClass in ["CommandDisabled"]: - self.display.v("Retrieving network interfaces from guest agents has been disabled") - return result - - for iface in ifaces: - result.append({ - 'name': iface['name'], - 'mac-address': iface['hardware-address'] if 'hardware-address' in iface else '', - 'ip-addresses': ["%s/%s" % (ip['ip-address'], ip['prefix']) for ip in iface['ip-addresses']] if 'ip-addresses' in iface else [] - }) - except requests.HTTPError: - pass - - return result - - def _get_vm_config(self, properties, node, vmid, vmtype, name): - ret = self._get_json("%s/api2/json/nodes/%s/%s/%s/config" % (self.proxmox_url, node, vmtype, vmid)) - - properties[self._fact('node')] = node - properties[self._fact('vmid')] = vmid - properties[self._fact('vmtype')] = vmtype - - plaintext_configs = [ - 'description', - ] - - for config in ret: - key = self._fact(config) - value = ret[config] - try: - # fixup disk images as they have no key - if config == 'rootfs' or config.startswith(('virtio', 'sata', 'ide', 'scsi')): - value = ('disk_image=' + value) - - # Additional field containing parsed tags as list - if config == 'tags': - stripped_value = value.strip() - if stripped_value: - parsed_key = key + "_parsed" - properties[parsed_key] = [tag.strip() for tag in stripped_value.split(",")] - - # The first field in the agent string tells you whether the agent is enabled - # the rest of the comma separated string is extra config for the agent - if config == 'agent' and int(value.split(',')[0]): - agent_iface_value = self._get_agent_network_interfaces(node, vmid, vmtype) - if agent_iface_value: - agent_iface_key = self.to_safe('%s%s' % (key, "_interfaces")) - properties[agent_iface_key] = agent_iface_value - - if config == 'lxc': - out_val = {} - for k, v in value: - if 
k.startswith('lxc.'): - k = k[len('lxc.'):] - out_val[k] = v - value = out_val - - if config not in plaintext_configs and isinstance(value, string_types) \ - and all("=" in v for v in value.split(",")): - # split off strings with commas to a dict - # skip over any keys that cannot be processed - try: - value = dict(key.split("=", 1) for key in value.split(",")) - except Exception: - continue - - properties[key] = value - except NameError: - return None - - def _get_vm_status(self, properties, node, vmid, vmtype, name): - ret = self._get_json("%s/api2/json/nodes/%s/%s/%s/status/current" % (self.proxmox_url, node, vmtype, vmid)) - properties[self._fact('status')] = ret['status'] - - def _get_vm_snapshots(self, properties, node, vmid, vmtype, name): - ret = self._get_json("%s/api2/json/nodes/%s/%s/%s/snapshot" % (self.proxmox_url, node, vmtype, vmid)) - snapshots = [snapshot['name'] for snapshot in ret if snapshot['name'] != 'current'] - properties[self._fact('snapshots')] = snapshots - - def to_safe(self, word): - '''Converts 'bad' characters in a string to underscores so they can be used as Ansible groups - #> ProxmoxInventory.to_safe("foo-bar baz") - 'foo_barbaz' - ''' - regex = r"[^A-Za-z0-9\_]" - return re.sub(regex, "_", word.replace(" ", "")) - - def _fact(self, name): - '''Generate a fact's full name from the common prefix and a name.''' - return self.to_safe('%s%s' % (self.facts_prefix, name.lower())) - - def _group(self, name): - '''Generate a group's full name from the common prefix and a name.''' - return self.to_safe('%s%s' % (self.group_prefix, name.lower())) - - def _can_add_host(self, name, properties): - '''Ensure that a host satisfies all defined hosts filters. If strict mode is - enabled, any error during host filter compositing will lead to an AnsibleError - being raised, otherwise the filter will be ignored. 
- ''' - for host_filter in self.host_filters: - try: - if not self._compose(host_filter, properties): - return False - except Exception as e: # pylint: disable=broad-except - message = "Could not evaluate host filter %s for host %s - %s" % (host_filter, name, to_native(e)) - if self.strict: - raise AnsibleError(message) - display.warning(message) - return True - - def _add_host(self, name, variables): - self.inventory.add_host(name) - for k, v in variables.items(): - self.inventory.set_variable(name, k, v) - variables = self.inventory.get_host(name).get_vars() - self._set_composite_vars(self.get_option('compose'), variables, name, strict=self.strict) - self._add_host_to_composed_groups(self.get_option('groups'), variables, name, strict=self.strict) - self._add_host_to_keyed_groups(self.get_option('keyed_groups'), variables, name, strict=self.strict) - - def _handle_item(self, node, ittype, item): - '''Handle an item from the list of LXC containers and Qemu VM. The - return value will be either None if the item was skipped or the name of - the item if it was added to the inventory.''' - if item.get('template'): - return None - - properties = dict() - name, vmid = item['name'], item['vmid'] - - # get status, config and snapshots if want_facts == True - if self.get_option('want_facts'): - self._get_vm_status(properties, node, vmid, ittype, name) - self._get_vm_config(properties, node, vmid, ittype, name) - self._get_vm_snapshots(properties, node, vmid, ittype, name) - - # ensure the host satisfies filters - if not self._can_add_host(name, properties): - return None - - # add the host to the inventory - self._add_host(name, properties) - node_type_group = self._group('%s_%s' % (node, ittype)) - self.inventory.add_child(self._group('all_' + ittype), name) - self.inventory.add_child(node_type_group, name) - if item['status'] == 'stopped': - self.inventory.add_child(self._group('all_stopped'), name) - elif item['status'] == 'running': - 
self.inventory.add_child(self._group('all_running'), name) - - return name - - def _populate_pool_groups(self, added_hosts): - '''Generate groups from Proxmox resource pools, ignoring VMs and - containers that were skipped.''' - for pool in self._get_pools(): - poolid = pool.get('poolid') - if not poolid: - continue - pool_group = self._group('pool_' + poolid) - self.inventory.add_group(pool_group) - - for member in self._get_members_per_pool(poolid): - name = member.get('name') - if name and name in added_hosts: - self.inventory.add_child(pool_group, name) - - def _populate(self): - - # create common groups - self.inventory.add_group(self._group('all_lxc')) - self.inventory.add_group(self._group('all_qemu')) - self.inventory.add_group(self._group('all_running')) - self.inventory.add_group(self._group('all_stopped')) - nodes_group = self._group('nodes') - self.inventory.add_group(nodes_group) - - want_proxmox_nodes_ansible_host = self.get_option("want_proxmox_nodes_ansible_host") - if want_proxmox_nodes_ansible_host is None: - display.deprecated( - 'The want_proxmox_nodes_ansible_host option of the community.general.proxmox inventory plugin' - ' currently defaults to `true`, but this default has been deprecated and will change to `false`' - ' in community.general 6.0.0. 
To keep the current behavior and remove this deprecation warning,' - ' explicitly set `want_proxmox_nodes_ansible_host` to `true` in your inventory configuration', - version='6.0.0', collection_name='community.general') - want_proxmox_nodes_ansible_host = True - - # gather vm's on nodes - self._get_auth() - hosts = [] - for node in self._get_nodes(): - if not node.get('node'): - continue - - self.inventory.add_host(node['node']) - if node['type'] == 'node': - self.inventory.add_child(nodes_group, node['node']) - - if node['status'] == 'offline': - continue - - # get node IP address - if want_proxmox_nodes_ansible_host: - ip = self._get_node_ip(node['node']) - self.inventory.set_variable(node['node'], 'ansible_host', ip) - - # add LXC/Qemu groups for the node - for ittype in ('lxc', 'qemu'): - node_type_group = self._group('%s_%s' % (node['node'], ittype)) - self.inventory.add_group(node_type_group) - - # get LXC containers and Qemu VMs for this node - lxc_objects = zip(itertools.repeat('lxc'), self._get_lxc_per_node(node['node'])) - qemu_objects = zip(itertools.repeat('qemu'), self._get_qemu_per_node(node['node'])) - for ittype, item in itertools.chain(lxc_objects, qemu_objects): - name = self._handle_item(node['node'], ittype, item) - if name is not None: - hosts.append(name) - - # gather vm's in pools - self._populate_pool_groups(hosts) - - def parse(self, inventory, loader, path, cache=True): - if not HAS_REQUESTS: - raise AnsibleError('This module requires Python Requests 1.1.0 or higher: ' - 'https://github.com/psf/requests.') - - super(InventoryModule, self).parse(inventory, loader, path) - - # read config from file, this sets 'options' - self._read_config_data(path) - - t = Templar(loader=loader) - - # read options - proxmox_url = self.get_option('url') - if t.is_template(proxmox_url): - proxmox_url = t.template(variable=proxmox_url, disable_lookups=False) - self.proxmox_url = proxmox_url.rstrip('/') - - proxmox_user = self.get_option('user') - if 
t.is_template(proxmox_user): - proxmox_user = t.template(variable=proxmox_user, disable_lookups=False) - self.proxmox_user = proxmox_user - - proxmox_password = self.get_option('password') - if t.is_template(proxmox_password): - proxmox_password = t.template(variable=proxmox_password, disable_lookups=False) - self.proxmox_password = proxmox_password - - proxmox_token_id = self.get_option('token_id') - if t.is_template(proxmox_token_id): - proxmox_token_id = t.template(variable=proxmox_token_id, disable_lookups=False) - self.proxmox_token_id = proxmox_token_id - - proxmox_token_secret = self.get_option('token_secret') - if t.is_template(proxmox_token_secret): - proxmox_token_secret = t.template(variable=proxmox_token_secret, disable_lookups=False) - self.proxmox_token_secret = proxmox_token_secret - - if proxmox_password is None and (proxmox_token_id is None or proxmox_token_secret is None): - raise AnsibleError('You must specify either a password or both token_id and token_secret.') - - self.cache_key = self.get_cache_key(path) - self.use_cache = cache and self.get_option('cache') - self.host_filters = self.get_option('filters') - self.group_prefix = self.get_option('group_prefix') - self.facts_prefix = self.get_option('facts_prefix') - self.strict = self.get_option('strict') - - # actually populate inventory - self._populate() diff --git a/plugins/inventory/scaleway.py b/plugins/inventory/scaleway.py index d48cc97a1d..59c19b498b 100644 --- a/plugins/inventory/scaleway.py +++ b/plugins/inventory/scaleway.py @@ -1,79 +1,85 @@ -# -*- coding: utf-8 -*- -# Copyright: (c) 2017 Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# Copyright (c) 2017 Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import (absolute_import, division, print_function) +from __future__ import 
annotations -__metaclass__ = type -DOCUMENTATION = r''' - name: scaleway - author: - - Remy Leone (@remyleone) - short_description: Scaleway inventory source +DOCUMENTATION = r""" +name: scaleway +author: + - Remy Leone (@remyleone) +short_description: Scaleway inventory source +description: + - Get inventory hosts from Scaleway. +requirements: + - PyYAML +options: + plugin: + description: Token that ensures this is a source file for the 'scaleway' plugin. + required: true + type: string + choices: ['scaleway', 'community.general.scaleway'] + regions: + description: Filter results on a specific Scaleway region. + type: list + elements: string + default: + - ams1 + - ams2 + - ams3 + - par1 + - par2 + - par3 + - waw1 + - waw2 + - waw3 + tags: + description: Filter results on a specific tag. + type: list + elements: string + scw_profile: description: - - Get inventory hosts from Scaleway. - requirements: - - PyYAML - options: - plugin: - description: Token that ensures this is a source file for the 'scaleway' plugin. - required: True - choices: ['scaleway', 'community.general.scaleway'] - regions: - description: Filter results on a specific Scaleway region. - type: list - elements: string - default: - - ams1 - - par1 - - par2 - - waw1 - tags: - description: Filter results on a specific tag. - type: list - elements: string - scw_profile: - description: - - The config profile to use in config file. - - By default uses the one specified as C(active_profile) in the config file, or falls back to C(default) if that is not defined. - type: string - version_added: 4.4.0 - oauth_token: - description: - - Scaleway OAuth token. - - If not explicitly defined or in environment variables, it will try to lookup in the scaleway-cli configuration file - (C($SCW_CONFIG_PATH), C($XDG_CONFIG_HOME/scw/config.yaml), or C(~/.config/scw/config.yaml)). - - More details on L(how to generate token, https://www.scaleway.com/en/docs/generate-api-keys/). 
- env: - # in order of precedence - - name: SCW_TOKEN - - name: SCW_API_KEY - - name: SCW_OAUTH_TOKEN - hostnames: - description: List of preference about what to use as an hostname. - type: list - elements: string - default: - - public_ipv4 - choices: - - public_ipv4 - - private_ipv4 - - public_ipv6 - - hostname - - id - variables: - description: 'Set individual variables: keys are variable names and - values are templates. Any value returned by the - L(Scaleway API, https://developer.scaleway.com/#servers-server-get) - can be used.' - type: dict -''' + - The config profile to use in config file. + - By default uses the one specified as C(active_profile) in the config file, or falls back to V(default) if that is + not defined. + type: string + version_added: 4.4.0 + oauth_token: + description: + - Scaleway OAuth token. + - If not explicitly defined or in environment variables, it tries to lookup in the C(scaleway-cli) configuration file + (C($SCW_CONFIG_PATH), C($XDG_CONFIG_HOME/scw/config.yaml), or C(~/.config/scw/config.yaml)). + - More details on L(how to generate token, https://www.scaleway.com/en/docs/generate-api-keys/). + type: string + env: + # in order of precedence + - name: SCW_TOKEN + - name: SCW_API_KEY + - name: SCW_OAUTH_TOKEN + hostnames: + description: List of preference about what to use as an hostname. + type: list + elements: string + default: + - public_ipv4 + choices: + - public_ipv4 + - private_ipv4 + - public_ipv6 + - hostname + - id + variables: + description: 'Set individual variables: keys are variable names and values are templates. Any value returned by the L(Scaleway + API, https://developer.scaleway.com/#servers-server-get) can be used.' 
+ type: dict +""" -EXAMPLES = r''' +EXAMPLES = r""" # scaleway_inventory.yml file in YAML format # Example command line: ansible-inventory --list -i scaleway_inventory.yml +--- # use hostname as inventory_hostname # use the private IP address to connect to the host plugin: community.general.scaleway @@ -88,6 +94,7 @@ variables: ansible_host: private_ip state: state +--- # use hostname as inventory_hostname and public IP address to connect to the host plugin: community.general.scaleway hostnames: @@ -97,6 +104,7 @@ regions: variables: ansible_host: public_ip.address +--- # Using static strings as variables plugin: community.general.scaleway hostnames: @@ -105,7 +113,7 @@ variables: ansible_host: public_ip.address ansible_connection: "'ssh'" ansible_user: "'admin'" -''' +""" import os import json @@ -120,11 +128,11 @@ else: from ansible.errors import AnsibleError from ansible.plugins.inventory import BaseInventoryPlugin, Constructable from ansible_collections.community.general.plugins.module_utils.scaleway import SCALEWAY_LOCATION, parse_pagination_link +from ansible_collections.community.general.plugins.plugin_utils.unsafe import make_unsafe from ansible.module_utils.urls import open_url -from ansible.module_utils.common.text.converters import to_native, to_text -from ansible.module_utils.six import raise_from +from ansible.module_utils.common.text.converters import to_text -import ansible.module_utils.six.moves.urllib.parse as urllib_parse +import urllib.parse as urllib_parse def _fetch_information(token, url): @@ -136,7 +144,7 @@ def _fetch_information(token, url): headers={'X-Auth-Token': token, 'Content-type': 'application/json'}) except Exception as e: - raise AnsibleError("Error while fetching %s: %s" % (url, to_native(e))) + raise AnsibleError(f"Error while fetching {url}: {e}") try: raw_json = json.loads(to_text(response.read())) except ValueError: @@ -157,7 +165,7 @@ def _fetch_information(token, url): def _build_server_url(api_endpoint): - return 
"/".join([api_endpoint, "servers"]) + return f"{api_endpoint}/servers" def extract_public_ipv4(server_info): @@ -278,7 +286,7 @@ class InventoryModule(BaseInventoryPlugin, Constructable): zone_info = SCALEWAY_LOCATION[zone] url = _build_server_url(zone_info["api_endpoint"]) - raw_zone_hosts_infos = _fetch_information(url=url, token=token) + raw_zone_hosts_infos = make_unsafe(_fetch_information(url=url, token=token)) for host_infos in raw_zone_hosts_infos: @@ -328,7 +336,7 @@ class InventoryModule(BaseInventoryPlugin, Constructable): def parse(self, inventory, loader, path, cache=True): if YAML_IMPORT_ERROR: - raise_from(AnsibleError('PyYAML is probably missing'), YAML_IMPORT_ERROR) + raise AnsibleError('PyYAML is probably missing') from YAML_IMPORT_ERROR super(InventoryModule, self).parse(inventory, loader, path) self._read_config_data(path=path) @@ -340,4 +348,4 @@ class InventoryModule(BaseInventoryPlugin, Constructable): hostname_preference = self.get_option("hostnames") for zone in self._get_zones(config_zones): - self.do_zone_inventory(zone=zone, token=token, tags=tags, hostname_preferences=hostname_preference) + self.do_zone_inventory(zone=make_unsafe(zone), token=token, tags=tags, hostname_preferences=hostname_preference) diff --git a/plugins/inventory/stackpath_compute.py b/plugins/inventory/stackpath_compute.py deleted file mode 100644 index d777875578..0000000000 --- a/plugins/inventory/stackpath_compute.py +++ /dev/null @@ -1,283 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright (c) 2020 Shay Rybak -# Copyright (c) 2020 Ansible Project -# GNU General Public License v3.0+ -# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - -DOCUMENTATION = ''' - name: stackpath_compute - short_description: StackPath Edge Computing inventory source - version_added: 1.2.0 - author: - - UNKNOWN (@shayrybak) - extends_documentation_fragment: - - inventory_cache - - constructed - 
description: - - Get inventory hosts from StackPath Edge Computing. - - Uses a YAML configuration file that ends with stackpath_compute.(yml|yaml). - options: - plugin: - description: - - A token that ensures this is a source file for the plugin. - required: true - choices: ['community.general.stackpath_compute'] - client_id: - description: - - An OAuth client ID generated from the API Management section of the StackPath customer portal - U(https://control.stackpath.net/api-management). - required: true - type: str - client_secret: - description: - - An OAuth client secret generated from the API Management section of the StackPath customer portal - U(https://control.stackpath.net/api-management). - required: true - type: str - stack_slugs: - description: - - A list of Stack slugs to query instances in. If no entry then get instances in all stacks on the account. - type: list - elements: str - use_internal_ip: - description: - - Whether or not to use internal IP addresses, If false, uses external IP addresses, internal otherwise. - - If an instance doesn't have an external IP it will not be returned when this option is set to false. - type: bool -''' - -EXAMPLES = ''' -# Example using credentials to fetch all workload instances in a stack. 
---- -plugin: community.general.stackpath_compute -client_id: my_client_id -client_secret: my_client_secret -stack_slugs: -- my_first_stack_slug -- my_other_stack_slug -use_internal_ip: false -''' - -import traceback -import json - -from ansible.errors import AnsibleError -from ansible.module_utils.urls import open_url -from ansible.plugins.inventory import ( - BaseInventoryPlugin, - Constructable, - Cacheable -) -from ansible.utils.display import Display - - -display = Display() - - -class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable): - - NAME = 'community.general.stackpath_compute' - - def __init__(self): - super(InventoryModule, self).__init__() - - # credentials - self.client_id = None - self.client_secret = None - self.stack_slug = None - self.api_host = "https://gateway.stackpath.com" - self.group_keys = [ - "stackSlug", - "workloadId", - "cityCode", - "countryCode", - "continent", - "target", - "name", - "workloadSlug" - ] - - def _validate_config(self, config): - if config['plugin'] != 'community.general.stackpath_compute': - raise AnsibleError("plugin doesn't match this plugin") - try: - client_id = config['client_id'] - if len(client_id) != 32: - raise AnsibleError("client_id must be 32 characters long") - except KeyError: - raise AnsibleError("config missing client_id, a required option") - try: - client_secret = config['client_secret'] - if len(client_secret) != 64: - raise AnsibleError("client_secret must be 64 characters long") - except KeyError: - raise AnsibleError("config missing client_id, a required option") - return True - - def _set_credentials(self): - ''' - :param config_data: contents of the inventory config file - ''' - self.client_id = self.get_option('client_id') - self.client_secret = self.get_option('client_secret') - - def _authenticate(self): - payload = json.dumps( - { - "client_id": self.client_id, - "client_secret": self.client_secret, - "grant_type": "client_credentials", - } - ) - headers = { - "Content-Type": 
"application/json", - } - resp = open_url( - self.api_host + '/identity/v1/oauth2/token', - headers=headers, - data=payload, - method="POST" - ) - status_code = resp.code - if status_code == 200: - body = resp.read() - self.auth_token = json.loads(body)["access_token"] - - def _query(self): - results = [] - workloads = [] - self._authenticate() - for stack_slug in self.stack_slugs: - try: - workloads = self._stackpath_query_get_list(self.api_host + '/workload/v1/stacks/' + stack_slug + '/workloads') - except Exception: - raise AnsibleError("Failed to get workloads from the StackPath API: %s" % traceback.format_exc()) - for workload in workloads: - try: - workload_instances = self._stackpath_query_get_list( - self.api_host + '/workload/v1/stacks/' + stack_slug + '/workloads/' + workload["id"] + '/instances' - ) - except Exception: - raise AnsibleError("Failed to get workload instances from the StackPath API: %s" % traceback.format_exc()) - for instance in workload_instances: - if instance["phase"] == "RUNNING": - instance["stackSlug"] = stack_slug - instance["workloadId"] = workload["id"] - instance["workloadSlug"] = workload["slug"] - instance["cityCode"] = instance["location"]["cityCode"] - instance["countryCode"] = instance["location"]["countryCode"] - instance["continent"] = instance["location"]["continent"] - instance["target"] = instance["metadata"]["labels"]["workload.platform.stackpath.net/target-name"] - try: - if instance[self.hostname_key]: - results.append(instance) - except KeyError: - pass - return results - - def _populate(self, instances): - for instance in instances: - for group_key in self.group_keys: - group = group_key + "_" + instance[group_key] - group = group.lower().replace(" ", "_").replace("-", "_") - self.inventory.add_group(group) - self.inventory.add_host(instance[self.hostname_key], - group=group) - - def _stackpath_query_get_list(self, url): - self._authenticate() - headers = { - "Content-Type": "application/json", - "Authorization": 
"Bearer " + self.auth_token, - } - next_page = True - result = [] - cursor = '-1' - while next_page: - resp = open_url( - url + '?page_request.first=10&page_request.after=%s' % cursor, - headers=headers, - method="GET" - ) - status_code = resp.code - if status_code == 200: - body = resp.read() - body_json = json.loads(body) - result.extend(body_json["results"]) - next_page = body_json["pageInfo"]["hasNextPage"] - if next_page: - cursor = body_json["pageInfo"]["endCursor"] - return result - - def _get_stack_slugs(self, stacks): - self.stack_slugs = [stack["slug"] for stack in stacks] - - def verify_file(self, path): - ''' - :param loader: an ansible.parsing.dataloader.DataLoader object - :param path: the path to the inventory config file - :return the contents of the config file - ''' - if super(InventoryModule, self).verify_file(path): - if path.endswith(('stackpath_compute.yml', 'stackpath_compute.yaml')): - return True - display.debug( - "stackpath_compute inventory filename must end with \ - 'stackpath_compute.yml' or 'stackpath_compute.yaml'" - ) - return False - - def parse(self, inventory, loader, path, cache=True): - - super(InventoryModule, self).parse(inventory, loader, path) - - config = self._read_config_data(path) - self._validate_config(config) - self._set_credentials() - - # get user specifications - self.use_internal_ip = self.get_option('use_internal_ip') - if self.use_internal_ip: - self.hostname_key = "ipAddress" - else: - self.hostname_key = "externalIpAddress" - - self.stack_slugs = self.get_option('stack_slugs') - if not self.stack_slugs: - try: - stacks = self._stackpath_query_get_list(self.api_host + '/stack/v1/stacks') - self._get_stack_slugs(stacks) - except Exception: - raise AnsibleError("Failed to get stack IDs from the Stackpath API: %s" % traceback.format_exc()) - - cache_key = self.get_cache_key(path) - # false when refresh_cache or --flush-cache is used - if cache: - # get the user-specified directive - cache = 
self.get_option('cache') - - # Generate inventory - cache_needs_update = False - if cache: - try: - results = self._cache[cache_key] - except KeyError: - # if cache expires or cache file doesn't exist - cache_needs_update = True - - if not cache or cache_needs_update: - results = self._query() - - self._populate(results) - - # If the cache has expired/doesn't exist or - # if refresh_inventory/flush cache is used - # when the user is using caching, update the cached inventory - try: - if cache_needs_update or (not cache and self.get_option('cache')): - self._cache[cache_key] = results - except Exception: - raise AnsibleError("Failed to populate data: %s" % traceback.format_exc()) diff --git a/plugins/inventory/virtualbox.py b/plugins/inventory/virtualbox.py index 89a77c88bb..564db57dac 100644 --- a/plugins/inventory/virtualbox.py +++ b/plugins/inventory/virtualbox.py @@ -1,67 +1,88 @@ -# -*- coding: utf-8 -*- # Copyright (c) 2017 Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = ''' - author: Unknown (!UNKNOWN) - name: virtualbox - short_description: virtualbox inventory source +DOCUMENTATION = r""" +author: Unknown (!UNKNOWN) +name: virtualbox +short_description: Virtualbox inventory source +description: + - Get inventory hosts from the local virtualbox installation. + - Uses a YAML configuration file that ends with virtualbox.(yml|yaml) or vbox.(yml|yaml). + - The inventory_hostname is always the 'Name' of the virtualbox instance. + - Groups can be assigned to the VMs using C(VBoxManage). Multiple groups can be assigned by using V(/) as a delimeter. 
+ - A separate parameter, O(enable_advanced_group_parsing) is exposed to change grouping behaviour. See the parameter documentation + for details. +extends_documentation_fragment: + - constructed + - inventory_cache +options: + plugin: + description: Token that ensures this is a source file for the P(community.general.virtualbox#inventory) plugin. + type: string + required: true + choices: ['virtualbox', 'community.general.virtualbox'] + running_only: + description: Toggles showing all VMs instead of only those currently running. + type: boolean + default: false + settings_password_file: + description: Provide a file containing the settings password (equivalent to C(--settingspwfile)). + type: string + network_info_path: + description: Property path to query for network information (C(ansible_host)). + type: string + default: "/VirtualBox/GuestInfo/Net/0/V4/IP" + query: + description: Create vars from virtualbox properties. + type: dictionary + default: {} + enable_advanced_group_parsing: description: - - Get inventory hosts from the local virtualbox installation. - - Uses a YAML configuration file that ends with virtualbox.(yml|yaml) or vbox.(yml|yaml). - - The inventory_hostname is always the 'Name' of the virtualbox instance. 
- extends_documentation_fragment: - - constructed - - inventory_cache - options: - plugin: - description: token that ensures this is a source file for the 'virtualbox' plugin - required: True - choices: ['virtualbox', 'community.general.virtualbox'] - running_only: - description: toggles showing all vms vs only those currently running - type: boolean - default: False - settings_password_file: - description: provide a file containing the settings password (equivalent to --settingspwfile) - network_info_path: - description: property path to query for network information (ansible_host) - default: "/VirtualBox/GuestInfo/Net/0/V4/IP" - query: - description: create vars from virtualbox properties - type: dictionary - default: {} -''' + - The default group parsing rule (when this setting is set to V(false)) is to split the VirtualBox VM's group based + on the V(/) character and assign the resulting list elements as an Ansible Group. + - Setting O(enable_advanced_group_parsing=true) changes this behaviour to match VirtualBox's interpretation of groups + according to U(https://www.virtualbox.org/manual/UserManual.html#gui-vmgroups). Groups are now split using the V(,) + character, and the V(/) character indicates nested groups. + - When enabled, a VM that's been configured using V(VBoxManage modifyvm "vm01" --groups "/TestGroup/TestGroup2,/TestGroup3") + results in the group C(TestGroup2) being a child group of C(TestGroup); and the VM being a part of C(TestGroup2) + and C(TestGroup3). 
+ default: false + type: bool + version_added: 9.2.0 +""" -EXAMPLES = ''' +EXAMPLES = r""" +--- # file must be named vbox.yaml or vbox.yml -simple_config_file: - plugin: community.general.virtualbox - settings_password_file: /etc/virtulbox/secrets - query: - logged_in_users: /VirtualBox/GuestInfo/OS/LoggedInUsersList - compose: - ansible_connection: ('indows' in vbox_Guest_OS)|ternary('winrm', 'ssh') +plugin: community.general.virtualbox +settings_password_file: /etc/virtualbox/secrets +query: + logged_in_users: /VirtualBox/GuestInfo/OS/LoggedInUsersList +compose: + ansible_connection: ('indows' in vbox_Guest_OS)|ternary('winrm', 'ssh') +--- # add hosts (all match with minishift vm) to the group container if any of the vms are in ansible_inventory' plugin: community.general.virtualbox groups: container: "'minis' in (inventory_hostname)" -''' +""" import os from subprocess import Popen, PIPE from ansible.errors import AnsibleParserError -from ansible.module_utils.common.text.converters import to_bytes, to_native, to_text -from ansible.module_utils.common._collections_compat import MutableMapping +from ansible.module_utils.common.text.converters import to_bytes, to_text +from collections.abc import MutableMapping from ansible.plugins.inventory import BaseInventoryPlugin, Constructable, Cacheable from ansible.module_utils.common.process import get_bin_path +from ansible_collections.community.general.plugins.plugin_utils.unsafe import make_unsafe + class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable): ''' Host inventory parser for ansible using local virtualbox. 
''' @@ -115,6 +136,7 @@ class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable): self._add_host_to_keyed_groups(self.get_option('keyed_groups'), hostvars[host], host, strict=strict) def _populate_from_cache(self, source_data): + source_data = make_unsafe(source_data) hostvars = source_data.pop('_meta', {}).get('hostvars', {}) for group in source_data: if group == 'all': @@ -161,7 +183,7 @@ class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable): v = v.strip() # found host if k.startswith('Name') and ',' not in v: # some setting strings appear in Name - current_host = v + current_host = make_unsafe(v) if current_host not in hostvars: hostvars[current_host] = {} self.inventory.add_host(current_host) @@ -169,29 +191,29 @@ class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable): # try to get network info netdata = self._query_vbox_data(current_host, netinfo) if netdata: - self.inventory.set_variable(current_host, 'ansible_host', netdata) + self.inventory.set_variable(current_host, 'ansible_host', make_unsafe(netdata)) # found groups elif k == 'Groups': - for group in v.split('/'): - if group: - group = self.inventory.add_group(group) - self.inventory.add_child(group, current_host) - if group not in cacheable_results: - cacheable_results[group] = {'hosts': []} - cacheable_results[group]['hosts'].append(current_host) + if self.get_option('enable_advanced_group_parsing'): + self._handle_vboxmanage_group_string(v, current_host, cacheable_results) + else: + self._handle_group_string(v, current_host, cacheable_results) continue else: # found vars, accumulate in hostvars for clean inventory set - pref_k = 'vbox_' + k.strip().replace(' ', '_') - if k.startswith(' '): - if prevkey not in hostvars[current_host]: + pref_k = make_unsafe(f"vbox_{k.strip().replace(' ', '_')}") + leading_spaces = len(k) - len(k.lstrip(' ')) + if 0 < leading_spaces <= 2: + if prevkey not in hostvars[current_host] or not isinstance(hostvars[current_host][prevkey], 
dict): hostvars[current_host][prevkey] = {} - hostvars[current_host][prevkey][pref_k] = v + hostvars[current_host][prevkey][pref_k] = make_unsafe(v) + elif leading_spaces > 2: + continue else: if v != '': - hostvars[current_host][pref_k] = v + hostvars[current_host][pref_k] = make_unsafe(v) if self._ungrouped_host(current_host, cacheable_results): if 'ungrouped' not in cacheable_results: cacheable_results['ungrouped'] = {'hosts': []} @@ -219,6 +241,64 @@ class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable): return all(find_host(host, inventory)) + def _handle_group_string(self, vboxmanage_group, current_host, cacheable_results): + '''Handles parsing the VM's Group assignment from VBoxManage according to this inventory's initial implementation.''' + # The original implementation of this inventory plugin treated `/` as + # a delimeter to split and use as Ansible Groups. + for group in vboxmanage_group.split('/'): + if group: + group = make_unsafe(group) + group = self.inventory.add_group(group) + self.inventory.add_child(group, current_host) + if group not in cacheable_results: + cacheable_results[group] = {'hosts': []} + cacheable_results[group]['hosts'].append(current_host) + + def _handle_vboxmanage_group_string(self, vboxmanage_group, current_host, cacheable_results): + '''Handles parsing the VM's Group assignment from VBoxManage according to VirtualBox documentation.''' + # Per the VirtualBox documentation, a VM can be part of many groups, + # and it is possible to have nested groups. + # Many groups are separated by commas ",", and nested groups use + # slash "/". 
+ # https://www.virtualbox.org/manual/UserManual.html#gui-vmgroups + # Multi groups: VBoxManage modifyvm "vm01" --groups "/TestGroup,/TestGroup2" + # Nested groups: VBoxManage modifyvm "vm01" --groups "/TestGroup/TestGroup2" + + for group in vboxmanage_group.split(','): + if not group: + # We could get an empty element due how to split works, and + # possible assignments from VirtualBox. e.g. ,/Group1 + continue + + if group == "/": + # This is the "root" group. We get here if the VM was not + # assigned to a particular group. Consider the host to be + # unassigned to a group. + continue + + parent_group = None + for subgroup in group.split('/'): + if not subgroup: + # Similarly to above, we could get an empty element. + # e.g //Group1 + continue + + if subgroup == '/': + # "root" group. + # Consider the host to be unassigned + continue + + subgroup = make_unsafe(subgroup) + subgroup = self.inventory.add_group(subgroup) + if parent_group is not None: + self.inventory.add_child(parent_group, subgroup) + self.inventory.add_child(subgroup, current_host) + if subgroup not in cacheable_results: + cacheable_results[subgroup] = {'hosts': []} + cacheable_results[subgroup]['hosts'].append(current_host) + + parent_group = subgroup + def verify_file(self, path): valid = False @@ -272,7 +352,7 @@ class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable): try: p = Popen(cmd, stdout=PIPE) except Exception as e: - raise AnsibleParserError(to_native(e)) + raise AnsibleParserError(str(e)) source_data = p.stdout.read().splitlines() diff --git a/plugins/inventory/xen_orchestra.py b/plugins/inventory/xen_orchestra.py index cc2346a1aa..fc0f0db757 100644 --- a/plugins/inventory/xen_orchestra.py +++ b/plugins/inventory/xen_orchestra.py @@ -1,66 +1,84 @@ -# -*- coding: utf-8 -*- # Copyright (c) 2021 Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt 
or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = ''' - name: xen_orchestra - short_description: Xen Orchestra inventory source - version_added: 4.1.0 - author: - - Dom Del Nano (@ddelnano) - - Samori Gorse (@shinuza) - requirements: - - websocket-client >= 1.0.0 +DOCUMENTATION = r""" +name: xen_orchestra +short_description: Xen Orchestra inventory source +version_added: 4.1.0 +author: + - Dom Del Nano (@ddelnano) + - Samori Gorse (@shinuza) +requirements: + - websocket-client >= 1.0.0 +description: + - Get inventory hosts from a Xen Orchestra deployment. + - Uses a configuration file as an inventory source, it must end in C(.xen_orchestra.yml) or C(.xen_orchestra.yaml). +extends_documentation_fragment: + - constructed + - inventory_cache +options: + plugin: + description: The name of this plugin, it should always be set to V(community.general.xen_orchestra) for this plugin to + recognize it as its own. + required: true + choices: ['community.general.xen_orchestra'] + type: str + api_host: description: - - Get inventory hosts from a Xen Orchestra deployment. - - 'Uses a configuration file as an inventory source, it must end in C(.xen_orchestra.yml) or C(.xen_orchestra.yaml).' - extends_documentation_fragment: - - constructed - - inventory_cache - options: - plugin: - description: The name of this plugin, it should always be set to C(community.general.xen_orchestra) for this plugin to recognize it as its own. - required: yes - choices: ['community.general.xen_orchestra'] - type: str - api_host: - description: - - API host to XOA API. - - If the value is not specified in the inventory configuration, the value of environment variable C(ANSIBLE_XO_HOST) will be used instead. - type: str - env: - - name: ANSIBLE_XO_HOST - user: - description: - - Xen Orchestra user. 
- - If the value is not specified in the inventory configuration, the value of environment variable C(ANSIBLE_XO_USER) will be used instead. - required: yes - type: str - env: - - name: ANSIBLE_XO_USER - password: - description: - - Xen Orchestra password. - - If the value is not specified in the inventory configuration, the value of environment variable C(ANSIBLE_XO_PASSWORD) will be used instead. - required: yes - type: str - env: - - name: ANSIBLE_XO_PASSWORD - validate_certs: - description: Verify TLS certificate if using HTTPS. - type: boolean - default: true - use_ssl: - description: Use wss when connecting to the Xen Orchestra API - type: boolean - default: true -''' + - API host to XOA API. + - If the value is not specified in the inventory configuration, the value of environment variable E(ANSIBLE_XO_HOST) + is used instead. + type: str + env: + - name: ANSIBLE_XO_HOST + user: + description: + - Xen Orchestra user. + - If the value is not specified in the inventory configuration, the value of environment variable E(ANSIBLE_XO_USER) + is used instead. + required: true + type: str + env: + - name: ANSIBLE_XO_USER + password: + description: + - Xen Orchestra password. + - If the value is not specified in the inventory configuration, the value of environment variable E(ANSIBLE_XO_PASSWORD) + is used instead. + required: true + type: str + env: + - name: ANSIBLE_XO_PASSWORD + validate_certs: + description: Verify TLS certificate if using HTTPS. + type: boolean + default: true + use_ssl: + description: Use wss when connecting to the Xen Orchestra API. + type: boolean + default: true + use_vm_uuid: + description: + - Import Xen VMs to inventory using their UUID as the VM entry name. + - If set to V(false) use VM name labels instead of UUIDs. + type: boolean + default: true + version_added: 10.4.0 + use_host_uuid: + description: + - Import Xen Hosts to inventory using their UUID as the Host entry name. + - If set to V(false) use Host name labels instead of UUIDs. 
+ type: boolean + default: true + version_added: 10.4.0 +""" -EXAMPLES = ''' +EXAMPLES = r""" +--- # file must be named xen_orchestra.yaml or xen_orchestra.yml plugin: community.general.xen_orchestra api_host: 192.168.1.255 @@ -69,19 +87,22 @@ password: xo_pwd validate_certs: true use_ssl: true groups: - kube_nodes: "'kube_node' in tags" + kube_nodes: "'kube_node' in tags" compose: - ansible_port: 2222 - -''' + ansible_port: 2222 +use_vm_uuid: false +use_host_uuid: true +""" import json import ssl +from time import sleep from ansible.errors import AnsibleError from ansible.plugins.inventory import BaseInventoryPlugin, Constructable, Cacheable from ansible_collections.community.general.plugins.module_utils.version import LooseVersion +from ansible_collections.community.general.plugins.plugin_utils.unsafe import make_unsafe # 3rd party imports try: @@ -135,27 +156,45 @@ class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable): sslopt = None if validate_certs else {'cert_reqs': ssl.CERT_NONE} self.conn = create_connection( - '{0}://{1}/api/'.format(proto, xoa_api_host), sslopt=sslopt) + f'{proto}://{xoa_api_host}/api/', sslopt=sslopt) + + CALL_TIMEOUT = 100 + """Number of 1/10ths of a second to wait before method call times out.""" + + def call(self, method, params): + """Calls a method on the XO server with the provided parameters.""" + id = self.pointer + self.conn.send(json.dumps({ + 'id': id, + 'jsonrpc': '2.0', + 'method': method, + 'params': params + })) + + waited = 0 + while waited < self.CALL_TIMEOUT: + response = json.loads(self.conn.recv()) + if 'id' in response and response['id'] == id: + return response + else: + sleep(0.1) + waited += 1 + + raise AnsibleError(f'Method call {method} timed out after {self.CALL_TIMEOUT / 10} seconds.') def login(self, user, password): - payload = {'id': self.pointer, 'jsonrpc': '2.0', 'method': 'session.signIn', 'params': { - 'username': user, 'password': password}} - self.conn.send(json.dumps(payload)) - result 
= json.loads(self.conn.recv()) + result = self.call('session.signIn', { + 'username': user, 'password': password + }) if 'error' in result: - raise AnsibleError( - 'Could not connect: {0}'.format(result['error'])) + raise AnsibleError(f"Could not connect: {result['error']}") def get_object(self, name): - payload = {'id': self.pointer, 'jsonrpc': '2.0', - 'method': 'xo.getAllObjects', 'params': {'filter': {'type': name}}} - self.conn.send(json.dumps(payload)) - answer = json.loads(self.conn.recv()) + answer = self.call('xo.getAllObjects', {'filter': {'type': name}}) if 'error' in answer: - raise AnsibleError( - 'Could not request: {0}'.format(answer['error'])) + raise AnsibleError(f"Could not request: {answer['error']}") return answer['result'] @@ -176,10 +215,20 @@ class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable): self._set_composite_vars(self.get_option('compose'), variables, name, strict=strict) def _add_vms(self, vms, hosts, pools): + vm_name_list = [] for uuid, vm in vms.items(): + if self.vm_entry_name_type == 'name_label': + if vm['name_label'] not in vm_name_list: + entry_name = vm['name_label'] + vm_name_list.append(vm['name_label']) + else: + vm_duplicate_count = vm_name_list.count(vm['name_label']) + entry_name = f"{vm['name_label']}_{vm_duplicate_count}" + vm_name_list.append(vm['name_label']) + else: + entry_name = uuid group = 'with_ip' ip = vm.get('mainIpAddress') - entry_name = uuid power_state = vm['power_state'].lower() pool_name = self._pool_group_name_for_uuid(pools, vm['$poolId']) host_name = self._host_group_name_for_uuid(hosts, vm['$container']) @@ -226,10 +275,20 @@ class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable): self._apply_constructable(entry_name, self.inventory.get_host(entry_name).get_vars()) def _add_hosts(self, hosts, pools): + host_name_list = [] for host in hosts.values(): - entry_name = host['uuid'] - group_name = 'xo_host_{0}'.format( - clean_group_name(host['name_label'])) + if 
self.host_entry_name_type == 'name_label': + if host['name_label'] not in host_name_list: + entry_name = host['name_label'] + host_name_list.append(host['name_label']) + else: + host_duplicate_count = host_name_list.count(host['name_label']) + entry_name = f"{host['name_label']}_{host_duplicate_count}" + host_name_list.append(host['name_label']) + else: + entry_name = host['uuid'] + + group_name = f"xo_host_{clean_group_name(host['name_label'])}" pool_name = self._pool_group_name_for_uuid(pools, host['$poolId']) self.inventory.add_group(group_name) @@ -252,15 +311,13 @@ class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable): entry_name, 'product_brand', host['productBrand']) for pool in pools.values(): - group_name = 'xo_pool_{0}'.format( - clean_group_name(pool['name_label'])) + group_name = f"xo_pool_{clean_group_name(pool['name_label'])}" self.inventory.add_group(group_name) def _add_pools(self, pools): for pool in pools.values(): - group_name = 'xo_pool_{0}'.format( - clean_group_name(pool['name_label'])) + group_name = f"xo_pool_{clean_group_name(pool['name_label'])}" self.inventory.add_group(group_name) @@ -268,16 +325,13 @@ class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable): def _pool_group_name_for_uuid(self, pools, pool_uuid): for pool in pools: if pool == pool_uuid: - return 'xo_pool_{0}'.format( - clean_group_name(pools[pool_uuid]['name_label'])) + return f"xo_pool_{clean_group_name(pools[pool_uuid]['name_label'])}" # TODO: Refactor def _host_group_name_for_uuid(self, hosts, host_uuid): for host in hosts: if host == host_uuid: - return 'xo_host_{0}'.format( - clean_group_name(hosts[host_uuid]['name_label'] - )) + return f"xo_host_{clean_group_name(hosts[host_uuid]['name_label'])}" def _populate(self, objects): # Prepare general groups @@ -323,5 +377,13 @@ class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable): if not self.get_option('use_ssl'): self.protocol = 'ws' + self.vm_entry_name_type = 'uuid' + if not 
self.get_option('use_vm_uuid'): + self.vm_entry_name_type = 'name_label' + + self.host_entry_name_type = 'uuid' + if not self.get_option('use_host_uuid'): + self.host_entry_name_type = 'name_label' + objects = self._get_objects() - self._populate(objects) + self._populate(make_unsafe(objects)) diff --git a/plugins/lookup/binary_file.py b/plugins/lookup/binary_file.py new file mode 100644 index 0000000000..3236ade3e4 --- /dev/null +++ b/plugins/lookup/binary_file.py @@ -0,0 +1,113 @@ +# +# Copyright (c) 2025, Felix Fontein +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +DOCUMENTATION = r""" +name: binary_file +author: Felix Fontein (@felixfontein) +short_description: Read binary file and return it Base64 encoded +version_added: 11.2.0 +description: + - This lookup returns the contents from a file on the Ansible controller's file system. + - The file is read as a binary file and its contents are returned Base64 encoded. + This is similar to using P(ansible.builtin.file#lookup) combined with P(ansible.builtin.b64encode#filter), + except that P(ansible.builtin.file#lookup) does not support binary files as it interprets the contents as UTF-8, + which can cause the wrong content being Base64 encoded. +options: + _terms: + description: + - Paths of the files to read. + - Relative paths will be searched for in different places. See R(Ansible task paths, playbook_task_paths) for more details. + required: true + type: list + elements: str + not_exist: + description: + - Determine how to react if the specified file cannot be found. + type: str + choices: + error: Raise an error. + empty: Return an empty string for the file. + empty_str: + - Return the string C(empty) for the file. + - This cannot be confused with Base64 encoding due to the missing padding. 
+ default: error +notes: + - This lookup does not understand 'globbing' - use the P(ansible.builtin.fileglob#lookup) lookup instead. +seealso: + - plugin: ansible.builtin.b64decode + plugin_type: filter + description: >- + The b64decode filter can be used to decode Base64 encoded data. + Note that Ansible cannot handle binary data, the data will be interpreted as UTF-8 text! + - plugin: ansible.builtin.file + plugin_type: lookup + description: You can use this lookup plugin to read text files from the Ansible controller. + - module: ansible.builtin.slurp + description: >- + Also allows to read binary files Base64 encoded, but from remote targets. + With C(delegate_to: localhost) can be redirected to run on the controller, but you have to know the path to the file to read. + Both this plugin and P(ansible.builtin.file#lookup) use some search path logic to for example also find files in the C(files) + directory of a role. + - ref: playbook_task_paths + description: Search paths used for relative files. +""" + +EXAMPLES = r""" +--- +- name: Output Base64 contents of binary files on screen + ansible.builtin.debug: + msg: "Content: {{ lookup('community.general.binary_file', item) }}" + loop: + - some-binary-file.bin +""" + +RETURN = r""" +_raw: + description: + - Base64 encoded content of requested files, or an empty string resp. the string C(empty), depending on the O(not_exist) option. + - This list contains one string per element of O(_terms) in the same order as O(_terms). 
+ type: list + elements: str + returned: success +""" + +import base64 + +from ansible.errors import AnsibleLookupError +from ansible.plugins.lookup import LookupBase + +from ansible.utils.display import Display + +display = Display() + + +class LookupModule(LookupBase): + + def run(self, terms, variables=None, **kwargs): + self.set_options(var_options=variables, direct=kwargs) + not_exist = self.get_option("not_exist") + + result = [] + for term in terms: + display.debug(f"Searching for binary file: {term!r}") + path = self.find_file_in_search_path(variables, "files", term, ignore_missing=(not_exist != "error")) + display.vvvv(f"community.general.binary_file lookup using {path} as file") + + if not path: + if not_exist == "empty": + result.append("") + continue + if not_exist == "empty_str": + result.append("empty") + continue + raise AnsibleLookupError(f"Could not locate file in community.general.binary_file lookup: {term}") + + try: + with open(path, "rb") as f: + result.append(base64.b64encode(f.read()).decode("utf-8")) + except Exception as exc: + raise AnsibleLookupError(f"Error while reading {path}: {exc}") + + return result diff --git a/plugins/lookup/bitwarden.py b/plugins/lookup/bitwarden.py new file mode 100644 index 0000000000..e4d958a96f --- /dev/null +++ b/plugins/lookup/bitwarden.py @@ -0,0 +1,295 @@ +# Copyright (c) 2022, Jonathan Lung +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later +from __future__ import annotations + +DOCUMENTATION = r""" +name: bitwarden +author: + - Jonathan Lung (@lungj) +requirements: + - bw (command line utility) + - be logged into bitwarden + - bitwarden vault unlocked + - E(BW_SESSION) environment variable set +short_description: Retrieve secrets from Bitwarden +version_added: 5.4.0 +description: + - Retrieve secrets from Bitwarden. +options: + _terms: + description: Key(s) to fetch values for from login info. 
+ required: true + type: list + elements: str + search: + description: + - Field to retrieve, for example V(name) or V(id). + - If set to V(id), only zero or one element can be returned. Use the Jinja C(first) filter to get the only list element. + - If set to V(None) or V(''), or if O(_terms) is empty, records are not filtered by fields. + type: str + default: name + version_added: 5.7.0 + field: + description: Field to fetch. Leave unset to fetch whole response. + type: str + collection_id: + description: + - Collection ID to filter results by collection. Leave unset to skip filtering. + - O(collection_id) and O(collection_name) are mutually exclusive. + type: str + version_added: 6.3.0 + collection_name: + description: + - Collection name to filter results by collection. Leave unset to skip filtering. + - O(collection_id) and O(collection_name) are mutually exclusive. + type: str + version_added: 10.4.0 + organization_id: + description: Organization ID to filter results by organization. Leave unset to skip filtering. + type: str + version_added: 8.5.0 + bw_session: + description: Pass session key instead of reading from env. + type: str + version_added: 8.4.0 + result_count: + description: + - Number of results expected for the lookup query. Task fails if O(result_count) is set but does not match the number + of query results. Leave empty to skip this check. 
+ type: int + version_added: 10.4.0 +""" + +EXAMPLES = r""" +- name: "Get 'password' from all Bitwarden records named 'a_test'" + ansible.builtin.debug: + msg: >- + {{ lookup('community.general.bitwarden', 'a_test', field='password') }} + +- name: "Get 'password' from Bitwarden record with ID 'bafba515-af11-47e6-abe3-af1200cd18b2'" + ansible.builtin.debug: + msg: >- + {{ lookup('community.general.bitwarden', 'bafba515-af11-47e6-abe3-af1200cd18b2', search='id', field='password') | first }} + +- name: "Get 'password' from all Bitwarden records named 'a_test' from collection" + ansible.builtin.debug: + msg: >- + {{ lookup('community.general.bitwarden', 'a_test', field='password', collection_id='bafba515-af11-47e6-abe3-af1200cd18b2') }} + +- name: "Get list of all full Bitwarden records named 'a_test'" + ansible.builtin.debug: + msg: >- + {{ lookup('community.general.bitwarden', 'a_test') }} + +- name: "Get custom field 'api_key' from all Bitwarden records named 'a_test'" + ansible.builtin.debug: + msg: >- + {{ lookup('community.general.bitwarden', 'a_test', field='api_key') }} + +- name: "Get 'password' from all Bitwarden records named 'a_test', using given session key" + ansible.builtin.debug: + msg: >- + {{ lookup('community.general.bitwarden', 'a_test', field='password', bw_session='bXZ9B5TXi6...') }} + +- name: "Get all Bitwarden records from collection" + ansible.builtin.debug: + msg: >- + {{ lookup('community.general.bitwarden', None, collection_id='bafba515-af11-47e6-abe3-af1200cd18b2') }} + +- name: "Get all Bitwarden records from collection" + ansible.builtin.debug: + msg: >- + {{ lookup('community.general.bitwarden', None, collection_name='my_collections/test_collection') }} + +- name: "Get Bitwarden record named 'a_test', ensure there is exactly one match" + ansible.builtin.debug: + msg: >- + {{ lookup('community.general.bitwarden', 'a_test', result_count=1) }} +""" + +RETURN = r""" +_raw: + description: + - A one-element list that contains a list of 
requested fields or JSON objects of matches. + - If you use C(query), you get a list of lists. If you use C(lookup) without C(wantlist=true), this always gets reduced + to a list of field values or JSON objects. + type: list + elements: list +""" + +from subprocess import Popen, PIPE + +from ansible.errors import AnsibleError, AnsibleOptionsError +from ansible.module_utils.common.text.converters import to_bytes, to_text +from ansible.parsing.ajson import AnsibleJSONDecoder +from ansible.plugins.lookup import LookupBase + + +class BitwardenException(AnsibleError): + pass + + +class Bitwarden(object): + + def __init__(self, path='bw'): + self._cli_path = path + self._session = None + + @property + def cli_path(self): + return self._cli_path + + @property + def session(self): + return self._session + + @session.setter + def session(self, value): + self._session = value + + @property + def unlocked(self): + out, err = self._run(['status'], stdin="") + decoded = AnsibleJSONDecoder().raw_decode(out)[0] + return decoded['status'] == 'unlocked' + + def _run(self, args, stdin=None, expected_rc=0): + if self.session: + args += ['--session', self.session] + + p = Popen([self.cli_path] + args, stdout=PIPE, stderr=PIPE, stdin=PIPE) + out, err = p.communicate(to_bytes(stdin)) + rc = p.wait() + if rc != expected_rc: + if len(args) > 2 and args[0] == 'get' and args[1] == 'item' and b'Not found.' in err: + return 'null', '' + raise BitwardenException(err) + return to_text(out, errors='surrogate_or_strict'), to_text(err, errors='surrogate_or_strict') + + def _get_matches(self, search_value, search_field, collection_id=None, organization_id=None): + """Return matching records whose search_field is equal to key. 
+ """ + + # Prepare set of params for Bitwarden CLI + if search_field == 'id': + params = ['get', 'item', search_value] + else: + params = ['list', 'items'] + if search_value: + params.extend(['--search', search_value]) + + if collection_id: + params.extend(['--collectionid', collection_id]) + if organization_id: + params.extend(['--organizationid', organization_id]) + + out, err = self._run(params) + + # This includes things that matched in different fields. + initial_matches = AnsibleJSONDecoder().raw_decode(out)[0] + + if search_field == 'id': + if initial_matches is None: + initial_matches = [] + else: + initial_matches = [initial_matches] + + # Filter to only include results from the right field, if a search is requested by value or field + return [item for item in initial_matches + if not search_value or not search_field or item.get(search_field) == search_value] + + def get_field(self, field, search_value, search_field="name", collection_id=None, organization_id=None): + """Return a list of the specified field for records whose search_field match search_value + and filtered by collection if collection has been provided. + + If field is None, return the whole record for each match. 
+ """ + matches = self._get_matches(search_value, search_field, collection_id, organization_id) + if not field: + return matches + field_matches = [] + for match in matches: + # if there are no custom fields, then `match` has no key 'fields' + if 'fields' in match: + custom_field_found = False + for custom_field in match['fields']: + if field == custom_field['name']: + field_matches.append(custom_field['value']) + custom_field_found = True + break + if custom_field_found: + continue + if 'login' in match and field in match['login']: + field_matches.append(match['login'][field]) + continue + if field in match: + field_matches.append(match[field]) + continue + + if matches and not field_matches: + raise AnsibleError(f"field {field} does not exist in {search_value}") + + return field_matches + + def get_collection_ids(self, collection_name: str, organization_id=None) -> list[str]: + """Return matching IDs of collections whose name is equal to collection_name.""" + + # Prepare set of params for Bitwarden CLI + params = ['list', 'collections', '--search', collection_name] + + if organization_id: + params.extend(['--organizationid', organization_id]) + + out, err = self._run(params) + + # This includes things that matched in different fields. 
+ initial_matches = AnsibleJSONDecoder().raw_decode(out)[0] + + # Filter to only return the ID of a collections with exactly matching name + return [item['id'] for item in initial_matches + if str(item.get('name')).lower() == collection_name.lower()] + + +class LookupModule(LookupBase): + + def run(self, terms=None, variables=None, **kwargs): + self.set_options(var_options=variables, direct=kwargs) + field = self.get_option('field') + search_field = self.get_option('search') + collection_id = self.get_option('collection_id') + collection_name = self.get_option('collection_name') + organization_id = self.get_option('organization_id') + result_count = self.get_option('result_count') + _bitwarden.session = self.get_option('bw_session') + + if not _bitwarden.unlocked: + raise AnsibleError("Bitwarden Vault locked. Run 'bw unlock'.") + + if not terms: + terms = [None] + + if collection_name and collection_id: + raise AnsibleOptionsError("'collection_name' and 'collection_id' are mutually exclusive!") + elif collection_name: + collection_ids = _bitwarden.get_collection_ids(collection_name, organization_id) + if not collection_ids: + raise BitwardenException("No matching collections found!") + else: + collection_ids = [collection_id] + + results = [ + _bitwarden.get_field(field, term, search_field, collection_id, organization_id) + for collection_id in collection_ids + for term in terms + ] + + for result in results: + if result_count is not None and len(result) != result_count: + raise BitwardenException( + f"Number of results doesn't match result_count! 
({len(result)} != {result_count})") + + return results + + +_bitwarden = Bitwarden() diff --git a/plugins/lookup/bitwarden_secrets_manager.py b/plugins/lookup/bitwarden_secrets_manager.py new file mode 100644 index 0000000000..0227c16bae --- /dev/null +++ b/plugins/lookup/bitwarden_secrets_manager.py @@ -0,0 +1,161 @@ +# Copyright (c) 2023, jantari (https://github.com/jantari) +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later +from __future__ import annotations + + +DOCUMENTATION = r""" +name: bitwarden_secrets_manager +author: + - jantari (@jantari) +requirements: + - bws (command line utility) +short_description: Retrieve secrets from Bitwarden Secrets Manager +version_added: 7.2.0 +description: + - Retrieve secrets from Bitwarden Secrets Manager. +options: + _terms: + description: Secret ID(s) to fetch values for. + required: true + type: list + elements: str + bws_access_token: + description: The BWS access token to use for this lookup. 
+ env: + - name: BWS_ACCESS_TOKEN + required: true + type: str +""" + +EXAMPLES = r""" +- name: Get a secret relying on the BWS_ACCESS_TOKEN environment variable for authentication + ansible.builtin.debug: + msg: >- + {{ lookup("community.general.bitwarden_secrets_manager", "2bc23e48-4932-40de-a047-5524b7ddc972") }} + +- name: Get a secret passing an explicit access token for authentication + ansible.builtin.debug: + msg: >- + {{ + lookup( + "community.general.bitwarden_secrets_manager", + "2bc23e48-4932-40de-a047-5524b7ddc972", + bws_access_token="9.4f570d14-4b54-42f5-bc07-60f4450b1db5.YmluYXJ5LXNvbWV0aGluZy0xMjMK:d2h5IGhlbGxvIHRoZXJlCg==" + ) + }} + +- name: Get two different secrets each using a different access token for authentication + ansible.builtin.debug: + msg: + - '{{ lookup("community.general.bitwarden_secrets_manager", "2bc23e48-4932-40de-a047-5524b7ddc972", bws_access_token=token1) }}' + - '{{ lookup("community.general.bitwarden_secrets_manager", "9d89af4c-eb5d-41f5-bb0f-4ae81215c768", bws_access_token=token2) }}' + vars: + token1: "9.4f570d14-4b54-42f5-bc07-60f4450b1db5.YmluYXJ5LXNvbWV0aGluZy0xMjMK:d2h5IGhlbGxvIHRoZXJlCg==" + token2: "1.69b72797-6ea9-4687-a11e-848e41a30ae6.YW5zaWJsZSBpcyBncmVhdD8K:YW5zaWJsZSBpcyBncmVhdAo=" + +- name: Get just the value of a secret + ansible.builtin.debug: + msg: >- + {{ lookup("community.general.bitwarden_secrets_manager", "2bc23e48-4932-40de-a047-5524b7ddc972").value }} +""" + +RETURN = r""" +_raw: + description: List containing one or more secrets. 
+ type: list + elements: dict +""" + +from subprocess import Popen, PIPE +from time import sleep + +from ansible.errors import AnsibleLookupError +from ansible.module_utils.common.text.converters import to_text +from ansible.parsing.ajson import AnsibleJSONDecoder +from ansible.plugins.lookup import LookupBase + +from ansible_collections.community.general.plugins.module_utils.version import LooseVersion + + +class BitwardenSecretsManagerException(AnsibleLookupError): + pass + + +class BitwardenSecretsManager(object): + def __init__(self, path='bws'): + self._cli_path = path + self._max_retries = 3 + self._retry_delay = 1 + + @property + def cli_path(self): + return self._cli_path + + def _run_with_retry(self, args, stdin=None, retries=0): + out, err, rc = self._run(args, stdin) + + if rc != 0: + if retries >= self._max_retries: + raise BitwardenSecretsManagerException("Max retries exceeded. Unable to retrieve secret.") + + if "Too many requests" in err: + delay = self._retry_delay * (2 ** retries) + sleep(delay) + return self._run_with_retry(args, stdin, retries + 1) + else: + raise BitwardenSecretsManagerException(f"Command failed with return code {rc}: {err}") + + return out, err, rc + + def _run(self, args, stdin=None): + p = Popen([self.cli_path] + args, stdout=PIPE, stderr=PIPE, stdin=PIPE) + out, err = p.communicate(stdin) + rc = p.wait() + return to_text(out, errors='surrogate_or_strict'), to_text(err, errors='surrogate_or_strict'), rc + + def get_bws_version(self): + """Get the version of the Bitwarden Secrets Manager CLI. + """ + out, err, rc = self._run(['--version']) + if rc != 0: + raise BitwardenSecretsManagerException(to_text(err)) + # strip the prefix and grab the last segment, the version number + return out.split()[-1] + + def get_secret(self, secret_id, bws_access_token): + """Get and return the secret with the given secret_id. 
+ """ + + # Prepare set of params for Bitwarden Secrets Manager CLI + # Color output was not always disabled correctly with the default 'auto' setting so explicitly disable it. + params = [ + '--color', 'no', + '--access-token', bws_access_token + ] + + # bws version 0.3.0 introduced a breaking change in the command line syntax: + # pre-0.3.0: verb noun + # 0.3.0 and later: noun verb + bws_version = self.get_bws_version() + if LooseVersion(bws_version) < LooseVersion('0.3.0'): + params.extend(['get', 'secret', secret_id]) + else: + params.extend(['secret', 'get', secret_id]) + + out, err, rc = self._run_with_retry(params) + if rc != 0: + raise BitwardenSecretsManagerException(to_text(err)) + + return AnsibleJSONDecoder().raw_decode(out)[0] + + +class LookupModule(LookupBase): + def run(self, terms, variables=None, **kwargs): + self.set_options(var_options=variables, direct=kwargs) + bws_access_token = self.get_option('bws_access_token') + + return [_bitwarden_secrets_manager.get_secret(term, bws_access_token) for term in terms] + + +_bitwarden_secrets_manager = BitwardenSecretsManager() diff --git a/plugins/lookup/cartesian.py b/plugins/lookup/cartesian.py index 98043eba34..1e07326a17 100644 --- a/plugins/lookup/cartesian.py +++ b/plugins/lookup/cartesian.py @@ -1,26 +1,27 @@ -# -*- coding: utf-8 -*- -# (c) 2013, Bradley Young -# (c) 2017 Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +# Copyright (c) 2013, Bradley Young +# Copyright (c) 2017 Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later +from __future__ import annotations -DOCUMENTATION = ''' - author: Unknown (!UNKNOWN) - name: cartesian - short_description: returns the cartesian product of lists +DOCUMENTATION = r""" +author: Unknown 
(!UNKNOWN) +name: cartesian +short_description: Returns the cartesian product of lists +description: + - Takes the input lists and returns a list that represents the product of the input lists. + - It is clearer with an example, it turns [1, 2, 3], [a, b] into [1, a], [1, b], [2, a], [2, b], [3, a], [3, b]. + - You can see the exact syntax in the examples section. +options: + _terms: description: - - Takes the input lists and returns a list that represents the product of the input lists. - - It is clearer with an example, it turns [1, 2, 3], [a, b] into [1, a], [1, b], [2, a], [2, b], [3, a], [3, b]. - You can see the exact syntax in the examples section. - options: - _raw: - description: - - a set of lists - required: True -''' + - A set of lists. + type: list + elements: list + required: true +""" -EXAMPLES = """ +EXAMPLES = r""" - name: Example of the change in the description ansible.builtin.debug: msg: "{{ lookup('community.general.cartesian', [1,2,3], [a, b])}}" @@ -31,15 +32,15 @@ EXAMPLES = """ with_community.general.cartesian: - "{{list1}}" - "{{list2}}" - - [1,2,3,4,5,6] + - [1, 2, 3, 4, 5, 6] """ -RETURN = """ - _list: - description: - - list of lists composed of elements of the input lists - type: list - elements: list +RETURN = r""" +_list: + description: + - List of lists composed of elements of the input lists. 
+ type: list + elements: list """ from itertools import product @@ -63,11 +64,11 @@ class LookupModule(LookupBase): """ results = [] for x in terms: - intermediate = listify_lookup_plugin_terms(x, templar=self._templar, loader=self._loader) - results.append(intermediate) + results.append(listify_lookup_plugin_terms(x, templar=self._templar)) return results def run(self, terms, variables=None, **kwargs): + self.set_options(var_options=variables, direct=kwargs) terms = self._lookup_variables(terms) diff --git a/plugins/lookup/chef_databag.py b/plugins/lookup/chef_databag.py index f5ccc766c2..69a53d007e 100644 --- a/plugins/lookup/chef_databag.py +++ b/plugins/lookup/chef_databag.py @@ -1,44 +1,44 @@ -# -*- coding: utf-8 -*- -# (c) 2016, Josh Bradley -# (c) 2017 Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +# Copyright (c) 2016, Josh Bradley +# Copyright (c) 2017 Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later +from __future__ import annotations -DOCUMENTATION = ''' - author: Unknown (!UNKNOWN) - name: chef_databag - short_description: fetches data from a Chef Databag +DOCUMENTATION = r""" +author: Unknown (!UNKNOWN) +name: chef_databag +short_description: Fetches data from a Chef Databag +description: + - 'This is a lookup plugin to provide access to chef data bags using the pychef package. It interfaces with the chef server + API using the same methods to find a knife or chef-client config file to load parameters from, starting from either the + given base path or the current working directory. 
The lookup order mirrors the one from Chef, all folders in the base + path are walked back looking for the following configuration file in order: C(.chef/knife.rb), C(~/.chef/knife.rb), C(/etc/chef/client.rb).' +requirements: + - "pychef (L(Python library, https://pychef.readthedocs.io), C(pip install pychef))" +options: + name: description: - - "This is a lookup plugin to provide access to chef data bags using the pychef package. - It interfaces with the chef server api using the same methods to find a knife or chef-client config file to load parameters from, - starting from either the given base path or the current working directory. - The lookup order mirrors the one from Chef, all folders in the base path are walked back looking for the following configuration - file in order : .chef/knife.rb, ~/.chef/knife.rb, /etc/chef/client.rb" - requirements: - - "pychef (python library https://pychef.readthedocs.io `pip install pychef`)" - options: - name: - description: - - Name of the databag - required: True - item: - description: - - Item to fetch - required: True -''' - -EXAMPLES = """ - - ansible.builtin.debug: - msg: "{{ lookup('community.general.chef_databag', 'name=data_bag_name item=data_bag_item') }}" + - Name of the databag. + type: string + required: true + item: + description: + - Item to fetch. + type: string + required: true """ -RETURN = """ - _raw: - description: - - The value from the databag. - type: list - elements: dict +EXAMPLES = r""" +- ansible.builtin.debug: + msg: "{{ lookup('community.general.chef_databag', 'name=data_bag_name item=data_bag_item') }}" +""" + +RETURN = r""" +_raw: + description: + - The value from the databag. 
+ type: list + elements: dict """ from ansible.errors import AnsibleError @@ -78,11 +78,11 @@ class LookupModule(LookupBase): setattr(self, arg, parsed) except ValueError: raise AnsibleError( - "can't parse arg {0}={1} as string".format(arg, arg_raw) + f"can't parse arg {arg}={arg_raw} as string" ) if args: raise AnsibleError( - "unrecognized arguments to with_sequence: %r" % list(args.keys()) + f"unrecognized arguments to with_sequence: {list(args.keys())!r}" ) def run(self, terms, variables=None, **kwargs): diff --git a/plugins/lookup/collection_version.py b/plugins/lookup/collection_version.py index bb67b3b153..7a9eaf10bd 100644 --- a/plugins/lookup/collection_version.py +++ b/plugins/lookup/collection_version.py @@ -1,72 +1,68 @@ -# (c) 2021, Felix Fontein -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# Copyright (c) 2021, Felix Fontein +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = """ +DOCUMENTATION = r""" name: collection_version author: Felix Fontein (@felixfontein) version_added: "4.0.0" short_description: Retrieves the version of an installed collection description: - - This lookup allows to query the version of an installed collection, and to determine whether a - collection is installed at all. - - By default it returns C(none) for non-existing collections and C(*) for collections without a - version number. The latter should only happen in development environments, or when installing - a collection from git which has no version in its C(galaxy.yml). This behavior can be adjusted - by providing other values with I(result_not_found) and I(result_no_version). 
+ - This lookup allows to query the version of an installed collection, and to determine whether a collection is installed + at all. + - By default it returns V(none) for non-existing collections and V(*) for collections without a version number. The latter + should only happen in development environments, or when installing a collection from git which has no version in its C(galaxy.yml). + This behavior can be adjusted by providing other values with O(result_not_found) and O(result_no_version). options: _terms: description: - The collections to look for. - - For example C(community.general). + - For example V(community.general). type: list elements: str required: true result_not_found: description: - The value to return when the collection could not be found. - - By default, C(none) is returned. + - By default, V(none) is returned. type: string default: ~ result_no_version: description: - The value to return when the collection has no version number. - - This can happen for collections installed from git which do not have a version number - in C(galaxy.yml). - - By default, C(*) is returned. + - This can happen for collections installed from git which do not have a version number in C(galaxy.yml). + - By default, V(*) is returned. type: string default: '*' """ -EXAMPLES = """ +EXAMPLES = r""" - name: Check version of community.general ansible.builtin.debug: msg: "community.general version {{ lookup('community.general.collection_version', 'community.general') }}" """ -RETURN = """ - _raw: - description: - - The version number of the collections listed as input. - - If a collection can not be found, it will return the value provided in I(result_not_found). - By default, this is C(none). - - If a collection can be found, but the version not identified, it will return the value provided in - I(result_no_version). By default, this is C(*). This can happen for collections installed - from git which do not have a version number in C(galaxy.yml). 
- type: list - elements: str +RETURN = r""" +_raw: + description: + - The version number of the collections listed as input. + - If a collection can not be found, it returns the value provided in O(result_not_found). By default, this is V(none). + - If a collection can be found, but the version not identified, it returns the value provided in O(result_no_version). + By default, this is V(*). This can happen for collections installed from git which do not have a version number in V(galaxy.yml). + type: list + elements: str """ import json import os import re +from importlib import import_module import yaml from ansible.errors import AnsibleLookupError -from ansible.module_utils.compat.importlib import import_module from ansible.plugins.lookup import LookupBase @@ -97,15 +93,10 @@ def load_collection_meta(collection_pkg, no_version='*'): if os.path.exists(manifest_path): return load_collection_meta_manifest(manifest_path) - # Try to load galaxy.y(a)ml + # Try to load galaxy.yml galaxy_path = os.path.join(path, 'galaxy.yml') - galaxy_alt_path = os.path.join(path, 'galaxy.yaml') - # galaxy.yaml was only supported in ansible-base 2.10 and ansible-core 2.11. Support was removed - # in https://github.com/ansible/ansible/commit/595413d11346b6f26bb3d9df2d8e05f2747508a3 for - # ansible-core 2.12. 
- for path in (galaxy_path, galaxy_alt_path): - if os.path.exists(path): - return load_collection_meta_galaxy(path, no_version=no_version) + if os.path.exists(galaxy_path): + return load_collection_meta_galaxy(galaxy_path, no_version=no_version) return {} @@ -119,10 +110,10 @@ class LookupModule(LookupBase): for term in terms: if not FQCN_RE.match(term): - raise AnsibleLookupError('"{term}" is not a FQCN'.format(term=term)) + raise AnsibleLookupError(f'"{term}" is not a FQCN') try: - collection_pkg = import_module('ansible_collections.{fqcn}'.format(fqcn=term)) + collection_pkg = import_module(f'ansible_collections.{term}') except ImportError: # Collection not found result.append(not_found) @@ -131,7 +122,7 @@ class LookupModule(LookupBase): try: data = load_collection_meta(collection_pkg, no_version=no_version) except Exception as exc: - raise AnsibleLookupError('Error while loading metadata for {fqcn}: {error}'.format(fqcn=term, error=exc)) + raise AnsibleLookupError(f'Error while loading metadata for {term}: {exc}') result.append(data.get('version', no_version)) diff --git a/plugins/lookup/consul_kv.py b/plugins/lookup/consul_kv.py index 3ad03bfe40..c9cc3c6399 100644 --- a/plugins/lookup/consul_kv.py +++ b/plugins/lookup/consul_kv.py @@ -1,111 +1,117 @@ -# -*- coding: utf-8 -*- -# (c) 2015, Steve Gargan -# (c) 2017 Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import (absolute_import, division, print_function) +# Copyright (c) 2015, Steve Gargan +# Copyright (c) 2017 Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later +from __future__ import annotations -__metaclass__ = type -DOCUMENTATION = ''' - author: Unknown (!UNKNOWN) - name: consul_kv - short_description: Fetch metadata from a Consul key value store. 
- description: - - Lookup metadata for a playbook from the key value store in a Consul cluster. - Values can be easily set in the kv store with simple rest commands - - C(curl -X PUT -d 'some-value' http://localhost:8500/v1/kv/ansible/somedata) - requirements: - - 'python-consul python library U(https://python-consul.readthedocs.io/en/latest/#installation)' - options: - _raw: - description: List of key(s) to retrieve. - type: list - elements: string - recurse: - type: boolean - description: If true, will retrieve all the values that have the given key as prefix. - default: False - index: - description: - - If the key has a value with the specified index then this is returned allowing access to historical values. - datacenter: - description: - - Retrieve the key from a consul datacenter other than the default for the consul host. - token: - description: The acl token to allow access to restricted values. - host: - default: localhost - description: - - The target to connect to, must be a resolvable address. - Will be determined from C(ANSIBLE_CONSUL_URL) if that is set. - - "C(ANSIBLE_CONSUL_URL) should look like this: C(https://my.consul.server:8500)" - env: - - name: ANSIBLE_CONSUL_URL - ini: - - section: lookup_consul - key: host - port: - description: - - The port of the target host to connect to. - - If you use C(ANSIBLE_CONSUL_URL) this value will be used from there. - default: 8500 - scheme: - default: http - description: - - Whether to use http or https. - - If you use C(ANSIBLE_CONSUL_URL) this value will be used from there. - validate_certs: - default: True - description: Whether to verify the ssl connection or not. - env: - - name: ANSIBLE_CONSUL_VALIDATE_CERTS - ini: - - section: lookup_consul - key: validate_certs - client_cert: - description: The client cert to verify the ssl connection. 
- env: - - name: ANSIBLE_CONSUL_CLIENT_CERT - ini: - - section: lookup_consul - key: client_cert - url: - description: "The target to connect to, should look like this: C(https://my.consul.server:8500)." - type: str - version_added: 1.0.0 - env: - - name: ANSIBLE_CONSUL_URL - ini: - - section: lookup_consul - key: url -''' - -EXAMPLES = """ - - ansible.builtin.debug: - msg: 'key contains {{item}}' - with_community.general.consul_kv: - - 'key/to/retrieve' - - - name: Parameters can be provided after the key be more specific about what to retrieve - ansible.builtin.debug: - msg: 'key contains {{item}}' - with_community.general.consul_kv: - - 'key/to recurse=true token=E6C060A9-26FB-407A-B83E-12DDAFCB4D98' - - - name: retrieving a KV from a remote cluster on non default port - ansible.builtin.debug: - msg: "{{ lookup('community.general.consul_kv', 'my/key', host='10.10.10.10', port='2000') }}" -""" - -RETURN = """ +DOCUMENTATION = r""" +author: Unknown (!UNKNOWN) +name: consul_kv +short_description: Fetch metadata from a Consul key value store +description: + - Lookup metadata for a playbook from the key value store in a Consul cluster. Values can be easily set in the kv store + with simple rest commands. + - C(curl -X PUT -d 'some-value' http://localhost:8500/v1/kv/ansible/somedata). +requirements: + - 'py-consul python library U(https://github.com/criteo/py-consul?tab=readme-ov-file#installation)' +options: _raw: + description: List of key(s) to retrieve. + type: list + elements: string + recurse: + type: boolean + description: If V(true), retrieves all the values that have the given key as prefix. + default: false + index: description: - - Value(s) stored in consul. - type: dict + - If the key has a value with the specified index then this is returned allowing access to historical values. + type: int + datacenter: + description: + - Retrieve the key from a consul datacenter other than the default for the consul host. 
+ type: str + token: + description: The acl token to allow access to restricted values. + type: str + host: + default: localhost + type: str + description: + - The target to connect to, must be a resolvable address. + - It is determined from E(ANSIBLE_CONSUL_URL) if that is set. + ini: + - section: lookup_consul + key: host + port: + description: + - The port of the target host to connect to. + - If you use E(ANSIBLE_CONSUL_URL) this value is used from there. + type: int + default: 8500 + scheme: + default: http + type: str + description: + - Whether to use http or https. + - If you use E(ANSIBLE_CONSUL_URL) this value is used from there. + validate_certs: + default: true + description: Whether to verify the TLS connection or not. + type: bool + env: + - name: ANSIBLE_CONSUL_VALIDATE_CERTS + ini: + - section: lookup_consul + key: validate_certs + client_cert: + description: The client cert to verify the TLS connection. + type: str + env: + - name: ANSIBLE_CONSUL_CLIENT_CERT + ini: + - section: lookup_consul + key: client_cert + url: + description: + - The target to connect to. + - 'Should look like this: V(https://my.consul.server:8500).' 
+ type: str + version_added: 1.0.0 + env: + - name: ANSIBLE_CONSUL_URL + ini: + - section: lookup_consul + key: url """ -import os -from ansible.module_utils.six.moves.urllib.parse import urlparse +EXAMPLES = r""" +- ansible.builtin.debug: + msg: 'key contains {{item}}' + with_community.general.consul_kv: + - 'key/to/retrieve' + +- name: Parameters can be provided after the key be more specific about what to retrieve + ansible.builtin.debug: + msg: 'key contains {{item}}' + with_community.general.consul_kv: + - 'key/to recurse=true token=E6C060A9-26FB-407A-B83E-12DDAFCB4D98' + +- name: retrieving a KV from a remote cluster on non default port + ansible.builtin.debug: + msg: "{{ lookup('community.general.consul_kv', 'my/key', host='10.10.10.10', port=2000) }}" +""" + +RETURN = r""" +_raw: + description: + - Value(s) stored in consul. + type: dict +""" + +from urllib.parse import urlparse + from ansible.errors import AnsibleError, AnsibleAssertionError from ansible.plugins.lookup import LookupBase from ansible.module_utils.common.text.converters import to_text @@ -124,7 +130,7 @@ class LookupModule(LookupBase): if not HAS_CONSUL: raise AnsibleError( - 'python-consul is required for consul_kv lookup. see http://python-consul.readthedocs.org/en/latest/#installation') + 'py-consul is required for consul_kv lookup. see https://github.com/criteo/py-consul?tab=readme-ov-file#installation') # get options self.set_options(direct=kwargs) @@ -164,7 +170,7 @@ class LookupModule(LookupBase): values.append(to_text(results[1]['Value'])) except Exception as e: raise AnsibleError( - "Error locating '%s' in kv store. Error was %s" % (term, e)) + f"Error locating '{term}' in kv store. 
Error was {e}") return values @@ -185,7 +191,7 @@ class LookupModule(LookupBase): if param and len(param) > 0: name, value = param.split('=') if name not in paramvals: - raise AnsibleAssertionError("%s not a valid consul lookup parameter" % name) + raise AnsibleAssertionError(f"{name} not a valid consul lookup parameter") paramvals[name] = value except (ValueError, AssertionError) as e: raise AnsibleError(e) diff --git a/plugins/lookup/credstash.py b/plugins/lookup/credstash.py index 143c66c112..01e6a1a8fe 100644 --- a/plugins/lookup/credstash.py +++ b/plugins/lookup/credstash.py @@ -1,50 +1,57 @@ -# -*- coding: utf-8 -*- -# (c) 2015, Ensighten -# (c) 2017 Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +# Copyright (c) 2015, Ensighten +# Copyright (c) 2017 Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later +from __future__ import annotations -DOCUMENTATION = ''' - author: Unknown (!UNKNOWN) - name: credstash - short_description: retrieve secrets from Credstash on AWS - requirements: - - credstash (python library) - description: - - "Credstash is a small utility for managing secrets using AWS's KMS and DynamoDB: https://github.com/fugue/credstash" - options: - _terms: - description: term or list of terms to lookup in the credit store - type: list - elements: string - required: true - table: - description: name of the credstash table to query - default: 'credential-store' - version: - description: Credstash version - region: - description: AWS region - profile_name: - description: AWS profile to use for authentication - env: - - name: AWS_PROFILE - aws_access_key_id: - description: AWS access key ID - env: - - name: AWS_ACCESS_KEY_ID - aws_secret_access_key: - description: AWS access key - 
env: - - name: AWS_SECRET_ACCESS_KEY - aws_session_token: - description: AWS session token - env: - - name: AWS_SESSION_TOKEN -''' +DOCUMENTATION = r""" +author: Unknown (!UNKNOWN) +name: credstash +short_description: Retrieve secrets from Credstash on AWS +requirements: + - credstash (python library) +description: + - "Credstash is a small utility for managing secrets using AWS's KMS and DynamoDB: https://github.com/fugue/credstash." +options: + _terms: + description: Term or list of terms to lookup in the credit store. + type: list + elements: string + required: true + table: + description: Name of the credstash table to query. + type: str + default: 'credential-store' + version: + description: Credstash version. + type: str + default: '' + region: + description: AWS region. + type: str + profile_name: + description: AWS profile to use for authentication. + type: str + env: + - name: AWS_PROFILE + aws_access_key_id: + description: AWS access key ID. + type: str + env: + - name: AWS_ACCESS_KEY_ID + aws_secret_access_key: + description: AWS access key. + type: str + env: + - name: AWS_SECRET_ACCESS_KEY + aws_session_token: + description: AWS session token. 
+ type: str + env: + - name: AWS_SESSION_TOKEN +""" -EXAMPLES = """ +EXAMPLES = r""" - name: first use credstash to store your secrets ansible.builtin.shell: credstash put my-github-password secure123 @@ -68,24 +75,22 @@ EXAMPLES = """ environment: production tasks: - - name: "Test credstash lookup plugin -- get the password with a context passed as a variable" - ansible.builtin.debug: - msg: "{{ lookup('community.general.credstash', 'some-password', context=context) }}" + - name: "Test credstash lookup plugin -- get the password with a context passed as a variable" + ansible.builtin.debug: + msg: "{{ lookup('community.general.credstash', 'some-password', context=context) }}" - - name: "Test credstash lookup plugin -- get the password with a context defined here" - ansible.builtin.debug: - msg: "{{ lookup('community.general.credstash', 'some-password', context=dict(app='my_app', environment='production')) }}" + - name: "Test credstash lookup plugin -- get the password with a context defined here" + ansible.builtin.debug: + msg: "{{ lookup('community.general.credstash', 'some-password', context=dict(app='my_app', environment='production')) }}" """ -RETURN = """ - _raw: - description: - - Value(s) stored in Credstash. - type: str +RETURN = r""" +_raw: + description: + - Value(s) stored in Credstash. 
+ type: str """ -import os - from ansible.errors import AnsibleError from ansible.plugins.lookup import LookupBase @@ -99,28 +104,39 @@ except ImportError: class LookupModule(LookupBase): - def run(self, terms, variables, **kwargs): - + def run(self, terms, variables=None, **kwargs): if not CREDSTASH_INSTALLED: raise AnsibleError('The credstash lookup plugin requires credstash to be installed.') + self.set_options(var_options=variables, direct=kwargs) + + version = self.get_option('version') + region = self.get_option('region') + table = self.get_option('table') + profile_name = self.get_option('profile_name') + aws_access_key_id = self.get_option('aws_access_key_id') + aws_secret_access_key = self.get_option('aws_secret_access_key') + aws_session_token = self.get_option('aws_session_token') + + context = { + k: v for k, v in kwargs.items() + if k not in ('version', 'region', 'table', 'profile_name', 'aws_access_key_id', 'aws_secret_access_key', 'aws_session_token') + } + + kwargs_pass = { + 'profile_name': profile_name, + 'aws_access_key_id': aws_access_key_id, + 'aws_secret_access_key': aws_secret_access_key, + 'aws_session_token': aws_session_token, + } + ret = [] for term in terms: try: - version = kwargs.pop('version', '') - region = kwargs.pop('region', None) - table = kwargs.pop('table', 'credential-store') - profile_name = kwargs.pop('profile_name', os.getenv('AWS_PROFILE', None)) - aws_access_key_id = kwargs.pop('aws_access_key_id', os.getenv('AWS_ACCESS_KEY_ID', None)) - aws_secret_access_key = kwargs.pop('aws_secret_access_key', os.getenv('AWS_SECRET_ACCESS_KEY', None)) - aws_session_token = kwargs.pop('aws_session_token', os.getenv('AWS_SESSION_TOKEN', None)) - kwargs_pass = {'profile_name': profile_name, 'aws_access_key_id': aws_access_key_id, - 'aws_secret_access_key': aws_secret_access_key, 'aws_session_token': aws_session_token} - val = credstash.getSecret(term, version, region, table, context=kwargs, **kwargs_pass) + 
ret.append(credstash.getSecret(term, version, region, table, context=context, **kwargs_pass)) except credstash.ItemNotFound: - raise AnsibleError('Key {0} not found'.format(term)) + raise AnsibleError(f'Key {term} not found') except Exception as e: - raise AnsibleError('Encountered exception while fetching {0}: {1}'.format(term, e)) - ret.append(val) + raise AnsibleError(f'Encountered exception while fetching {term}: {e}') return ret diff --git a/plugins/lookup/cyberarkpassword.py b/plugins/lookup/cyberarkpassword.py index 663ec38808..955ba4a89a 100644 --- a/plugins/lookup/cyberarkpassword.py +++ b/plugins/lookup/cyberarkpassword.py @@ -1,62 +1,67 @@ -# -*- coding: utf-8 -*- -# (c) 2017, Edward Nunez -# (c) 2017 Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +# Copyright (c) 2017, Edward Nunez +# Copyright (c) 2017 Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later +from __future__ import annotations -DOCUMENTATION = ''' - author: Unknown (!UNKNOWN) - name: cyberarkpassword - short_description: get secrets from CyberArk AIM - requirements: - - CyberArk AIM tool installed +DOCUMENTATION = r""" +author: Unknown (!UNKNOWN) +name: cyberarkpassword +short_description: Get secrets from CyberArk AIM +requirements: + - CyberArk AIM tool installed +description: + - Get secrets from CyberArk AIM. +options: + _command: + description: Cyberark CLI utility. + type: string + env: + - name: AIM_CLIPASSWORDSDK_CMD + default: '/opt/CARKaim/sdk/clipasswordsdk' + appid: + description: Defines the unique ID of the application that is issuing the password request. + type: string + required: true + query: + description: Describes the filter criteria for the password retrieval. 
+ type: string + required: true + output: description: - - Get secrets from CyberArk AIM. - options : - _command: - description: Cyberark CLI utility. - env: - - name: AIM_CLIPASSWORDSDK_CMD - default: '/opt/CARKaim/sdk/clipasswordsdk' - appid: - description: Defines the unique ID of the application that is issuing the password request. - required: True - query: - description: Describes the filter criteria for the password retrieval. - required: True - output: - description: - - Specifies the desired output fields separated by commas. - - "They could be: Password, PassProps., PasswordChangeInProcess" - default: 'password' - _extra: - description: for extra_params values please check parameters for clipasswordsdk in CyberArk's "Credential Provider and ASCP Implementation Guide" - notes: - - For Ansible on Windows, please change the -parameters (-p, -d, and -o) to /parameters (/p, /d, and /o) and change the location of CLIPasswordSDK.exe. -''' - -EXAMPLES = """ - - name: passing options to the lookup - ansible.builtin.debug: - msg: '{{ lookup("community.general.cyberarkpassword", cyquery) }}' - vars: - cyquery: - appid: "app_ansible" - query: "safe=CyberArk_Passwords;folder=root;object=AdminPass" - output: "Password,PassProps.UserName,PassProps.Address,PasswordChangeInProcess" - - - - name: used in a loop - ansible.builtin.debug: - msg: "{{item}}" - with_community.general.cyberarkpassword: - appid: 'app_ansible' - query: 'safe=CyberArk_Passwords;folder=root;object=AdminPass' - output: 'Password,PassProps.UserName,PassProps.Address,PasswordChangeInProcess' + - Specifies the desired output fields separated by commas. + - 'They could be: Password, PassProps., PasswordChangeInProcess.' + type: string + default: 'password' + _extra: + description: For extra_params values please check parameters for clipasswordsdk in CyberArk's "Credential Provider and + ASCP Implementation Guide". 
+notes: + - For Ansible on Windows, please change the -parameters (C(-p), C(-d), and C(-o)) to /parameters (C(/p), C(/d), and C(/o)) + and change the location of C(CLIPasswordSDK.exe). """ -RETURN = """ +EXAMPLES = r""" +- name: passing options to the lookup + ansible.builtin.debug: + msg: '{{ lookup("community.general.cyberarkpassword", cyquery) }}' + vars: + cyquery: + appid: "app_ansible" + query: "safe=CyberArk_Passwords;folder=root;object=AdminPass" + output: "Password,PassProps.UserName,PassProps.Address,PasswordChangeInProcess" + + +- name: used in a loop + ansible.builtin.debug: + msg: "{{item}}" + with_community.general.cyberarkpassword: + appid: 'app_ansible' + query: 'safe=CyberArk_Passwords;folder=root;object=AdminPass' + output: 'Password,PassProps.UserName,PassProps.Address,PasswordChangeInProcess' +""" + +RETURN = r""" _result: description: A list containing one dictionary. type: list @@ -64,12 +69,12 @@ _result: contains: password: description: - - The actual value stored + - The actual value stored. passprops: - description: properties assigned to the entry + description: Properties assigned to the entry. type: dictionary passwordchangeinprocess: - description: did the password change? + description: Did the password change? 
""" import os @@ -79,8 +84,7 @@ from subprocess import Popen from ansible.errors import AnsibleError from ansible.plugins.lookup import LookupBase -from ansible.parsing.splitter import parse_kv -from ansible.module_utils.common.text.converters import to_bytes, to_text, to_native +from ansible.module_utils.common.text.converters import to_bytes, to_native from ansible.utils.display import Display display = Display() @@ -101,7 +105,7 @@ class CyberarkPassword: self.extra_parms = [] for key, value in kwargs.items(): self.extra_parms.append('-p') - self.extra_parms.append("%s=%s" % (key, value)) + self.extra_parms.append(f"{key}={value}") if self.appid is None: raise AnsibleError("CyberArk Error: No Application ID specified") @@ -126,8 +130,8 @@ class CyberarkPassword: all_parms = [ CLIPASSWORDSDK_CMD, 'GetPassword', - '-p', 'AppDescs.AppID=%s' % self.appid, - '-p', 'Query=%s' % self.query, + '-p', f'AppDescs.AppID={self.appid}', + '-p', f'Query={self.query}', '-o', self.output, '-d', self.b_delimiter] all_parms.extend(self.extra_parms) @@ -140,7 +144,7 @@ class CyberarkPassword: b_credential = to_bytes(tmp_output) if tmp_error: - raise AnsibleError("ERROR => %s " % (tmp_error)) + raise AnsibleError(f"ERROR => {tmp_error} ") if b_credential and b_credential.endswith(b'\n'): b_credential = b_credential[:-1] @@ -160,7 +164,7 @@ class CyberarkPassword: except subprocess.CalledProcessError as e: raise AnsibleError(e.output) except OSError as e: - raise AnsibleError("ERROR - AIM not installed or clipasswordsdk not in standard location. ERROR=(%s) => %s " % (to_text(e.errno), e.strerror)) + raise AnsibleError(f"ERROR - AIM not installed or clipasswordsdk not in standard location. 
ERROR=({e.errno}) => {e.strerror} ") return [result_dict] @@ -173,12 +177,11 @@ class LookupModule(LookupBase): """ def run(self, terms, variables=None, **kwargs): - - display.vvvv("%s" % terms) + display.vvvv(f"{terms}") if isinstance(terms, list): return_values = [] for term in terms: - display.vvvv("Term: %s" % term) + display.vvvv(f"Term: {term}") cyberark_conn = CyberarkPassword(**term) return_values.append(cyberark_conn.get()) return return_values diff --git a/plugins/lookup/dependent.py b/plugins/lookup/dependent.py index 1fb75ece66..89502e9518 100644 --- a/plugins/lookup/dependent.py +++ b/plugins/lookup/dependent.py @@ -1,35 +1,33 @@ -# -*- coding: utf-8 -*- -# (c) 2015-2021, Felix Fontein -# (c) 2018 Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +# Copyright (c) 2015-2021, Felix Fontein +# Copyright (c) 2018 Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later +from __future__ import annotations -DOCUMENTATION = """ +DOCUMENTATION = r""" name: dependent short_description: Composes a list with nested elements of other lists or dicts which can depend on previous loop variables author: Felix Fontein (@felixfontein) version_added: 3.1.0 description: - - "Takes the input lists and returns a list with elements that are lists, dictionaries, - or template expressions which evaluate to lists or dicts, composed of the elements of - the input evaluated lists and dictionaries." + - Takes the input lists and returns a list with elements that are lists, dictionaries, or template expressions which evaluate + to lists or dicts, composed of the elements of the input evaluated lists and dictionaries. 
options: - _raw: + _terms: description: - - A list where the elements are one-element dictionaries, mapping a name to a string, list, or dictionary. - The name is the index that is used in the result object. The value is iterated over as described below. + - A list where the elements are one-element dictionaries, mapping a name to a string, list, or dictionary. The name + is the index that is used in the result object. The value is iterated over as described below. - If the value is a list, it is simply iterated over. - - If the value is a dictionary, it is iterated over and returned as if they would be processed by the - R(ansible.builtin.dict2items filter,ansible_collections.ansible.builtin.dict2items_filter). - - If the value is a string, it is evaluated as Jinja2 expressions which can access the previously chosen - elements with C(item.). The result must be a list or a dictionary. + - If the value is a dictionary, it is iterated over and returned as if they would be processed by the P(ansible.builtin.dict2items#filter) + filter. + - If the value is a string, it is evaluated as Jinja2 expressions which can access the previously chosen elements with + C(item.). The result must be a list or a dictionary. 
type: list elements: dict required: true """ -EXAMPLES = """ +EXAMPLES = r""" - name: Install/remove public keys for active admin users ansible.posix.authorized_key: user: "{{ item.admin.key }}" @@ -75,9 +73,9 @@ EXAMPLES = """ loop_control: # Makes the output readable, so that it doesn't contain the whole subdictionaries and lists label: |- - {{ [item.zone.key, item.prefix.key, item.entry.key, - item.entry.value.ttl | default(3600), - item.entry.value.absent | default(False), item.entry.value.value] }} + {{ [item.zone.key, item.prefix.key, item.entry.key, + item.entry.value.ttl | default(3600), + item.entry.value.absent | default(False), item.entry.value.value] }} with_community.general.dependent: - zone: dns_setup - prefix: item.zone.value @@ -88,44 +86,55 @@ EXAMPLES = """ '': A: value: - - 1.2.3.4 + - 1.2.3.4 AAAA: value: - - "2a01:1:2:3::1" + - "2a01:1:2:3::1" 'test._domainkey': TXT: ttl: 300 value: - - '"k=rsa; t=s; p=MIGfMA..."' + - '"k=rsa; t=s; p=MIGfMA..."' example.org: 'www': A: value: - - 1.2.3.4 - - 5.6.7.8 + - 1.2.3.4 + - 5.6.7.8 """ -RETURN = """ - _list: - description: - - A list composed of dictionaries whose keys are the variable names from the input list. - type: list - elements: dict - sample: - - key1: a - key2: test - - key1: a - key2: foo - - key1: b - key2: bar +RETURN = r""" +_list: + description: + - A list composed of dictionaries whose keys are the variable names from the input list. 
+ type: list + elements: dict + sample: + - key1: a + key2: test + - key1: a + key2: foo + - key1: b + key2: bar """ from ansible.errors import AnsibleLookupError -from ansible.module_utils.common._collections_compat import Mapping, Sequence -from ansible.module_utils.six import string_types +from collections.abc import Mapping, Sequence from ansible.plugins.lookup import LookupBase from ansible.template import Templar +try: + from ansible.template import trust_as_template as _trust_as_template + HAS_DATATAGGING = True +except ImportError: + HAS_DATATAGGING = False + + +def _make_safe(value): + if HAS_DATATAGGING and isinstance(value, str): + return _trust_as_template(value) + return value + class LookupModule(LookupBase): def __evaluate(self, expression, templar, variables): @@ -135,7 +144,11 @@ class LookupModule(LookupBase): ``variables`` are the variables to use. """ templar.available_variables = variables or {} - return templar.template("{0}{1}{2}".format("{{", expression, "}}"), cache=False) + quoted_expression = "{0}{1}{2}".format("{{", expression, "}}") + if hasattr(templar, 'evaluate_expression'): + # This is available since the Data Tagging PR has been merged + return templar.evaluate_expression(_make_safe(expression)) + return templar.template(quoted_expression) def __process(self, result, terms, index, current, templar, variables): """Fills ``result`` list with evaluated items. 
@@ -161,12 +174,11 @@ class LookupModule(LookupBase): values = self.__evaluate(expression, templar, variables=vars) except Exception as e: raise AnsibleLookupError( - 'Caught "{error}" while evaluating {key!r} with item == {item!r}'.format( - error=e, key=key, item=current)) + f'Caught "{e}" while evaluating {key!r} with item == {current!r}') if isinstance(values, Mapping): for idx, val in sorted(values.items()): - current[key] = dict([('key', idx), ('value', val)]) + current[key] = dict(key=idx, value=val) self.__process(result, terms, index + 1, current, templar, variables) elif isinstance(values, Sequence): for elt in values: @@ -174,37 +186,38 @@ class LookupModule(LookupBase): self.__process(result, terms, index + 1, current, templar, variables) else: raise AnsibleLookupError( - 'Did not obtain dictionary or list while evaluating {key!r} with item == {item!r}, but {type}'.format( - key=key, item=current, type=type(values))) + f'Did not obtain dictionary or list while evaluating {key!r} with item == {current!r}, but {type(values)}') def run(self, terms, variables=None, **kwargs): """Generate list.""" + self.set_options(var_options=variables, direct=kwargs) + result = [] if len(terms) > 0: - templar = Templar(loader=self._templar._loader) + if HAS_DATATAGGING: + templar = self._templar.copy_with_new_env(available_variables={}) + else: + templar = Templar(loader=self._templar._loader) data = [] vars_so_far = set() for index, term in enumerate(terms): if not isinstance(term, Mapping): raise AnsibleLookupError( - 'Parameter {index} must be a dictionary, got {type}'.format( - index=index, type=type(term))) + f'Parameter {index} must be a dictionary, got {type(term)}') if len(term) != 1: raise AnsibleLookupError( - 'Parameter {index} must be a one-element dictionary, got {count} elements'.format( - index=index, count=len(term))) + f'Parameter {index} must be a one-element dictionary, got {len(term)} elements') k, v = list(term.items())[0] if k in vars_so_far: raise 
AnsibleLookupError( - 'The variable {key!r} appears more than once'.format(key=k)) + f'The variable {k!r} appears more than once') vars_so_far.add(k) - if isinstance(v, string_types): + if isinstance(v, str): data.append((k, v, None)) elif isinstance(v, (Sequence, Mapping)): data.append((k, None, v)) else: raise AnsibleLookupError( - 'Parameter {key!r} (index {index}) must have a value of type string, dictionary or list, got type {type}'.format( - index=index, key=k, type=type(v))) + f'Parameter {k!r} (index {index}) must have a value of type string, dictionary or list, got type {type(v)}') self.__process(result, data, 0, {}, templar, variables) return result diff --git a/plugins/lookup/dig.py b/plugins/lookup/dig.py index 291bac5e45..b36f02d7d4 100644 --- a/plugins/lookup/dig.py +++ b/plugins/lookup/dig.py @@ -1,63 +1,123 @@ -# -*- coding: utf-8 -*- -# (c) 2015, Jan-Piet Mens -# (c) 2017 Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +# Copyright (c) 2015, Jan-Piet Mens +# Copyright (c) 2017 Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later +from __future__ import annotations -DOCUMENTATION = ''' - name: dig - author: Jan-Piet Mens (@jpmens) - short_description: query DNS using the dnspython library - requirements: - - dnspython (python library, http://www.dnspython.org/) +DOCUMENTATION = r""" +name: dig +author: Jan-Piet Mens (@jpmens) +short_description: Query DNS using the dnspython library +requirements: + - dnspython (python library, http://www.dnspython.org/) +description: + - The dig lookup runs queries against DNS servers to retrieve DNS records for a specific name (FQDN - fully qualified domain + name). It is possible to lookup any DNS record in this manner. 
+ - There is a couple of different syntaxes that can be used to specify what record should be retrieved, and for which name. + It is also possible to explicitly specify the DNS server(s) to use for lookups. + - In its simplest form, the dig lookup plugin can be used to retrieve an IPv4 address (DNS A record) associated with FQDN. + - In addition to (default) A record, it is also possible to specify a different record type that should be queried. This + can be done by either passing-in additional parameter of format qtype=TYPE to the dig lookup, or by appending /TYPE to + the FQDN being queried. + - If multiple values are associated with the requested record, the results are returned as a comma-separated list. In + such cases you may want to pass option C(wantlist=true) to the lookup call, or alternatively use C(query) instead of C(lookup), + which results in the record values being returned as a list over which you can iterate later on. + - By default, the lookup relies on system-wide configured DNS servers for performing the query. It is also possible to + explicitly specify DNS servers to query using the @DNS_SERVER_1,DNS_SERVER_2,...,DNS_SERVER_N notation. This needs to + be passed-in as an additional parameter to the lookup. +options: + _terms: + description: Domain(s) to query. + type: list + elements: str + qtype: description: - - The dig lookup runs queries against DNS servers to retrieve DNS records for a specific name (FQDN - fully qualified domain name). - It is possible to lookup any DNS record in this manner. - - There is a couple of different syntaxes that can be used to specify what record should be retrieved, and for which name. - It is also possible to explicitly specify the DNS server(s) to use for lookups. - - In its simplest form, the dig lookup plugin can be used to retrieve an IPv4 address (DNS A record) associated with FQDN - - In addition to (default) A record, it is also possible to specify a different record type that should be queried. 
- This can be done by either passing-in additional parameter of format qtype=TYPE to the dig lookup, or by appending /TYPE to the FQDN being queried. - - If multiple values are associated with the requested record, the results will be returned as a comma-separated list. - In such cases you may want to pass option wantlist=True to the plugin, which will result in the record values being returned as a list - over which you can iterate later on. - - By default, the lookup will rely on system-wide configured DNS servers for performing the query. - It is also possible to explicitly specify DNS servers to query using the @DNS_SERVER_1,DNS_SERVER_2,...,DNS_SERVER_N notation. - This needs to be passed-in as an additional parameter to the lookup - options: - _terms: - description: Domain(s) to query. - qtype: - description: - - Record type to query. - - C(DLV) is deprecated and will be removed in community.general 6.0.0. - default: 'A' - choices: [A, ALL, AAAA, CNAME, DNAME, DLV, DNSKEY, DS, HINFO, LOC, MX, NAPTR, NS, NSEC3PARAM, PTR, RP, RRSIG, SOA, SPF, SRV, SSHFP, TLSA, TXT] - flat: - description: If 0 each record is returned as a dictionary, otherwise a string. - default: 1 - retry_servfail: - description: Retry a nameserver if it returns SERVFAIL. - default: false - type: bool - version_added: 3.6.0 - notes: - - ALL is not a record per-se, merely the listed fields are available for any record results you retrieve in the form of a dictionary. - - While the 'dig' lookup plugin supports anything which dnspython supports out of the box, only a subset can be converted into a dictionary. - - If you need to obtain the AAAA record (IPv6 address), you must specify the record type explicitly. - Syntax for specifying the record type is shown in the examples below. - - The trailing dot in most of the examples listed is purely optional, but is specified for completeness/correctness sake. -''' + - Record type to query. + - V(DLV) has been removed in community.general 6.0.0. 
+ - V(CAA) has been added in community.general 6.3.0. + type: str + default: 'A' + choices: + - A + - ALL + - AAAA + - CAA + - CNAME + - DNAME + - DNSKEY + - DS + - HINFO + - LOC + - MX + - NAPTR + - NS + - NSEC3PARAM + - PTR + - RP + - RRSIG + - SOA + - SPF + - SRV + - SSHFP + - TLSA + - TXT + flat: + description: If 0 each record is returned as a dictionary, otherwise a string. + type: int + default: 1 + retry_servfail: + description: Retry a nameserver if it returns SERVFAIL. + default: false + type: bool + version_added: 3.6.0 + fail_on_error: + description: + - Abort execution on lookup errors. + - The default for this option is likely to change to V(true) in the future. The current default, V(false), is used for + backwards compatibility, and results in empty strings or the string V(NXDOMAIN) in the result in case of errors. + default: false + type: bool + version_added: 5.4.0 + real_empty: + description: + - Return empty result without empty strings, and return empty list instead of V(NXDOMAIN). + - The default for this option is likely to change to V(true) in the future. + - This option is forced to V(true) if multiple domains to be queried are specified. + default: false + type: bool + version_added: 6.0.0 + class: + description: + - Class. + type: str + default: 'IN' + tcp: + description: Use TCP to lookup DNS records. + default: false + type: bool + version_added: 7.5.0 + port: + description: Use port as target port when looking up DNS records. + default: 53 + type: int + version_added: 9.5.0 +notes: + - V(ALL) is not a record in itself, merely the listed fields are available for any record results you retrieve in the form + of a dictionary. + - While the plugin supports anything which C(dnspython) supports out of the box, only a subset can be converted into a dictionary. + - If you need to obtain the AAAA record (IPv6 address), you must specify the record type explicitly. Syntax for specifying + the record type is shown in the examples below. 
+ - The trailing dot in most of the examples listed is purely optional, but is specified for completeness/correctness sake. +""" -EXAMPLES = """ +EXAMPLES = r""" - name: Simple A record (IPV4 address) lookup for example.com ansible.builtin.debug: msg: "{{ lookup('community.general.dig', 'example.com.')}}" - name: "The TXT record for example.org." ansible.builtin.debug: - msg: "{{ lookup('community.general.dig', 'example.org.', 'qtype=TXT') }}" + msg: "{{ lookup('community.general.dig', 'example.org.', qtype='TXT') }}" - name: "The TXT record for example.org, alternative syntax." ansible.builtin.debug: @@ -66,105 +126,123 @@ EXAMPLES = """ - name: use in a loop ansible.builtin.debug: msg: "MX record for gmail.com {{ item }}" - with_items: "{{ lookup('community.general.dig', 'gmail.com./MX', wantlist=True) }}" + with_items: "{{ lookup('community.general.dig', 'gmail.com./MX', wantlist=true) }}" + +- name: Lookup multiple names at once + ansible.builtin.debug: + msg: "A record found {{ item }}" + loop: "{{ query('community.general.dig', 'example.org.', 'example.com.', 'gmail.com.') }}" + +- name: Lookup multiple names at once (from list variable) + ansible.builtin.debug: + msg: "A record found {{ item }}" + loop: "{{ query('community.general.dig', *hosts) }}" + vars: + hosts: + - example.org. + - example.com. + - gmail.com. - ansible.builtin.debug: msg: "Reverse DNS for 192.0.2.5 is {{ lookup('community.general.dig', '192.0.2.5/PTR') }}" - ansible.builtin.debug: msg: "Reverse DNS for 192.0.2.5 is {{ lookup('community.general.dig', '5.2.0.192.in-addr.arpa./PTR') }}" - ansible.builtin.debug: - msg: "Reverse DNS for 192.0.2.5 is {{ lookup('community.general.dig', '5.2.0.192.in-addr.arpa.', 'qtype=PTR') }}" + msg: "Reverse DNS for 192.0.2.5 is {{ lookup('community.general.dig', '5.2.0.192.in-addr.arpa.', qtype='PTR') }}" - ansible.builtin.debug: msg: "Querying 198.51.100.23 for IPv4 address for example.com. 
produces {{ lookup('dig', 'example.com', '@198.51.100.23') }}" - ansible.builtin.debug: msg: "XMPP service for gmail.com. is available at {{ item.target }} on port {{ item.port }}" - with_items: "{{ lookup('community.general.dig', '_xmpp-server._tcp.gmail.com./SRV', 'flat=0', wantlist=True) }}" + with_items: "{{ lookup('community.general.dig', '_xmpp-server._tcp.gmail.com./SRV', flat=0, wantlist=true) }}" - name: Retry nameservers that return SERVFAIL ansible.builtin.debug: - msg: "{{ lookup('community.general.dig', 'example.org./A', 'retry_servfail=True') }}" + msg: "{{ lookup('community.general.dig', 'example.org./A', retry_servfail=true) }}" """ -RETURN = """ - _list: - description: - - List of composed strings or dictionaries with key and value - If a dictionary, fields shows the keys returned depending on query type - type: list - elements: raw - contains: - ALL: - description: - - owner, ttl, type - A: - description: - - address - AAAA: - description: - - address - CNAME: - description: - - target - DNAME: - description: - - target - DLV: - description: - - algorithm, digest_type, key_tag, digest - DNSKEY: - description: - - flags, algorithm, protocol, key - DS: - description: - - algorithm, digest_type, key_tag, digest - HINFO: - description: - - cpu, os - LOC: - description: - - latitude, longitude, altitude, size, horizontal_precision, vertical_precision - MX: - description: - - preference, exchange - NAPTR: - description: - - order, preference, flags, service, regexp, replacement - NS: - description: - - target - NSEC3PARAM: - description: - - algorithm, flags, iterations, salt - PTR: - description: - - target - RP: - description: - - mbox, txt - SOA: - description: - - mname, rname, serial, refresh, retry, expire, minimum - SPF: - description: - - strings - SRV: - description: - - priority, weight, port, target - SSHFP: - description: - - algorithm, fp_type, fingerprint - TLSA: - description: - - usage, selector, mtype, cert - TXT: - description: - - 
strings +RETURN = r""" +_list: + description: + - List of composed strings or of dictionaries, with fields depending + on query type. + type: list + elements: raw + contains: + ALL: + description: + - C(owner), C(ttl), C(type). + A: + description: + - C(address). + AAAA: + description: + - C(address). + CAA: + description: + - C(flags). + - C(tag). + - C(value). + version_added: 6.3.0 + CNAME: + description: + - C(target). + DNAME: + description: + - C(target). + DNSKEY: + description: + - C(flags), C(algorithm), C(protocol), C(key). + DS: + description: + - C(algorithm), C(digest_type), C(key_tag), C(digest). + HINFO: + description: + - C(cpu), C(os). + LOC: + description: + - C(latitude), C(longitude), C(altitude), C(size), C(horizontal_precision), C(vertical_precision). + MX: + description: + - C(preference), C(exchange). + NAPTR: + description: + - C(order), C(preference), C(flags), C(service), C(regexp), C(replacement). + NS: + description: + - C(target). + NSEC3PARAM: + description: + - C(algorithm), C(flags), C(iterations), C(salt). + PTR: + description: + - C(target). + RP: + description: + - C(mbox), C(txt). + SOA: + description: + - C(mname), C(rname), C(serial), C(refresh), C(retry), C(expire), C(minimum). + SPF: + description: + - C(strings). + SRV: + description: + - C(priority), C(weight), C(port), C(target). + SSHFP: + description: + - C(algorithm), C(fp_type), C(fingerprint). + TLSA: + description: + - C(usage), C(selector), C(mtype), C(cert). + TXT: + description: + - C(strings). 
""" from ansible.errors import AnsibleError from ansible.plugins.lookup import LookupBase -from ansible.module_utils.common.text.converters import to_native +from ansible.module_utils.parsing.convert_bool import boolean from ansible.utils.display import Display import socket @@ -174,7 +252,7 @@ try: import dns.resolver import dns.reversename import dns.rdataclass - from dns.rdatatype import (A, AAAA, CNAME, DLV, DNAME, DNSKEY, DS, HINFO, LOC, + from dns.rdatatype import (A, AAAA, CAA, CNAME, DNAME, DNSKEY, DS, HINFO, LOC, MX, NAPTR, NS, NSEC3PARAM, PTR, RP, SOA, SPF, SRV, SSHFP, TLSA, TXT) HAVE_DNS = True except ImportError: @@ -194,9 +272,9 @@ def make_rdata_dict(rdata): supported_types = { A: ['address'], AAAA: ['address'], + CAA: ['flags', 'tag', 'value'], CNAME: ['target'], DNAME: ['target'], - DLV: ['algorithm', 'digest_type', 'key_tag', 'digest'], DNSKEY: ['flags', 'algorithm', 'protocol', 'key'], DS: ['algorithm', 'digest_type', 'key_tag', 'digest'], HINFO: ['cpu', 'os'], @@ -207,7 +285,7 @@ def make_rdata_dict(rdata): NSEC3PARAM: ['algorithm', 'flags', 'iterations', 'salt'], PTR: ['target'], RP: ['mbox', 'txt'], - # RRSIG: ['algorithm', 'labels', 'original_ttl', 'expiration', 'inception', 'signature'], + # RRSIG: ['type_covered', 'algorithm', 'labels', 'original_ttl', 'expiration', 'inception', 'key_tag', 'signer', 'signature'], SOA: ['mname', 'rname', 'serial', 'refresh', 'retry', 'expire', 'minimum'], SPF: ['strings'], SRV: ['priority', 'weight', 'port', 'target'], @@ -226,10 +304,10 @@ def make_rdata_dict(rdata): if isinstance(val, dns.name.Name): val = dns.name.Name.to_text(val) - if rdata.rdtype == DLV and f == 'digest': - val = dns.rdata._hexify(rdata.digest).replace(' ', '') if rdata.rdtype == DS and f == 'digest': val = dns.rdata._hexify(rdata.digest).replace(' ', '') + if rdata.rdtype == DNSKEY and f == 'algorithm': + val = int(val) if rdata.rdtype == DNSKEY and f == 'key': val = dns.rdata._base64ify(rdata.key).replace(' ', '') if rdata.rdtype == 
NSEC3PARAM and f == 'salt': @@ -267,25 +345,34 @@ class LookupModule(LookupBase): ... flat=0 # returns a dict; default is 1 == string ''' - if HAVE_DNS is False: raise AnsibleError("The dig lookup requires the python 'dnspython' library and it is not installed") + self.set_options(var_options=variables, direct=kwargs) + # Create Resolver object so that we can set NS if necessary myres = dns.resolver.Resolver(configure=True) edns_size = 4096 myres.use_edns(0, ednsflags=dns.flags.DO, payload=edns_size) - domain = None - qtype = 'A' - flat = True - rdclass = dns.rdataclass.from_text('IN') + domains = [] + nameservers = [] + qtype = self.get_option('qtype') + flat = self.get_option('flat') + fail_on_error = self.get_option('fail_on_error') + real_empty = self.get_option('real_empty') + tcp = self.get_option('tcp') + port = self.get_option('port') + try: + rdclass = dns.rdataclass.from_text(self.get_option('class')) + except Exception as e: + raise AnsibleError(f"dns lookup illegal CLASS: {e}") + myres.retry_servfail = self.get_option('retry_servfail') for t in terms: if t.startswith('@'): # e.g. "@10.0.1.2,192.0.2.1" is ok. nsset = t[1:].split(',') for ns in nsset: - nameservers = [] # Check if we have a valid IP address. If so, use that, otherwise # try to resolve name to address using system's resolver. If that # fails we bail out. 
@@ -297,12 +384,11 @@ class LookupModule(LookupBase): nsaddr = dns.resolver.query(ns)[0].address nameservers.append(nsaddr) except Exception as e: - raise AnsibleError("dns lookup NS: %s" % to_native(e)) - myres.nameservers = nameservers + raise AnsibleError(f"dns lookup NS: {e}") continue if '=' in t: try: - opt, arg = t.split('=') + opt, arg = t.split('=', 1) except Exception: pass @@ -314,66 +400,86 @@ class LookupModule(LookupBase): try: rdclass = dns.rdataclass.from_text(arg) except Exception as e: - raise AnsibleError("dns lookup illegal CLASS: %s" % to_native(e)) + raise AnsibleError(f"dns lookup illegal CLASS: {e}") elif opt == 'retry_servfail': - myres.retry_servfail = bool(arg) + myres.retry_servfail = boolean(arg) + elif opt == 'fail_on_error': + fail_on_error = boolean(arg) + elif opt == 'real_empty': + real_empty = boolean(arg) + elif opt == 'tcp': + tcp = boolean(arg) continue if '/' in t: try: domain, qtype = t.split('/') + domains.append(domain) except Exception: - domain = t + domains.append(t) else: - domain = t + domains.append(t) - # print "--- domain = {0} qtype={1} rdclass={2}".format(domain, qtype, rdclass) + # print "--- domain = {domain} qtype={qtype} rdclass={rdclass}" + + if port: + myres.port = port + if len(nameservers) > 0: + myres.nameservers = nameservers + + if qtype.upper() == 'PTR': + reversed_domains = [] + for domain in domains: + try: + n = dns.reversename.from_address(domain) + reversed_domains.append(n.to_text()) + except dns.exception.SyntaxError: + pass + except Exception as e: + raise AnsibleError(f"dns.reversename unhandled exception {e}") + domains = reversed_domains + + if len(domains) > 1: + real_empty = True ret = [] - if qtype.upper() == 'DLV': - display.deprecated('The DLV record type has been decommissioned in 2017 and support for' - ' it will be removed from community.general 6.0.0', - version='6.0.0', collection_name='community.general') - - if qtype.upper() == 'PTR': + for domain in domains: try: - n = 
dns.reversename.from_address(domain) - domain = n.to_text() - except dns.exception.SyntaxError: - pass - except Exception as e: - raise AnsibleError("dns.reversename unhandled exception %s" % to_native(e)) + answers = myres.query(domain, qtype, rdclass=rdclass, tcp=tcp) + for rdata in answers: + s = rdata.to_text() + if qtype.upper() == 'TXT': + s = s[1:-1] # Strip outside quotes on TXT rdata - try: - answers = myres.query(domain, qtype, rdclass=rdclass) - for rdata in answers: - s = rdata.to_text() - if qtype.upper() == 'TXT': - s = s[1:-1] # Strip outside quotes on TXT rdata + if flat: + ret.append(s) + else: + try: + rd = make_rdata_dict(rdata) + rd['owner'] = answers.canonical_name.to_text() + rd['type'] = dns.rdatatype.to_text(rdata.rdtype) + rd['ttl'] = answers.rrset.ttl + rd['class'] = dns.rdataclass.to_text(rdata.rdclass) - if flat: - ret.append(s) - else: - try: - rd = make_rdata_dict(rdata) - rd['owner'] = answers.canonical_name.to_text() - rd['type'] = dns.rdatatype.to_text(rdata.rdtype) - rd['ttl'] = answers.rrset.ttl - rd['class'] = dns.rdataclass.to_text(rdata.rdclass) + ret.append(rd) + except Exception as err: + if fail_on_error: + raise AnsibleError(f"Lookup failed: {err}") + ret.append(str(err)) - ret.append(rd) - except Exception as e: - ret.append(str(e)) - - except dns.resolver.NXDOMAIN: - ret.append('NXDOMAIN') - except dns.resolver.NoAnswer: - ret.append("") - except dns.resolver.Timeout: - ret.append('') - except dns.exception.DNSException as e: - raise AnsibleError("dns.resolver unhandled exception %s" % to_native(e)) + except dns.resolver.NXDOMAIN as err: + if fail_on_error: + raise AnsibleError(f"Lookup failed: {err}") + if not real_empty: + ret.append('NXDOMAIN') + except (dns.resolver.NoAnswer, dns.resolver.Timeout, dns.resolver.NoNameservers) as err: + if fail_on_error: + raise AnsibleError(f"Lookup failed: {err}") + if not real_empty: + ret.append("") + except dns.exception.DNSException as err: + raise AnsibleError(f"dns.resolver 
unhandled exception {err}") return ret diff --git a/plugins/lookup/dnstxt.py b/plugins/lookup/dnstxt.py index e724f8c8f7..d83f08bb09 100644 --- a/plugins/lookup/dnstxt.py +++ b/plugins/lookup/dnstxt.py @@ -1,27 +1,33 @@ -# -*- coding: utf-8 -*- -# (c) 2012, Jan-Piet Mens -# (c) 2017 Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +# Copyright (c) 2012, Jan-Piet Mens +# Copyright (c) 2017 Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later +from __future__ import annotations -DOCUMENTATION = ''' - name: dnstxt - author: Jan-Piet Mens (@jpmens) - short_description: query a domain(s)'s DNS txt fields - requirements: - - dns/dns.resolver (python library) +DOCUMENTATION = r""" +name: dnstxt +author: Jan-Piet Mens (@jpmens) +short_description: Query a domain(s)'s DNS txt fields +requirements: + - dns/dns.resolver (python library) +description: + - Uses a python library to return the DNS TXT record for a domain. +options: + _terms: + description: Domain or list of domains to query TXT records from. + required: true + type: list + elements: string + real_empty: description: - - Uses a python library to return the DNS TXT record for a domain. - options: - _terms: - description: domain or list of domains to query TXT records from - required: True - type: list - elements: string -''' + - Return empty result without empty strings, and return empty list instead of V(NXDOMAIN). + - The default for this option is likely to change to V(true) in the future. 
+ default: false + type: bool + version_added: 6.0.0 +""" -EXAMPLES = """ +EXAMPLES = r""" - name: show txt entry ansible.builtin.debug: msg: "{{lookup('community.general.dnstxt', ['test.example.com'])}}" @@ -40,11 +46,11 @@ EXAMPLES = """ with_community.general.dnstxt: "{{lookup('community.general.dnstxt', ['test.example.com']).split(',')}}" """ -RETURN = """ - _list: - description: - - values returned by the DNS TXT record. - type: list +RETURN = r""" +_list: + description: + - Values returned by the DNS TXT record. + type: list """ HAVE_DNS = False @@ -56,7 +62,6 @@ except ImportError: pass from ansible.errors import AnsibleError -from ansible.module_utils.common.text.converters import to_native from ansible.plugins.lookup import LookupBase # ============================================================== @@ -70,10 +75,13 @@ from ansible.plugins.lookup import LookupBase class LookupModule(LookupBase): def run(self, terms, variables=None, **kwargs): + self.set_options(var_options=variables, direct=kwargs) if HAVE_DNS is False: raise AnsibleError("Can't LOOKUP(dnstxt): module dns.resolver is not installed") + real_empty = self.get_option('real_empty') + ret = [] for term in terms: domain = term.split()[0] @@ -85,13 +93,19 @@ class LookupModule(LookupBase): string.append(s[1:-1]) # Strip outside quotes on TXT rdata except dns.resolver.NXDOMAIN: + if real_empty: + continue string = 'NXDOMAIN' except dns.resolver.Timeout: + if real_empty: + continue string = '' except dns.resolver.NoAnswer: + if real_empty: + continue string = '' except DNSException as e: - raise AnsibleError("dns.resolver unhandled exception %s" % to_native(e)) + raise AnsibleError(f"dns.resolver unhandled exception {e}") ret.append(''.join(string)) diff --git a/plugins/lookup/dsv.py b/plugins/lookup/dsv.py index 1cd9041a2e..594dd40f4e 100644 --- a/plugins/lookup/dsv.py +++ b/plugins/lookup/dsv.py @@ -1,9 +1,8 @@ -# -*- coding: utf-8 -*- -# Copyright: (c) 2020, Adam Migus -# GNU General Public 
License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import absolute_import, division, print_function +# Copyright (c) 2020, Adam Migus +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later +from __future__ import annotations -__metaclass__ = type DOCUMENTATION = r""" name: dsv @@ -11,76 +10,78 @@ author: Adam Migus (@amigus) short_description: Get secrets from Thycotic DevOps Secrets Vault version_added: 1.0.0 description: - - Uses the Thycotic DevOps Secrets Vault Python SDK to get Secrets from a - DSV I(tenant) using a I(client_id) and I(client_secret). + - Uses the Thycotic DevOps Secrets Vault Python SDK to get Secrets from a DSV O(tenant) using a O(client_id) and O(client_secret). requirements: - - python-dsv-sdk - https://pypi.org/project/python-dsv-sdk/ + - python-dsv-sdk - https://pypi.org/project/python-dsv-sdk/ options: - _terms: - description: The path to the secret, e.g. C(/staging/servers/web1). - required: true - tenant: - description: The first format parameter in the default I(url_template). - env: - - name: DSV_TENANT - ini: - - section: dsv_lookup - key: tenant - required: true - tld: - default: com - description: The top-level domain of the tenant; the second format - parameter in the default I(url_template). - env: - - name: DSV_TLD - ini: - - section: dsv_lookup - key: tld - required: false - client_id: - description: The client_id with which to request the Access Grant. - env: - - name: DSV_CLIENT_ID - ini: - - section: dsv_lookup - key: client_id - required: true - client_secret: - description: The client secret associated with the specific I(client_id). 
- env: - - name: DSV_CLIENT_SECRET - ini: - - section: dsv_lookup - key: client_secret - required: true - url_template: - default: https://{}.secretsvaultcloud.{}/v1 - description: The path to prepend to the base URL to form a valid REST - API request. - env: - - name: DSV_URL_TEMPLATE - ini: - - section: dsv_lookup - key: url_template - required: false + _terms: + description: The path to the secret, for example V(/staging/servers/web1). + required: true + tenant: + description: The first format parameter in the default O(url_template). + type: string + env: + - name: DSV_TENANT + ini: + - section: dsv_lookup + key: tenant + required: true + tld: + default: com + description: The top-level domain of the tenant; the second format parameter in the default O(url_template). + type: string + env: + - name: DSV_TLD + ini: + - section: dsv_lookup + key: tld + required: false + client_id: + description: The client_id with which to request the Access Grant. + type: string + env: + - name: DSV_CLIENT_ID + ini: + - section: dsv_lookup + key: client_id + required: true + client_secret: + description: The client secret associated with the specific O(client_id). + type: string + env: + - name: DSV_CLIENT_SECRET + ini: + - section: dsv_lookup + key: client_secret + required: true + url_template: + default: https://{}.secretsvaultcloud.{}/v1 + description: The path to prepend to the base URL to form a valid REST API request. + type: string + env: + - name: DSV_URL_TEMPLATE + ini: + - section: dsv_lookup + key: url_template + required: false """ RETURN = r""" _list: - description: - - One or more JSON responses to C(GET /secrets/{path}). - - See U(https://dsv.thycotic.com/api/index.html#operation/getSecret). - type: list - elements: dict + description: + - One or more JSON responses to C(GET /secrets/{path}). + - See U(https://dsv.thycotic.com/api/index.html#operation/getSecret). 
+ type: list + elements: dict """ EXAMPLES = r""" - hosts: localhost vars: - secret: "{{ lookup('community.general.dsv', '/test/secret') }}" + secret: "{{ lookup('community.general.dsv', '/test/secret') }}" tasks: - - ansible.builtin.debug: - msg: 'the password is {{ secret["data"]["password"] }}' + - ansible.builtin.debug: + msg: 'the password is {{ secret["data"]["password"] }}' """ from ansible.errors import AnsibleError, AnsibleOptionsError @@ -122,23 +123,24 @@ class LookupModule(LookupBase): "tenant": self.get_option("tenant"), "client_id": self.get_option("client_id"), "client_secret": self.get_option("client_secret"), + "tld": self.get_option("tld"), "url_template": self.get_option("url_template"), } ) result = [] for term in terms: - display.debug("dsv_lookup term: %s" % term) + display.debug(f"dsv_lookup term: {term}") try: path = term.lstrip("[/:]") if path == "": - raise AnsibleOptionsError("Invalid secret path: %s" % term) + raise AnsibleOptionsError(f"Invalid secret path: {term}") - display.vvv(u"DevOps Secrets Vault GET /secrets/%s" % path) + display.vvv(f"DevOps Secrets Vault GET /secrets/{path}") result.append(vault.get_secret_json(path)) except SecretsVaultError as error: raise AnsibleError( - "DevOps Secrets Vault lookup failure: %s" % error.message + f"DevOps Secrets Vault lookup failure: {error.message}" ) return result diff --git a/plugins/lookup/etcd.py b/plugins/lookup/etcd.py index 0c81d0215b..65a9d23d2f 100644 --- a/plugins/lookup/etcd.py +++ b/plugins/lookup/etcd.py @@ -1,59 +1,51 @@ -# -*- coding: utf-8 -*- -# (c) 2013, Jan-Piet Mens +# Copyright (c) 2013, Jan-Piet Mens # (m) 2016, Mihai Moldovanu # (m) 2017, Juan Manuel Parrilla -# -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. 
-# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -DOCUMENTATION = ''' - author: - - Jan-Piet Mens (@jpmens) - name: etcd - short_description: get info from an etcd server +from __future__ import annotations + +DOCUMENTATION = r""" +author: + - Jan-Piet Mens (@jpmens) +name: etcd +short_description: Get info from an etcd server +description: + - Retrieves data from an etcd server. +options: + _terms: description: - - Retrieves data from an etcd server - options: - _terms: - description: - - the list of keys to lookup on the etcd server - type: list - elements: string - required: True - url: - description: - - Environment variable with the url for the etcd server - default: 'http://127.0.0.1:4001' - env: - - name: ANSIBLE_ETCD_URL - version: - description: - - Environment variable with the etcd protocol version - default: 'v1' - env: - - name: ANSIBLE_ETCD_VERSION - validate_certs: - description: - - toggle checking that the ssl certificates are valid, you normally only want to turn this off with self-signed certs. - default: True - type: boolean -''' + - The list of keys to lookup on the etcd server. + type: list + elements: string + required: true + url: + description: + - Environment variable with the URL for the etcd server. + type: string + default: 'http://127.0.0.1:4001' + env: + - name: ANSIBLE_ETCD_URL + version: + description: + - Environment variable with the etcd protocol version. 
+ type: string + default: 'v1' + env: + - name: ANSIBLE_ETCD_VERSION + validate_certs: + description: + - Toggle checking that the ssl certificates are valid, you normally only want to turn this off with self-signed certs. + default: true + type: boolean +seealso: + - module: community.general.etcd3 + - plugin: community.general.etcd3 + plugin_type: lookup +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: "a value from a locally running etcd" ansible.builtin.debug: msg: "{{ lookup('community.general.etcd', 'foo/bar') }}" @@ -62,18 +54,18 @@ EXAMPLES = ''' ansible.builtin.debug: msg: "{{ lookup('community.general.etcd', 'foo', 'bar', 'baz') }}" -- name: "since Ansible 2.5 you can set server options inline" +- name: "you can set server options inline" ansible.builtin.debug: msg: "{{ lookup('community.general.etcd', 'foo', version='v2', url='http://192.168.0.27:4001') }}" -''' +""" -RETURN = ''' - _raw: - description: - - list of values associated with input keys - type: list - elements: string -''' +RETURN = r""" +_raw: + description: + - List of values associated with input keys. 
+ type: list + elements: string +""" import json @@ -110,7 +102,7 @@ class Etcd: def __init__(self, url, version, validate_certs): self.url = url self.version = version - self.baseurl = '%s/%s/keys' % (self.url, self.version) + self.baseurl = f'{self.url}/{self.version}/keys' self.validate_certs = validate_certs def _parse_node(self, node): @@ -131,7 +123,7 @@ class Etcd: return path def get(self, key): - url = "%s/%s?recursive=true" % (self.baseurl, key) + url = f"{self.baseurl}/{key}?recursive=true" data = None value = {} try: diff --git a/plugins/lookup/etcd3.py b/plugins/lookup/etcd3.py index a34fae7bf3..0312f17127 100644 --- a/plugins/lookup/etcd3.py +++ b/plugins/lookup/etcd3.py @@ -1,106 +1,105 @@ -# -*- coding: utf-8 -*- # -# (c) 2020, SCC France, Eric Belhomme -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# Copyright (c) 2020, SCC France, Eric Belhomme +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = ''' - author: - - Eric Belhomme (@eric-belhomme) - version_added: '0.2.0' - name: etcd3 - short_description: Get key values from etcd3 server +DOCUMENTATION = r""" +author: + - Eric Belhomme (@eric-belhomme) +version_added: '0.2.0' +name: etcd3 +short_description: Get key values from etcd3 server +description: + - Retrieves key values and/or key prefixes from etcd3 server using its native gRPC API. + - Try to reuse M(community.general.etcd3) options for connection parameters, but add support for some E(ETCDCTL_*) environment + variables. + - See U(https://github.com/etcd-io/etcd/tree/master/Documentation/op-guide) for etcd overview. +options: + _terms: description: - - Retrieves key values and/or key prefixes from etcd3 server using its native gRPC API. 
- - Try to reuse M(community.general.etcd3) options for connection parameters, but add support for some C(ETCDCTL_*) environment variables. - - See U(https://github.com/etcd-io/etcd/tree/master/Documentation/op-guide) for etcd overview. + - The list of keys (or key prefixes) to look up on the etcd3 server. + type: list + elements: str + required: true + prefix: + description: + - Look for key or prefix key. + type: bool + default: false + endpoints: + description: + - Counterpart of E(ETCDCTL_ENDPOINTS) environment variable. Specify the etcd3 connection with an URL form, for example + V(https://hostname:2379), or V(:) form. + - The V(host) part is overwritten by O(host) option, if defined. + - The V(port) part is overwritten by O(port) option, if defined. + env: + - name: ETCDCTL_ENDPOINTS + default: '127.0.0.1:2379' + type: str + host: + description: + - Etcd3 listening client host. + - Takes precedence over O(endpoints). + type: str + port: + description: + - Etcd3 listening client port. + - Takes precedence over O(endpoints). + type: int + ca_cert: + description: + - Etcd3 CA authority. + env: + - name: ETCDCTL_CACERT + type: str + cert_cert: + description: + - Etcd3 client certificate. + env: + - name: ETCDCTL_CERT + type: str + cert_key: + description: + - Etcd3 client private key. + env: + - name: ETCDCTL_KEY + type: str + timeout: + description: + - Client timeout. + default: 60 + env: + - name: ETCDCTL_DIAL_TIMEOUT + type: int + user: + description: + - Authenticated user name. + env: + - name: ETCDCTL_USER + type: str + password: + description: + - Authenticated user password. + env: + - name: ETCDCTL_PASSWORD + type: str - options: - _terms: - description: - - The list of keys (or key prefixes) to look up on the etcd3 server. - type: list - elements: str - required: True - prefix: - description: - - Look for key or prefix key. - type: bool - default: False - endpoints: - description: - - Counterpart of C(ETCDCTL_ENDPOINTS) environment variable. 
- Specify the etcd3 connection with and URL form eg. C(https://hostname:2379) or C(:) form. - - The C(host) part is overwritten by I(host) option, if defined. - - The C(port) part is overwritten by I(port) option, if defined. - env: - - name: ETCDCTL_ENDPOINTS - default: '127.0.0.1:2379' - type: str - host: - description: - - etcd3 listening client host. - - Takes precedence over I(endpoints). - type: str - port: - description: - - etcd3 listening client port. - - Takes precedence over I(endpoints). - type: int - ca_cert: - description: - - etcd3 CA authority. - env: - - name: ETCDCTL_CACERT - type: str - cert_cert: - description: - - etcd3 client certificate. - env: - - name: ETCDCTL_CERT - type: str - cert_key: - description: - - etcd3 client private key. - env: - - name: ETCDCTL_KEY - type: str - timeout: - description: - - Client timeout. - default: 60 - env: - - name: ETCDCTL_DIAL_TIMEOUT - type: int - user: - description: - - Authenticated user name. - env: - - name: ETCDCTL_USER - type: str - password: - description: - - Authenticated user password. - env: - - name: ETCDCTL_PASSWORD - type: str +notes: + - O(host) and O(port) options take precedence over O(endpoints) option. + - The recommended way to connect to etcd3 server is using E(ETCDCTL_ENDPOINTS) environment variable and keep O(endpoints), + O(host), and O(port) unused. +seealso: + - module: community.general.etcd3 + - plugin: community.general.etcd + plugin_type: lookup - notes: - - I(host) and I(port) options take precedence over (endpoints) option. - - The recommended way to connect to etcd3 server is using C(ETCDCTL_ENDPOINT) - environment variable and keep I(endpoints), I(host), and I(port) unused. - seealso: - - module: community.general.etcd3 - - ref: ansible_collections.community.general.etcd_lookup - description: The etcd v2 lookup. 
+requirements: + - "etcd3 >= 0.10" +""" - requirements: - - "etcd3 >= 0.10" -''' - -EXAMPLES = ''' +EXAMPLES = r""" - name: "a value from a locally running etcd" ansible.builtin.debug: msg: "{{ lookup('community.general.etcd3', 'foo/bar') }}" @@ -116,31 +115,30 @@ EXAMPLES = ''' - name: "connect to etcd3 with a client certificate" ansible.builtin.debug: msg: "{{ lookup('community.general.etcd3', 'foo/bar', cert_cert='/etc/ssl/etcd/client.pem', cert_key='/etc/ssl/etcd/client.key') }}" -''' +""" -RETURN = ''' - _raw: - description: - - List of keys and associated values. - type: list - elements: dict - contains: - key: - description: The element's key. - type: str - value: - description: The element's value. - type: str -''' +RETURN = r""" +_raw: + description: + - List of keys and associated values. + type: list + elements: dict + contains: + key: + description: The element's key. + type: str + value: + description: The element's value. + type: str +""" import re -from ansible.plugins.lookup import LookupBase -from ansible.utils.display import Display +from ansible.errors import AnsibleLookupError from ansible.module_utils.basic import missing_required_lib from ansible.module_utils.common.text.converters import to_native from ansible.plugins.lookup import LookupBase -from ansible.errors import AnsibleError, AnsibleLookupError +from ansible.utils.display import Display try: import etcd3 @@ -168,7 +166,7 @@ def etcd3_client(client_params): etcd = etcd3.client(**client_params) etcd.status() except Exception as exp: - raise AnsibleLookupError('Cannot connect to etcd cluster: %s' % (to_native(exp))) + raise AnsibleLookupError(f'Cannot connect to etcd cluster: {exp}') return etcd @@ -204,7 +202,7 @@ class LookupModule(LookupBase): cnx_log = dict(client_params) if 'password' in cnx_log: cnx_log['password'] = '' - display.verbose("etcd3 connection parameters: %s" % cnx_log) + display.verbose(f"etcd3 connection parameters: {cnx_log}") # connect to etcd3 server etcd = 
etcd3_client(client_params) @@ -218,12 +216,12 @@ class LookupModule(LookupBase): if val and meta: ret.append({'key': to_native(meta.key), 'value': to_native(val)}) except Exception as exp: - display.warning('Caught except during etcd3.get_prefix: %s' % (to_native(exp))) + display.warning(f'Caught except during etcd3.get_prefix: {exp}') else: try: val, meta = etcd.get(term) if val and meta: ret.append({'key': to_native(meta.key), 'value': to_native(val)}) except Exception as exp: - display.warning('Caught except during etcd3.get: %s' % (to_native(exp))) + display.warning(f'Caught except during etcd3.get: {exp}') return ret diff --git a/plugins/lookup/filetree.py b/plugins/lookup/filetree.py index 1c83486b05..49326edb87 100644 --- a/plugins/lookup/filetree.py +++ b/plugins/lookup/filetree.py @@ -1,24 +1,26 @@ -# -*- coding: utf-8 -*- -# (c) 2016 Dag Wieers -# (c) 2017 Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +# Copyright (c) 2016 Dag Wieers +# Copyright (c) 2017 Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later +from __future__ import annotations -DOCUMENTATION = r''' +DOCUMENTATION = r""" name: filetree author: Dag Wieers (@dagwieers) -short_description: recursively match all files in a directory tree +short_description: Recursively match all files in a directory tree description: -- This lookup enables you to template a complete tree of files on a target system while retaining permissions and ownership. -- Supports directories, files and symlinks, including SELinux and other file properties. -- If you provide more than one path, it will implement a first_found logic, and will not process entries it already processed in previous paths. 
- This enables merging different trees in order of importance, or add role_vars to specific paths to influence different instances of the same role. + - This lookup enables you to template a complete tree of files on a target system while retaining permissions and ownership. + - Supports directories, files and symlinks, including SELinux and other file properties. + - If you provide more than one path, it implements a first_found logic, and does not process entries it already processed + in previous paths. This enables merging different trees in order of importance, or add role_vars to specific paths to + influence different instances of the same role. options: _terms: - description: path(s) of files to read - required: True -''' + description: Path(s) of files to read. + required: true + type: list + elements: string +""" EXAMPLES = r""" - name: Create directories @@ -45,7 +47,7 @@ EXAMPLES = r""" dest: /web/{{ item.path }} state: link follow: false # avoid corrupting target files if the link already exists - force: yes + force: true mode: '{{ item.mode }}' with_community.general.filetree: web/ when: item.state == 'link' @@ -56,61 +58,61 @@ EXAMPLES = r""" """ RETURN = r""" - _raw: - description: List of dictionaries with file information. - type: list - elements: dict - contains: - src: - description: - - Full path to file. - - Not returned when I(item.state) is set to C(directory). - type: path - root: - description: Allows filtering by original location. - type: path - path: - description: Contains the relative path to root. - type: path - mode: - description: The permissions the resulting file or directory. - type: str - state: - description: TODO - type: str - owner: - description: Name of the user that owns the file/directory. - type: raw - group: - description: Name of the group that owns the file/directory. - type: raw - seuser: - description: The user part of the SELinux file context. 
- type: raw - serole: - description: The role part of the SELinux file context. - type: raw - setype: - description: The type part of the SELinux file context. - type: raw - selevel: - description: The level part of the SELinux file context. - type: raw - uid: - description: Owner ID of the file/directory. - type: int - gid: - description: Group ID of the file/directory. - type: int - size: - description: Size of the target. - type: int - mtime: - description: Time of last modification. - type: float - ctime: - description: Time of last metadata update or creation (depends on OS). - type: float +_raw: + description: List of dictionaries with file information. + type: list + elements: dict + contains: + src: + description: + - Full path to file. + - Not returned when RV(_raw[].state) is set to V(directory). + type: path + root: + description: Allows filtering by original location. + type: path + path: + description: Contains the relative path to root. + type: path + mode: + description: The permissions the resulting file or directory. + type: str + state: + description: TODO. + type: str + owner: + description: Name of the user that owns the file/directory. + type: raw + group: + description: Name of the group that owns the file/directory. + type: raw + seuser: + description: The user part of the SELinux file context. + type: raw + serole: + description: The role part of the SELinux file context. + type: raw + setype: + description: The type part of the SELinux file context. + type: raw + selevel: + description: The level part of the SELinux file context. + type: raw + uid: + description: Owner ID of the file/directory. + type: int + gid: + description: Group ID of the file/directory. + type: int + size: + description: Size of the target. + type: int + mtime: + description: Time of last modification. + type: float + ctime: + description: Time of last metadata update or creation (depends on OS). 
+ type: float """ import os import pwd @@ -155,7 +157,7 @@ def file_props(root, path): try: st = os.lstat(abspath) except OSError as e: - display.warning('filetree: Error using stat() on path %s (%s)' % (abspath, e)) + display.warning(f'filetree: Error using stat() on path {abspath} ({e})') return None ret = dict(root=root, path=path) @@ -169,7 +171,7 @@ def file_props(root, path): ret['state'] = 'file' ret['src'] = abspath else: - display.warning('filetree: Error file type of %s is not supported' % abspath) + display.warning(f'filetree: Error file type of {abspath} is not supported') return None ret['uid'] = st.st_uid @@ -182,7 +184,7 @@ def file_props(root, path): ret['group'] = to_text(grp.getgrgid(st.st_gid).gr_name) except KeyError: ret['group'] = st.st_gid - ret['mode'] = '0%03o' % (stat.S_IMODE(st.st_mode)) + ret['mode'] = f'0{stat.S_IMODE(st.st_mode):03o}' ret['size'] = st.st_size ret['mtime'] = st.st_mtime ret['ctime'] = st.st_ctime @@ -200,6 +202,8 @@ def file_props(root, path): class LookupModule(LookupBase): def run(self, terms, variables=None, **kwargs): + self.set_options(var_options=variables, direct=kwargs) + basedir = self.get_basedir(variables) ret = [] @@ -207,7 +211,7 @@ class LookupModule(LookupBase): term_file = os.path.basename(term) dwimmed_path = self._loader.path_dwim_relative(basedir, 'files', os.path.dirname(term)) path = os.path.join(dwimmed_path, term_file) - display.debug("Walking '{0}'".format(path)) + display.debug(f"Walking '{path}'") for root, dirs, files in os.walk(path, topdown=True): for entry in dirs + files: relpath = os.path.relpath(os.path.join(root, entry), path) @@ -216,7 +220,7 @@ class LookupModule(LookupBase): if relpath not in [entry['path'] for entry in ret]: props = file_props(path, relpath) if props is not None: - display.debug(" found '{0}'".format(os.path.join(path, relpath))) + display.debug(f" found '{os.path.join(path, relpath)}'") ret.append(props) return ret diff --git a/plugins/lookup/flattened.py 
b/plugins/lookup/flattened.py index edc546ff83..0ed92afa27 100644 --- a/plugins/lookup/flattened.py +++ b/plugins/lookup/flattened.py @@ -1,39 +1,40 @@ -# -*- coding: utf-8 -*- -# (c) 2013, Serge van Ginderachter -# (c) 2017 Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +# Copyright (c) 2013, Serge van Ginderachter +# Copyright (c) 2017 Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later +from __future__ import annotations -DOCUMENTATION = ''' - name: flattened - author: Serge van Ginderachter (!UNKNOWN) - short_description: return single list completely flattened - description: - - given one or more lists, this lookup will flatten any list elements found recursively until only 1 list is left. - options: - _terms: - description: lists to flatten - required: True - notes: - - unlike 'items' which only flattens 1 level, this plugin will continue to flatten until it cannot find lists anymore. - - aka highlander plugin, there can only be one (list). -''' +DOCUMENTATION = r""" +name: flattened +author: Serge van Ginderachter (!UNKNOWN) +short_description: Return single list completely flattened +description: + - Given one or more lists, this lookup flattens any list elements found recursively until only 1 list is left. +options: + _terms: + description: Lists to flatten. + type: list + elements: raw + required: true +notes: + - Unlike the P(ansible.builtin.items#lookup) lookup which only flattens 1 level, this plugin continues to flatten until + it cannot find lists anymore. + - Aka highlander plugin, there can only be one (list). 
+""" -EXAMPLES = """ +EXAMPLES = r""" - name: "'unnest' all elements into single list" ansible.builtin.debug: msg: "all in one list {{lookup('community.general.flattened', [1,2,3,[5,6]], ['a','b','c'], [[5,6,1,3], [34,'a','b','c']])}}" """ -RETURN = """ - _raw: - description: - - flattened list - type: list +RETURN = r""" +_raw: + description: + - Flattened list. + type: list """ from ansible.errors import AnsibleError -from ansible.module_utils.six import string_types from ansible.plugins.lookup import LookupBase from ansible.utils.listify import listify_lookup_plugin_terms @@ -61,15 +62,15 @@ class LookupModule(LookupBase): # ignore undefined items break - if isinstance(term, string_types): + if isinstance(term, str): # convert a variable to a list - term2 = listify_lookup_plugin_terms(term, templar=self._templar, loader=self._loader) + term2 = listify_lookup_plugin_terms(term, templar=self._templar) # but avoid converting a plain string to a list of one string if term2 != [term]: term = term2 if isinstance(term, list): - # if it's a list, check recursively for items that are a list + # if it is a list, check recursively for items that are a list term = self._do_flatten(term, variables) ret.extend(term) else: @@ -77,9 +78,10 @@ class LookupModule(LookupBase): return ret - def run(self, terms, variables, **kwargs): - + def run(self, terms, variables=None, **kwargs): if not isinstance(terms, list): raise AnsibleError("with_flattened expects a list") + self.set_options(var_options=variables, direct=kwargs) + return self._do_flatten(terms, variables) diff --git a/plugins/lookup/github_app_access_token.py b/plugins/lookup/github_app_access_token.py new file mode 100644 index 0000000000..0b4f4d53ee --- /dev/null +++ b/plugins/lookup/github_app_access_token.py @@ -0,0 +1,223 @@ +# Copyright (c) 2023, Poh Wei Sheng +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later +from __future__ 
import annotations + +DOCUMENTATION = r""" +name: github_app_access_token +author: + - Poh Wei Sheng (@weisheng-p) + - Bruno Lavoie (@blavoie) +short_description: Obtain short-lived Github App Access tokens +version_added: '8.2.0' +requirements: + - jwt (https://github.com/GehirnInc/python-jwt) OR + - PyJWT (https://pypi.org/project/PyJWT/) AND cryptography (https://pypi.org/project/cryptography/) +description: + - This generates a Github access token that can be used with a C(git) command, if you use a Github App. +options: + key_path: + description: + - Path to your private key. + - Either O(key_path) or O(private_key) must be specified. + type: path + app_id: + description: + - Your GitHub App ID, you can find this in the Settings page. + required: true + type: str + installation_id: + description: + - The installation ID that contains the git repository you would like access to. + - As of 2023-12-24, this can be found at Settings page > Integrations > Application. The last part of the URL in the + configure button is the installation ID. + - Alternatively, you can use PyGithub (U(https://github.com/PyGithub/PyGithub)) to get your installation ID. + required: true + type: str + private_key: + description: + - GitHub App private key in PEM file format as string. + - Either O(key_path) or O(private_key) must be specified. + type: str + version_added: 10.0.0 + token_expiry: + description: + - How long the token should last for in seconds. + default: 600 + type: int + github_url: + description: + - Base URL for the GitHub API (for GitHub Enterprise Server). 
+ - "Example: C(https://github-enterprise-server.example.com/api/v3)" + default: https://api.github.com + type: str + version_added: 11.4.0 +""" + +EXAMPLES = r""" +- name: Get access token to be used for git checkout with app_id=123456, installation_id=64209 + ansible.builtin.git: + repo: >- + https://x-access-token:{{ github_token }}@github.com/hidden_user/super-secret-repo.git + dest: /srv/checkout + vars: + github_token: >- + {{ lookup('community.general.github_app_access_token', key_path='/home/to_your/key', + app_id='123456', installation_id='64209') }} +""" + +RETURN = r""" +_raw: + description: A one-element list containing your GitHub access token. + type: list + elements: str +""" + +try: + import jwt + HAS_JWT = True +except ImportError: + HAS_JWT = False + +HAS_PYTHON_JWT = False # vs pyjwt +if HAS_JWT and hasattr(jwt, 'JWT'): + HAS_PYTHON_JWT = True + from jwt import jwk_from_pem, JWT + jwt_instance = JWT() + +try: + from cryptography.hazmat.primitives import serialization + HAS_CRYPTOGRAPHY = True +except ImportError: + HAS_CRYPTOGRAPHY = False + + +import time +import json +from urllib.error import HTTPError + +from ansible.module_utils.urls import open_url +from ansible.errors import AnsibleError, AnsibleOptionsError +from ansible.plugins.lookup import LookupBase +from ansible.utils.display import Display + +display = Display() + + +class PythonJWT: + + @staticmethod + def read_key(path, private_key=None): + try: + if private_key: + return jwk_from_pem(private_key.encode('utf-8')) + with open(path, 'rb') as pem_file: + return jwk_from_pem(pem_file.read()) + except Exception as e: + raise AnsibleError(f"Error while parsing key file: {e}") + + @staticmethod + def encode_jwt(app_id, jwk, exp=600): + now = int(time.time()) + payload = { + 'iat': now, + 'exp': now + exp, + 'iss': app_id, + } + try: + return jwt_instance.encode(payload, jwk, alg='RS256') + except Exception as e: + raise AnsibleError(f"Error while encoding jwt: {e}") + + +def 
read_key(path, private_key=None): + if HAS_PYTHON_JWT: + return PythonJWT.read_key(path, private_key) + try: + if private_key: + key_bytes = private_key.encode('utf-8') + else: + with open(path, 'rb') as pem_file: + key_bytes = pem_file.read() + return serialization.load_pem_private_key(key_bytes, password=None) + except Exception as e: + raise AnsibleError(f"Error while parsing key file: {e}") + + +def encode_jwt(app_id, private_key_obj, exp=600): + if HAS_PYTHON_JWT: + return PythonJWT.encode_jwt(app_id, private_key_obj) + now = int(time.time()) + payload = { + 'iat': now, + 'exp': now + exp, + 'iss': app_id, + } + try: + return jwt.encode(payload, private_key_obj, algorithm='RS256') + except Exception as e: + raise AnsibleError(f"Error while encoding jwt: {e}") + + +def post_request(generated_jwt, installation_id, api_base): + base = api_base.rstrip('/') + github_url = f"{base}/app/installations/{installation_id}/access_tokens" + + headers = { + "Authorization": f'Bearer {generated_jwt}', + "Accept": "application/vnd.github.v3+json", + } + try: + response = open_url(github_url, headers=headers, method='POST') + except HTTPError as e: + try: + error_body = json.loads(e.read().decode()) + display.vvv(f"Error returned: {error_body}") + except Exception: + error_body = {} + if e.code == 404: + raise AnsibleError("Github return error. Please confirm your installation_id value is valid") + elif e.code == 401: + raise AnsibleError("Github return error. 
Please confirm your private key is valid") + raise AnsibleError(f"Unexpected data returned: {e} -- {error_body}") + response_body = response.read() + try: + json_data = json.loads(response_body.decode('utf-8')) + except json.decoder.JSONDecodeError as e: + raise AnsibleError(f"Error while decoding JSON response from github: {e}") + return json_data.get('token') + + +def get_token(key_path, app_id, installation_id, private_key, github_url, expiry=600): + jwk = read_key(key_path, private_key) + generated_jwt = encode_jwt(app_id, jwk, exp=expiry) + return post_request(generated_jwt, installation_id, github_url) + + +class LookupModule(LookupBase): + def run(self, terms, variables=None, **kwargs): + if not HAS_JWT: + raise AnsibleError('Python jwt library is required. ' + 'Please install using "pip install pyjwt"') + + if not HAS_PYTHON_JWT and not HAS_CRYPTOGRAPHY: + raise AnsibleError('Python cryptography library is required. ' + 'Please install using "pip install cryptography"') + + self.set_options(var_options=variables, direct=kwargs) + + if not (self.get_option("key_path") or self.get_option("private_key")): + raise AnsibleOptionsError("One of key_path or private_key is required") + if self.get_option("key_path") and self.get_option("private_key"): + raise AnsibleOptionsError("key_path and private_key are mutually exclusive") + + t = get_token( + self.get_option('key_path'), + self.get_option('app_id'), + self.get_option('installation_id'), + self.get_option('private_key'), + self.get_option('github_url'), + self.get_option('token_expiry'), + ) + + return [t] diff --git a/plugins/lookup/hiera.py b/plugins/lookup/hiera.py index 5b440469eb..d031987a81 100644 --- a/plugins/lookup/hiera.py +++ b/plugins/lookup/hiera.py @@ -1,42 +1,49 @@ -# -*- coding: utf-8 -*- -# (c) 2017, Juan Manuel Parrilla -# (c) 2012-17 Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import (absolute_import, 
division, print_function) -__metaclass__ = type +# Copyright (c) 2017, Juan Manuel Parrilla +# Copyright (c) 2012-17 Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later +from __future__ import annotations -DOCUMENTATION = ''' - author: - - Juan Manuel Parrilla (@jparrill) - name: hiera - short_description: get info from hiera data - requirements: - - hiera (command line utility) +DOCUMENTATION = r""" +author: + - Juan Manuel Parrilla (@jparrill) +name: hiera +short_description: Get info from hiera data +requirements: + - hiera (command line utility) +description: + - Retrieves data from an Puppetmaster node using Hiera as ENC. +deprecated: + removed_in: 13.0.0 + why: >- + Hiera has been deprecated a long time ago. + If you disagree with this deprecation, please create an issue in the community.general repository. + alternative: Unknown. +options: + _terms: description: - - Retrieves data from an Puppetmaster node using Hiera as ENC - options: - _hiera_key: - description: - - The list of keys to lookup on the Puppetmaster - type: list - elements: string - required: True - _bin_file: - description: - - Binary file to execute Hiera - default: '/usr/bin/hiera' - env: - - name: ANSIBLE_HIERA_BIN - _hierarchy_file: - description: - - File that describes the hierarchy of Hiera - default: '/etc/hiera.yaml' - env: - - name: ANSIBLE_HIERA_CFG + - The list of keys to lookup on the Puppetmaster. + type: list + elements: string + required: true + executable: + description: + - Binary file to execute Hiera. + type: string + default: '/usr/bin/hiera' + env: + - name: ANSIBLE_HIERA_BIN + config_file: + description: + - File that describes the hierarchy of Hiera. + type: string + default: '/etc/hiera.yaml' + env: + - name: ANSIBLE_HIERA_CFG # FIXME: incomplete options .. _terms? environment/fqdn? 
-''' +""" -EXAMPLES = """ +EXAMPLES = r""" # All this examples depends on hiera.yml that describes the hierarchy - name: "a value from Hiera 'DB'" @@ -52,39 +59,39 @@ EXAMPLES = """ msg: "{{ lookup('community.general.hiera', 'foo fqdn=puppet01.localdomain') }}" """ -RETURN = """ - _raw: - description: - - a value associated with input key - type: list - elements: str +RETURN = r""" +_raw: + description: + - A value associated with input key. + type: list + elements: str """ -import os - from ansible.plugins.lookup import LookupBase from ansible.utils.cmd_functions import run_cmd from ansible.module_utils.common.text.converters import to_text -ANSIBLE_HIERA_CFG = os.getenv('ANSIBLE_HIERA_CFG', '/etc/hiera.yaml') -ANSIBLE_HIERA_BIN = os.getenv('ANSIBLE_HIERA_BIN', '/usr/bin/hiera') - class Hiera(object): + def __init__(self, hiera_cfg, hiera_bin): + self.hiera_cfg = hiera_cfg + self.hiera_bin = hiera_bin + def get(self, hiera_key): - pargs = [ANSIBLE_HIERA_BIN] - pargs.extend(['-c', ANSIBLE_HIERA_CFG]) + pargs = [self.hiera_bin] + pargs.extend(['-c', self.hiera_cfg]) pargs.extend(hiera_key) - rc, output, err = run_cmd("{0} -c {1} {2}".format( - ANSIBLE_HIERA_BIN, ANSIBLE_HIERA_CFG, hiera_key[0])) + rc, output, err = run_cmd(f"{self.hiera_bin} -c {self.hiera_cfg} {hiera_key[0]}") return to_text(output.strip()) class LookupModule(LookupBase): - def run(self, terms, variables=''): - hiera = Hiera() + def run(self, terms, variables=None, **kwargs): + self.set_options(var_options=variables, direct=kwargs) + + hiera = Hiera(self.get_option('config_file'), self.get_option('executable')) ret = [hiera.get(terms)] return ret diff --git a/plugins/lookup/keyring.py b/plugins/lookup/keyring.py index 73f9c5f4a9..73fca84e6f 100644 --- a/plugins/lookup/keyring.py +++ b/plugins/lookup/keyring.py @@ -1,38 +1,39 @@ -# -*- coding: utf-8 -*- -# (c) 2016, Samuel Boucher -# (c) 2017 Ansible Project -# GNU General Public License v3.0+ (see COPYING or 
https://www.gnu.org/licenses/gpl-3.0.txt) +# Copyright (c) 2016, Samuel Boucher +# Copyright (c) 2017 Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = ''' - name: keyring - author: - - Samuel Boucher (!UNKNOWN) - requirements: - - keyring (python library) - short_description: grab secrets from the OS keyring - description: - - Allows you to access data stored in the OS provided keyring/keychain. -''' +DOCUMENTATION = r""" +name: keyring +author: + - Samuel Boucher (!UNKNOWN) +requirements: + - keyring (python library) +short_description: Grab secrets from the OS keyring +description: + - Allows you to access data stored in the OS provided keyring/keychain. +""" -EXAMPLES = """ -- name : output secrets to screen (BAD IDEA) +EXAMPLES = r""" +- name: output secrets to screen (BAD IDEA) ansible.builtin.debug: msg: "Password: {{item}}" with_community.general.keyring: - 'servicename username' - name: access mysql with password from keyring - mysql_db: login_password={{lookup('community.general.keyring','mysql joe')}} login_user=joe + community.mysql.mysql_db: + login_password: "{{ lookup('community.general.keyring', 'mysql joe') }}" + login_user: joe """ -RETURN = """ - _raw: - description: Secrets stored. - type: list - elements: str +RETURN = r""" +_raw: + description: Secrets stored. 
+ type: list + elements: str """ HAS_KEYRING = True @@ -52,17 +53,19 @@ display = Display() class LookupModule(LookupBase): - def run(self, terms, **kwargs): + def run(self, terms, variables=None, **kwargs): if not HAS_KEYRING: - raise AnsibleError(u"Can't LOOKUP(keyring): missing required python library 'keyring'") + raise AnsibleError("Can't LOOKUP(keyring): missing required python library 'keyring'") - display.vvvv(u"keyring: %s" % keyring.get_keyring()) + self.set_options(var_options=variables, direct=kwargs) + + display.vvvv(f"keyring: {keyring.get_keyring()}") ret = [] for term in terms: (servicename, username) = (term.split()[0], term.split()[1]) - display.vvvv(u"username: %s, servicename: %s " % (username, servicename)) + display.vvvv(f"username: {username}, servicename: {servicename} ") password = keyring.get_password(servicename, username) if password is None: - raise AnsibleError(u"servicename: %s for user %s not found" % (servicename, username)) + raise AnsibleError(f"servicename: {servicename} for user {username} not found") ret.append(password.rstrip()) return ret diff --git a/plugins/lookup/lastpass.py b/plugins/lookup/lastpass.py index 920d33176f..8a3999c372 100644 --- a/plugins/lookup/lastpass.py +++ b/plugins/lookup/lastpass.py @@ -1,40 +1,42 @@ -# -*- coding: utf-8 -*- -# (c) 2016, Andrew Zenk -# (c) 2017 Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +# Copyright (c) 2016, Andrew Zenk +# Copyright (c) 2017 Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later +from __future__ import annotations -DOCUMENTATION = ''' - name: lastpass - author: - - Andrew Zenk (!UNKNOWN) - requirements: - - lpass (command line utility) - - must have already logged into lastpass - 
short_description: fetch data from lastpass - description: - - use the lpass command line utility to fetch specific fields from lastpass - options: - _terms: - description: key from which you want to retrieve the field - required: True - field: - description: field to return from lastpass - default: 'password' -''' +DOCUMENTATION = r""" +name: lastpass +author: + - Andrew Zenk (!UNKNOWN) +requirements: + - lpass (command line utility) + - must have already logged into LastPass +short_description: Fetch data from LastPass +description: + - Use the lpass command line utility to fetch specific fields from LastPass. +options: + _terms: + description: Key from which you want to retrieve the field. + required: true + type: list + elements: str + field: + description: Field to return from LastPass. + default: 'password' + type: str +""" -EXAMPLES = """ -- name: get 'custom_field' from lastpass entry 'entry-name' +EXAMPLES = r""" +- name: get 'custom_field' from LastPass entry 'entry-name' ansible.builtin.debug: msg: "{{ lookup('community.general.lastpass', 'entry-name', field='custom_field') }}" """ -RETURN = """ - _raw: - description: secrets stored - type: list - elements: str +RETURN = r""" +_raw: + description: Secrets stored. 
+ type: list + elements: str """ from subprocess import Popen, PIPE @@ -79,21 +81,23 @@ class LPass(object): def get_field(self, key, field): if field in ['username', 'password', 'url', 'notes', 'id', 'name']: - out, err = self._run(self._build_args("show", ["--{0}".format(field), key])) + out, err = self._run(self._build_args("show", [f"--{field}", key])) else: - out, err = self._run(self._build_args("show", ["--field={0}".format(field), key])) + out, err = self._run(self._build_args("show", [f"--field={field}", key])) return out.strip() class LookupModule(LookupBase): def run(self, terms, variables=None, **kwargs): + self.set_options(var_options=variables, direct=kwargs) + field = self.get_option('field') + lp = LPass() if not lp.logged_in: - raise AnsibleError("Not logged into lastpass: please run 'lpass login' first") + raise AnsibleError("Not logged into LastPass: please run 'lpass login' first") - field = kwargs.get('field', 'password') values = [] for term in terms: values.append(lp.get_field(term, field)) diff --git a/plugins/lookup/lmdb_kv.py b/plugins/lookup/lmdb_kv.py index 9dd46e338a..f9b0d9482f 100644 --- a/plugins/lookup/lmdb_kv.py +++ b/plugins/lookup/lmdb_kv.py @@ -1,29 +1,33 @@ -# -*- coding: utf-8 -*- -# (c) 2017-2018, Jan-Piet Mens -# (c) 2018 Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +# Copyright (c) 2017-2018, Jan-Piet Mens +# Copyright (c) 2018 Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later +from __future__ import annotations -DOCUMENTATION = ''' - name: lmdb_kv - author: - - Jan-Piet Mens (@jpmens) - version_added: '0.2.0' - short_description: fetch data from LMDB - description: - - This lookup returns a list of results from an LMDB DB corresponding to a list 
of items given to it - requirements: - - lmdb (python library https://lmdb.readthedocs.io/en/release/) - options: - _terms: - description: list of keys to query - db: - description: path to LMDB database - default: 'ansible.mdb' -''' +DOCUMENTATION = r""" +name: lmdb_kv +author: + - Jan-Piet Mens (@jpmens) +version_added: '0.2.0' +short_description: Fetch data from LMDB +description: + - This lookup returns a list of results from an LMDB DB corresponding to a list of items given to it. +requirements: + - lmdb (Python library U(https://lmdb.readthedocs.io/en/release/)) +options: + _terms: + description: List of keys to query. + type: list + elements: str + db: + description: Path to LMDB database. + type: str + default: 'ansible.mdb' + vars: + - name: lmdb_kv_db +""" -EXAMPLES = """ +EXAMPLES = r""" - name: query LMDB for a list of country codes ansible.builtin.debug: msg: "{{ query('community.general.lmdb_kv', 'nl', 'be', 'lu', db='jp.mdb') }}" @@ -34,7 +38,7 @@ EXAMPLES = """ vars: - lmdb_kv_db: jp.mdb with_community.general.lmdb_kv: - - "n*" + - "n*" - name: get an item by key ansible.builtin.assert: @@ -42,13 +46,13 @@ EXAMPLES = """ - item == 'Belgium' vars: - lmdb_kv_db: jp.mdb - with_community.general.lmdb_kv: - - be + with_community.general.lmdb_kv: + - be """ -RETURN = """ +RETURN = r""" _raw: - description: value(s) stored in LMDB + description: Value(s) stored in LMDB. type: list elements: raw """ @@ -57,6 +61,7 @@ _raw: from ansible.errors import AnsibleError from ansible.plugins.lookup import LookupBase from ansible.module_utils.common.text.converters import to_native, to_text + HAVE_LMDB = True try: import lmdb @@ -66,8 +71,7 @@ except ImportError: class LookupModule(LookupBase): - def run(self, terms, variables, **kwargs): - + def run(self, terms, variables=None, **kwargs): ''' terms contain any number of keys to be retrieved. 
If terms is None, all keys from the database are returned @@ -80,19 +84,17 @@ class LookupModule(LookupBase): vars: - lmdb_kv_db: "jp.mdb" ''' - if HAVE_LMDB is False: raise AnsibleError("Can't LOOKUP(lmdb_kv): this module requires lmdb to be installed") - db = variables.get('lmdb_kv_db', None) - if db is None: - db = kwargs.get('db', 'ansible.mdb') - db = str(db) + self.set_options(var_options=variables, direct=kwargs) + + db = self.get_option('db') try: - env = lmdb.open(db, readonly=True) + env = lmdb.open(str(db), readonly=True) except Exception as e: - raise AnsibleError("LMDB can't open database %s: %s" % (db, to_native(e))) + raise AnsibleError(f"LMDB cannot open database {db}: {e}") ret = [] if len(terms) == 0: diff --git a/plugins/lookup/manifold.py b/plugins/lookup/manifold.py deleted file mode 100644 index 01bb13cf0b..0000000000 --- a/plugins/lookup/manifold.py +++ /dev/null @@ -1,279 +0,0 @@ -# -*- coding: utf-8 -*- -# (c) 2018, Arigato Machine Inc. -# (c) 2018, Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - -DOCUMENTATION = ''' - author: - - Kyrylo Galanov (!UNKNOWN) - name: manifold - short_description: get credentials from Manifold.co - description: - - Retrieves resources' credentials from Manifold.co - options: - _terms: - description: - - Optional list of resource labels to lookup on Manifold.co. If no resources are specified, all - matched resources will be returned. - type: list - elements: string - required: False - api_token: - description: - - manifold API token - type: string - required: True - env: - - name: MANIFOLD_API_TOKEN - project: - description: - - The project label you want to get the resource for. - type: string - required: False - team: - description: - - The team label you want to get the resource for. 
- type: string - required: False -''' - -EXAMPLES = ''' - - name: all available resources - ansible.builtin.debug: - msg: "{{ lookup('community.general.manifold', api_token='SecretToken') }}" - - name: all available resources for a specific project in specific team - ansible.builtin.debug: - msg: "{{ lookup('community.general.manifold', api_token='SecretToken', project='poject-1', team='team-2') }}" - - name: two specific resources - ansible.builtin.debug: - msg: "{{ lookup('community.general.manifold', 'resource-1', 'resource-2') }}" -''' - -RETURN = ''' - _raw: - description: - - dictionary of credentials ready to be consumed as environment variables. If multiple resources define - the same environment variable(s), the last one returned by the Manifold API will take precedence. - type: dict -''' -from ansible.errors import AnsibleError -from ansible.plugins.lookup import LookupBase -from ansible.module_utils.urls import open_url, ConnectionError, SSLValidationError -from ansible.module_utils.six.moves.urllib.error import HTTPError, URLError -from ansible.module_utils.six.moves.urllib.parse import urlencode -from ansible.module_utils import six -from ansible.utils.display import Display -from traceback import format_exception -import json -import sys -import os - -display = Display() - - -class ApiError(Exception): - pass - - -class ManifoldApiClient(object): - base_url = 'https://api.{api}.manifold.co/v1/{endpoint}' - http_agent = 'python-manifold-ansible-1.0.0' - - def __init__(self, token): - self._token = token - - def request(self, api, endpoint, *args, **kwargs): - """ - Send a request to API backend and pre-process a response. - :param api: API to send a request to - :type api: str - :param endpoint: API endpoint to fetch data from - :type endpoint: str - :param args: other args for open_url - :param kwargs: other kwargs for open_url - :return: server response. JSON response is automatically deserialized. 
- :rtype: dict | list | str - """ - - default_headers = { - 'Authorization': "Bearer {0}".format(self._token), - 'Accept': "*/*" # Otherwise server doesn't set content-type header - } - - url = self.base_url.format(api=api, endpoint=endpoint) - - headers = default_headers - arg_headers = kwargs.pop('headers', None) - if arg_headers: - headers.update(arg_headers) - - try: - display.vvvv('manifold lookup connecting to {0}'.format(url)) - response = open_url(url, headers=headers, http_agent=self.http_agent, *args, **kwargs) - data = response.read() - if response.headers.get('content-type') == 'application/json': - data = json.loads(data) - return data - except ValueError: - raise ApiError('JSON response can\'t be parsed while requesting {url}:\n{json}'.format(json=data, url=url)) - except HTTPError as e: - raise ApiError('Server returned: {err} while requesting {url}:\n{response}'.format( - err=str(e), url=url, response=e.read())) - except URLError as e: - raise ApiError('Failed lookup url for {url} : {err}'.format(url=url, err=str(e))) - except SSLValidationError as e: - raise ApiError('Error validating the server\'s certificate for {url}: {err}'.format(url=url, err=str(e))) - except ConnectionError as e: - raise ApiError('Error connecting to {url}: {err}'.format(url=url, err=str(e))) - - def get_resources(self, team_id=None, project_id=None, label=None): - """ - Get resources list - :param team_id: ID of the Team to filter resources by - :type team_id: str - :param project_id: ID of the project to filter resources by - :type project_id: str - :param label: filter resources by a label, returns a list with one or zero elements - :type label: str - :return: list of resources - :rtype: list - """ - api = 'marketplace' - endpoint = 'resources' - query_params = {} - - if team_id: - query_params['team_id'] = team_id - if project_id: - query_params['project_id'] = project_id - if label: - query_params['label'] = label - - if query_params: - endpoint += '?' 
+ urlencode(query_params) - - return self.request(api, endpoint) - - def get_teams(self, label=None): - """ - Get teams list - :param label: filter teams by a label, returns a list with one or zero elements - :type label: str - :return: list of teams - :rtype: list - """ - api = 'identity' - endpoint = 'teams' - data = self.request(api, endpoint) - # Label filtering is not supported by API, however this function provides uniform interface - if label: - data = list(filter(lambda x: x['body']['label'] == label, data)) - return data - - def get_projects(self, label=None): - """ - Get projects list - :param label: filter projects by a label, returns a list with one or zero elements - :type label: str - :return: list of projects - :rtype: list - """ - api = 'marketplace' - endpoint = 'projects' - query_params = {} - - if label: - query_params['label'] = label - - if query_params: - endpoint += '?' + urlencode(query_params) - - return self.request(api, endpoint) - - def get_credentials(self, resource_id): - """ - Get resource credentials - :param resource_id: ID of the resource to filter credentials by - :type resource_id: str - :return: - """ - api = 'marketplace' - endpoint = 'credentials?' + urlencode({'resource_id': resource_id}) - return self.request(api, endpoint) - - -class LookupModule(LookupBase): - - def run(self, terms, variables=None, api_token=None, project=None, team=None): - """ - :param terms: a list of resources lookups to run. - :param variables: ansible variables active at the time of the lookup - :param api_token: API token - :param project: optional project label - :param team: optional team label - :return: a dictionary of resources credentials - """ - - if not api_token: - api_token = os.getenv('MANIFOLD_API_TOKEN') - if not api_token: - raise AnsibleError('API token is required. 
Please set api_token parameter or MANIFOLD_API_TOKEN env var') - - try: - labels = terms - client = ManifoldApiClient(api_token) - - if team: - team_data = client.get_teams(team) - if len(team_data) == 0: - raise AnsibleError("Team '{0}' does not exist".format(team)) - team_id = team_data[0]['id'] - else: - team_id = None - - if project: - project_data = client.get_projects(project) - if len(project_data) == 0: - raise AnsibleError("Project '{0}' does not exist".format(project)) - project_id = project_data[0]['id'] - else: - project_id = None - - if len(labels) == 1: # Use server-side filtering if one resource is requested - resources_data = client.get_resources(team_id=team_id, project_id=project_id, label=labels[0]) - else: # Get all resources and optionally filter labels - resources_data = client.get_resources(team_id=team_id, project_id=project_id) - if labels: - resources_data = list(filter(lambda x: x['body']['label'] in labels, resources_data)) - - if labels and len(resources_data) < len(labels): - fetched_labels = [r['body']['label'] for r in resources_data] - not_found_labels = [label for label in labels if label not in fetched_labels] - raise AnsibleError("Resource(s) {0} do not exist".format(', '.join(not_found_labels))) - - credentials = {} - cred_map = {} - for resource in resources_data: - resource_credentials = client.get_credentials(resource['id']) - if len(resource_credentials) and resource_credentials[0]['body']['values']: - for cred_key, cred_val in six.iteritems(resource_credentials[0]['body']['values']): - label = resource['body']['label'] - if cred_key in credentials: - display.warning("'{cred_key}' with label '{old_label}' was replaced by resource data " - "with label '{new_label}'".format(cred_key=cred_key, - old_label=cred_map[cred_key], - new_label=label)) - credentials[cred_key] = cred_val - cred_map[cred_key] = label - - ret = [credentials] - return ret - except ApiError as e: - raise AnsibleError('API Error: {0}'.format(str(e))) - 
except AnsibleError as e: - raise e - except Exception: - exc_type, exc_value, exc_traceback = sys.exc_info() - raise AnsibleError(format_exception(exc_type, exc_value, exc_traceback)) diff --git a/plugins/lookup/merge_variables.py b/plugins/lookup/merge_variables.py new file mode 100644 index 0000000000..5c1686b499 --- /dev/null +++ b/plugins/lookup/merge_variables.py @@ -0,0 +1,232 @@ +# Copyright (c) 2020, Thales Netherlands +# Copyright (c) 2021, Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later +from __future__ import annotations + +DOCUMENTATION = r""" +author: + - Roy Lenferink (@rlenferink) + - Mark Ettema (@m-a-r-k-e) + - Alexander Petrenz (@alpex8) +name: merge_variables +short_description: Merge variables whose names match a given pattern +description: + - This lookup returns the merged result of all variables in scope that match the given prefixes, suffixes, or regular expressions, + optionally. +version_added: 6.5.0 +options: + _terms: + description: + - Depending on the value of O(pattern_type), this is a list of prefixes, suffixes, or regular expressions that is used + to match all variables that should be merged. + required: true + type: list + elements: str + pattern_type: + description: + - Change the way of searching for the specified pattern. + type: str + default: 'regex' + choices: + - prefix + - suffix + - regex + env: + - name: ANSIBLE_MERGE_VARIABLES_PATTERN_TYPE + ini: + - section: merge_variables_lookup + key: pattern_type + initial_value: + description: + - An initial value to start with. + type: raw + override: + description: + - Return an error, print a warning or ignore it when a key is overwritten. + - The default behavior V(error) makes the plugin fail when a key would be overwritten. 
+ - When V(warn) and V(ignore) are used, note that it is important to know that the variables are sorted by name before + being merged. Keys for later variables in this order overwrite keys of the same name for variables earlier in this + order. To avoid potential confusion, better use O(override=error) whenever possible. + type: str + default: 'error' + choices: + - error + - warn + - ignore + env: + - name: ANSIBLE_MERGE_VARIABLES_OVERRIDE + ini: + - section: merge_variables_lookup + key: override + groups: + description: + - Search for variables across hosts that belong to the given groups. This allows to collect configuration pieces across + different hosts (for example a service on a host with its database on another host). + type: list + elements: str + version_added: 8.5.0 +""" + +EXAMPLES = r""" +# Some example variables, they can be defined anywhere as long as they are in scope +test_init_list: + - "list init item 1" + - "list init item 2" + +testa__test_list: + - "test a item 1" + +testb__test_list: + - "test b item 1" + +testa__test_dict: + ports: + - 1 + +testb__test_dict: + ports: + - 3 + +# Merge variables that end with '__test_dict' and store the result in a variable 'example_a' +example_a: "{{ lookup('community.general.merge_variables', '__test_dict', pattern_type='suffix') }}" + +# The variable example_a now contains: +# ports: +# - 1 +# - 3 + +# Merge variables that match the '^.+__test_list$' regular expression, starting with an initial value and store the +# result in a variable 'example_b' +example_b: "{{ lookup('community.general.merge_variables', '^.+__test_list$', initial_value=test_init_list) }}" + +# The variable example_b now contains: +# - "list init item 1" +# - "list init item 2" +# - "test a item 1" +# - "test b item 1" +""" + +RETURN = r""" +_raw: + description: In case the search matches list items, a list is returned. In case the search matches dicts, a dict is returned. 
+ type: raw + elements: raw +""" + +import re + +from ansible.errors import AnsibleError +from ansible.plugins.lookup import LookupBase +from ansible.utils.display import Display + +display = Display() + + +def _verify_and_get_type(variable): + if isinstance(variable, list): + return "list" + elif isinstance(variable, dict): + return "dict" + else: + raise AnsibleError("Not supported type detected, variable must be a list or a dict") + + +class LookupModule(LookupBase): + def run(self, terms, variables=None, **kwargs): + self.set_options(direct=kwargs) + initial_value = self.get_option("initial_value", None) + self._override = self.get_option('override', 'error') + self._pattern_type = self.get_option('pattern_type', 'regex') + self._groups = self.get_option('groups', None) + + ret = [] + for term in terms: + if not isinstance(term, str): + raise AnsibleError(f"Non-string type '{type(term)}' passed, only 'str' types are allowed!") + + if not self._groups: # consider only own variables + ret.append(self._merge_vars(term, initial_value, variables)) + else: # consider variables of hosts in given groups + cross_host_merge_result = initial_value + for host in variables["hostvars"]: + if self._is_host_in_allowed_groups(variables["hostvars"][host]["group_names"]): + host_variables = dict(variables["hostvars"].raw_get(host)) + host_variables["hostvars"] = variables["hostvars"] # re-add hostvars + cross_host_merge_result = self._merge_vars(term, cross_host_merge_result, host_variables) + ret.append(cross_host_merge_result) + + return ret + + def _is_host_in_allowed_groups(self, host_groups): + if 'all' in self._groups: + return True + + group_intersection = [host_group_name for host_group_name in host_groups if host_group_name in self._groups] + if group_intersection: + return True + + return False + + def _var_matches(self, key, search_pattern): + if self._pattern_type == "prefix": + return key.startswith(search_pattern) + elif self._pattern_type == "suffix": + return 
key.endswith(search_pattern) + elif self._pattern_type == "regex": + matcher = re.compile(search_pattern) + return matcher.search(key) + + return False + + def _merge_vars(self, search_pattern, initial_value, variables): + display.vvv(f"Merge variables with {self._pattern_type}: {search_pattern}") + var_merge_names = sorted([key for key in variables.keys() if self._var_matches(key, search_pattern)]) + display.vvv(f"The following variables will be merged: {var_merge_names}") + prev_var_type = None + result = None + + if initial_value is not None: + prev_var_type = _verify_and_get_type(initial_value) + result = initial_value + + for var_name in var_merge_names: + temp_templar = self._templar.copy_with_new_env(available_variables=variables) # tmp. switch renderer to context of current variables + var_value = temp_templar.template(variables[var_name]) # Render jinja2 templates + var_type = _verify_and_get_type(var_value) + + if prev_var_type is None: + prev_var_type = var_type + elif prev_var_type != var_type: + raise AnsibleError("Unable to merge, not all variables are of the same type") + + if result is None: + result = var_value + continue + + if var_type == "dict": + result = self._merge_dict(var_value, result, [var_name]) + else: # var_type == "list" + result += var_value + + return result + + def _merge_dict(self, src, dest, path): + for key, value in src.items(): + if isinstance(value, dict): + node = dest.setdefault(key, {}) + self._merge_dict(value, node, path + [key]) + elif isinstance(value, list) and key in dest: + dest[key] += value + else: + if (key in dest) and dest[key] != value: + msg = f"The key '{key}' with value '{dest[key]}' will be overwritten with value '{value}' from '{'.'.join(path)}.{key}'" + + if self._override == "error": + raise AnsibleError(msg) + if self._override == "warn": + display.warning(msg) + + dest[key] = value + + return dest diff --git a/plugins/lookup/onepassword.py b/plugins/lookup/onepassword.py index e0be0cd27e..ab68796ed1 
100644 --- a/plugins/lookup/onepassword.py +++ b/plugins/lookup/onepassword.py @@ -1,63 +1,44 @@ -# -*- coding: utf-8 -*- -# Copyright: (c) 2018, Scott Buchanan -# Copyright: (c) 2016, Andrew Zenk (lastpass.py used as starting point) -# Copyright: (c) 2018, Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# Copyright (c) 2018, Scott Buchanan +# Copyright (c) 2016, Andrew Zenk (lastpass.py used as starting point) +# Copyright (c) 2018, Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = ''' - name: onepassword - author: - - Scott Buchanan (@scottsb) - - Andrew Zenk (@azenk) - - Sam Doran (@samdoran) - requirements: - - C(op) 1Password command line utility. See U(https://support.1password.com/command-line/) - short_description: fetch field values from 1Password - description: - - C(onepassword) wraps the C(op) command line utility to fetch specific field values from 1Password. - options: - _terms: - description: identifier(s) (UUID, name, or subdomain; case-insensitive) of item(s) to retrieve. - required: True - field: - description: field to return from each matching item (case-insensitive). - default: 'password' - master_password: - description: The password used to unlock the specified vault. - aliases: ['vault_password'] - section: - description: Item section containing the field to retrieve (case-insensitive). If absent will return first match from any section. - domain: - description: Domain of 1Password. Default is U(1password.com). - version_added: 3.2.0 - default: '1password.com' - type: str - subdomain: - description: The 1Password subdomain to authenticate against. - username: - description: The username used to sign in. 
- secret_key: - description: The secret key used when performing an initial sign in. - vault: - description: Vault containing the item to retrieve (case-insensitive). If absent will search all vaults. - notes: - - This lookup will use an existing 1Password session if one exists. If not, and you have already - performed an initial sign in (meaning C(~/.op/config), C(~/.config/op/config) or C(~/.config/.op/config) exists), then only the - C(master_password) is required. You may optionally specify C(subdomain) in this scenario, otherwise the last used subdomain will be used by C(op). - - This lookup can perform an initial login by providing C(subdomain), C(username), C(secret_key), and C(master_password). - - Due to the B(very) sensitive nature of these credentials, it is B(highly) recommended that you only pass in the minimal credentials - needed at any given time. Also, store these credentials in an Ansible Vault using a key that is equal to or greater in strength - to the 1Password master password. - - This lookup stores potentially sensitive data from 1Password as Ansible facts. - Facts are subject to caching if enabled, which means this data could be stored in clear text - on disk or in a database. - - Tested with C(op) version 0.5.3 -''' +DOCUMENTATION = r""" +name: onepassword +author: + - Scott Buchanan (@scottsb) + - Andrew Zenk (@azenk) + - Sam Doran (@samdoran) +short_description: Fetch field values from 1Password +description: + - P(community.general.onepassword#lookup) wraps the C(op) command line utility to fetch specific field values from 1Password. +requirements: + - C(op) 1Password command line utility +options: + _terms: + description: Identifier(s) (case-insensitive UUID or name) of item(s) to retrieve. + required: true + type: list + elements: string + account_id: + version_added: 7.5.0 + domain: + version_added: 3.2.0 + field: + description: Field to return from each matching item (case-insensitive). 
+ default: 'password' + type: str + service_account_token: + version_added: 7.1.0 +extends_documentation_fragment: + - community.general.onepassword + - community.general.onepassword.lookup +""" -EXAMPLES = """ +EXAMPLES = r""" # These examples only work when already signed in to 1Password - name: Retrieve password for KITT when already signed in to 1Password ansible.builtin.debug: @@ -73,128 +54,165 @@ EXAMPLES = """ - name: Retrieve password for HAL when not signed in to 1Password ansible.builtin.debug: - var: lookup('community.general.onepassword' - 'HAL 9000' - subdomain='Discovery' - master_password=vault_master_password) + var: lookup('community.general.onepassword', 'HAL 9000', subdomain='Discovery', master_password=vault_master_password) - name: Retrieve password for HAL when never signed in to 1Password ansible.builtin.debug: - var: lookup('community.general.onepassword' - 'HAL 9000' - subdomain='Discovery' - master_password=vault_master_password - username='tweety@acme.com' - secret_key=vault_secret_key) + var: >- + lookup('community.general.onepassword', 'HAL 9000', subdomain='Discovery', master_password=vault_master_password, + username='tweety@acme.com', secret_key=vault_secret_key) + +- name: Retrieve password from specific account + ansible.builtin.debug: + var: lookup('community.general.onepassword', 'HAL 9000', account_id='abc123') """ -RETURN = """ - _raw: - description: field data requested - type: list - elements: str +RETURN = r""" +_raw: + description: Field data requested. 
+ type: list + elements: str """ -import errno -import json +import abc import os - -from subprocess import Popen, PIPE +import json +import subprocess from ansible.plugins.lookup import LookupBase -from ansible.errors import AnsibleLookupError +from ansible.errors import AnsibleLookupError, AnsibleOptionsError +from ansible.module_utils.common.process import get_bin_path from ansible.module_utils.common.text.converters import to_bytes, to_text from ansible_collections.community.general.plugins.module_utils.onepassword import OnePasswordConfig -class OnePass(object): - def __init__(self, path='op'): - self.cli_path = path - self.logged_in = False - self.token = None - self.subdomain = None - self.domain = None - self.username = None - self.secret_key = None - self.master_password = None +def _lower_if_possible(value): + """Return the lower case version value, otherwise return the value""" + try: + return value.lower() + except AttributeError: + return value - self._config = OnePasswordConfig() - def get_token(self): - # If the config file exists, assume an initial signin has taken place and try basic sign in - if os.path.isfile(self._config.config_file_path): +class OnePassCLIBase(object, metaclass=abc.ABCMeta): + bin = "op" - if not self.master_password: - raise AnsibleLookupError('Unable to sign in to 1Password. 
master_password is required.') + def __init__( + self, + subdomain=None, + domain="1password.com", + username=None, + secret_key=None, + master_password=None, + service_account_token=None, + account_id=None, + connect_host=None, + connect_token=None, + ): + self.subdomain = subdomain + self.domain = domain + self.username = username + self.master_password = master_password + self.secret_key = secret_key + self.service_account_token = service_account_token + self.account_id = account_id + self.connect_host = connect_host + self.connect_token = connect_token - try: - args = ['signin', '--output=raw'] + self._path = None + self._version = None - if self.subdomain: - args = ['signin', self.subdomain, '--output=raw'] + def _check_required_params(self, required_params): + non_empty_attrs = {param: getattr(self, param) for param in required_params if getattr(self, param, None)} + missing = set(required_params).difference(non_empty_attrs) + if missing: + prefix = "Unable to sign in to 1Password. Missing required parameter" + plural = "" + suffix = f": {', '.join(missing)}." 
+ if len(missing) > 1: + plural = "s" - rc, out, err = self._run(args, command_input=to_bytes(self.master_password)) - self.token = out.strip() + msg = f"{prefix}{plural}{suffix}" + raise AnsibleLookupError(msg) - except AnsibleLookupError: - self.full_login() + @abc.abstractmethod + def _parse_field(self, data_json, field_name, section_title): + """Main method for parsing data returned from the op command line tool""" - else: - # Attempt a full sign in since there appears to be no existing sign in - self.full_login() + def _run(self, args, expected_rc=0, command_input=None, ignore_errors=False, environment_update=None): + command = [self.path] + args + call_kwargs = { + "stdout": subprocess.PIPE, + "stderr": subprocess.PIPE, + "stdin": subprocess.PIPE, + } - def assert_logged_in(self): - try: - rc, out, err = self._run(['get', 'account'], ignore_errors=True) - if rc == 0: - self.logged_in = True - if not self.logged_in: - self.get_token() - except OSError as e: - if e.errno == errno.ENOENT: - raise AnsibleLookupError("1Password CLI tool '%s' not installed in path on control machine" % self.cli_path) - raise e + if environment_update: + env = os.environ.copy() + env.update(environment_update) + call_kwargs["env"] = env - def get_raw(self, item_id, vault=None): - args = ["get", "item", item_id] - if vault is not None: - args += ['--vault={0}'.format(vault)] - if not self.logged_in: - args += [to_bytes('--session=') + self.token] - rc, output, dummy = self._run(args) - return output - - def get_field(self, item_id, field, section=None, vault=None): - output = self.get_raw(item_id, vault) - return self._parse_field(output, field, section) if output != '' else '' - - def full_login(self): - if None in [self.subdomain, self.username, self.secret_key, self.master_password]: - raise AnsibleLookupError('Unable to perform initial sign in to 1Password. 
' - 'subdomain, username, secret_key, and master_password are required to perform initial sign in.') - - args = [ - 'signin', - '{0}.{1}'.format(self.subdomain, self.domain), - to_bytes(self.username), - to_bytes(self.secret_key), - '--output=raw', - ] - - rc, out, err = self._run(args, command_input=to_bytes(self.master_password)) - self.token = out.strip() - - def _run(self, args, expected_rc=0, command_input=None, ignore_errors=False): - command = [self.cli_path] + args - p = Popen(command, stdout=PIPE, stderr=PIPE, stdin=PIPE) + p = subprocess.Popen(command, **call_kwargs) out, err = p.communicate(input=command_input) rc = p.wait() + if not ignore_errors and rc != expected_rc: - raise AnsibleLookupError(to_text(err)) + raise AnsibleLookupError(str(err)) + return rc, out, err - def _parse_field(self, data_json, field_name, section_title=None): + @abc.abstractmethod + def assert_logged_in(self): + """Check whether a login session exists""" + + @abc.abstractmethod + def full_signin(self): + """Performa full login""" + + @abc.abstractmethod + def get_raw(self, item_id, vault=None, token=None): + """Gets the specified item from the vault""" + + @abc.abstractmethod + def signin(self): + """Sign in using the master password""" + + @property + def path(self): + if self._path is None: + self._path = get_bin_path(self.bin) + + return self._path + + @property + def version(self): + if self._version is None: + self._version = self.get_current_version() + + return self._version + + @classmethod + def get_current_version(cls): + """Standalone method to get the op CLI version. 
Useful when determining which class to load + based on the current version.""" + try: + bin_path = get_bin_path(cls.bin) + except ValueError: + raise AnsibleLookupError(f"Unable to locate '{cls.bin}' command line tool") + + try: + b_out = subprocess.check_output([bin_path, "--version"], stderr=subprocess.PIPE) + except subprocess.CalledProcessError as cpe: + raise AnsibleLookupError(f"Unable to get the op version: {cpe}") + + return to_text(b_out).strip() + + +class OnePassCLIv1(OnePassCLIBase): + supports_version = "1" + + def _parse_field(self, data_json, field_name, section_title): """ Retrieves the desired field from the `op` response payload @@ -248,36 +266,433 @@ class OnePass(object): # check the details dictionary for `field_name` and return it immediately if it exists # when the entry is a "password" instead of a "login" item, the password field is a key # in the `details` dictionary: - if field_name in data['details']: - return data['details'][field_name] + if field_name in data["details"]: + return data["details"][field_name] # when the field is not found above, iterate through the fields list in the object details - for field_data in data['details'].get('fields', []): - if field_data.get('name', '').lower() == field_name.lower(): - return field_data.get('value', '') - for section_data in data['details'].get('sections', []): - if section_title is not None and section_title.lower() != section_data['title'].lower(): + for field_data in data["details"].get("fields", []): + if field_data.get("name", "").lower() == field_name.lower(): + return field_data.get("value", "") + + for section_data in data["details"].get("sections", []): + if section_title is not None and section_title.lower() != section_data["title"].lower(): continue - for field_data in section_data.get('fields', []): - if field_data.get('t', '').lower() == field_name.lower(): - return field_data.get('v', '') - return '' + + for field_data in section_data.get("fields", []): + if 
field_data.get("t", "").lower() == field_name.lower(): + return field_data.get("v", "") + + return "" + + def assert_logged_in(self): + args = ["get", "account"] + if self.account_id: + args.extend(["--account", self.account_id]) + elif self.subdomain: + account = f"{self.subdomain}.{self.domain}" + args.extend(["--account", account]) + + rc, out, err = self._run(args, ignore_errors=True) + + return not bool(rc) + + def full_signin(self): + if self.connect_host or self.connect_token: + raise AnsibleLookupError( + "1Password Connect is not available with 1Password CLI version 1. Please use version 2 or later.") + + if self.service_account_token: + raise AnsibleLookupError( + "1Password CLI version 1 does not support Service Accounts. Please use version 2 or later.") + + required_params = [ + "subdomain", + "username", + "secret_key", + "master_password", + ] + self._check_required_params(required_params) + + args = [ + "signin", + f"{self.subdomain}.{self.domain}", + to_bytes(self.username), + to_bytes(self.secret_key), + "--raw", + ] + + return self._run(args, command_input=to_bytes(self.master_password)) + + def get_raw(self, item_id, vault=None, token=None): + args = ["get", "item", item_id] + + if self.account_id: + args.extend(["--account", self.account_id]) + + if vault is not None: + args += [f"--vault={vault}"] + + if token is not None: + args += [to_bytes("--session=") + token] + + return self._run(args) + + def signin(self): + self._check_required_params(['master_password']) + + args = ["signin", "--raw"] + if self.subdomain: + args.append(self.subdomain) + + return self._run(args, command_input=to_bytes(self.master_password)) + + +class OnePassCLIv2(OnePassCLIBase): + """ + CLIv2 Syntax Reference: https://developer.1password.com/docs/cli/upgrade#step-2-update-your-scripts + """ + supports_version = "2" + + def _parse_field(self, data_json, field_name, section_title=None): + """ + Schema reference: 
https://developer.1password.com/docs/cli/item-template-json + + Example Data: + + # Password item + { + "id": "ywvdbojsguzgrgnokmcxtydgdv", + "title": "Authy Backup", + "version": 1, + "vault": { + "id": "bcqxysvcnejjrwzoqrwzcqjqxc", + "name": "Personal" + }, + "category": "PASSWORD", + "last_edited_by": "7FUPZ8ZNE02KSHMAIMKHIVUE17", + "created_at": "2015-01-18T13:13:38Z", + "updated_at": "2016-02-20T16:23:54Z", + "additional_information": "Jan 18, 2015, 08:13:38", + "fields": [ + { + "id": "password", + "type": "CONCEALED", + "purpose": "PASSWORD", + "label": "password", + "value": "OctoberPoppyNuttyDraperySabbath", + "reference": "op://Personal/Authy Backup/password", + "password_details": { + "strength": "FANTASTIC" + } + }, + { + "id": "notesPlain", + "type": "STRING", + "purpose": "NOTES", + "label": "notesPlain", + "value": "Backup password to restore Authy", + "reference": "op://Personal/Authy Backup/notesPlain" + } + ] + } + + # Login item + { + "id": "awk4s2u44fhnrgppszcsvc663i", + "title": "Dummy Login", + "version": 2, + "vault": { + "id": "stpebbaccrq72xulgouxsk4p7y", + "name": "Personal" + }, + "category": "LOGIN", + "last_edited_by": "LSGPJERUYBH7BFPHMZ2KKGL6AU", + "created_at": "2018-04-25T21:55:19Z", + "updated_at": "2018-04-25T21:56:06Z", + "additional_information": "agent.smith", + "urls": [ + { + "primary": true, + "href": "https://acme.com" + } + ], + "sections": [ + { + "id": "linked items", + "label": "Related Items" + } + ], + "fields": [ + { + "id": "username", + "type": "STRING", + "purpose": "USERNAME", + "label": "username", + "value": "agent.smith", + "reference": "op://Personal/Dummy Login/username" + }, + { + "id": "password", + "type": "CONCEALED", + "purpose": "PASSWORD", + "label": "password", + "value": "Q7vFwTJcqwxKmTU]Dzx7NW*wrNPXmj", + "entropy": 159.6083697084228, + "reference": "op://Personal/Dummy Login/password", + "password_details": { + "entropy": 159, + "generated": true, + "strength": "FANTASTIC" + } + }, + { + "id": 
"notesPlain", + "type": "STRING", + "purpose": "NOTES", + "label": "notesPlain", + "reference": "op://Personal/Dummy Login/notesPlain" + } + ] + } + """ + data = json.loads(data_json) + field_name = _lower_if_possible(field_name) + for field in data.get("fields", []): + if section_title is None: + # If the field name exists in the section, return that value + if field.get(field_name): + return field.get(field_name) + + # If the field name doesn't exist in the section, match on the value of "label" + # then "id" and return "value" + if field.get("label", "").lower() == field_name: + return field.get("value", "") + + if field.get("id", "").lower() == field_name: + return field.get("value", "") + + # Look at the section data and get an identifier. The value of 'id' is either a unique ID + # or a human-readable string. If a 'label' field exists, prefer that since + # it is the value visible in the 1Password UI when both 'id' and 'label' exist. + section = field.get("section", {}) + section_title = _lower_if_possible(section_title) + + current_section_title = section.get("label", section.get("id", "")).lower() + if section_title == current_section_title: + # In the correct section. 
Check "label" then "id" for the desired field_name + if field.get("label", "").lower() == field_name: + return field.get("value", "") + + if field.get("id", "").lower() == field_name: + return field.get("value", "") + + return "" + + def assert_logged_in(self): + if self.connect_host and self.connect_token: + return True + + if self.service_account_token: + args = ["whoami"] + environment_update = {"OP_SERVICE_ACCOUNT_TOKEN": self.service_account_token} + rc, out, err = self._run(args, environment_update=environment_update) + + return not bool(rc) + + args = ["account", "list"] + if self.subdomain: + account = f"{self.subdomain}.{self.domain}" + args.extend(["--account", account]) + + rc, out, err = self._run(args) + + if out: + # Running 'op account get' if there are no accounts configured on the system drops into + # an interactive prompt. Only run 'op account get' after first listing accounts to see + # if there are any previously configured accounts. + args = ["account", "get"] + if self.account_id: + args.extend(["--account", self.account_id]) + elif self.subdomain: + account = f"{self.subdomain}.{self.domain}" + args.extend(["--account", account]) + + rc, out, err = self._run(args, ignore_errors=True) + + return not bool(rc) + + return False + + def full_signin(self): + required_params = [ + "subdomain", + "username", + "secret_key", + "master_password", + ] + self._check_required_params(required_params) + + args = [ + "account", "add", "--raw", + "--address", f"{self.subdomain}.{self.domain}", + "--email", to_bytes(self.username), + "--signin", + ] + + environment_update = {"OP_SECRET_KEY": self.secret_key} + return self._run(args, command_input=to_bytes(self.master_password), environment_update=environment_update) + + def _add_parameters_and_run(self, args, vault=None, token=None): + if self.account_id: + args.extend(["--account", self.account_id]) + + if vault is not None: + args += [f"--vault={vault}"] + + if self.connect_host and self.connect_token: + if 
vault is None: + raise AnsibleLookupError("'vault' is required with 1Password Connect") + environment_update = { + "OP_CONNECT_HOST": self.connect_host, + "OP_CONNECT_TOKEN": self.connect_token, + } + return self._run(args, environment_update=environment_update) + + if self.service_account_token: + if vault is None: + raise AnsibleLookupError("'vault' is required with 'service_account_token'") + environment_update = {"OP_SERVICE_ACCOUNT_TOKEN": self.service_account_token} + return self._run(args, environment_update=environment_update) + + if token is not None: + args += [to_bytes("--session=") + token] + + return self._run(args) + + def get_raw(self, item_id, vault=None, token=None): + args = ["item", "get", item_id, "--format", "json"] + return self._add_parameters_and_run(args, vault=vault, token=token) + + def signin(self): + self._check_required_params(['master_password']) + + args = ["signin", "--raw"] + if self.subdomain: + args.extend(["--account", self.subdomain]) + + return self._run(args, command_input=to_bytes(self.master_password)) + + +class OnePass(object): + def __init__(self, subdomain=None, domain="1password.com", username=None, secret_key=None, master_password=None, + service_account_token=None, account_id=None, connect_host=None, connect_token=None, cli_class=None): + self.subdomain = subdomain + self.domain = domain + self.username = username + self.secret_key = secret_key + self.master_password = master_password + self.service_account_token = service_account_token + self.account_id = account_id + self.connect_host = connect_host + self.connect_token = connect_token + + self.logged_in = False + self.token = None + + self._config = OnePasswordConfig() + self._cli = self._get_cli_class(cli_class) + + if (self.connect_host or self.connect_token) and None in (self.connect_host, self.connect_token): + raise AnsibleOptionsError("connect_host and connect_token are required together") + + def _get_cli_class(self, cli_class=None): + if cli_class is not 
None: + return cli_class(self.subdomain, self.domain, self.username, self.secret_key, self.master_password, self.service_account_token) + + version = OnePassCLIBase.get_current_version() + for cls in OnePassCLIBase.__subclasses__(): + if cls.supports_version == version.split(".")[0]: + try: + return cls(self.subdomain, self.domain, self.username, self.secret_key, self.master_password, self.service_account_token, + self.account_id, self.connect_host, self.connect_token) + except TypeError as e: + raise AnsibleLookupError(e) + + raise AnsibleLookupError(f"op version {version} is unsupported") + + def set_token(self): + if self._config.config_file_path and os.path.isfile(self._config.config_file_path): + # If the config file exists, assume an initial sign in has taken place and try basic sign in + try: + rc, out, err = self._cli.signin() + except AnsibleLookupError as exc: + test_strings = ( + "missing required parameters", + "unauthorized", + ) + if any(string in exc.message.lower() for string in test_strings): + # A required parameter is missing, or a bad master password was supplied + # so don't bother attempting a full signin + raise + + rc, out, err = self._cli.full_signin() + + self.token = out.strip() + + else: + # Attempt a full signin since there appears to be no existing signin + rc, out, err = self._cli.full_signin() + self.token = out.strip() + + def assert_logged_in(self): + logged_in = self._cli.assert_logged_in() + if logged_in: + self.logged_in = logged_in + pass + else: + self.set_token() + + def get_raw(self, item_id, vault=None): + rc, out, err = self._cli.get_raw(item_id, vault, self.token) + return out + + def get_field(self, item_id, field, section=None, vault=None): + output = self.get_raw(item_id, vault) + if output: + return self._cli._parse_field(output, field, section) + + return "" class LookupModule(LookupBase): def run(self, terms, variables=None, **kwargs): - op = OnePass() + self.set_options(var_options=variables, direct=kwargs) - field 
= kwargs.get('field', 'password') - section = kwargs.get('section') - vault = kwargs.get('vault') - op.subdomain = kwargs.get('subdomain') - op.domain = kwargs.get('domain', '1password.com') - op.username = kwargs.get('username') - op.secret_key = kwargs.get('secret_key') - op.master_password = kwargs.get('master_password', kwargs.get('vault_password')) + field = self.get_option("field") + section = self.get_option("section") + vault = self.get_option("vault") + subdomain = self.get_option("subdomain") + domain = self.get_option("domain") + username = self.get_option("username") + secret_key = self.get_option("secret_key") + master_password = self.get_option("master_password") + service_account_token = self.get_option("service_account_token") + account_id = self.get_option("account_id") + connect_host = self.get_option("connect_host") + connect_token = self.get_option("connect_token") + op = OnePass( + subdomain=subdomain, + domain=domain, + username=username, + secret_key=secret_key, + master_password=master_password, + service_account_token=service_account_token, + account_id=account_id, + connect_host=connect_host, + connect_token=connect_token, + ) op.assert_logged_in() values = [] diff --git a/plugins/lookup/onepassword_doc.py b/plugins/lookup/onepassword_doc.py new file mode 100644 index 0000000000..e62db6d1e2 --- /dev/null +++ b/plugins/lookup/onepassword_doc.py @@ -0,0 +1,89 @@ +# Copyright (c) 2023, Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + +DOCUMENTATION = r""" +name: onepassword_doc +author: + - Sam Doran (@samdoran) +requirements: + - C(op) 1Password command line utility version 2 or later. 
+short_description: Fetch documents stored in 1Password +version_added: "8.1.0" +description: + - P(community.general.onepassword_doc#lookup) wraps C(op) command line utility to fetch one or more documents from 1Password. +notes: + - The document contents are a string exactly as stored in 1Password. + - This plugin requires C(op) version 2 or later. +options: + _terms: + description: Identifier(s) (case-insensitive UUID or name) of item(s) to retrieve. + required: true + type: list + elements: string + +extends_documentation_fragment: + - community.general.onepassword + - community.general.onepassword.lookup +""" + +EXAMPLES = r""" +--- +- name: Retrieve a private key from 1Password + ansible.builtin.debug: + var: lookup('community.general.onepassword_doc', 'Private key') +""" + +RETURN = r""" +_raw: + description: Requested document. + type: list + elements: string +""" + +from ansible_collections.community.general.plugins.lookup.onepassword import OnePass, OnePassCLIv2 +from ansible.plugins.lookup import LookupBase + + +class OnePassCLIv2Doc(OnePassCLIv2): + def get_raw(self, item_id, vault=None, token=None): + args = ["document", "get", item_id] + return self._add_parameters_and_run(args, vault=vault, token=token) + + +class LookupModule(LookupBase): + def run(self, terms, variables=None, **kwargs): + self.set_options(var_options=variables, direct=kwargs) + + vault = self.get_option("vault") + subdomain = self.get_option("subdomain") + domain = self.get_option("domain", "1password.com") + username = self.get_option("username") + secret_key = self.get_option("secret_key") + master_password = self.get_option("master_password") + service_account_token = self.get_option("service_account_token") + account_id = self.get_option("account_id") + connect_host = self.get_option("connect_host") + connect_token = self.get_option("connect_token") + + op = OnePass( + subdomain=subdomain, + domain=domain, + username=username, + secret_key=secret_key, + 
master_password=master_password, + service_account_token=service_account_token, + account_id=account_id, + connect_host=connect_host, + connect_token=connect_token, + cli_class=OnePassCLIv2Doc, + ) + op.assert_logged_in() + + values = [] + for term in terms: + values.append(op.get_raw(term, vault)) + + return values diff --git a/plugins/lookup/onepassword_raw.py b/plugins/lookup/onepassword_raw.py index d1958f78cd..b75be3d630 100644 --- a/plugins/lookup/onepassword_raw.py +++ b/plugins/lookup/onepassword_raw.py @@ -1,55 +1,41 @@ -# -*- coding: utf-8 -*- -# Copyright: (c) 2018, Scott Buchanan -# Copyright: (c) 2016, Andrew Zenk (lastpass.py used as starting point) -# Copyright: (c) 2018, Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# Copyright (c) 2018, Scott Buchanan +# Copyright (c) 2016, Andrew Zenk (lastpass.py used as starting point) +# Copyright (c) 2018, Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = ''' - name: onepassword_raw - author: - - Scott Buchanan (@scottsb) - - Andrew Zenk (@azenk) - - Sam Doran (@samdoran) - requirements: - - C(op) 1Password command line utility. See U(https://support.1password.com/command-line/) - short_description: fetch an entire item from 1Password - description: - - C(onepassword_raw) wraps C(op) command line utility to fetch an entire item from 1Password - options: - _terms: - description: identifier(s) (UUID, name, or domain; case-insensitive) of item(s) to retrieve. - required: True - master_password: - description: The password used to unlock the specified vault. - aliases: ['vault_password'] - section: - description: Item section containing the field to retrieve (case-insensitive). 
If absent will return first match from any section. - subdomain: - description: The 1Password subdomain to authenticate against. - username: - description: The username used to sign in. - secret_key: - description: The secret key used when performing an initial sign in. - vault: - description: Vault containing the item to retrieve (case-insensitive). If absent will search all vaults. - notes: - - This lookup will use an existing 1Password session if one exists. If not, and you have already - performed an initial sign in (meaning C(~/.op/config exists)), then only the C(master_password) is required. - You may optionally specify C(subdomain) in this scenario, otherwise the last used subdomain will be used by C(op). - - This lookup can perform an initial login by providing C(subdomain), C(username), C(secret_key), and C(master_password). - - Due to the B(very) sensitive nature of these credentials, it is B(highly) recommended that you only pass in the minimal credentials - needed at any given time. Also, store these credentials in an Ansible Vault using a key that is equal to or greater in strength - to the 1Password master password. - - This lookup stores potentially sensitive data from 1Password as Ansible facts. - Facts are subject to caching if enabled, which means this data could be stored in clear text - on disk or in a database. - - Tested with C(op) version 0.5.3 -''' +DOCUMENTATION = r""" +name: onepassword_raw +author: + - Scott Buchanan (@scottsb) + - Andrew Zenk (@azenk) + - Sam Doran (@samdoran) +requirements: + - C(op) 1Password command line utility +short_description: Fetch an entire item from 1Password +description: + - P(community.general.onepassword_raw#lookup) wraps C(op) command line utility to fetch an entire item from 1Password. +options: + _terms: + description: Identifier(s) (case-insensitive UUID or name) of item(s) to retrieve. 
+ required: true + type: list + elements: string + account_id: + version_added: 7.5.0 + domain: + version_added: 6.0.0 + service_account_token: + version_added: 7.1.0 +extends_documentation_fragment: + - community.general.onepassword + - community.general.onepassword.lookup +""" -EXAMPLES = """ +EXAMPLES = r""" +--- - name: Retrieve all data about Wintermute ansible.builtin.debug: var: lookup('community.general.onepassword_raw', 'Wintermute') @@ -59,11 +45,11 @@ EXAMPLES = """ var: lookup('community.general.onepassword_raw', 'Wintermute', subdomain='Turing', vault_password='DmbslfLvasjdl') """ -RETURN = """ - _raw: - description: field data requested - type: list - elements: dict +RETURN = r""" +_raw: + description: Entire item requested. + type: list + elements: dict """ import json @@ -75,18 +61,35 @@ from ansible.plugins.lookup import LookupBase class LookupModule(LookupBase): def run(self, terms, variables=None, **kwargs): - op = OnePass() + self.set_options(var_options=variables, direct=kwargs) - vault = kwargs.get('vault') - op.subdomain = kwargs.get('subdomain') - op.username = kwargs.get('username') - op.secret_key = kwargs.get('secret_key') - op.master_password = kwargs.get('master_password', kwargs.get('vault_password')) + vault = self.get_option("vault") + subdomain = self.get_option("subdomain") + domain = self.get_option("domain", "1password.com") + username = self.get_option("username") + secret_key = self.get_option("secret_key") + master_password = self.get_option("master_password") + service_account_token = self.get_option("service_account_token") + account_id = self.get_option("account_id") + connect_host = self.get_option("connect_host") + connect_token = self.get_option("connect_token") + op = OnePass( + subdomain=subdomain, + domain=domain, + username=username, + secret_key=secret_key, + master_password=master_password, + service_account_token=service_account_token, + account_id=account_id, + connect_host=connect_host, + 
connect_token=connect_token, + ) op.assert_logged_in() values = [] for term in terms: data = json.loads(op.get_raw(term, vault)) values.append(data) + return values diff --git a/plugins/lookup/onepassword_ssh_key.py b/plugins/lookup/onepassword_ssh_key.py new file mode 100644 index 0000000000..35e3034e04 --- /dev/null +++ b/plugins/lookup/onepassword_ssh_key.py @@ -0,0 +1,118 @@ +# Copyright (c) 2025, Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + +DOCUMENTATION = r""" +name: onepassword_ssh_key +author: + - Mohammed Babelly (@mohammedbabelly20) +requirements: + - C(op) 1Password command line utility version 2 or later. +short_description: Fetch SSH keys stored in 1Password +version_added: "10.3.0" +description: + - P(community.general.onepassword_ssh_key#lookup) wraps C(op) command line utility to fetch SSH keys from 1Password. +notes: + - By default, it returns the private key value in PKCS#8 format, unless O(ssh_format=true) is passed. + - The pluging works only for C(SSHKEY) type items. + - This plugin requires C(op) version 2 or later. +options: + _terms: + description: Identifier(s) (case-insensitive UUID or name) of item(s) to retrieve. + required: true + type: list + elements: string + ssh_format: + description: Output key in SSH format if V(true). Otherwise, outputs in the default format (PKCS#8). + default: false + type: bool + +extends_documentation_fragment: + - community.general.onepassword + - community.general.onepassword.lookup +""" + +EXAMPLES = r""" +--- +- name: Retrieve the private SSH key from 1Password + ansible.builtin.debug: + msg: "{{ lookup('community.general.onepassword_ssh_key', 'SSH Key', ssh_format=true) }}" +""" + +RETURN = r""" +_raw: + description: Private key of SSH keypair. 
+ type: list + elements: string +""" +import json + +from ansible_collections.community.general.plugins.lookup.onepassword import ( + OnePass, + OnePassCLIv2, +) +from ansible.errors import AnsibleLookupError +from ansible.plugins.lookup import LookupBase + + +class LookupModule(LookupBase): + def get_ssh_key(self, out, item_id, ssh_format=False): + data = json.loads(out) + + if data.get("category") != "SSH_KEY": + raise AnsibleLookupError(f"Item {item_id} is not an SSH key") + + private_key_field = next( + ( + field + for field in data.get("fields", {}) + if field.get("id") == "private_key" and field.get("type") == "SSHKEY" + ), + None, + ) + if not private_key_field: + raise AnsibleLookupError(f"No private key found for item {item_id}.") + + if ssh_format: + return ( + private_key_field.get("ssh_formats", {}) + .get("openssh", {}) + .get("value", "") + ) + return private_key_field.get("value", "") + + def run(self, terms, variables=None, **kwargs): + self.set_options(var_options=variables, direct=kwargs) + + ssh_format = self.get_option("ssh_format") + vault = self.get_option("vault") + subdomain = self.get_option("subdomain") + domain = self.get_option("domain", "1password.com") + username = self.get_option("username") + secret_key = self.get_option("secret_key") + master_password = self.get_option("master_password") + service_account_token = self.get_option("service_account_token") + account_id = self.get_option("account_id") + connect_host = self.get_option("connect_host") + connect_token = self.get_option("connect_token") + + op = OnePass( + subdomain=subdomain, + domain=domain, + username=username, + secret_key=secret_key, + master_password=master_password, + service_account_token=service_account_token, + account_id=account_id, + connect_host=connect_host, + connect_token=connect_token, + cli_class=OnePassCLIv2, + ) + op.assert_logged_in() + + return [ + self.get_ssh_key(op.get_raw(term, vault), term, ssh_format=ssh_format) + for term in terms + ] diff --git 
a/plugins/lookup/passwordstore.py b/plugins/lookup/passwordstore.py index a221e49625..31305d81bb 100644 --- a/plugins/lookup/passwordstore.py +++ b/plugins/lookup/passwordstore.py @@ -1,119 +1,172 @@ -# -*- coding: utf-8 -*- -# (c) 2017, Patrick Deelman -# (c) 2017 Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +# Copyright (c) 2017, Patrick Deelman +# Copyright (c) 2017 Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later +from __future__ import annotations -DOCUMENTATION = ''' - name: passwordstore - author: - - Patrick Deelman (!UNKNOWN) - short_description: manage passwords with passwordstore.org's pass utility +DOCUMENTATION = r""" +name: passwordstore +author: + - Patrick Deelman (!UNKNOWN) +short_description: Manage passwords with passwordstore.org's pass utility +description: + - Enables Ansible to retrieve, create or update passwords from the passwordstore.org pass utility. It can also retrieve, + create or update YAML style keys stored as multilines in the passwordfile. + - To avoid problems when accessing multiple secrets at once, add C(auto-expand-secmem) to C(~/.gnupg/gpg-agent.conf). Where + this is not possible, consider using O(lock=readwrite) instead. +options: + _terms: + description: Query key. + required: true + directory: description: - - Enables Ansible to retrieve, create or update passwords from the passwordstore.org pass utility. - It also retrieves YAML style keys stored as multilines in the passwordfile. - - To avoid problems when accessing multiple secrets at once, add C(auto-expand-secmem) to - C(~/.gnupg/gpg-agent.conf). Where this is not possible, consider using I(lock=readwrite) instead. - options: - _terms: - description: query key. 
- required: True - passwordstore: - description: location of the password store. - default: '~/.password-store' - directory: - description: The directory of the password store. - env: - - name: PASSWORD_STORE_DIR - create: - description: Create the password if it does not already exist. Takes precedence over C(missing). - type: bool - default: false - overwrite: - description: Overwrite the password if it does already exist. - type: bool - default: 'no' - umask: - description: - - Sets the umask for the created .gpg files. The first octed must be greater than 3 (user readable). - - Note pass' default value is C('077'). - env: - - name: PASSWORD_STORE_UMASK - version_added: 1.3.0 - returnall: - description: Return all the content of the password, not only the first line. - type: bool - default: 'no' - subkey: - description: Return a specific subkey of the password. When set to C(password), always returns the first line. - default: password - userpass: - description: Specify a password to save, instead of a generated one. - length: - description: The length of the generated password. - type: integer - default: 16 - backup: - description: Used with C(overwrite=yes). Backup the previous password in a subkey. - type: bool - default: 'no' - nosymbols: - description: use alphanumeric characters. - type: bool - default: 'no' - missing: - description: - - List of preference about what to do if the password file is missing. - - If I(create=true), the value for this option is ignored and assumed to be C(create). - - If set to C(error), the lookup will error out if the passname does not exist. - - If set to C(create), the passname will be created with the provided length I(length) if it does not exist. - - If set to C(empty) or C(warn), will return a C(none) in case the passname does not exist. - When using C(lookup) and not C(query), this will be translated to an empty string. 
- version_added: 3.1.0 - type: str - default: error - choices: - - error - - warn - - empty - - create - lock: - description: - - How to synchronize operations. - - The default of C(write) only synchronizes write operations. - - C(readwrite) synchronizes all operations (including read). This makes sure that gpg-agent is never called in parallel. - - C(none) does not do any synchronization. - ini: - - section: passwordstore_lookup - key: lock - type: str - default: write - choices: - - readwrite - - write - - none - version_added: 4.5.0 - locktimeout: - description: - - Lock timeout applied when I(lock) is not C(none). - - Time with a unit suffix, C(s), C(m), C(h) for seconds, minutes, and hours, respectively. For example, C(900s) equals C(15m). - - Correlates with C(pinentry-timeout) in C(~/.gnupg/gpg-agent.conf), see C(man gpg-agent) for details. - ini: - - section: passwordstore_lookup - key: locktimeout - type: str - default: 15m - version_added: 4.5.0 -''' -EXAMPLES = """ + - The directory of the password store. + - If O(backend=pass), the default is V(~/.password-store) is used. + - If O(backend=gopass), then the default is the C(path) field in C(~/.config/gopass/config.yml), falling back to V(~/.local/share/gopass/stores/root) + if C(path) is not defined in the gopass config. + type: path + vars: + - name: passwordstore + env: + - name: PASSWORD_STORE_DIR + create: + description: Create the password or the subkey if it does not already exist. Takes precedence over O(missing). + type: bool + default: false + overwrite: + description: Overwrite the password or the subkey if it does already exist. + type: bool + default: false + umask: + description: + - Sets the umask for the created V(.gpg) files. The first octed must be greater than 3 (user readable). + - Note pass' default value is V('077'). + type: string + env: + - name: PASSWORD_STORE_UMASK + version_added: 1.3.0 + returnall: + description: Return all the content of the password, not only the first line. 
+ type: bool + default: false + subkey: + description: + - By default return a specific subkey of the password. When set to V(password), always returns the first line. + - With O(overwrite=true), it creates the subkey and returns it. + type: str + default: password + userpass: + description: Specify a password to save, instead of a generated one. + type: str + length: + description: The length of the generated password. + type: integer + default: 16 + backup: + description: Used with O(overwrite=true). Backup the previous password or subkey in a subkey. + type: bool + default: false + nosymbols: + description: Use alphanumeric characters. + type: bool + default: false + missing: + description: + - List of preference about what to do if the password file is missing. + - If O(create=true), the value for this option is ignored and assumed to be V(create). + - If set to V(error), the lookup fails out if the passname does not exist. + - If set to V(create), the passname is created with the provided length O(length) if it does not exist. + - If set to V(empty) or V(warn), it returns a V(none) in case the passname does not exist. When using C(lookup) and + not C(query), this is translated to an empty string. + version_added: 3.1.0 + type: str + default: error + choices: + - error + - warn + - empty + - create + lock: + description: + - How to synchronize operations. + - The default of V(write) only synchronizes write operations. + - V(readwrite) synchronizes all operations (including read). This makes sure that gpg-agent is never called in parallel. + - V(none) does not do any synchronization. + ini: + - section: passwordstore_lookup + key: lock + type: str + default: write + choices: + - readwrite + - write + - none + version_added: 4.5.0 + locktimeout: + description: + - Lock timeout applied when O(lock) is not V(none). + - Time with a unit suffix, V(s), V(m), V(h) for seconds, minutes, and hours, respectively. For example, V(900s) equals + V(15m). 
+ - Correlates with C(pinentry-timeout) in C(~/.gnupg/gpg-agent.conf), see C(man gpg-agent) for details. + ini: + - section: passwordstore_lookup + key: locktimeout + type: str + default: 15m + version_added: 4.5.0 + backend: + description: + - Specify which backend to use. + - Defaults to V(pass), passwordstore.org's original pass utility. + - V(gopass) support is incomplete. + ini: + - section: passwordstore_lookup + key: backend + vars: + - name: passwordstore_backend + type: str + default: pass + choices: + - pass + - gopass + version_added: 5.2.0 + timestamp: + description: Add the password generation information to the end of the file. + type: bool + default: true + version_added: 8.1.0 + preserve: + description: Include the old (edited) password inside the pass file. + type: bool + default: true + version_added: 8.1.0 + missing_subkey: + description: + - Preference about what to do if the password subkey is missing. + - If set to V(error), the lookup fails out if the subkey does not exist. + - If set to V(empty) or V(warn), it returns a V(none) in case the subkey does not exist. + version_added: 8.6.0 + type: str + default: empty + choices: + - error + - warn + - empty + ini: + - section: passwordstore_lookup + key: missing_subkey +notes: + - The lookup supports passing all options as lookup parameters since community.general 6.0.0. +""" +EXAMPLES = r""" ansible.cfg: | [passwordstore_lookup] lock=readwrite locktimeout=45s + missing_subkey=warn -playbook.yml: | +tasks.yml: |- --- # Debug is used for examples, BAD IDEA to show passwords on screen @@ -123,51 +176,66 @@ playbook.yml: | - name: Basic lookup. Warns if example/test does not exist and returns empty string ansible.builtin.debug: - msg: "{{ lookup('community.general.passwordstore', 'example/test missing=warn')}}" + msg: "{{ lookup('community.general.passwordstore', 'example/test', missing='warn')}}" - name: Create pass with random 16 character password. 
If password exists just give the password ansible.builtin.debug: var: mypassword vars: - mypassword: "{{ lookup('community.general.passwordstore', 'example/test create=true')}}" + mypassword: "{{ lookup('community.general.passwordstore', 'example/test', create=true)}}" - name: Create pass with random 16 character password. If password exists just give the password ansible.builtin.debug: var: mypassword vars: - mypassword: "{{ lookup('community.general.passwordstore', 'example/test missing=create')}}" + mypassword: "{{ lookup('community.general.passwordstore', 'example/test', missing='create')}}" + + - name: >- + Create a random 16 character password in a subkey. If the password file already exists, just add the subkey in it. + If the subkey exists, returns it + ansible.builtin.debug: + msg: "{{ lookup('community.general.passwordstore', 'example/test', create=true, subkey='foo') }}" + + - name: >- + Create a random 16 character password in a subkey. Overwrite if it already exists and backup the old one. + ansible.builtin.debug: + msg: "{{ lookup('community.general.passwordstore', 'example/test', create=true, subkey='user', overwrite=true, backup=true) }}" - name: Prints 'abc' if example/test does not exist, just give the password otherwise ansible.builtin.debug: var: mypassword vars: - mypassword: "{{ lookup('community.general.passwordstore', 'example/test missing=empty') | default('abc', true) }}" + mypassword: >- + {{ lookup('community.general.passwordstore', 'example/test', missing='empty') + | default('abc', true) }} - name: Different size password ansible.builtin.debug: - msg: "{{ lookup('community.general.passwordstore', 'example/test create=true length=42')}}" + msg: "{{ lookup('community.general.passwordstore', 'example/test', create=true, length=42)}}" - - name: Create password and overwrite the password if it exists. 
As a bonus, this module includes the old password inside the pass file + - name: >- + Create password and overwrite the password if it exists. + As a bonus, this module includes the old password inside the pass file ansible.builtin.debug: - msg: "{{ lookup('community.general.passwordstore', 'example/test create=true overwrite=true')}}" + msg: "{{ lookup('community.general.passwordstore', 'example/test', create=true, overwrite=true)}}" - name: Create an alphanumeric password ansible.builtin.debug: - msg: "{{ lookup('community.general.passwordstore', 'example/test create=true nosymbols=true') }}" + msg: "{{ lookup('community.general.passwordstore', 'example/test', create=true, nosymbols=true) }}" - name: Return the value for user in the KV pair user, username ansible.builtin.debug: - msg: "{{ lookup('community.general.passwordstore', 'example/test subkey=user')}}" + msg: "{{ lookup('community.general.passwordstore', 'example/test', subkey='user')}}" - name: Return the entire password file content ansible.builtin.set_fact: - passfilecontent: "{{ lookup('community.general.passwordstore', 'example/test returnall=true')}}" + passfilecontent: "{{ lookup('community.general.passwordstore', 'example/test', returnall=true)}}" """ -RETURN = """ +RETURN = r""" _raw: description: - - a password + - A password. 
type: list elements: str """ @@ -180,7 +248,6 @@ import time import yaml from ansible.errors import AnsibleError, AnsibleAssertionError -from ansible.module_utils.common.file import FileLock from ansible.module_utils.common.text.converters import to_bytes, to_native, to_text from ansible.module_utils.parsing.convert_bool import boolean from ansible.utils.display import Display @@ -188,6 +255,8 @@ from ansible.utils.encrypt import random_password from ansible.plugins.lookup import LookupBase from ansible import constants as C +from ansible_collections.community.general.plugins.module_utils._filelock import FileLock + display = Display() @@ -231,6 +300,24 @@ def check_output2(*popenargs, **kwargs): class LookupModule(LookupBase): + def __init__(self, loader=None, templar=None, **kwargs): + + super(LookupModule, self).__init__(loader, templar, **kwargs) + self.realpass = None + + def is_real_pass(self): + if self.realpass is None: + try: + passoutput = to_text( + check_output2([self.pass_cmd, "--version"], env=self.env), + errors='surrogate_or_strict' + ) + self.realpass = 'pass: the standard unix password manager' in passoutput + except (subprocess.CalledProcessError) as e: + raise AnsibleError(f'exit code {e.returncode} while running {e.cmd}. Error output: {e.output}') + + return self.realpass + def parse_params(self, term): # I went with the "traditional" param followed with space separated KV pairs. # Waiting for final implementation of lookup parameter parsing. 
@@ -244,7 +331,7 @@ class LookupModule(LookupBase): for param in params[1:]: name, value = param.split('=', 1) if name not in self.paramvals: - raise AnsibleAssertionError('%s not in paramvals' % name) + raise AnsibleAssertionError(f'{name} not in paramvals') self.paramvals[name] = value except (ValueError, AssertionError) as e: raise AnsibleError(e) @@ -256,12 +343,12 @@ class LookupModule(LookupBase): except (ValueError, AssertionError) as e: raise AnsibleError(e) if self.paramvals['missing'] not in ['error', 'warn', 'create', 'empty']: - raise AnsibleError("{0} is not a valid option for missing".format(self.paramvals['missing'])) + raise AnsibleError(f"{self.paramvals['missing']} is not a valid option for missing") if not isinstance(self.paramvals['length'], int): if self.paramvals['length'].isdigit(): self.paramvals['length'] = int(self.paramvals['length']) else: - raise AnsibleError("{0} is not a correct value for length".format(self.paramvals['length'])) + raise AnsibleError(f"{self.paramvals['length']} is not a correct value for length") if self.paramvals['create']: self.paramvals['missing'] = 'create' @@ -270,14 +357,16 @@ class LookupModule(LookupBase): self.env = os.environ.copy() self.env['LANGUAGE'] = 'C' # make sure to get errors in English as required by check_output2 - # Set PASSWORD_STORE_DIR - if os.path.isdir(self.paramvals['directory']): + if self.backend == 'gopass': + self.env['GOPASS_NO_REMINDER'] = "YES" + elif os.path.isdir(self.paramvals['directory']): + # Set PASSWORD_STORE_DIR self.env['PASSWORD_STORE_DIR'] = self.paramvals['directory'] - else: - raise AnsibleError('Passwordstore directory \'{0}\' does not exist'.format(self.paramvals['directory'])) + elif self.is_real_pass(): + raise AnsibleError(f"Passwordstore directory '{self.paramvals['directory']}' does not exist") # Set PASSWORD_STORE_UMASK if umask is set - if 'umask' in self.paramvals: + if self.paramvals.get('umask') is not None: if len(self.paramvals['umask']) != 3: raise 
AnsibleError('Passwordstore umask must have a length of 3.') elif int(self.paramvals['umask'][0]) > 3: @@ -288,7 +377,8 @@ class LookupModule(LookupBase): def check_pass(self): try: self.passoutput = to_text( - check_output2(["pass", "show", self.passname], env=self.env), + check_output2([self.pass_cmd, 'show'] + + [self.passname], env=self.env), errors='surrogate_or_strict' ).splitlines() self.password = self.passoutput[0] @@ -302,18 +392,20 @@ class LookupModule(LookupBase): if ':' in line: name, value = line.split(':', 1) self.passdict[name.strip()] = value.strip() - if os.path.isfile(os.path.join(self.paramvals['directory'], self.passname + ".gpg")): - # Only accept password as found, if there a .gpg file for it (might be a tree node otherwise) + if (self.backend == 'gopass' or + os.path.isfile(os.path.join(self.paramvals['directory'], f"{self.passname}.gpg")) + or not self.is_real_pass()): + # When using real pass, only accept password as found if there is a .gpg file for it (might be a tree node otherwise) return True except (subprocess.CalledProcessError) as e: # 'not in password store' is the expected error if a password wasn't found if 'not in the password store' not in e.output: - raise AnsibleError(e) + raise AnsibleError(f'exit code {e.returncode} while running {e.cmd}. 
Error output: {e.output}') if self.paramvals['missing'] == 'error': - raise AnsibleError('passwordstore: passname {0} not found and missing=error is set'.format(self.passname)) + raise AnsibleError(f'passwordstore: passname {self.passname} not found and missing=error is set') elif self.paramvals['missing'] == 'warn': - display.warning('passwordstore: passname {0} not found'.format(self.passname)) + display.warning(f'passwordstore: passname {self.passname} not found') return False @@ -331,17 +423,51 @@ class LookupModule(LookupBase): def update_password(self): # generate new password, insert old lines from current result and return new password + # if the target is a subkey, only modify the subkey newpass = self.get_newpass() datetime = time.strftime("%d/%m/%Y %H:%M:%S") - msg = newpass + '\n' - if self.passoutput[1:]: - msg += '\n'.join(self.passoutput[1:]) + '\n' - if self.paramvals['backup']: - msg += "lookup_pass: old password was {0} (Updated on {1})\n".format(self.password, datetime) + subkey = self.paramvals["subkey"] + + if subkey != "password": + + msg_lines = [] + subkey_exists = False + subkey_line = f"{subkey}: {newpass}" + oldpass = None + + for line in self.passoutput: + if line.startswith(f"{subkey}: "): + oldpass = self.passdict[subkey] + line = subkey_line + subkey_exists = True + + msg_lines.append(line) + + if not subkey_exists: + msg_lines.insert(2, subkey_line) + + if self.paramvals["timestamp"] and self.paramvals["backup"] and oldpass and oldpass != newpass: + msg_lines.append( + f"lookup_pass: old subkey '{subkey}' password was {oldpass} (Updated on {datetime})\n" + ) + + msg = os.linesep.join(msg_lines) + + else: + msg = newpass + + if self.paramvals['preserve'] or self.paramvals['timestamp']: + msg += '\n' + if self.paramvals['preserve'] and self.passoutput[1:]: + msg += '\n'.join(self.passoutput[1:]) + msg += '\n' + if self.paramvals['timestamp'] and self.paramvals['backup']: + msg += f"lookup_pass: old password was {self.password} (Updated 
on {datetime})\n" + try: - check_output2(['pass', 'insert', '-f', '-m', self.passname], input=msg, env=self.env) + check_output2([self.pass_cmd, 'insert', '-f', '-m', self.passname], input=msg, env=self.env) except (subprocess.CalledProcessError) as e: - raise AnsibleError(e) + raise AnsibleError(f'exit code {e.returncode} while running {e.cmd}. Error output: {e.output}') return newpass def generate_password(self): @@ -349,11 +475,21 @@ class LookupModule(LookupBase): # use pwgen to generate the password and insert values with pass -m newpass = self.get_newpass() datetime = time.strftime("%d/%m/%Y %H:%M:%S") - msg = newpass + '\n' + "lookup_pass: First generated by ansible on {0}\n".format(datetime) + subkey = self.paramvals["subkey"] + + if subkey != "password": + msg = f"\n\n{subkey}: {newpass}" + else: + msg = newpass + + if self.paramvals['timestamp']: + msg += f"\nlookup_pass: First generated by ansible on {datetime}\n" + try: - check_output2(['pass', 'insert', '-f', '-m', self.passname], input=msg, env=self.env) + check_output2([self.pass_cmd, 'insert', '-f', '-m', self.passname], input=msg, env=self.env) except (subprocess.CalledProcessError) as e: - raise AnsibleError(e) + raise AnsibleError(f'exit code {e.returncode} while running {e.cmd}. 
Error output: {e.output}') + return newpass def get_passresult(self): @@ -365,13 +501,24 @@ class LookupModule(LookupBase): if self.paramvals['subkey'] in self.passdict: return self.passdict[self.paramvals['subkey']] else: + if self.paramvals["missing_subkey"] == "error": + raise AnsibleError( + f"passwordstore: subkey {self.paramvals['subkey']} for passname {self.passname} not found and missing_subkey=error is set" + ) + + if self.paramvals["missing_subkey"] == "warn": + display.warning( + f"passwordstore: subkey {self.paramvals['subkey']} for passname {self.passname} not found" + ) + return None @contextmanager def opt_lock(self, type): if self.get_option('lock') == type: tmpdir = os.environ.get('TMPDIR', '/tmp') - lockfile = os.path.join(tmpdir, '.passwordstore.lock') + user = os.environ.get('USER') + lockfile = os.path.join(tmpdir, f'.{user}.passwordstore.lock') with FileLock().lock_file(lockfile, tmpdir, self.lock_timeout): self.locked = type yield @@ -380,41 +527,65 @@ class LookupModule(LookupBase): yield def setup(self, variables): + self.backend = self.get_option('backend') + self.pass_cmd = self.backend # pass and gopass are commands as well self.locked = None timeout = self.get_option('locktimeout') if not re.match('^[0-9]+[smh]$', timeout): - raise AnsibleError("{0} is not a correct value for locktimeout".format(timeout)) + raise AnsibleError(f"{timeout} is not a correct value for locktimeout") unit_to_seconds = {"s": 1, "m": 60, "h": 3600} self.lock_timeout = int(timeout[:-1]) * unit_to_seconds[timeout[-1]] + + directory = self.get_option('directory') + if directory is None: + if self.backend == 'gopass': + try: + with open(os.path.expanduser('~/.config/gopass/config.yml')) as f: + directory = yaml.safe_load(f)['path'] + except (FileNotFoundError, KeyError, yaml.YAMLError): + directory = os.path.expanduser('~/.local/share/gopass/stores/root') + else: + directory = os.path.expanduser('~/.password-store') + self.paramvals = { - 'subkey': 'password', - 
'directory': variables.get('passwordstore', os.environ.get( - 'PASSWORD_STORE_DIR', - os.path.expanduser('~/.password-store'))), - 'create': False, - 'returnall': False, - 'overwrite': False, - 'nosymbols': False, - 'userpass': '', - 'length': 16, - 'backup': False, - 'missing': 'error', + 'subkey': self.get_option('subkey'), + 'directory': directory, + 'create': self.get_option('create'), + 'returnall': self.get_option('returnall'), + 'overwrite': self.get_option('overwrite'), + 'nosymbols': self.get_option('nosymbols'), + 'userpass': self.get_option('userpass') or '', + 'length': self.get_option('length'), + 'backup': self.get_option('backup'), + 'missing': self.get_option('missing'), + 'umask': self.get_option('umask'), + 'timestamp': self.get_option('timestamp'), + 'preserve': self.get_option('preserve'), + "missing_subkey": self.get_option("missing_subkey"), } def run(self, terms, variables, **kwargs): + self.set_options(var_options=variables, direct=kwargs) self.setup(variables) result = [] for term in terms: self.parse_params(term) # parse the input into paramvals with self.opt_lock('readwrite'): - if self.check_pass(): # password exists - if self.paramvals['overwrite'] and self.paramvals['subkey'] == 'password': + if self.check_pass(): # password file exists + if self.paramvals['overwrite']: # if "overwrite", always update password + with self.opt_lock('write'): + result.append(self.update_password()) + elif ( + self.paramvals["subkey"] != "password" + and not self.passdict.get(self.paramvals["subkey"]) + and self.paramvals["missing"] == "create" + ): # target is a subkey, this subkey is not in passdict BUT missing == create with self.opt_lock('write'): result.append(self.update_password()) else: result.append(self.get_passresult()) - else: # password does not exist + else: # password does not exist if self.paramvals['missing'] == 'create': with self.opt_lock('write'): if self.locked == 'write' and self.check_pass(): # lookup password again if under write 
lock diff --git a/plugins/lookup/random_pet.py b/plugins/lookup/random_pet.py index 6caf178e4b..0ab3ee29d3 100644 --- a/plugins/lookup/random_pet.py +++ b/plugins/lookup/random_pet.py @@ -1,44 +1,43 @@ -# -*- coding: utf-8 -*- -# Copyright: (c) 2021, Abhijeet Kasurde -# Copyright: (c) 2018, Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# Copyright (c) 2021, Abhijeet Kasurde +# Copyright (c) 2018, Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import (absolute_import, division, print_function) +from __future__ import annotations -__metaclass__ = type -DOCUMENTATION = r''' - name: random_pet - author: - - Abhijeet Kasurde (@Akasurde) - short_description: Generates random pet names - version_added: '3.1.0' - requirements: - - petname U(https://github.com/dustinkirkland/python-petname) +DOCUMENTATION = r""" +name: random_pet +author: + - Abhijeet Kasurde (@Akasurde) +short_description: Generates random pet names +version_added: '3.1.0' +requirements: + - petname U(https://github.com/dustinkirkland/python-petname) +description: + - Generates random pet names that can be used as unique identifiers for the resources. +options: + words: description: - - Generates random pet names that can be used as unique identifiers for the resources. - options: - words: - description: - - The number of words in the pet name. - default: 2 - type: int - length: - description: - - The maximal length of every component of the pet name. - - Values below 3 will be set to 3 by petname. - default: 6 - type: int - prefix: - description: A string to prefix with the name. - type: str - separator: - description: The character to separate words in the pet name. - default: "-" - type: str -''' + - The number of words in the pet name. 
+ default: 2 + type: int + length: + description: + - The maximal length of every component of the pet name. + - Values below V(3) are set to V(3) by petname. + default: 6 + type: int + prefix: + description: A string to prefix with the name. + type: str + separator: + description: The character to separate words in the pet name. + default: "-" + type: str +""" -EXAMPLES = r''' +EXAMPLES = r""" - name: Generate pet name ansible.builtin.debug: var: lookup('community.general.random_pet') @@ -58,14 +57,14 @@ EXAMPLES = r''' ansible.builtin.debug: var: lookup('community.general.random_pet', length=7) # Example result: 'natural-peacock' -''' +""" -RETURN = r''' - _raw: - description: A one-element list containing a random pet name - type: list - elements: str -''' +RETURN = r""" +_raw: + description: A one-element list containing a random pet name. + type: list + elements: str +""" try: import petname @@ -94,6 +93,6 @@ class LookupModule(LookupBase): values = petname.Generate(words=words, separator=separator, letters=length) if prefix: - values = "%s%s%s" % (prefix, separator, values) + values = f"{prefix}{separator}{values}" return [values] diff --git a/plugins/lookup/random_string.py b/plugins/lookup/random_string.py index d67a75ed99..027a587ad8 100644 --- a/plugins/lookup/random_string.py +++ b/plugins/lookup/random_string.py @@ -1,124 +1,157 @@ -# -*- coding: utf-8 -*- -# Copyright: (c) 2021, Abhijeet Kasurde -# Copyright: (c) 2018, Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# Copyright (c) 2021, Abhijeet Kasurde +# Copyright (c) 2018, Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import absolute_import, division, print_function +from __future__ import annotations -__metaclass__ = type DOCUMENTATION = r""" - name: random_string - author: - - Abhijeet 
Kasurde (@Akasurde) - short_description: Generates random string - version_added: '3.2.0' +name: random_string +author: + - Abhijeet Kasurde (@Akasurde) +short_description: Generates random string +version_added: '3.2.0' +description: + - Generates random string based upon the given constraints. + - Uses L(secrets.SystemRandom,https://docs.python.org/3/library/secrets.html#secrets.SystemRandom), so should be strong enough + for cryptographic purposes. +options: + length: + description: The length of the string. + default: 8 + type: int + upper: description: - - Generates random string based upon the given constraints. - options: - length: - description: The length of the string. - default: 8 - type: int - upper: - description: - - Include uppercase letters in the string. - default: true - type: bool - lower: - description: - - Include lowercase letters in the string. - default: true - type: bool - numbers: - description: - - Include numbers in the string. - default: true - type: bool - special: - description: - - Include special characters in the string. - - Special characters are taken from Python standard library C(string). - See L(the documentation of string.punctuation,https://docs.python.org/3/library/string.html#string.punctuation) - for which characters will be used. - - The choice of special characters can be changed to setting I(override_special). - default: true - type: bool - min_numeric: - description: - - Minimum number of numeric characters in the string. - - If set, overrides I(numbers=false). - default: 0 - type: int - min_upper: - description: - - Minimum number of uppercase alphabets in the string. - - If set, overrides I(upper=false). - default: 0 - type: int - min_lower: - description: - - Minimum number of lowercase alphabets in the string. - - If set, overrides I(lower=false). - default: 0 - type: int - min_special: - description: - - Minimum number of special character in the string. 
- default: 0 - type: int - override_special: - description: - - Overide a list of special characters to use in the string. - - If set I(min_special) should be set to a non-default value. - type: str - override_all: - description: - - Override all values of I(numbers), I(upper), I(lower), and I(special) with - the given list of characters. - type: str - base64: - description: - - Returns base64 encoded string. - type: bool - default: false + - Possibly include uppercase letters in the string. + - To ensure atleast one uppercase letter, set O(min_upper) to V(1). + default: true + type: bool + lower: + description: + - Possibly include lowercase letters in the string. + - To ensure atleast one lowercase letter, set O(min_lower) to V(1). + default: true + type: bool + numbers: + description: + - Possibly include numbers in the string. + - To ensure atleast one numeric character, set O(min_numeric) to V(1). + default: true + type: bool + special: + description: + - Possibly include special characters in the string. + - Special characters are taken from Python standard library C(string). See L(the documentation of + string.punctuation,https://docs.python.org/3/library/string.html#string.punctuation) + for which characters are used. + - The choice of special characters can be changed to setting O(override_special). + - To ensure atleast one special character, set O(min_special) to V(1). + default: true + type: bool + min_numeric: + description: + - Minimum number of numeric characters in the string. + - If set, overrides O(numbers=false). + default: 0 + type: int + min_upper: + description: + - Minimum number of uppercase alphabets in the string. + - If set, overrides O(upper=false). + default: 0 + type: int + min_lower: + description: + - Minimum number of lowercase alphabets in the string. + - If set, overrides O(lower=false). + default: 0 + type: int + min_special: + description: + - Minimum number of special character in the string. 
+ default: 0 + type: int + override_special: + description: + - Override a list of special characters to use in the string. + - If set O(min_special) should be set to a non-default value. + type: str + override_all: + description: + - Override all values of O(numbers), O(upper), O(lower), and O(special) with the given list of characters. + type: str + ignore_similar_chars: + description: + - Ignore similar characters, such as V(l) and V(1), or V(O) and V(0). + - These characters can be configured in O(similar_chars). + default: false + type: bool + version_added: 7.5.0 + similar_chars: + description: + - Override a list of characters not to be use in the string. + default: "il1LoO0" + type: str + version_added: 7.5.0 + base64: + description: + - Returns base64 encoded string. + type: bool + default: false + seed: + description: + - Seed for random string generator. + - B(Note) that this drastically reduces the security of this plugin. First, when O(seed) is provided, a non-cryptographic random number generator is used. + Second, if the seed does not contain enough entropy, the generated string is weak. + B(Do not use the generated string as a password or a secure token when using this option!) + type: str + version_added: 11.3.0 """ EXAMPLES = r""" - name: Generate random string ansible.builtin.debug: var: lookup('community.general.random_string') - # Example result: ['DeadBeeF'] + # Example result: 'DeadBeeF' + +- name: Generate random string with seed + ansible.builtin.debug: + var: lookup('community.general.random_string', seed=12345) + # Example result: '6[~(2q5O' + # NOTE: Do **not** use this string as a password or a secure token, + # unless you know exactly what you are doing! + # Specifying seed uses a non-secure random number generator. 
- name: Generate random string with length 12 ansible.builtin.debug: var: lookup('community.general.random_string', length=12) - # Example result: ['Uan0hUiX5kVG'] + # Example result: 'Uan0hUiX5kVG' - name: Generate base64 encoded random string ansible.builtin.debug: var: lookup('community.general.random_string', base64=True) - # Example result: ['NHZ6eWN5Qk0='] + # Example result: 'NHZ6eWN5Qk0=' -- name: Generate a random string with 1 lower, 1 upper, 1 number and 1 special char (atleast) +- name: Generate a random string with 1 lower, 1 upper, 1 number and 1 special char (at least) ansible.builtin.debug: var: lookup('community.general.random_string', min_lower=1, min_upper=1, min_special=1, min_numeric=1) - # Example result: ['&Qw2|E[-'] + # Example result: '&Qw2|E[-' - name: Generate a random string with all lower case characters - debug: + ansible.builtin.debug: var: query('community.general.random_string', upper=false, numbers=false, special=false) # Example result: ['exolxzyz'] - name: Generate random hexadecimal string - debug: + ansible.builtin.debug: var: query('community.general.random_string', upper=false, lower=false, override_special=hex_chars, numbers=false) vars: hex_chars: '0123456789ABCDEF' # Example result: ['D2A40737'] - name: Generate random hexadecimal string with override_all - debug: + ansible.builtin.debug: var: query('community.general.random_string', override_all=hex_chars) vars: hex_chars: '0123456789ABCDEF' @@ -126,14 +159,15 @@ EXAMPLES = r""" """ RETURN = r""" - _raw: - description: A one-element list containing a random string - type: list - elements: str +_raw: + description: A one-element list containing a random string. 
+ type: list + elements: str """ import base64 import random +import secrets import string from ansible.errors import AnsibleLookupError @@ -163,16 +197,30 @@ class LookupModule(LookupBase): lower_chars = string.ascii_lowercase upper_chars = string.ascii_uppercase special_chars = string.punctuation - random_generator = random.SystemRandom() self.set_options(var_options=variables, direct=kwargs) length = self.get_option("length") base64_flag = self.get_option("base64") override_all = self.get_option("override_all") + ignore_similar_chars = self.get_option("ignore_similar_chars") + similar_chars = self.get_option("similar_chars") + seed = self.get_option("seed") + + if seed is None: + random_generator = secrets.SystemRandom() + else: + random_generator = random.Random(seed) + values = "" available_chars_set = "" + if ignore_similar_chars: + number_chars = "".join([sc for sc in number_chars if sc not in similar_chars]) + lower_chars = "".join([sc for sc in lower_chars if sc not in similar_chars]) + upper_chars = "".join([sc for sc in upper_chars if sc not in similar_chars]) + special_chars = "".join([sc for sc in special_chars if sc not in similar_chars]) + if override_all: # Override all the values available_chars_set = override_all @@ -209,10 +257,11 @@ class LookupModule(LookupBase): remaining_pass_len = length - len(values) values += self.get_random(random_generator, available_chars_set, remaining_pass_len) - # Get pseudo randomization shuffled_values = list(values) - # Randomize the order - random.shuffle(shuffled_values) + if seed is None: + # Get pseudo randomization + # Randomize the order + random.shuffle(shuffled_values) if base64_flag: return [self.b64encode("".join(shuffled_values))] diff --git a/plugins/lookup/random_words.py b/plugins/lookup/random_words.py index a2381aa38f..dd06e701f8 100644 --- a/plugins/lookup/random_words.py +++ b/plugins/lookup/random_words.py @@ -1,51 +1,50 @@ -# -*- coding: utf-8 -*- -# GNU General Public License v3.0+ (see 
COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# Copyright (c) Ansible project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later """The community.general.random_words Ansible lookup plugin.""" -from __future__ import absolute_import, division, print_function +from __future__ import annotations -__metaclass__ = type DOCUMENTATION = r""" - name: random_words - author: - - Thomas Sjögren (@konstruktoid) - short_description: Return a number of random words - version_added: "4.0.0" - requirements: - - xkcdpass U(https://github.com/redacted/XKCD-password-generator) +name: random_words +author: + - Thomas Sjögren (@konstruktoid) +short_description: Return a number of random words +version_added: "4.0.0" +requirements: + - xkcdpass U(https://github.com/redacted/XKCD-password-generator) +description: + - Returns a number of random words. The output can for example be used for passwords. + - See U(https://xkcd.com/936/) for background. +options: + numwords: description: - - Returns a number of random words. The output can for example be used for - passwords. - - See U(https://xkcd.com/936/) for background. - options: - numwords: - description: - - The number of words. - default: 6 - type: int - min_length: - description: - - Minimum length of words to make password. - default: 5 - type: int - max_length: - description: - - Maximum length of words to make password. - default: 9 - type: int - delimiter: - description: - - The delimiter character between words. - default: " " - type: str - case: - description: - - The method for setting the case of each word in the passphrase. - choices: ["alternating", "upper", "lower", "random", "capitalize"] - default: "lower" - type: str + - The number of words. + default: 6 + type: int + min_length: + description: + - Minimum length of words to make password. 
+ default: 5 + type: int + max_length: + description: + - Maximum length of words to make password. + default: 9 + type: int + delimiter: + description: + - The delimiter character between words. + default: " " + type: str + case: + description: + - The method for setting the case of each word in the passphrase. + choices: ["alternating", "upper", "lower", "random", "capitalize"] + default: "lower" + type: str """ EXAMPLES = r""" @@ -72,10 +71,10 @@ EXAMPLES = r""" """ RETURN = r""" - _raw: - description: A single-element list containing random words. - type: list - elements: str +_raw: + description: A single-element list containing random words. + type: list + elements: str """ from ansible.errors import AnsibleLookupError diff --git a/plugins/lookup/redis.py b/plugins/lookup/redis.py index 8de7e04cce..0073796a22 100644 --- a/plugins/lookup/redis.py +++ b/plugins/lookup/redis.py @@ -1,51 +1,53 @@ -# -*- coding: utf-8 -*- -# (c) 2012, Jan-Piet Mens -# (c) 2017 Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +# Copyright (c) 2012, Jan-Piet Mens +# Copyright (c) 2017 Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later +from __future__ import annotations -DOCUMENTATION = ''' - name: redis - author: - - Jan-Piet Mens (@jpmens) - - Ansible Core Team - short_description: fetch data from Redis - description: - - This lookup returns a list of results from a Redis DB corresponding to a list of items given to it - requirements: - - redis (python library https://github.com/andymccurdy/redis-py/) - options: - _terms: - description: list of keys to query - host: - description: location of Redis host - default: '127.0.0.1' - env: - - name: ANSIBLE_REDIS_HOST - ini: - - section: lookup_redis - key: host - 
port: - description: port on which Redis is listening on - default: 6379 - type: int - env: - - name: ANSIBLE_REDIS_PORT - ini: - - section: lookup_redis - key: port - socket: - description: path to socket on which to query Redis, this option overrides host and port options when set. - type: path - env: - - name: ANSIBLE_REDIS_SOCKET - ini: - - section: lookup_redis - key: socket -''' +DOCUMENTATION = r""" +name: redis +author: + - Jan-Piet Mens (@jpmens) + - Ansible Core Team +short_description: Fetch data from Redis +description: + - This lookup returns a list of results from a Redis DB corresponding to a list of items given to it. +requirements: + - redis (python library https://github.com/andymccurdy/redis-py/) +options: + _terms: + description: List of keys to query. + type: list + elements: string + host: + description: Location of Redis host. + type: string + default: '127.0.0.1' + env: + - name: ANSIBLE_REDIS_HOST + ini: + - section: lookup_redis + key: host + port: + description: Port on which Redis is listening on. + default: 6379 + type: int + env: + - name: ANSIBLE_REDIS_PORT + ini: + - section: lookup_redis + key: port + socket: + description: Path to socket on which to query Redis, this option overrides host and port options when set. + type: path + env: + - name: ANSIBLE_REDIS_SOCKET + ini: + - section: lookup_redis + key: socket +""" -EXAMPLES = """ +EXAMPLES = r""" - name: query redis for somekey (default or configured settings used) ansible.builtin.debug: msg: "{{ lookup('community.general.redis', 'somekey') }}" @@ -62,18 +64,15 @@ EXAMPLES = """ - name: use list directly with a socket ansible.builtin.debug: msg: "{{ lookup('community.general.redis', 'key1', 'key2', socket='/var/tmp/redis.sock') }}" - """ -RETURN = """ +RETURN = r""" _raw: - description: value(s) stored in Redis + description: Value(s) stored in Redis. 
type: list elements: str """ -import os - HAVE_REDIS = False try: import redis @@ -114,5 +113,5 @@ class LookupModule(LookupBase): ret.append(to_text(res)) except Exception as e: # connection failed or key not found - raise AnsibleError('Encountered exception while fetching {0}: {1}'.format(term, e)) + raise AnsibleError(f'Encountered exception while fetching {term}: {e}') return ret diff --git a/plugins/lookup/revbitspss.py b/plugins/lookup/revbitspss.py index b5be15f7a6..86e3fbe38c 100644 --- a/plugins/lookup/revbitspss.py +++ b/plugins/lookup/revbitspss.py @@ -1,10 +1,8 @@ -# -*- coding: utf-8 -*- -# Copyright: (c) 2021, RevBits -# GNU General Public License v3.0+ (see COPYING or -# https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import absolute_import, division, print_function +# Copyright (c) 2021, RevBits +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later +from __future__ import annotations -__metaclass__ = type DOCUMENTATION = r""" name: revbitspss @@ -12,60 +10,60 @@ author: RevBits (@RevBits) short_description: Get secrets from RevBits PAM server version_added: 4.1.0 description: - - Uses the revbits_ansible Python SDK to get Secrets from RevBits PAM - Server using API key authentication with the REST API. + - Uses the revbits_ansible Python SDK to get Secrets from RevBits PAM Server using API key authentication with the REST + API. requirements: - - revbits_ansible - U(https://pypi.org/project/revbits_ansible/) + - revbits_ansible - U(https://pypi.org/project/revbits_ansible/) options: - _terms: - description: - - This will be an array of keys for secrets which you want to fetch from RevBits PAM. - required: true - type: list - elements: string - base_url: - description: - - This will be the base URL of the server, for example C(https://server-url-here). 
- required: true - type: string - api_key: - description: - - This will be the API key for authentication. You can get it from the RevBits PAM secret manager module. - required: true - type: string + _terms: + description: + - This is an array of keys for secrets which you want to fetch from RevBits PAM. + required: true + type: list + elements: string + base_url: + description: + - This is the base URL of the server, for example V(https://server-url-here). + required: true + type: string + api_key: + description: + - This is the API key for authentication. You can get it from the RevBits PAM secret manager module. + required: true + type: string """ RETURN = r""" _list: - description: - - The JSON responses which you can access with defined keys. - - If you are fetching secrets named as UUID, PASSWORD it will gives you the dict of all secrets. - type: list - elements: dict + description: + - The JSON responses which you can access with defined keys. + - If you are fetching secrets named as UUID, PASSWORD it returns the dict of all secrets. 
+ type: list + elements: dict """ EXAMPLES = r""" +--- - hosts: localhost vars: - secret: >- - {{ - lookup( - 'community.general.revbitspss', - 'UUIDPAM', 'DB_PASS', - base_url='https://server-url-here', - api_key='API_KEY_GOES_HERE' - ) - }} + secret: >- + {{ + lookup( + 'community.general.revbitspss', + 'UUIDPAM', 'DB_PASS', + base_url='https://server-url-here', + api_key='API_KEY_GOES_HERE' + ) + }} tasks: - - ansible.builtin.debug: - msg: > - UUIDPAM is {{ (secret['UUIDPAM']) }} and DB_PASS is {{ (secret['DB_PASS']) }} + - ansible.builtin.debug: + msg: >- + UUIDPAM is {{ (secret['UUIDPAM']) }} and DB_PASS is {{ (secret['DB_PASS']) }} """ from ansible.plugins.lookup import LookupBase from ansible.utils.display import Display from ansible.errors import AnsibleError -from ansible.module_utils.six import raise_from try: from pam.revbits_ansible.server import SecretServer @@ -86,10 +84,7 @@ class LookupModule(LookupBase): def run(self, terms, variables, **kwargs): if ANOTHER_LIBRARY_IMPORT_ERROR: - raise_from( - AnsibleError('revbits_ansible must be installed to use this plugin'), - ANOTHER_LIBRARY_IMPORT_ERROR - ) + raise AnsibleError('revbits_ansible must be installed to use this plugin') from ANOTHER_LIBRARY_IMPORT_ERROR self.set_options(var_options=variables, direct=kwargs) secret_server = LookupModule.Client( { @@ -100,8 +95,8 @@ class LookupModule(LookupBase): result = [] for term in terms: try: - display.vvv(u"Secret Server lookup of Secret with ID %s" % term) + display.vvv(f"Secret Server lookup of Secret with ID {term}") result.append({term: secret_server.get_pam_secret(term)}) except Exception as error: - raise AnsibleError("Secret Server lookup failure: %s" % error.message) + raise AnsibleError(f"Secret Server lookup failure: {error.message}") return result diff --git a/plugins/lookup/shelvefile.py b/plugins/lookup/shelvefile.py index 56cfdf1143..54d96e91d2 100644 --- a/plugins/lookup/shelvefile.py +++ b/plugins/lookup/shelvefile.py @@ -1,35 +1,40 @@ -# 
-*- coding: utf-8 -*- -# (c) 2015, Alejandro Guirao -# (c) 2012-17 Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +# Copyright (c) 2015, Alejandro Guirao +# Copyright (c) 2012-17 Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later +from __future__ import annotations -DOCUMENTATION = ''' - name: shelvefile - author: Alejandro Guirao (!UNKNOWN) - short_description: read keys from Python shelve file - description: - - Read keys from Python shelve file. - options: - _terms: - description: sets of key value pairs of parameters - key: - description: key to query - required: True - file: - description: path to shelve file - required: True -''' - -EXAMPLES = """ -- name: retrieve a string value corresponding to a key inside a Python shelve file - ansible.builtin.debug: msg="{{ lookup('community.general.shelvefile', 'file=path_to_some_shelve_file.db key=key_to_retrieve') }} +DOCUMENTATION = r""" +name: shelvefile +author: Alejandro Guirao (!UNKNOWN) +short_description: Read keys from Python shelve file +description: + - Read keys from Python shelve file. +options: + _terms: + description: Sets of key value pairs of parameters. + type: list + elements: str + key: + description: Key to query. + type: str + required: true + file: + description: Path to shelve file. + type: path + required: true """ -RETURN = """ +EXAMPLES = r""" +--- +- name: Retrieve a string value corresponding to a key inside a Python shelve file + ansible.builtin.debug: + msg: "{{ lookup('community.general.shelvefile', 'file=path_to_some_shelve_file.db key=key_to_retrieve') }}" +""" + +RETURN = r""" _list: - description: value(s) of key(s) in shelve file(s) + description: Value(s) of key(s) in shelve file(s). 
type: list elements: str """ @@ -52,7 +57,6 @@ class LookupModule(LookupBase): return res def run(self, terms, variables=None, **kwargs): - if not isinstance(terms, list): terms = [terms] @@ -66,7 +70,7 @@ class LookupModule(LookupBase): for param in params: name, value = param.split('=') if name not in paramvals: - raise AnsibleAssertionError('%s not in paramvals' % name) + raise AnsibleAssertionError(f'{name} not in paramvals') paramvals[name] = value except (ValueError, AssertionError) as e: @@ -81,11 +85,11 @@ class LookupModule(LookupBase): if shelvefile: res = self.read_shelve(shelvefile, key) if res is None: - raise AnsibleError("Key %s not found in shelve file %s" % (key, shelvefile)) + raise AnsibleError(f"Key {key} not found in shelve file {shelvefile}") # Convert the value read to string ret.append(to_text(res)) break else: - raise AnsibleError("Could not locate shelve file in lookup: %s" % paramvals['file']) + raise AnsibleError(f"Could not locate shelve file in lookup: {paramvals['file']}") return ret diff --git a/plugins/lookup/tss.py b/plugins/lookup/tss.py index 880e6e3833..e612446374 100644 --- a/plugins/lookup/tss.py +++ b/plugins/lookup/tss.py @@ -1,9 +1,8 @@ -# -*- coding: utf-8 -*- -# Copyright: (c) 2020, Adam Migus -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import absolute_import, division, print_function +# Copyright (c) 2020, Adam Migus +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later +from __future__ import annotations -__metaclass__ = type DOCUMENTATION = r""" name: tss @@ -11,185 +10,283 @@ author: Adam Migus (@amigus) short_description: Get secrets from Thycotic Secret Server version_added: 1.0.0 description: - - Uses the Thycotic Secret Server Python SDK to get Secrets from Secret - Server using token authentication with I(username) and I(password) on - the 
REST API at I(base_url). - - When using self-signed certificates the environment variable - C(REQUESTS_CA_BUNDLE) can be set to a file containing the trusted certificates - (in C(.pem) format). - - For example, C(export REQUESTS_CA_BUNDLE='/etc/ssl/certs/ca-bundle.trust.crt'). + - Uses the Thycotic Secret Server Python SDK to get Secrets from Secret Server using token authentication with O(username) + and O(password) on the REST API at O(base_url). + - When using self-signed certificates the environment variable E(REQUESTS_CA_BUNDLE) can be set to a file containing the + trusted certificates (in C(.pem) format). + - For example, C(export REQUESTS_CA_BUNDLE='/etc/ssl/certs/ca-bundle.trust.crt'). requirements: - - python-tss-sdk - https://pypi.org/project/python-tss-sdk/ + - python-tss-sdk - https://pypi.org/project/python-tss-sdk/ options: - _terms: - description: The integer ID of the secret. - required: true - type: int - base_url: - description: The base URL of the server, e.g. C(https://localhost/SecretServer). - env: - - name: TSS_BASE_URL - ini: - - section: tss_lookup - key: base_url - required: true - username: - description: The username with which to request the OAuth2 Access Grant. - env: - - name: TSS_USERNAME - ini: - - section: tss_lookup - key: username - password: - description: - - The password associated with the supplied username. - - Required when I(token) is not provided. - env: - - name: TSS_PASSWORD - ini: - - section: tss_lookup - key: password - domain: - default: "" - description: - - The domain with which to request the OAuth2 Access Grant. - - Optional when I(token) is not provided. - - Requires C(python-tss-sdk) version 1.0.0 or greater. - env: - - name: TSS_DOMAIN - ini: - - section: tss_lookup - key: domain - required: false - version_added: 3.6.0 - token: - description: - - Existing token for Thycotic authorizer. - - If provided, I(username) and I(password) are not needed. - - Requires C(python-tss-sdk) version 1.0.0 or greater. 
- env: - - name: TSS_TOKEN - ini: - - section: tss_lookup - key: token - version_added: 3.7.0 - api_path_uri: - default: /api/v1 - description: The path to append to the base URL to form a valid REST - API request. - env: - - name: TSS_API_PATH_URI - required: false - token_path_uri: - default: /oauth2/token - description: The path to append to the base URL to form a valid OAuth2 - Access Grant request. - env: - - name: TSS_TOKEN_PATH_URI - required: false + _terms: + description: The integer ID of the secret. + required: true + type: list + elements: int + secret_path: + description: Indicate a full path of secret including folder and secret name when the secret ID is set to 0. + required: false + type: str + version_added: 7.2.0 + fetch_secret_ids_from_folder: + description: + - Boolean flag which indicates whether secret IDs are in a folder is fetched by folder ID or not. + - V(true) then the terms are considered as a folder IDs. Otherwise (default), they are considered as secret IDs. + required: false + type: bool + version_added: 7.1.0 + fetch_attachments: + description: + - Boolean flag which indicates whether attached files are downloaded or not. + - The download only happens if O(file_download_path) has been provided. + required: false + type: bool + version_added: 7.0.0 + file_download_path: + description: Indicate the file attachment download location. + required: false + type: path + version_added: 7.0.0 + base_url: + description: The base URL of the server, for example V(https://localhost/SecretServer). + type: string + env: + - name: TSS_BASE_URL + ini: + - section: tss_lookup + key: base_url + required: true + username: + description: The username with which to request the OAuth2 Access Grant. + type: string + env: + - name: TSS_USERNAME + ini: + - section: tss_lookup + key: username + password: + description: + - The password associated with the supplied username. + - Required when O(token) is not provided. 
+ type: string + env: + - name: TSS_PASSWORD + ini: + - section: tss_lookup + key: password + domain: + default: "" + description: + - The domain with which to request the OAuth2 Access Grant. + - Optional when O(token) is not provided. + - Requires C(python-tss-sdk) version 1.0.0 or greater. + type: string + env: + - name: TSS_DOMAIN + ini: + - section: tss_lookup + key: domain + required: false + version_added: 3.6.0 + token: + description: + - Existing token for Thycotic authorizer. + - If provided, O(username) and O(password) are not needed. + - Requires C(python-tss-sdk) version 1.0.0 or greater. + type: string + env: + - name: TSS_TOKEN + ini: + - section: tss_lookup + key: token + version_added: 3.7.0 + api_path_uri: + default: /api/v1 + description: The path to append to the base URL to form a valid REST API request. + type: string + env: + - name: TSS_API_PATH_URI + required: false + token_path_uri: + default: /oauth2/token + description: The path to append to the base URL to form a valid OAuth2 Access Grant request. + type: string + env: + - name: TSS_TOKEN_PATH_URI + required: false """ RETURN = r""" _list: - description: - - The JSON responses to C(GET /secrets/{id}). - - See U(https://updates.thycotic.net/secretserver/restapiguide/TokenAuth/#operation--secrets--id--get). - type: list - elements: dict + description: + - The JSON responses to C(GET /secrets/{id}). + - See U(https://updates.thycotic.net/secretserver/restapiguide/TokenAuth/#operation--secrets--id--get). 
+ type: list + elements: dict """ EXAMPLES = r""" - hosts: localhost vars: - secret: >- - {{ - lookup( - 'community.general.tss', - 102, - base_url='https://secretserver.domain.com/SecretServer/', - username='user.name', - password='password' - ) - }} + secret: >- + {{ + lookup( + 'community.general.tss', + 102, + base_url='https://secretserver.domain.com/SecretServer/', + username='user.name', + password='password' + ) + }} tasks: - - ansible.builtin.debug: - msg: > - the password is {{ - (secret['items'] - | items2dict(key_name='slug', - value_name='itemValue'))['password'] - }} + - ansible.builtin.debug: + msg: > + the password is {{ + (secret['items'] + | items2dict(key_name='slug', + value_name='itemValue'))['password'] + }} - hosts: localhost vars: - secret: >- - {{ - lookup( - 'community.general.tss', - 102, - base_url='https://secretserver.domain.com/SecretServer/', - username='user.name', - password='password', - domain='domain' - ) - }} + secret: >- + {{ + lookup( + 'community.general.tss', + 102, + base_url='https://secretserver.domain.com/SecretServer/', + username='user.name', + password='password', + domain='domain' + ) + }} tasks: - - ansible.builtin.debug: - msg: > - the password is {{ - (secret['items'] - | items2dict(key_name='slug', - value_name='itemValue'))['password'] - }} + - ansible.builtin.debug: + msg: > + the password is {{ + (secret['items'] + | items2dict(key_name='slug', + value_name='itemValue'))['password'] + }} - hosts: localhost vars: - secret_password: >- - {{ - ((lookup( - 'community.general.tss', - 102, - base_url='https://secretserver.domain.com/SecretServer/', - token='thycotic_access_token', - ) | from_json).get('items') | items2dict(key_name='slug', value_name='itemValue'))['password'] - }} + secret_password: >- + {{ + ((lookup( + 'community.general.tss', + 102, + base_url='https://secretserver.domain.com/SecretServer/', + token='thycotic_access_token', + ) | from_json).get('items') | items2dict(key_name='slug', 
value_name='itemValue'))['password'] + }} tasks: - - ansible.builtin.debug: - msg: the password is {{ secret_password }} + - ansible.builtin.debug: + msg: the password is {{ secret_password }} + +# Private key stores into certificate file which is attached with secret. +# If fetch_attachments=True then private key file will be download on specified path +# and file content will display in debug message. +- hosts: localhost + vars: + secret: >- + {{ + lookup( + 'community.general.tss', + 102, + fetch_attachments=True, + file_download_path='/home/certs', + base_url='https://secretserver.domain.com/SecretServer/', + token='thycotic_access_token' + ) + }} + tasks: + - ansible.builtin.debug: + msg: > + the private key is {{ + (secret['items'] + | items2dict(key_name='slug', + value_name='itemValue'))['private-key'] + }} + +# If fetch_secret_ids_from_folder=true then secret IDs are in a folder is fetched based on folder ID +- hosts: localhost + vars: + secret: >- + {{ + lookup( + 'community.general.tss', + 102, + fetch_secret_ids_from_folder=true, + base_url='https://secretserver.domain.com/SecretServer/', + token='thycotic_access_token' + ) + }} + tasks: + - ansible.builtin.debug: + msg: > + the secret id's are {{ + secret + }} + +# If secret ID is 0 and secret_path has value then secret is fetched by secret path +- hosts: localhost + vars: + secret: >- + {{ + lookup( + 'community.general.tss', + 0, + secret_path='\folderName\secretName' + base_url='https://secretserver.domain.com/SecretServer/', + username='user.name', + password='password' + ) + }} + tasks: + - ansible.builtin.debug: + msg: >- + the password is {{ + (secret['items'] + | items2dict(key_name='slug', + value_name='itemValue'))['password'] + }} """ import abc - +import os from ansible.errors import AnsibleError, AnsibleOptionsError -from ansible.module_utils import six from ansible.plugins.lookup import LookupBase from ansible.utils.display import Display try: - from thycotic.secrets.server import 
SecretServer, SecretServerError + from delinea.secrets.server import SecretServer, SecretServerError, PasswordGrantAuthorizer, DomainPasswordGrantAuthorizer, AccessTokenAuthorizer HAS_TSS_SDK = True -except ImportError: - SecretServer = None - SecretServerError = None - HAS_TSS_SDK = False - -try: - from thycotic.secrets.server import PasswordGrantAuthorizer, DomainPasswordGrantAuthorizer, AccessTokenAuthorizer - + HAS_DELINEA_SS_SDK = True HAS_TSS_AUTHORIZER = True except ImportError: - PasswordGrantAuthorizer = None - DomainPasswordGrantAuthorizer = None - AccessTokenAuthorizer = None - HAS_TSS_AUTHORIZER = False + try: + from thycotic.secrets.server import SecretServer, SecretServerError, PasswordGrantAuthorizer, DomainPasswordGrantAuthorizer, AccessTokenAuthorizer + + HAS_TSS_SDK = True + HAS_DELINEA_SS_SDK = False + HAS_TSS_AUTHORIZER = True + except ImportError: + SecretServer = None + SecretServerError = None + HAS_TSS_SDK = False + HAS_DELINEA_SS_SDK = False + PasswordGrantAuthorizer = None + DomainPasswordGrantAuthorizer = None + AccessTokenAuthorizer = None + HAS_TSS_AUTHORIZER = False display = Display() -@six.add_metaclass(abc.ABCMeta) -class TSSClient(object): +class TSSClient(object, metaclass=abc.ABCMeta): def __init__(self): self._client = None @@ -200,13 +297,49 @@ class TSSClient(object): else: return TSSClientV0(**server_parameters) - def get_secret(self, term): - display.debug("tss_lookup term: %s" % term) - + def get_secret(self, term, secret_path, fetch_file_attachments, file_download_path): + display.debug(f"tss_lookup term: {term}") secret_id = self._term_to_secret_id(term) - display.vvv(u"Secret Server lookup of Secret with ID %d" % secret_id) + if secret_id == 0 and secret_path: + fetch_secret_by_path = True + display.vvv(f"Secret Server lookup of Secret with path {secret_path}") + else: + fetch_secret_by_path = False + display.vvv(f"Secret Server lookup of Secret with ID {secret_id}") - return self._client.get_secret_json(secret_id) + if 
fetch_file_attachments: + if fetch_secret_by_path: + obj = self._client.get_secret_by_path(secret_path, fetch_file_attachments) + else: + obj = self._client.get_secret(secret_id, fetch_file_attachments) + for i in obj['items']: + if file_download_path and os.path.isdir(file_download_path): + if i['isFile']: + try: + file_content = i['itemValue'].content + with open(os.path.join(file_download_path, f"{obj['id']}_{i['slug']}"), "wb") as f: + f.write(file_content) + except ValueError: + raise AnsibleOptionsError(f"Failed to download {i['slug']}") + except AttributeError: + display.warning(f"Could not read file content for {i['slug']}") + finally: + i['itemValue'] = "*** Not Valid For Display ***" + else: + raise AnsibleOptionsError("File download path does not exist") + return obj + else: + if fetch_secret_by_path: + return self._client.get_secret_by_path(secret_path, False) + else: + return self._client.get_secret_json(secret_id) + + def get_secret_ids_by_folderid(self, term): + display.debug(f"tss_lookup term: {term}") + folder_id = self._term_to_folder_id(term) + display.vvv(f"Secret Server lookup of Secret id's with Folder ID {folder_id}") + + return self._client.get_secret_ids_by_folderid(folder_id) @staticmethod def _term_to_secret_id(term): @@ -215,6 +348,13 @@ class TSSClient(object): except ValueError: raise AnsibleOptionsError("Secret ID must be an integer") + @staticmethod + def _term_to_folder_id(term): + try: + return int(term) + except ValueError: + raise AnsibleOptionsError("Folder ID must be an integer") + class TSSClientV0(TSSClient): def __init__(self, **server_parameters): @@ -283,6 +423,20 @@ class LookupModule(LookupBase): ) try: - return [tss.get_secret(term) for term in terms] + if self.get_option("fetch_secret_ids_from_folder"): + if HAS_DELINEA_SS_SDK: + return [tss.get_secret_ids_by_folderid(term) for term in terms] + else: + raise AnsibleError("latest python-tss-sdk must be installed to use this plugin") + else: + return [ + tss.get_secret( 
+ term, + self.get_option("secret_path"), + self.get_option("fetch_attachments"), + self.get_option("file_download_path"), + ) + for term in terms + ] except SecretServerError as error: - raise AnsibleError("Secret Server lookup failure: %s" % error.message) + raise AnsibleError(f"Secret Server lookup failure: {error.message}") diff --git a/tests/integration/targets/__init__.py b/plugins/module_utils/__init__.py similarity index 100% rename from tests/integration/targets/__init__.py rename to plugins/module_utils/__init__.py diff --git a/plugins/module_utils/_filelock.py b/plugins/module_utils/_filelock.py new file mode 100644 index 0000000000..f5d0e27608 --- /dev/null +++ b/plugins/module_utils/_filelock.py @@ -0,0 +1,108 @@ +# Copyright (c) 2018, Ansible Project +# Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause) +# SPDX-License-Identifier: BSD-2-Clause + +# NOTE: +# This has been vendored from ansible.module_utils.common.file. This code has been removed from there for ansible-core 2.16. + +from __future__ import annotations + +import os +import stat +import time +import fcntl +import sys + +from contextlib import contextmanager + + +class LockTimeout(Exception): + pass + + +class FileLock: + ''' + Currently FileLock is implemented via fcntl.flock on a lock file, however this + behaviour may change in the future. Avoid mixing lock types fcntl.flock, + fcntl.lockf and module_utils.common.file.FileLock as it will certainly cause + unwanted and/or unexpected behaviour + ''' + def __init__(self): + self.lockfd = None + + @contextmanager + def lock_file(self, path, tmpdir, lock_timeout=None): + ''' + Context for lock acquisition + ''' + try: + self.set_lock(path, tmpdir, lock_timeout) + yield + finally: + self.unlock() + + def set_lock(self, path, tmpdir, lock_timeout=None): + ''' + Create a lock file based on path with flock to prevent other processes + using given path. 
+ Please note that currently file locking only works when it is executed by + the same user, for example single user scenarios + + :kw path: Path (file) to lock + :kw tmpdir: Path where to place the temporary .lock file + :kw lock_timeout: + Wait n seconds for lock acquisition, fail if timeout is reached. + 0 = Do not wait, fail if lock cannot be acquired immediately, + Default is None, wait indefinitely until lock is released. + :returns: True + ''' + lock_path = os.path.join(tmpdir, f'ansible-{os.path.basename(path)}.lock') + l_wait = 0.1 + r_exception = IOError + if sys.version_info[0] == 3: + r_exception = BlockingIOError + + self.lockfd = open(lock_path, 'w') + + if lock_timeout <= 0: + fcntl.flock(self.lockfd, fcntl.LOCK_EX | fcntl.LOCK_NB) + os.chmod(lock_path, stat.S_IWRITE | stat.S_IREAD) + return True + + if lock_timeout: + e_secs = 0 + while e_secs < lock_timeout: + try: + fcntl.flock(self.lockfd, fcntl.LOCK_EX | fcntl.LOCK_NB) + os.chmod(lock_path, stat.S_IWRITE | stat.S_IREAD) + return True + except r_exception: + time.sleep(l_wait) + e_secs += l_wait + continue + + self.lockfd.close() + raise LockTimeout(f'{lock_timeout} sec') + + fcntl.flock(self.lockfd, fcntl.LOCK_EX) + os.chmod(lock_path, stat.S_IWRITE | stat.S_IREAD) + + return True + + def unlock(self): + ''' + Make sure lock file is available for everyone and Unlock the file descriptor + locked by set_lock + + :returns: True + ''' + if not self.lockfd: + return True + + try: + fcntl.flock(self.lockfd, fcntl.LOCK_UN) + self.lockfd.close() + except ValueError: # file wasn't opened, let context manager fail gracefully + pass + + return True diff --git a/plugins/module_utils/_mount.py b/plugins/module_utils/_mount.py index 391d468178..33d191c845 100644 --- a/plugins/module_utils/_mount.py +++ b/plugins/module_utils/_mount.py @@ -1,57 +1,14 @@ -# -*- coding: utf-8 -*- # This code is part of Ansible, but is an independent component. 
# This particular file snippet, and this file snippet only, is based on # Lib/posixpath.py of cpython +# +# Copyright (c) 2001-2022 Python Software Foundation. All rights reserved. # It is licensed under the PYTHON SOFTWARE FOUNDATION LICENSE VERSION 2 -# -# 1. This LICENSE AGREEMENT is between the Python Software Foundation -# ("PSF"), and the Individual or Organization ("Licensee") accessing and -# otherwise using this software ("Python") in source or binary form and -# its associated documentation. -# -# 2. Subject to the terms and conditions of this License Agreement, PSF hereby -# grants Licensee a nonexclusive, royalty-free, world-wide license to reproduce, -# analyze, test, perform and/or display publicly, prepare derivative works, -# distribute, and otherwise use Python alone or in any derivative version, -# provided, however, that PSF's License Agreement and PSF's notice of copyright, -# i.e., "Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, -# 2011, 2012, 2013, 2014, 2015 Python Software Foundation; All Rights Reserved" -# are retained in Python alone or in any derivative version prepared by Licensee. -# -# 3. In the event Licensee prepares a derivative work that is based on -# or incorporates Python or any part thereof, and wants to make -# the derivative work available to others as provided herein, then -# Licensee hereby agrees to include in any such work a brief summary of -# the changes made to Python. -# -# 4. PSF is making Python available to Licensee on an "AS IS" -# basis. PSF MAKES NO REPRESENTATIONS OR WARRANTIES, EXPRESS OR -# IMPLIED. BY WAY OF EXAMPLE, BUT NOT LIMITATION, PSF MAKES NO AND -# DISCLAIMS ANY REPRESENTATION OR WARRANTY OF MERCHANTABILITY OR FITNESS -# FOR ANY PARTICULAR PURPOSE OR THAT THE USE OF PYTHON WILL NOT -# INFRINGE ANY THIRD PARTY RIGHTS. -# -# 5. 
PSF SHALL NOT BE LIABLE TO LICENSEE OR ANY OTHER USERS OF PYTHON -# FOR ANY INCIDENTAL, SPECIAL, OR CONSEQUENTIAL DAMAGES OR LOSS AS -# A RESULT OF MODIFYING, DISTRIBUTING, OR OTHERWISE USING PYTHON, -# OR ANY DERIVATIVE THEREOF, EVEN IF ADVISED OF THE POSSIBILITY THEREOF. -# -# 6. This License Agreement will automatically terminate upon a material -# breach of its terms and conditions. -# -# 7. Nothing in this License Agreement shall be deemed to create any -# relationship of agency, partnership, or joint venture between PSF and -# Licensee. This License Agreement does not grant permission to use PSF -# trademarks or trade name in a trademark sense to endorse or promote -# products or services of Licensee, or any third party. -# -# 8. By copying, installing or otherwise using Python, Licensee -# agrees to be bound by the terms and conditions of this License -# Agreement. +# (See LICENSES/PSF-2.0.txt in this collection) +# SPDX-License-Identifier: PSF-2.0 -from __future__ import absolute_import, division, print_function +from __future__ import annotations -__metaclass__ = type import os diff --git a/plugins/module_utils/_stormssh.py b/plugins/module_utils/_stormssh.py new file mode 100644 index 0000000000..42a72eb674 --- /dev/null +++ b/plugins/module_utils/_stormssh.py @@ -0,0 +1,252 @@ +# This code is part of Ansible, but is an independent component. 
+# This particular file snippet, and this file snippet only, is based on +# the config parser from here: https://github.com/emre/storm/blob/master/storm/parsers/ssh_config_parser.py +# Copyright (C) <2013> +# SPDX-License-Identifier: MIT + +from __future__ import annotations +import os +import re +import traceback +from operator import itemgetter + + +try: + from paramiko.config import SSHConfig +except ImportError: + SSHConfig = object + HAS_PARAMIKO = False + PARAMIKO_IMPORT_ERROR = traceback.format_exc() +else: + HAS_PARAMIKO = True + PARAMIKO_IMPORT_ERROR = None + + +class StormConfig(SSHConfig): + def parse(self, file_obj): + """ + Read an OpenSSH config from the given file object. + @param file_obj: a file-like object to read the config file from + @type file_obj: file + """ + order = 1 + host = {"host": ['*'], "config": {}, } + for line in file_obj: + line = line.rstrip('\n').lstrip() + if line == '': + self._config.append({ + 'type': 'empty_line', + 'value': line, + 'host': '', + 'order': order, + }) + order += 1 + continue + + if line.startswith('#'): + self._config.append({ + 'type': 'comment', + 'value': line, + 'host': '', + 'order': order, + }) + order += 1 + continue + + if '=' in line: + # Ensure ProxyCommand gets properly split + if line.lower().strip().startswith('proxycommand'): + proxy_re = re.compile(r"^(proxycommand)\s*=*\s*(.*)", re.I) + match = proxy_re.match(line) + key, value = match.group(1).lower(), match.group(2) + else: + key, value = line.split('=', 1) + key = key.strip().lower() + else: + # find first whitespace, and split there + i = 0 + while (i < len(line)) and not line[i].isspace(): + i += 1 + if i == len(line): + raise Exception(f'Unparsable line: {line!r}') + key = line[:i].lower() + value = line[i:].lstrip() + if key == 'host': + self._config.append(host) + value = value.split() + host = { + key: value, + 'config': {}, + 'type': 'entry', + 'order': order + } + order += 1 + elif key in ['identityfile', 'localforward', 
'remoteforward']: + if key in host['config']: + host['config'][key].append(value) + else: + host['config'][key] = [value] + elif key not in host['config']: + host['config'].update({key: value}) + self._config.append(host) + + +class ConfigParser(object): + """ + Config parser for ~/.ssh/config files. + """ + + def __init__(self, ssh_config_file=None): + if not ssh_config_file: + ssh_config_file = self.get_default_ssh_config_file() + + self.defaults = {} + + self.ssh_config_file = ssh_config_file + + if not os.path.exists(self.ssh_config_file): + if not os.path.exists(os.path.dirname(self.ssh_config_file)): + os.makedirs(os.path.dirname(self.ssh_config_file)) + open(self.ssh_config_file, 'w+').close() + os.chmod(self.ssh_config_file, 0o600) + + self.config_data = [] + + def get_default_ssh_config_file(self): + return os.path.expanduser("~/.ssh/config") + + def load(self): + config = StormConfig() + + with open(self.ssh_config_file) as fd: + config.parse(fd) + + for entry in config.__dict__.get("_config"): + if entry.get("host") == ["*"]: + self.defaults.update(entry.get("config")) + + if entry.get("type") in ["comment", "empty_line"]: + self.config_data.append(entry) + continue + + host_item = { + 'host': entry["host"][0], + 'options': entry.get("config"), + 'type': 'entry', + 'order': entry.get("order", 0), + } + + if len(entry["host"]) > 1: + host_item.update({ + 'host': " ".join(entry["host"]), + }) + # minor bug in paramiko.SSHConfig that duplicates + # "Host *" entries. 
+ if entry.get("config") and len(entry.get("config")) > 0: + self.config_data.append(host_item) + + return self.config_data + + def add_host(self, host, options): + self.config_data.append({ + 'host': host, + 'options': options, + 'order': self.get_last_index(), + }) + + return self + + def update_host(self, host, options, use_regex=False): + for index, host_entry in enumerate(self.config_data): + if host_entry.get("host") == host or \ + (use_regex and re.match(host, host_entry.get("host"))): + + if 'deleted_fields' in options: + deleted_fields = options.pop("deleted_fields") + for deleted_field in deleted_fields: + del self.config_data[index]["options"][deleted_field] + + self.config_data[index]["options"].update(options) + + return self + + def search_host(self, search_string): + results = [] + for host_entry in self.config_data: + if host_entry.get("type") != 'entry': + continue + if host_entry.get("host") == "*": + continue + + searchable_information = host_entry.get("host") + for key, value in host_entry.get("options").items(): + if isinstance(value, list): + value = " ".join(value) + if isinstance(value, int): + value = str(value) + + searchable_information += f" {value}" + + if search_string in searchable_information: + results.append(host_entry) + + return results + + def delete_host(self, host): + found = 0 + for index, host_entry in enumerate(self.config_data): + if host_entry.get("host") == host: + del self.config_data[index] + found += 1 + + if found == 0: + raise ValueError('No host found') + return self + + def delete_all_hosts(self): + self.config_data = [] + self.write_to_ssh_config() + + return self + + def dump(self): + if len(self.config_data) < 1: + return + + file_content = "" + self.config_data = sorted(self.config_data, key=itemgetter("order")) + + for host_item in self.config_data: + if host_item.get("type") in ['comment', 'empty_line']: + file_content += f"{host_item.get('value')}\n" + continue + host_item_content = f"Host 
{host_item.get('host')}\n" + for key, value in host_item.get("options").items(): + if isinstance(value, list): + sub_content = "" + for value_ in value: + sub_content += f" {key} {value_}\n" + host_item_content += sub_content + else: + host_item_content += f" {key} {value}\n" + file_content += host_item_content + + return file_content + + def write_to_ssh_config(self): + with open(self.ssh_config_file, 'w+') as f: + data = self.dump() + if data: + f.write(data) + return self + + def get_last_index(self): + last_index = 0 + indexes = [] + for item in self.config_data: + if item.get("order"): + indexes.append(item.get("order")) + if len(indexes) > 0: + last_index = max(indexes) + + return last_index diff --git a/plugins/module_utils/alicloud_ecs.py b/plugins/module_utils/alicloud_ecs.py index d4d3bf76c9..e752b4aa4a 100644 --- a/plugins/module_utils/alicloud_ecs.py +++ b/plugins/module_utils/alicloud_ecs.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # This code is part of Ansible, but is an independent component. # This particular file snippet, and this file snippet only, is BSD licensed. # Modules you write using this snippet, which is embedded dynamically by Ansible @@ -7,13 +6,14 @@ # # Copyright (c) 2017-present Alibaba Group Holding Limited. 
He Guimin # -# Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause) +# Simplified BSD License (see LICENSES/BSD-2-Clause.txt or https://opensource.org/licenses/BSD-2-Clause) +# SPDX-License-Identifier: BSD-2-Clause -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations import os import json +import traceback from ansible.module_utils.basic import env_fallback try: @@ -27,8 +27,11 @@ try: import footmark.dns import footmark.ram import footmark.market + + FOOTMARK_IMP_ERR = None HAS_FOOTMARK = True except ImportError: + FOOTMARK_IMP_ERR = traceback.format_exc() HAS_FOOTMARK = False @@ -85,10 +88,10 @@ def connect_to_acs(acs_module, region, **params): if not conn: if region not in [acs_module_region.id for acs_module_region in acs_module.regions()]: raise AnsibleACSError( - "Region %s does not seem to be available for acs module %s." % (region, acs_module.__name__)) + f"Region {region} does not seem to be available for acs module {acs_module.__name__}.") else: raise AnsibleACSError( - "Unknown problem connecting to region %s for acs module %s." 
% (region, acs_module.__name__)) + f"Unknown problem connecting to region {region} for acs module {acs_module.__name__}.") return conn @@ -122,7 +125,7 @@ def get_assume_role(params): def get_profile(params): if not params['alicloud_access_key'] and not params['ecs_role_name'] and params['profile']: - path = params['shared_credentials_file'] if params['shared_credentials_file'] else os.getenv('HOME') + '/.aliyun/config.json' + path = params['shared_credentials_file'] if params['shared_credentials_file'] else f"{os.getenv('HOME')}/.aliyun/config.json" auth = {} with open(path, 'r') as f: for pro in json.load(f)['profiles']: diff --git a/plugins/module_utils/android_sdkmanager.py b/plugins/module_utils/android_sdkmanager.py new file mode 100644 index 0000000000..b25a1a04fc --- /dev/null +++ b/plugins/module_utils/android_sdkmanager.py @@ -0,0 +1,146 @@ + +# Copyright (c) 2024, Stanislav Shamilov +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + + +import re + +from ansible_collections.community.general.plugins.module_utils.cmd_runner import CmdRunner, cmd_runner_fmt + +__state_map = { + "present": "--install", + "absent": "--uninstall" +} + +# sdkmanager --help 2>&1 | grep -A 2 -- --channel +__channel_map = { + "stable": 0, + "beta": 1, + "dev": 2, + "canary": 3 +} + + +def __map_channel(channel_name): + if channel_name not in __channel_map: + raise ValueError(f"Unknown channel name '{channel_name}'") + return __channel_map[channel_name] + + +def sdkmanager_runner(module, **kwargs): + return CmdRunner( + module, + command='sdkmanager', + arg_formats=dict( + state=cmd_runner_fmt.as_map(__state_map), + name=cmd_runner_fmt.as_list(), + installed=cmd_runner_fmt.as_fixed("--list_installed"), + list=cmd_runner_fmt.as_fixed('--list'), + newer=cmd_runner_fmt.as_fixed("--newer"), + 
sdk_root=cmd_runner_fmt.as_opt_eq_val("--sdk_root"), + channel=cmd_runner_fmt.as_func(lambda x: [f"--channel={__map_channel(x)}"]) + ), + force_lang="C.UTF-8", # Without this, sdkmanager binary crashes + **kwargs + ) + + +class Package: + def __init__(self, name): + self.name = name + + def __hash__(self): + return hash(self.name) + + def __ne__(self, other): + if not isinstance(other, Package): + return True + return self.name != other.name + + def __eq__(self, other): + if not isinstance(other, Package): + return False + + return self.name == other.name + + +class SdkManagerException(Exception): + pass + + +class AndroidSdkManager(object): + _RE_INSTALLED_PACKAGES_HEADER = re.compile(r'^Installed packages:$') + _RE_UPDATABLE_PACKAGES_HEADER = re.compile(r'^Available Updates:$') + + # Example: ' platform-tools | 27.0.0 | Android SDK Platform-Tools 27 | platform-tools ' + _RE_INSTALLED_PACKAGE = re.compile(r'^\s*(?P\S+)\s*\|\s*[0-9][^|]*\b\s*\|\s*.+\s*\|\s*(\S+)\s*$') + + # Example: ' platform-tools | 27.0.0 | 35.0.2' + _RE_UPDATABLE_PACKAGE = re.compile(r'^\s*(?P\S+)\s*\|\s*[0-9][^|]*\b\s*\|\s*[0-9].*\b\s*$') + + _RE_UNKNOWN_PACKAGE = re.compile(r'^Warning: Failed to find package \'(?P\S+)\'\s*$') + _RE_ACCEPT_LICENSE = re.compile(r'^The following packages can not be installed since their licenses or those of ' + r'the packages they depend on were not accepted') + + def __init__(self, module): + self.runner = sdkmanager_runner(module) + + def get_installed_packages(self): + with self.runner('installed sdk_root channel') as ctx: + rc, stdout, stderr = ctx.run() + return self._parse_packages(stdout, self._RE_INSTALLED_PACKAGES_HEADER, self._RE_INSTALLED_PACKAGE) + + def get_updatable_packages(self): + with self.runner('list newer sdk_root channel') as ctx: + rc, stdout, stderr = ctx.run() + return self._parse_packages(stdout, self._RE_UPDATABLE_PACKAGES_HEADER, self._RE_UPDATABLE_PACKAGE) + + def apply_packages_changes(self, packages, accept_licenses=False): + """ 
Install or delete packages, depending on the `module.vars.state` parameter """ + if len(packages) == 0: + return 0, '', '' + + if accept_licenses: + license_prompt_answer = 'y' + else: + license_prompt_answer = 'N' + for package in packages: + with self.runner('state name sdk_root channel', data=license_prompt_answer) as ctx: + rc, stdout, stderr = ctx.run(name=package.name) + + for line in stdout.splitlines(): + if self._RE_ACCEPT_LICENSE.match(line): + raise SdkManagerException("Licenses for some packages were not accepted") + + if rc != 0: + self._try_parse_stderr(stderr) + return rc, stdout, stderr + return 0, '', '' + + def _try_parse_stderr(self, stderr): + data = stderr.splitlines() + for line in data: + unknown_package_regex = self._RE_UNKNOWN_PACKAGE.match(line) + if unknown_package_regex: + package = unknown_package_regex.group('package') + raise SdkManagerException(f"Unknown package {package}") + + @staticmethod + def _parse_packages(stdout, header_regexp, row_regexp): + data = stdout.splitlines() + + section_found = False + packages = set() + + for line in data: + if not section_found: + section_found = header_regexp.match(line) + continue + else: + p = row_regexp.match(line) + if p: + packages.add(Package(p.group('name'))) + return packages diff --git a/plugins/module_utils/btrfs.py b/plugins/module_utils/btrfs.py new file mode 100644 index 0000000000..3c9ad3b382 --- /dev/null +++ b/plugins/module_utils/btrfs.py @@ -0,0 +1,460 @@ +# Copyright (c) 2022, Gregory Furlong +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + +from ansible.module_utils.common.text.converters import to_bytes +import re +import os + + +def normalize_subvolume_path(path): + """ + Normalizes btrfs subvolume paths to ensure exactly one leading slash, no trailing slashes and no consecutive slashes. 
+ In addition, if the path is prefixed with a leading , this value is removed. + """ + fstree_stripped = re.sub(r'^', '', path) + result = re.sub(r'/+$', '', re.sub(r'/+', '/', f"/{fstree_stripped}")) + return result if len(result) > 0 else '/' + + +class BtrfsModuleException(Exception): + pass + + +class BtrfsCommands(object): + + """ + Provides access to a subset of the Btrfs command line + """ + + def __init__(self, module): + self.__module = module + self.__btrfs = self.__module.get_bin_path("btrfs", required=True) + + def filesystem_show(self): + command = f"{self.__btrfs} filesystem show -d" + result = self.__module.run_command(command, check_rc=True) + stdout = [x.strip() for x in result[1].splitlines()] + filesystems = [] + current = None + for line in stdout: + if line.startswith('Label'): + current = self.__parse_filesystem(line) + filesystems.append(current) + elif line.startswith('devid'): + current['devices'].append(self.__parse_filesystem_device(line)) + return filesystems + + def __parse_filesystem(self, line): + label = re.sub(r'\s*uuid:.*$', '', re.sub(r'^Label:\s*', '', line)) + id = re.sub(r'^.*uuid:\s*', '', line) + + filesystem = {} + filesystem['label'] = label.strip("'") if label != 'none' else None + filesystem['uuid'] = id + filesystem['devices'] = [] + filesystem['mountpoints'] = [] + filesystem['subvolumes'] = [] + filesystem['default_subvolid'] = None + return filesystem + + def __parse_filesystem_device(self, line): + return re.sub(r'^.*path\s', '', line) + + def subvolumes_list(self, filesystem_path): + command = f"{self.__btrfs} subvolume list -tap {filesystem_path}" + result = self.__module.run_command(command, check_rc=True) + stdout = [x.split('\t') for x in result[1].splitlines()] + subvolumes = [{'id': 5, 'parent': None, 'path': '/'}] + if len(stdout) > 2: + subvolumes.extend([self.__parse_subvolume_list_record(x) for x in stdout[2:]]) + return subvolumes + + def __parse_subvolume_list_record(self, item): + return { + 'id': 
int(item[0]), + 'parent': int(item[2]), + 'path': normalize_subvolume_path(item[5]), + } + + def subvolume_get_default(self, filesystem_path): + command = [self.__btrfs, "subvolume", "get-default", to_bytes(filesystem_path)] + result = self.__module.run_command(command, check_rc=True) + # ID [n] ... + return int(result[1].strip().split()[1]) + + def subvolume_set_default(self, filesystem_path, subvolume_id): + command = [self.__btrfs, "subvolume", "set-default", str(subvolume_id), to_bytes(filesystem_path)] + result = self.__module.run_command(command, check_rc=True) + + def subvolume_create(self, subvolume_path): + command = [self.__btrfs, "subvolume", "create", to_bytes(subvolume_path)] + result = self.__module.run_command(command, check_rc=True) + + def subvolume_snapshot(self, snapshot_source, snapshot_destination): + command = [self.__btrfs, "subvolume", "snapshot", to_bytes(snapshot_source), to_bytes(snapshot_destination)] + result = self.__module.run_command(command, check_rc=True) + + def subvolume_delete(self, subvolume_path): + command = [self.__btrfs, "subvolume", "delete", to_bytes(subvolume_path)] + result = self.__module.run_command(command, check_rc=True) + + +class BtrfsInfoProvider(object): + + """ + Utility providing details of the currently available btrfs filesystems + """ + + def __init__(self, module): + self.__module = module + self.__btrfs_api = BtrfsCommands(module) + self.__findmnt_path = self.__module.get_bin_path("findmnt", required=True) + + def get_filesystems(self): + filesystems = self.__btrfs_api.filesystem_show() + mountpoints = self.__find_mountpoints() + for filesystem in filesystems: + device_mountpoints = self.__filter_mountpoints_for_devices(mountpoints, filesystem['devices']) + filesystem['mountpoints'] = device_mountpoints + + if len(device_mountpoints) > 0: + + # any path within the filesystem can be used to query metadata + mountpoint = device_mountpoints[0]['mountpoint'] + filesystem['subvolumes'] = 
self.get_subvolumes(mountpoint) + filesystem['default_subvolid'] = self.get_default_subvolume_id(mountpoint) + + return filesystems + + def get_mountpoints(self, filesystem_devices): + mountpoints = self.__find_mountpoints() + return self.__filter_mountpoints_for_devices(mountpoints, filesystem_devices) + + def get_subvolumes(self, filesystem_path): + return self.__btrfs_api.subvolumes_list(filesystem_path) + + def get_default_subvolume_id(self, filesystem_path): + return self.__btrfs_api.subvolume_get_default(filesystem_path) + + def __filter_mountpoints_for_devices(self, mountpoints, devices): + return [m for m in mountpoints if (m['device'] in devices)] + + def __find_mountpoints(self): + command = f"{self.__findmnt_path} -t btrfs -nvP" + result = self.__module.run_command(command) + mountpoints = [] + if result[0] == 0: + lines = result[1].splitlines() + for line in lines: + mountpoint = self.__parse_mountpoint_pairs(line) + mountpoints.append(mountpoint) + return mountpoints + + def __parse_mountpoint_pairs(self, line): + pattern = re.compile(r'^TARGET="(?P.*)"\s+SOURCE="(?P.*)"\s+FSTYPE="(?P.*)"\s+OPTIONS="(?P.*)"\s*$') + match = pattern.search(line) + if match is not None: + groups = match.groupdict() + + return { + 'mountpoint': groups['target'], + 'device': groups['source'], + 'subvolid': self.__extract_mount_subvolid(groups['options']), + } + else: + raise BtrfsModuleException(f"Failed to parse findmnt result for line: '{line}'") + + def __extract_mount_subvolid(self, mount_options): + for option in mount_options.split(','): + if option.startswith('subvolid='): + return int(option[len('subvolid='):]) + raise BtrfsModuleException(f"Failed to find subvolid for mountpoint in options '{mount_options}'") + + +class BtrfsSubvolume(object): + + """ + Wrapper class providing convenience methods for inspection of a btrfs subvolume + """ + + def __init__(self, filesystem, subvolume_id): + self.__filesystem = filesystem + self.__subvolume_id = subvolume_id + + def 
get_filesystem(self): + return self.__filesystem + + def is_mounted(self): + mountpoints = self.get_mountpoints() + return mountpoints is not None and len(mountpoints) > 0 + + def is_filesystem_root(self): + return 5 == self.__subvolume_id + + def is_filesystem_default(self): + return self.__filesystem.default_subvolid == self.__subvolume_id + + def get_mounted_path(self): + mountpoints = self.get_mountpoints() + if mountpoints is not None and len(mountpoints) > 0: + return mountpoints[0] + elif self.parent is not None: + parent = self.__filesystem.get_subvolume_by_id(self.parent) + parent_path = parent.get_mounted_path() + if parent_path is not None: + return parent_path + os.path.sep + self.name + else: + return None + + def get_mountpoints(self): + return self.__filesystem.get_mountpoints_by_subvolume_id(self.__subvolume_id) + + def get_child_relative_path(self, absolute_child_path): + """ + Get the relative path from this subvolume to the named child subvolume. + The provided parameter is expected to be normalized as by normalize_subvolume_path. 
+ """ + path = self.path + if absolute_child_path.startswith(path): + relative = absolute_child_path[len(path):] + return re.sub(r'^/*', '', relative) + else: + raise BtrfsModuleException(f"Path '{absolute_child_path}' doesn't start with '{path}'") + + def get_parent_subvolume(self): + parent_id = self.parent + return self.__filesystem.get_subvolume_by_id(parent_id) if parent_id is not None else None + + def get_child_subvolumes(self): + return self.__filesystem.get_subvolume_children(self.__subvolume_id) + + @property + def __info(self): + return self.__filesystem.get_subvolume_info_for_id(self.__subvolume_id) + + @property + def id(self): + return self.__subvolume_id + + @property + def name(self): + return self.path.split('/').pop() + + @property + def path(self): + return self.__info['path'] + + @property + def parent(self): + return self.__info['parent'] + + +class BtrfsFilesystem(object): + + """ + Wrapper class providing convenience methods for inspection of a btrfs filesystem + """ + + def __init__(self, info, provider, module): + self.__provider = provider + + # constant for module execution + self.__uuid = info['uuid'] + self.__label = info['label'] + self.__devices = info['devices'] + + # refreshable + self.__default_subvolid = info['default_subvolid'] if 'default_subvolid' in info else None + self.__update_mountpoints(info['mountpoints'] if 'mountpoints' in info else []) + self.__update_subvolumes(info['subvolumes'] if 'subvolumes' in info else []) + + @property + def uuid(self): + return self.__uuid + + @property + def label(self): + return self.__label + + @property + def default_subvolid(self): + return self.__default_subvolid + + @property + def devices(self): + return list(self.__devices) + + def refresh(self): + self.refresh_mountpoints() + self.refresh_subvolumes() + self.refresh_default_subvolume() + + def refresh_mountpoints(self): + mountpoints = self.__provider.get_mountpoints(list(self.__devices)) + self.__update_mountpoints(mountpoints) + + 
def __update_mountpoints(self, mountpoints): + self.__mountpoints = dict() + for i in mountpoints: + subvolid = i['subvolid'] + mountpoint = i['mountpoint'] + if subvolid not in self.__mountpoints: + self.__mountpoints[subvolid] = [] + self.__mountpoints[subvolid].append(mountpoint) + + def refresh_subvolumes(self): + filesystem_path = self.get_any_mountpoint() + if filesystem_path is not None: + subvolumes = self.__provider.get_subvolumes(filesystem_path) + self.__update_subvolumes(subvolumes) + + def __update_subvolumes(self, subvolumes): + # TODO strategy for retaining information on deleted subvolumes? + self.__subvolumes = dict() + for subvolume in subvolumes: + self.__subvolumes[subvolume['id']] = subvolume + + def refresh_default_subvolume(self): + filesystem_path = self.get_any_mountpoint() + if filesystem_path is not None: + self.__default_subvolid = self.__provider.get_default_subvolume_id(filesystem_path) + + def contains_device(self, device): + return device in self.__devices + + def contains_subvolume(self, subvolume): + return self.get_subvolume_by_name(subvolume) is not None + + def get_subvolume_by_id(self, subvolume_id): + return BtrfsSubvolume(self, subvolume_id) if subvolume_id in self.__subvolumes else None + + def get_subvolume_info_for_id(self, subvolume_id): + return self.__subvolumes[subvolume_id] if subvolume_id in self.__subvolumes else None + + def get_subvolume_by_name(self, subvolume): + for subvolume_info in self.__subvolumes.values(): + if subvolume_info['path'] == subvolume: + return BtrfsSubvolume(self, subvolume_info['id']) + return None + + def get_any_mountpoint(self): + for subvol_mountpoints in self.__mountpoints.values(): + if len(subvol_mountpoints) > 0: + return subvol_mountpoints[0] + # maybe error? 
+ return None + + def get_any_mounted_subvolume(self): + for subvolid, subvol_mountpoints in self.__mountpoints.items(): + if len(subvol_mountpoints) > 0: + return self.get_subvolume_by_id(subvolid) + return None + + def get_mountpoints_by_subvolume_id(self, subvolume_id): + return self.__mountpoints[subvolume_id] if subvolume_id in self.__mountpoints else [] + + def get_nearest_subvolume(self, subvolume): + """Return the identified subvolume if existing, else the closest matching parent""" + subvolumes_by_path = self.__get_subvolumes_by_path() + while len(subvolume) > 1: + if subvolume in subvolumes_by_path: + return BtrfsSubvolume(self, subvolumes_by_path[subvolume]['id']) + else: + subvolume = re.sub(r'/[^/]+$', '', subvolume) + + return BtrfsSubvolume(self, 5) + + def get_mountpath_as_child(self, subvolume_name): + """Find a path to the target subvolume through a mounted ancestor""" + nearest = self.get_nearest_subvolume(subvolume_name) + if nearest.path == subvolume_name: + nearest = nearest.get_parent_subvolume() + if nearest is None or nearest.get_mounted_path() is None: + raise BtrfsModuleException(f"Failed to find a path '{subvolume_name}' through a mounted parent subvolume") + else: + return nearest.get_mounted_path() + os.path.sep + nearest.get_child_relative_path(subvolume_name) + + def get_subvolume_children(self, subvolume_id): + return [BtrfsSubvolume(self, x['id']) for x in self.__subvolumes.values() if x['parent'] == subvolume_id] + + def __get_subvolumes_by_path(self): + result = {} + for s in self.__subvolumes.values(): + path = s['path'] + result[path] = s + return result + + def is_mounted(self): + return self.__mountpoints is not None and len(self.__mountpoints) > 0 + + def get_summary(self): + subvolumes = [] + sources = self.__subvolumes.values() if self.__subvolumes is not None else [] + for subvolume in sources: + id = subvolume['id'] + subvolumes.append({ + 'id': id, + 'path': subvolume['path'], + 'parent': subvolume['parent'], + 
'mountpoints': self.get_mountpoints_by_subvolume_id(id), + }) + + return { + 'default_subvolume': self.__default_subvolid, + 'devices': self.__devices, + 'label': self.__label, + 'uuid': self.__uuid, + 'subvolumes': subvolumes, + } + + +class BtrfsFilesystemsProvider(object): + + """ + Provides methods to query available btrfs filesystems + """ + + def __init__(self, module): + self.__module = module + self.__provider = BtrfsInfoProvider(module) + self.__filesystems = None + + def get_matching_filesystem(self, criteria): + if criteria['device'] is not None: + criteria['device'] = os.path.realpath(criteria['device']) + + self.__check_init() + matching = [f for f in self.__filesystems.values() if self.__filesystem_matches_criteria(f, criteria)] + if len(matching) == 1: + return matching[0] + else: + raise BtrfsModuleException( + f"Found {len(matching)} filesystems matching criteria uuid={criteria['uuid']} label={criteria['label']} device={criteria['device']}" + ) + + def __filesystem_matches_criteria(self, filesystem, criteria): + return ((criteria['uuid'] is None or filesystem.uuid == criteria['uuid']) and + (criteria['label'] is None or filesystem.label == criteria['label']) and + (criteria['device'] is None or filesystem.contains_device(criteria['device']))) + + def get_filesystem_for_device(self, device): + real_device = os.path.realpath(device) + self.__check_init() + for fs in self.__filesystems.values(): + if fs.contains_device(real_device): + return fs + return None + + def get_filesystems(self): + self.__check_init() + return list(self.__filesystems.values()) + + def __check_init(self): + if self.__filesystems is None: + self.__filesystems = dict() + for f in self.__provider.get_filesystems(): + uuid = f['uuid'] + self.__filesystems[uuid] = BtrfsFilesystem(f, self.__provider, self.__module) diff --git a/plugins/module_utils/cloud.py b/plugins/module_utils/cloud.py index 7619023a3c..c8043a8d9e 100644 --- a/plugins/module_utils/cloud.py +++ 
b/plugins/module_utils/cloud.py @@ -1,11 +1,9 @@ -# -*- coding: utf-8 -*- # -# (c) 2016 Allen Sanabria, -# -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# Copyright (c) 2016 Allen Sanabria, +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations """ @@ -134,7 +132,7 @@ class CloudRetry(object): if isinstance(e, cls.base_class): # pylint: disable=isinstance-second-argument-not-valid-type response_code = cls.status_code_from_exception(e) if cls.found(response_code, catch_extra_error_codes): - msg = "{0}: Retrying in {1} seconds...".format(str(e), delay) + msg = f"{e}: Retrying in {delay} seconds..." syslog.syslog(syslog.LOG_INFO, msg) time.sleep(delay) else: diff --git a/plugins/module_utils/cmd_runner.py b/plugins/module_utils/cmd_runner.py index 8048ed25ca..b4903e1452 100644 --- a/plugins/module_utils/cmd_runner.py +++ b/plugins/module_utils/cmd_runner.py @@ -1,14 +1,14 @@ -# -*- coding: utf-8 -*- -# (c) 2022, Alexei Znamensky -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# Copyright (c) 2022, Alexei Znamensky +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations -from functools import wraps +import os from ansible.module_utils.common.collections import is_sequence -from ansible.module_utils.six import iteritems +from ansible.module_utils.common.locale import get_best_parsable_locale +from ansible_collections.community.general.plugins.module_utils import cmd_runner_fmt def _ensure_list(value): @@ -30,18 +30,10 @@ 
class MissingArgumentFormat(CmdRunnerException): self.args_formats = args_formats def __repr__(self): - return "MissingArgumentFormat({0!r}, {1!r}, {2!r})".format( - self.arg, - self.args_order, - self.args_formats, - ) + return f"MissingArgumentFormat({self.arg!r}, {self.args_order!r}, {self.args_formats!r})" def __str__(self): - return "Cannot find format for parameter {0} {1} in: {2}".format( - self.arg, - self.args_order, - self.args_formats, - ) + return f"Cannot find format for parameter {self.arg} {self.args_order} in: {self.args_formats}" class MissingArgumentValue(CmdRunnerException): @@ -50,16 +42,10 @@ class MissingArgumentValue(CmdRunnerException): self.arg = arg def __repr__(self): - return "MissingArgumentValue({0!r}, {1!r})".format( - self.args_order, - self.arg, - ) + return f"MissingArgumentValue({self.args_order!r}, {self.arg!r})" def __str__(self): - return "Cannot find value for parameter {0} in {1}".format( - self.arg, - self.args_order, - ) + return f"Cannot find value for parameter {self.arg} in {self.args_order}" class FormatError(CmdRunnerException): @@ -71,97 +57,10 @@ class FormatError(CmdRunnerException): super(FormatError, self).__init__() def __repr__(self): - return "FormatError({0!r}, {1!r}, {2!r}, {3!r})".format( - self.name, - self.value, - self.args_formats, - self.exc, - ) + return f"FormatError({self.name!r}, {self.value!r}, {self.args_formats!r}, {self.exc!r})" def __str__(self): - return "Failed to format parameter {0} with value {1}: {2}".format( - self.name, - self.value, - self.exc, - ) - - -class _ArgFormat(object): - def __init__(self, func, ignore_none=None): - self.func = func - self.ignore_none = ignore_none - - def __call__(self, value, ctx_ignore_none): - ignore_none = self.ignore_none if self.ignore_none is not None else ctx_ignore_none - if value is None and ignore_none: - return [] - f = self.func - return [str(x) for x in f(value)] - - -class _Format(object): - @staticmethod - def as_bool(args): - return 
_ArgFormat(lambda value: _ensure_list(args) if value else []) - - @staticmethod - def as_bool_not(args): - return _ArgFormat(lambda value: [] if value else _ensure_list(args), ignore_none=False) - - @staticmethod - def as_optval(arg, ignore_none=None): - return _ArgFormat(lambda value: ["{0}{1}".format(arg, value)], ignore_none=ignore_none) - - @staticmethod - def as_opt_val(arg, ignore_none=None): - return _ArgFormat(lambda value: [arg, value], ignore_none=ignore_none) - - @staticmethod - def as_opt_eq_val(arg, ignore_none=None): - return _ArgFormat(lambda value: ["{0}={1}".format(arg, value)], ignore_none=ignore_none) - - @staticmethod - def as_list(ignore_none=None): - return _ArgFormat(_ensure_list, ignore_none=ignore_none) - - @staticmethod - def as_fixed(args): - return _ArgFormat(lambda value: _ensure_list(args), ignore_none=False) - - @staticmethod - def as_func(func, ignore_none=None): - return _ArgFormat(func, ignore_none=ignore_none) - - @staticmethod - def as_map(_map, default=None, ignore_none=None): - return _ArgFormat(lambda value: _ensure_list(_map.get(value, default)), ignore_none=ignore_none) - - @staticmethod - def as_default_type(_type, arg="", ignore_none=None): - fmt = _Format - if _type == "dict": - return fmt.as_func(lambda d: ["--{0}={1}".format(*a) for a in iteritems(d)], - ignore_none=ignore_none) - if _type == "list": - return fmt.as_func(lambda value: ["--{0}".format(x) for x in value], ignore_none=ignore_none) - if _type == "bool": - return fmt.as_bool("--{0}".format(arg)) - - return fmt.as_opt_val("--{0}".format(arg), ignore_none=ignore_none) - - @staticmethod - def unpack_args(func): - @wraps(func) - def wrapper(v): - return func(*v) - return wrapper - - @staticmethod - def unpack_kwargs(func): - @wraps(func) - def wrapper(v): - return func(**v) - return wrapper + return f"Failed to format parameter {self.name} with value {self.value}: {self.exc}" class CmdRunner(object): @@ -183,21 +82,32 @@ class CmdRunner(object): 
self.default_args_order = self._prepare_args_order(default_args_order) if arg_formats is None: arg_formats = {} - self.arg_formats = dict(arg_formats) + self.arg_formats = {} + for fmt_name, fmt in arg_formats.items(): + if not cmd_runner_fmt.is_argformat(fmt): + fmt = cmd_runner_fmt.as_func(func=fmt, ignore_none=True) + self.arg_formats[fmt_name] = fmt self.check_rc = check_rc - self.force_lang = force_lang + if force_lang == "auto": + try: + self.force_lang = get_best_parsable_locale(module) + except RuntimeWarning: + self.force_lang = "C" + else: + self.force_lang = force_lang self.path_prefix = path_prefix if environ_update is None: environ_update = {} self.environ_update = environ_update - self.command[0] = module.get_bin_path(command[0], opt_dirs=path_prefix, required=True) + _cmd = self.command[0] + self.command[0] = _cmd if (os.path.isabs(_cmd) or '/' in _cmd) else module.get_bin_path(_cmd, opt_dirs=path_prefix, required=True) - for mod_param_name, spec in iteritems(module.argument_spec): - if mod_param_name not in self.arg_formats: - self.arg_formats[mod_param_name] = _Format.as_default_type(spec['type'], mod_param_name) + @property + def binary(self): + return self.command[0] - def context(self, args_order=None, output_process=None, ignore_value_none=True, **kwargs): + def __call__(self, args_order=None, output_process=None, check_mode_skip=False, check_mode_return=None, **kwargs): if output_process is None: output_process = _process_as_is if args_order is None: @@ -209,18 +119,23 @@ class CmdRunner(object): return _CmdRunnerContext(runner=self, args_order=args_order, output_process=output_process, - ignore_value_none=ignore_value_none, **kwargs) + check_mode_skip=check_mode_skip, + check_mode_return=check_mode_return, **kwargs) def has_arg_format(self, arg): return arg in self.arg_formats + # not decided whether to keep it or not, but if deprecating it will happen in a farther future. 
+ context = __call__ + class _CmdRunnerContext(object): - def __init__(self, runner, args_order, output_process, ignore_value_none, **kwargs): + def __init__(self, runner, args_order, output_process, check_mode_skip, check_mode_return, **kwargs): self.runner = runner self.args_order = tuple(args_order) self.output_process = output_process - self.ignore_value_none = ignore_value_none + self.check_mode_skip = check_mode_skip + self.check_mode_return = check_mode_return self.run_command_args = dict(kwargs) self.environ_update = runner.environ_update @@ -253,13 +168,18 @@ class _CmdRunnerContext(object): for arg_name in self.args_order: value = None try: - value = named_args[arg_name] - self.cmd.extend(runner.arg_formats[arg_name](value, ctx_ignore_none=self.ignore_value_none)) - except KeyError: - raise MissingArgumentValue(self.args_order, arg_name) + if arg_name in named_args: + value = named_args[arg_name] + elif not runner.arg_formats[arg_name].ignore_missing_value: + raise MissingArgumentValue(self.args_order, arg_name) + self.cmd.extend(runner.arg_formats[arg_name](value)) + except MissingArgumentValue: + raise except Exception as e: raise FormatError(arg_name, value, runner.arg_formats[arg_name], e) + if self.check_mode_skip and module.check_mode: + return self.check_mode_return results = module.run_command(self.cmd, **self.run_command_args) self.results_rc, self.results_out, self.results_err = results self.results_processed = self.output_process(*results) @@ -268,7 +188,6 @@ class _CmdRunnerContext(object): @property def run_info(self): return dict( - ignore_value_none=self.ignore_value_none, check_rc=self.check_rc, environ_update=self.environ_update, args_order=self.args_order, @@ -286,6 +205,3 @@ class _CmdRunnerContext(object): def __exit__(self, exc_type, exc_val, exc_tb): return False - - -fmt = _Format() diff --git a/plugins/module_utils/cmd_runner_fmt.py b/plugins/module_utils/cmd_runner_fmt.py new file mode 100644 index 0000000000..dcb9fc8e20 --- 
/dev/null +++ b/plugins/module_utils/cmd_runner_fmt.py @@ -0,0 +1,116 @@ +# Copyright (c) 2024, Alexei Znamensky +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + +from functools import wraps + +from ansible.module_utils.common.collections import is_sequence + + +def _ensure_list(value): + return list(value) if is_sequence(value) else [value] + + +class _ArgFormat(object): + def __init__(self, func, ignore_none=True, ignore_missing_value=False): + self.func = func + self.ignore_none = ignore_none + self.ignore_missing_value = ignore_missing_value + + def __call__(self, value): + ignore_none = self.ignore_none if self.ignore_none is not None else True + if value is None and ignore_none: + return [] + f = self.func + return [str(x) for x in f(value)] + + def __str__(self): + return f"<ArgFormat: func={self.func} ignore_none={self.ignore_none}>" + + def __repr__(self): + return str(self) + + +def as_bool(args_true, args_false=None, ignore_none=None): + if args_false is not None: + if ignore_none is None: + ignore_none = False + else: + args_false = [] + return _ArgFormat(lambda value: _ensure_list(args_true) if value else _ensure_list(args_false), ignore_none=ignore_none) + + +def as_bool_not(args): + return as_bool([], args, ignore_none=False) + + +def as_optval(arg, ignore_none=None): + return _ArgFormat(lambda value: [f"{arg}{value}"], ignore_none=ignore_none) + + +def as_opt_val(arg, ignore_none=None): + return _ArgFormat(lambda value: [arg, value], ignore_none=ignore_none) + + +def as_opt_eq_val(arg, ignore_none=None): + return _ArgFormat(lambda value: [f"{arg}={value}"], ignore_none=ignore_none) + + +def as_list(ignore_none=None, min_len=0, max_len=None): + def func(value): + value = _ensure_list(value) + if len(value) < min_len: + raise ValueError(f"Parameter must have at least {min_len} element(s)") + if max_len is not None and len(value) > max_len: + raise 
ValueError(f"Parameter must have at most {max_len} element(s)") + return value + return _ArgFormat(func, ignore_none=ignore_none) + + +def as_fixed(*args): + if len(args) == 1 and is_sequence(args[0]): + args = args[0] + return _ArgFormat(lambda value: _ensure_list(args), ignore_none=False, ignore_missing_value=True) + + +def as_func(func, ignore_none=None): + return _ArgFormat(func, ignore_none=ignore_none) + + +def as_map(_map, default=None, ignore_none=None): + if default is None: + default = [] + return _ArgFormat(lambda value: _ensure_list(_map.get(value, default)), ignore_none=ignore_none) + + +def unpack_args(func): + @wraps(func) + def wrapper(v): + return func(*v) + return wrapper + + +def unpack_kwargs(func): + @wraps(func) + def wrapper(v): + return func(**v) + return wrapper + + +def stack(fmt): + @wraps(fmt) + def wrapper(*args, **kwargs): + new_func = fmt(ignore_none=True, *args, **kwargs) + + def stacking(value): + stack = [new_func(v) for v in value if v] + stack = [x for args in stack for x in args] + return stack + return _ArgFormat(stacking, ignore_none=True) + return wrapper + + +def is_argformat(fmt): + return isinstance(fmt, _ArgFormat) diff --git a/plugins/module_utils/consul.py b/plugins/module_utils/consul.py new file mode 100644 index 0000000000..b814485c55 --- /dev/null +++ b/plugins/module_utils/consul.py @@ -0,0 +1,349 @@ + +# Copyright (c) 2022, Håkon Lerring +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + + +import copy +import json +import re +from urllib import error as urllib_error +from urllib.parse import urlencode + +from ansible.module_utils.urls import open_url + + +def get_consul_url(configuration): + return f"{configuration.scheme}://{configuration.host}:{configuration.port}/v1" + + +def get_auth_headers(configuration): + if configuration.token is None: + return {} + else: + return 
{"X-Consul-Token": configuration.token} + + +class RequestError(Exception): + def __init__(self, status, response_data=None): + self.status = status + self.response_data = response_data + + def __str__(self): + if self.response_data is None: + # self.status is already the message (backwards compat) + return self.status + return f"HTTP {self.status}: {self.response_data}" + + +def handle_consul_response_error(response): + if 400 <= response.status_code < 600: + raise RequestError(f"{response.status_code} {response.content}") + + +AUTH_ARGUMENTS_SPEC = dict( + host=dict(default="localhost"), + port=dict(type="int", default=8500), + scheme=dict(default="http"), + validate_certs=dict(type="bool", default=True), + token=dict(no_log=True), + ca_path=dict(), +) + + +def camel_case_key(key): + parts = [] + for part in key.split("_"): + if part in {"id", "ttl", "jwks", "jwt", "oidc", "iam", "sts"}: + parts.append(part.upper()) + else: + parts.append(part.capitalize()) + return "".join(parts) + + +def validate_check(check): + validate_duration_keys = ['Interval', 'Ttl', 'Timeout'] + validate_tcp_regex = r"(?P.*):(?P(?:[0-9]+))$" + if check.get('Tcp') is not None: + match = re.match(validate_tcp_regex, check['Tcp']) + if not match: + raise Exception('tcp check must be in host:port format') + for duration in validate_duration_keys: + if duration in check and check[duration] is not None: + check[duration] = validate_duration(check[duration]) + + +def validate_duration(duration): + if duration: + if not re.search(r"\d+(?:ns|us|ms|s|m|h)", duration): + duration = f"{duration}s" + return duration + + +STATE_PARAMETER = "state" +STATE_PRESENT = "present" +STATE_ABSENT = "absent" + +OPERATION_READ = "read" +OPERATION_CREATE = "create" +OPERATION_UPDATE = "update" +OPERATION_DELETE = "remove" + + +def _normalize_params(params, arg_spec): + final_params = {} + for k, v in params.items(): + if k not in arg_spec or v is None: # Alias + continue + spec = arg_spec[k] + if ( + 
spec.get("type") == "list" + and spec.get("elements") == "dict" + and spec.get("options") + and v + ): + v = [_normalize_params(d, spec["options"]) for d in v] + elif spec.get("type") == "dict" and spec.get("options") and v: + v = _normalize_params(v, spec["options"]) + final_params[k] = v + return final_params + + +class _ConsulModule: + """Base class for Consul modules. + + This class is considered private, till the API is fully fleshed out. + As such backwards incompatible changes can occur even in bugfix releases. + """ + + api_endpoint = None # type: str + unique_identifiers = None # type: list + result_key = None # type: str + create_only_fields = set() + operational_attributes = set() + params = {} + + def __init__(self, module): + self._module = module + self.params = _normalize_params(module.params, module.argument_spec) + self.api_params = { + k: camel_case_key(k) + for k in self.params + if k not in STATE_PARAMETER and k not in AUTH_ARGUMENTS_SPEC + } + + self.operational_attributes.update({"CreateIndex", "CreateTime", "Hash", "ModifyIndex"}) + + def execute(self): + obj = self.read_object() + + changed = False + diff = {} + if self.params[STATE_PARAMETER] == STATE_PRESENT: + obj_from_module = self.module_to_obj(obj is not None) + if obj is None: + operation = OPERATION_CREATE + new_obj = self.create_object(obj_from_module) + diff = {"before": {}, "after": new_obj} + changed = True + else: + operation = OPERATION_UPDATE + if self._needs_update(obj, obj_from_module): + new_obj = self.update_object(obj, obj_from_module) + diff = {"before": obj, "after": new_obj} + changed = True + else: + new_obj = obj + elif self.params[STATE_PARAMETER] == STATE_ABSENT: + operation = OPERATION_DELETE + if obj is not None: + self.delete_object(obj) + changed = True + diff = {"before": obj, "after": {}} + else: + diff = {"before": {}, "after": {}} + new_obj = None + else: + raise RuntimeError("Unknown state supplied.") + + result = {"changed": changed} + if changed: + 
result["operation"] = operation + if self._module._diff: + result["diff"] = diff + if self.result_key: + result[self.result_key] = new_obj + self._module.exit_json(**result) + + def module_to_obj(self, is_update): + obj = {} + for k, v in self.params.items(): + result = self.map_param(k, v, is_update) + if result: + obj[result[0]] = result[1] + return obj + + def map_param(self, k, v, is_update): + def helper(item): + return {camel_case_key(k): v for k, v in item.items()} + + def needs_camel_case(k): + spec = self._module.argument_spec[k] + return ( + spec.get("type") == "list" + and spec.get("elements") == "dict" + and spec.get("options") + ) or (spec.get("type") == "dict" and spec.get("options")) + + if k in self.api_params and v is not None: + if isinstance(v, dict) and needs_camel_case(k): + v = helper(v) + elif isinstance(v, (list, tuple)) and needs_camel_case(k): + v = [helper(i) for i in v] + if is_update and k in self.create_only_fields: + return + return camel_case_key(k), v + + def _needs_update(self, api_obj, module_obj): + api_obj = copy.deepcopy(api_obj) + module_obj = copy.deepcopy(module_obj) + return self.needs_update(api_obj, module_obj) + + def needs_update(self, api_obj, module_obj): + for k, v in module_obj.items(): + if k not in api_obj: + return True + if api_obj[k] != v: + return True + return False + + def prepare_object(self, existing, obj): + existing = { + k: v for k, v in existing.items() if k not in self.operational_attributes + } + for k, v in obj.items(): + existing[k] = v + return existing + + def id_from_obj(self, obj, camel_case=False): + def key_func(key): + return camel_case_key(key) if camel_case else key + + if self.unique_identifiers: + for identifier in self.unique_identifiers: + identifier = key_func(identifier) + if identifier in obj: + return obj[identifier] + return None + + def endpoint_url(self, operation, identifier=None): + if operation == OPERATION_CREATE: + return self.api_endpoint + elif identifier: + return 
f"{self.api_endpoint}/{identifier}" + raise RuntimeError("invalid arguments passed") + + def read_object(self): + identifier = self.id_from_obj(self.params) + url = self.endpoint_url(OPERATION_READ, identifier) + try: + return self.get(url) + except RequestError as e: + if e.status == 404: + return + elif e.status == 403 and b"ACL not found" in e.response_data: + return + raise + + def create_object(self, obj): + if self._module.check_mode: + return obj + else: + url = self.endpoint_url(OPERATION_CREATE) + created_obj = self.put(url, data=self.prepare_object({}, obj)) + if created_obj is None: + created_obj = self.read_object() + return created_obj + + def update_object(self, existing, obj): + merged_object = self.prepare_object(existing, obj) + if self._module.check_mode: + return merged_object + else: + url = self.endpoint_url(OPERATION_UPDATE, self.id_from_obj(existing, camel_case=True)) + updated_obj = self.put(url, data=merged_object) + if updated_obj is None: + updated_obj = self.read_object() + return updated_obj + + def delete_object(self, obj): + if self._module.check_mode: + return {} + else: + url = self.endpoint_url(OPERATION_DELETE, self.id_from_obj(obj, camel_case=True)) + return self.delete(url) + + def _request(self, method, url_parts, data=None, params=None): + module_params = self.params + + if not isinstance(url_parts, (tuple, list)): + url_parts = [url_parts] + if params: + # Remove values that are None + params = {k: v for k, v in params.items() if v is not None} + + ca_path = module_params.get("ca_path") + base_url = f"{module_params['scheme']}://{module_params['host']}:{module_params['port']}/v1" + url = "/".join([base_url] + list(url_parts)) + + headers = {} + token = self.params.get("token") + if token: + headers["X-Consul-Token"] = token + + try: + if data is not None: + data = json.dumps(data) + headers["Content-Type"] = "application/json" + if params: + url = f"{url}?{urlencode(params)}" + response = open_url( + url, + method=method, + 
data=data, + headers=headers, + validate_certs=module_params["validate_certs"], + ca_path=ca_path, + ) + response_data = response.read() + status = ( + response.status if hasattr(response, "status") else response.getcode() + ) + + except urllib_error.URLError as e: + if isinstance(e, urllib_error.HTTPError): + status = e.code + response_data = e.fp.read() + else: + self._module.fail_json( + msg=f"Could not connect to consul agent at {module_params['host']}:{module_params['port']}, error was {e}" + ) + raise + + if 400 <= status < 600: + raise RequestError(status, response_data) + + if response_data: + return json.loads(response_data) + return None + + def get(self, url_parts, **kwargs): + return self._request("GET", url_parts, **kwargs) + + def put(self, url_parts, **kwargs): + return self._request("PUT", url_parts, **kwargs) + + def delete(self, url_parts, **kwargs): + return self._request("DELETE", url_parts, **kwargs) diff --git a/plugins/module_utils/csv.py b/plugins/module_utils/csv.py index 86c4694524..3003875c09 100644 --- a/plugins/module_utils/csv.py +++ b/plugins/module_utils/csv.py @@ -1,17 +1,15 @@ -# -*- coding: utf-8 -*- -# Copyright: (c) 2021, Andrew Pantuso (@ajpantuso) -# Copyright: (c) 2018, Dag Wieers (@dagwieers) -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# Copyright (c) 2021, Andrew Pantuso (@ajpantuso) +# Copyright (c) 2018, Dag Wieers (@dagwieers) +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations import csv -from io import BytesIO, StringIO +from io import StringIO from ansible.module_utils.common.text.converters import to_native -from ansible.module_utils.six import PY3 class CustomDialectFailureError(Exception): @@ -39,28 +37,27 @@ def 
initialize_dialect(dialect, **kwargs): csv.register_dialect("unix", unix_dialect) if dialect not in csv.list_dialects(): - raise DialectNotAvailableError("Dialect '%s' is not supported by your version of python." % dialect) + raise DialectNotAvailableError(f"Dialect '{dialect}' is not supported by your version of python.") # Create a dictionary from only set options - dialect_params = dict((k, v) for k, v in kwargs.items() if v is not None) + dialect_params = {k: v for k, v in kwargs.items() if v is not None} if dialect_params: try: csv.register_dialect('custom', dialect, **dialect_params) except TypeError as e: - raise CustomDialectFailureError("Unable to create custom dialect: %s" % to_native(e)) + raise CustomDialectFailureError(f"Unable to create custom dialect: {e}") dialect = 'custom' return dialect def read_csv(data, dialect, fieldnames=None): - + BOM = to_native('\ufeff') data = to_native(data, errors='surrogate_or_strict') + if data.startswith(BOM): + data = data[len(BOM):] - if PY3: - fake_fh = StringIO(data) - else: - fake_fh = BytesIO(data) + fake_fh = StringIO(data) reader = csv.DictReader(fake_fh, fieldnames=fieldnames, dialect=dialect) diff --git a/plugins/module_utils/database.py b/plugins/module_utils/database.py index 825d3a2be9..bb4c0efcee 100644 --- a/plugins/module_utils/database.py +++ b/plugins/module_utils/database.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # This code is part of Ansible, but is an independent component. # This particular file snippet, and this file snippet only, is BSD licensed. 
# Modules you write using this snippet, which is embedded dynamically by Ansible @@ -7,10 +6,10 @@ # # Copyright (c) 2014, Toshio Kuratomi # -# Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause) +# Simplified BSD License (see LICENSES/BSD-2-Clause.txt or https://opensource.org/licenses/BSD-2-Clause) +# SPDX-License-Identifier: BSD-2-Clause -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations import re @@ -103,19 +102,19 @@ def _identifier_parse(identifier, quote_char): dot = identifier.index('.') except ValueError: identifier = identifier.replace(quote_char, quote_char * 2) - identifier = ''.join((quote_char, identifier, quote_char)) + identifier = f"{quote_char}{identifier}{quote_char}" further_identifiers = [identifier] else: if dot == 0 or dot >= len(identifier) - 1: identifier = identifier.replace(quote_char, quote_char * 2) - identifier = ''.join((quote_char, identifier, quote_char)) + identifier = f"{quote_char}{identifier}{quote_char}" further_identifiers = [identifier] else: first_identifier = identifier[:dot] next_identifier = identifier[dot + 1:] further_identifiers = _identifier_parse(next_identifier, quote_char) first_identifier = first_identifier.replace(quote_char, quote_char * 2) - first_identifier = ''.join((quote_char, first_identifier, quote_char)) + first_identifier = f"{quote_char}{first_identifier}{quote_char}" further_identifiers.insert(0, first_identifier) return further_identifiers @@ -124,14 +123,14 @@ def _identifier_parse(identifier, quote_char): def pg_quote_identifier(identifier, id_type): identifier_fragments = _identifier_parse(identifier, quote_char='"') if len(identifier_fragments) > _PG_IDENTIFIER_TO_DOT_LEVEL[id_type]: - raise SQLParseError('PostgreSQL does not support %s with more than %i dots' % (id_type, _PG_IDENTIFIER_TO_DOT_LEVEL[id_type])) + raise SQLParseError(f'PostgreSQL does not support {id_type} 
with more than {_PG_IDENTIFIER_TO_DOT_LEVEL[id_type]} dots') return '.'.join(identifier_fragments) def mysql_quote_identifier(identifier, id_type): identifier_fragments = _identifier_parse(identifier, quote_char='`') if (len(identifier_fragments) - 1) > _MYSQL_IDENTIFIER_TO_DOT_LEVEL[id_type]: - raise SQLParseError('MySQL does not support %s with more than %i dots' % (id_type, _MYSQL_IDENTIFIER_TO_DOT_LEVEL[id_type])) + raise SQLParseError(f'MySQL does not support {id_type} with more than {_MYSQL_IDENTIFIER_TO_DOT_LEVEL[id_type]} dots') special_cased_fragments = [] for fragment in identifier_fragments: @@ -186,5 +185,4 @@ def check_input(module, *args): dangerous_elements.append(elem) if dangerous_elements: - module.fail_json(msg="Passed input '%s' is " - "potentially dangerous" % ', '.join(dangerous_elements)) + module.fail_json(msg=f"Passed input '{', '.join(dangerous_elements)}' is potentially dangerous") diff --git a/plugins/module_utils/datetime.py b/plugins/module_utils/datetime.py new file mode 100644 index 0000000000..f11375f0eb --- /dev/null +++ b/plugins/module_utils/datetime.py @@ -0,0 +1,30 @@ +# +# Copyright (c) 2023 Felix Fontein +# Simplified BSD License (see LICENSES/BSD-2-Clause.txt or https://opensource.org/licenses/BSD-2-Clause) +# SPDX-License-Identifier: BSD-2-Clause + +from __future__ import annotations + +import datetime as _datetime +import sys + + +_USE_TIMEZONE = sys.version_info >= (3, 6) + + +def ensure_timezone_info(value): + if not _USE_TIMEZONE or value.tzinfo is not None: + return value + return value.astimezone(_datetime.timezone.utc) + + +def fromtimestamp(value): + if _USE_TIMEZONE: + return _datetime.datetime.fromtimestamp(value, tz=_datetime.timezone.utc) + return _datetime.datetime.utcfromtimestamp(value) + + +def now(): + if _USE_TIMEZONE: + return _datetime.datetime.now(tz=_datetime.timezone.utc) + return _datetime.datetime.utcnow() diff --git a/plugins/module_utils/deps.py b/plugins/module_utils/deps.py new file mode 100644 index 
0000000000..a24cd63838 --- /dev/null +++ b/plugins/module_utils/deps.py @@ -0,0 +1,100 @@ +# (c) 2022, Alexei Znamensky +# Copyright (c) 2022, Ansible Project +# Simplified BSD License (see LICENSES/BSD-2-Clause.txt or https://opensource.org/licenses/BSD-2-Clause) +# SPDX-License-Identifier: BSD-2-Clause + +from __future__ import annotations + + +import traceback +from contextlib import contextmanager + +from ansible.module_utils.common.text.converters import to_native +from ansible.module_utils.basic import missing_required_lib + + +_deps = dict() + + +class _Dependency(object): + _states = ["pending", "failure", "success"] + + def __init__(self, name, reason=None, url=None, msg=None): + self.name = name + self.reason = reason + self.url = url + self.msg = msg + + self.state = 0 + self.trace = None + self.exc = None + + def succeed(self): + self.state = 2 + + def fail(self, exc, trace): + self.state = 1 + self.exc = exc + self.trace = trace + + @property + def message(self): + if self.msg: + return to_native(self.msg) + else: + return missing_required_lib(self.name, reason=self.reason, url=self.url) + + @property + def failed(self): + return self.state == 1 + + def validate(self, module): + if self.failed: + module.fail_json(msg=self.message, exception=self.trace) + + def __str__(self): + return f"<dependency: {self.name} [{self._states[self.state]}]>" + + +@contextmanager
def declare(name, *args, **kwargs): + dep = _Dependency(name, *args, **kwargs) + try: + yield dep + except Exception as e: + dep.fail(e, traceback.format_exc()) + else: + dep.succeed() + finally: + _deps[name] = dep + + +def _select_names(spec): + dep_names = sorted(_deps) + + if spec: + if spec.startswith("-"): + spec_split = spec[1:].split(":") + for d in spec_split: + dep_names.remove(d) + else: + spec_split = spec.split(":") + dep_names = [] + for d in spec_split: + _deps[d] # ensure it exists + dep_names.append(d) + + return dep_names + + +def validate(module, spec=None): + for dep in _select_names(spec): + _deps[dep].validate(module) + + 
+def failed(spec=None): + return any(_deps[d].failed for d in _select_names(spec)) + + +def clear(): + _deps.clear() diff --git a/plugins/module_utils/dimensiondata.py b/plugins/module_utils/dimensiondata.py index bcb02e8476..a0430b445e 100644 --- a/plugins/module_utils/dimensiondata.py +++ b/plugins/module_utils/dimensiondata.py @@ -1,8 +1,8 @@ -# -*- coding: utf-8 -*- # # Copyright (c) 2016 Dimension Data # -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later # # Authors: # - Aimon Bustardo @@ -11,22 +11,22 @@ # # Common functionality to be used by various module components -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations +import configparser import os import re import traceback -from ansible.module_utils.basic import AnsibleModule, missing_required_lib -from ansible.module_utils.six.moves import configparser +# (TODO: remove AnsibleModule from next line!) 
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib # noqa: F401, pylint: disable=unused-import from os.path import expanduser from uuid import UUID LIBCLOUD_IMP_ERR = None try: - from libcloud.common.dimensiondata import API_ENDPOINTS, DimensionDataAPIException, DimensionDataStatus - from libcloud.compute.base import Node, NodeLocation + from libcloud.common.dimensiondata import API_ENDPOINTS, DimensionDataAPIException, DimensionDataStatus # noqa: F401, pylint: disable=unused-import + from libcloud.compute.base import Node, NodeLocation # noqa: F401, pylint: disable=unused-import from libcloud.compute.providers import get_driver from libcloud.compute.types import Provider @@ -37,7 +37,7 @@ except ImportError: LIBCLOUD_IMP_ERR = traceback.format_exc() HAS_LIBCLOUD = False -# MCP 2.x version patten for location (datacenter) names. +# MCP 2.x version pattern for location (datacenter) names. # # Note that this is not a totally reliable way of determining MCP version. # Unfortunately, libcloud's NodeLocation currently makes no provision for extended properties. @@ -73,7 +73,7 @@ class DimensionDataModule(object): # Region and location are common to all Dimension Data modules. 
region = self.module.params['region'] - self.region = 'dd-{0}'.format(region) + self.region = f'dd-{region}' self.location = self.module.params['location'] libcloud.security.VERIFY_SSL_CERT = self.module.params['validate_certs'] @@ -140,7 +140,7 @@ class DimensionDataModule(object): if not user_id or not key: home = expanduser('~') config = configparser.RawConfigParser() - config.read("%s/.dimensiondata" % home) + config.read(f"{home}/.dimensiondata") try: user_id = config.get("dimensiondatacloud", "MCP_USER") @@ -190,7 +190,7 @@ class DimensionDataModule(object): if network_domain: return network_domain - raise UnknownNetworkError("Network '%s' could not be found" % locator) + raise UnknownNetworkError(f"Network '{locator}' could not be found") def get_vlan(self, locator, location, network_domain): """ @@ -212,7 +212,7 @@ class DimensionDataModule(object): if vlan: return vlan - raise UnknownVLANError("VLAN '%s' could not be found" % locator) + raise UnknownVLANError(f"VLAN '{locator}' could not be found") @staticmethod def argument_spec(**additional_argument_spec): diff --git a/plugins/module_utils/django.py b/plugins/module_utils/django.py new file mode 100644 index 0000000000..4c052a1d6e --- /dev/null +++ b/plugins/module_utils/django.py @@ -0,0 +1,150 @@ +# Copyright (c) 2024, Alexei Znamensky +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + + +from ansible.module_utils.common.dict_transformations import dict_merge +from ansible_collections.community.general.plugins.module_utils import cmd_runner_fmt +from ansible_collections.community.general.plugins.module_utils.python_runner import PythonRunner +from ansible_collections.community.general.plugins.module_utils.module_helper import ModuleHelper + + +django_std_args = dict( + # environmental options + venv=dict(type="path"), + # default options of django-admin + 
settings=dict(type="str", required=True), + pythonpath=dict(type="path"), + traceback=dict(type="bool"), + verbosity=dict(type="int", choices=[0, 1, 2, 3]), + skip_checks=dict(type="bool"), +) +_database_dash = dict( + database=dict(type="str", default="default"), +) +_data = dict( + excludes=dict(type="list", elements="str"), + format=dict(type="str", default="json", choices=["xml", "json", "jsonl", "yaml"]), +) +_pks = dict( + primary_keys=dict(type="list", elements="str"), +) + +_django_std_arg_fmts = dict( + all=cmd_runner_fmt.as_bool("--all"), + app=cmd_runner_fmt.as_opt_val("--app"), + apps=cmd_runner_fmt.as_list(), + apps_models=cmd_runner_fmt.as_list(), + check=cmd_runner_fmt.as_bool("--check"), + command=cmd_runner_fmt.as_list(), + database_dash=cmd_runner_fmt.as_opt_eq_val("--database"), + database_stacked_dash=cmd_runner_fmt.stack(cmd_runner_fmt.as_opt_val)("--database"), + deploy=cmd_runner_fmt.as_bool("--deploy"), + dry_run=cmd_runner_fmt.as_bool("--dry-run"), + excludes=cmd_runner_fmt.stack(cmd_runner_fmt.as_opt_val)("--exclude"), + fail_level=cmd_runner_fmt.as_opt_val("--fail-level"), + fixture=cmd_runner_fmt.as_opt_val("--output"), + fixtures=cmd_runner_fmt.as_list(), + format=cmd_runner_fmt.as_opt_val("--format"), + ignore_non_existent=cmd_runner_fmt.as_bool("--ignorenonexistent"), + indent=cmd_runner_fmt.as_opt_val("--indent"), + natural_foreign=cmd_runner_fmt.as_bool("--natural-foreign"), + natural_primary=cmd_runner_fmt.as_bool("--natural-primary"), + no_color=cmd_runner_fmt.as_fixed("--no-color"), + noinput=cmd_runner_fmt.as_fixed("--noinput"), + primary_keys=lambda v: ["--pks", ",".join(v)], + pythonpath=cmd_runner_fmt.as_opt_eq_val("--pythonpath"), + settings=cmd_runner_fmt.as_opt_eq_val("--settings"), + skip_checks=cmd_runner_fmt.as_bool("--skip-checks"), + tags=cmd_runner_fmt.stack(cmd_runner_fmt.as_opt_val)("--tag"), + traceback=cmd_runner_fmt.as_bool("--traceback"), + verbosity=cmd_runner_fmt.as_opt_val("--verbosity"), + 
version=cmd_runner_fmt.as_fixed("--version"), +) + +# keys can be used in _django_args +_args_menu = dict( + std=(django_std_args, _django_std_arg_fmts), + database=(_database_dash, {"database": _django_std_arg_fmts["database_dash"]}), # deprecate, remove in 13.0.0 + noinput=({}, {"noinput": cmd_runner_fmt.as_fixed("--noinput")}), # deprecate, remove in 13.0.0 + dry_run=({}, {"dry_run": cmd_runner_fmt.as_bool("--dry-run")}), # deprecate, remove in 13.0.0 + check=({}, {"check": cmd_runner_fmt.as_bool("--check")}), # deprecate, remove in 13.0.0 + database_dash=(_database_dash, {}), + data=(_data, {}), +) + + +class _DjangoRunner(PythonRunner): + def __init__(self, module, arg_formats=None, **kwargs): + arg_fmts = dict(arg_formats) if arg_formats else {} + arg_fmts.update(_django_std_arg_fmts) + + super(_DjangoRunner, self).__init__(module, ["-m", "django"], arg_formats=arg_fmts, **kwargs) + + def __call__(self, output_process=None, check_mode_skip=False, check_mode_return=None, **kwargs): + args_order = ( + ("command", "no_color", "settings", "pythonpath", "traceback", "verbosity", "skip_checks") + self._prepare_args_order(self.default_args_order) + ) + return super(_DjangoRunner, self).__call__(args_order, output_process, check_mode_skip=check_mode_skip, check_mode_return=check_mode_return, **kwargs) + + def bare_context(self, *args, **kwargs): + return super(_DjangoRunner, self).__call__(*args, **kwargs) + + +class DjangoModuleHelper(ModuleHelper): + module = {} + django_admin_cmd = None + arg_formats = {} + django_admin_arg_order = () + _django_args = [] + _check_mode_arg = "" + + def __init__(self): + self.module["argument_spec"], self.arg_formats = self._build_args(self.module.get("argument_spec", {}), + self.arg_formats, + *(["std"] + self._django_args)) + super(DjangoModuleHelper, self).__init__(self.module) + if self.django_admin_cmd is not None: + self.vars.command = self.django_admin_cmd + + @staticmethod + def _build_args(arg_spec, arg_format, *names): + 
res_arg_spec = {} + res_arg_fmts = {} + for name in names: + args, fmts = _args_menu[name] + res_arg_spec = dict_merge(res_arg_spec, args) + res_arg_fmts = dict_merge(res_arg_fmts, fmts) + res_arg_spec = dict_merge(res_arg_spec, arg_spec) + res_arg_fmts = dict_merge(res_arg_fmts, arg_format) + + return res_arg_spec, res_arg_fmts + + def __run__(self): + runner = _DjangoRunner(self.module, + default_args_order=self.django_admin_arg_order, + arg_formats=self.arg_formats, + venv=self.vars.venv, + check_rc=True) + + run_params = self.vars.as_dict() + if self._check_mode_arg: + run_params.update({self._check_mode_arg: self.check_mode}) + + rc, out, err = runner.bare_context("version").run() + self.vars.version = out.strip() + + with runner() as ctx: + results = ctx.run(**run_params) + self.vars.stdout = ctx.results_out + self.vars.stderr = ctx.results_err + self.vars.cmd = ctx.cmd + self.vars.set("run_info", ctx.run_info, verbosity=3) + + return results + + @classmethod + def execute(cls): + cls().run() diff --git a/plugins/module_utils/gandi_livedns_api.py b/plugins/module_utils/gandi_livedns_api.py index 2c785353ad..135fc6188c 100644 --- a/plugins/module_utils/gandi_livedns_api.py +++ b/plugins/module_utils/gandi_livedns_api.py @@ -1,13 +1,12 @@ -# -*- coding: utf-8 -*- -# Copyright: (c) 2019 Gregory Thiemonge -# Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause) +# Copyright (c) 2019 Gregory Thiemonge +# Simplified BSD License (see LICENSES/BSD-2-Clause.txt or https://opensource.org/licenses/BSD-2-Clause) +# SPDX-License-Identifier: BSD-2-Clause -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations import json -from ansible.module_utils.common.text.converters import to_native, to_text +from ansible.module_utils.common.text.converters import to_text from ansible.module_utils.urls import fetch_url @@ -32,6 +31,7 @@ class GandiLiveDNSAPI(object): 
def __init__(self, module): self.module = module self.api_key = module.params['api_key'] + self.personal_access_token = module.params['personal_access_token'] def _build_error_message(self, module, info): s = '' @@ -42,21 +42,26 @@ class GandiLiveDNSAPI(object): error = errors[0] name = error.get('name') if name: - s += '{0} :'.format(name) + s += f'{name} :' description = error.get('description') if description: s += description return s def _gandi_api_call(self, api_call, method='GET', payload=None, error_on_404=True): - headers = {'Authorization': 'Apikey {0}'.format(self.api_key), + authorization_header = ( + f'Bearer {self.personal_access_token}' + if self.personal_access_token + else f'Apikey {self.api_key}' + ) + headers = {'Authorization': authorization_header, 'Content-Type': 'application/json'} data = None if payload: try: data = json.dumps(payload) except Exception as e: - self.module.fail_json(msg="Failed to encode payload as JSON: %s " % to_native(e)) + self.module.fail_json(msg=f"Failed to encode payload as JSON: {e} ") resp, info = fetch_url(self.module, self.api_endpoint + api_call, @@ -68,7 +73,7 @@ class GandiLiveDNSAPI(object): if info['status'] >= 400 and (info['status'] != 404 or error_on_404): err_s = self.error_strings.get(info['status'], '') - error_msg = "API Error {0}: {1}".format(err_s, self._build_error_message(self.module, info)) + error_msg = f"API Error {err_s}: {self._build_error_message(self.module, info)}" result = None try: @@ -80,7 +85,7 @@ class GandiLiveDNSAPI(object): try: result = json.loads(to_text(content, errors='surrogate_or_strict')) except (getattr(json, 'JSONDecodeError', ValueError)) as e: - error_msg += "; Failed to parse API response with error {0}: {1}".format(to_native(e), content) + error_msg += f"; Failed to parse API response with error {e}: {content}" if error_msg: self.module.fail_json(msg=error_msg) @@ -109,11 +114,11 @@ class GandiLiveDNSAPI(object): return [self.build_result(r, domain) for r in results] 
def get_records(self, record, type, domain): - url = '/domains/%s/records' % (domain) + url = f'/domains/{domain}/records' if record: - url += '/%s' % (record) + url += f'/{record}' if type: - url += '/%s' % (type) + url += f'/{type}' records, status = self._gandi_api_call(url, error_on_404=False) @@ -132,7 +137,7 @@ class GandiLiveDNSAPI(object): return records def create_record(self, record, type, values, ttl, domain): - url = '/domains/%s/records' % (domain) + url = f'/domains/{domain}/records' new_record = { 'rrset_name': record, 'rrset_type': type, @@ -147,7 +152,7 @@ class GandiLiveDNSAPI(object): return None def update_record(self, record, type, values, ttl, domain): - url = '/domains/%s/records/%s/%s' % (domain, record, type) + url = f'/domains/{domain}/records/{record}/{type}' new_record = { 'rrset_values': values, 'rrset_ttl': ttl, @@ -156,7 +161,7 @@ class GandiLiveDNSAPI(object): return record def delete_record(self, record, type, domain): - url = '/domains/%s/records/%s/%s' % (domain, record, type) + url = f'/domains/{domain}/records/{record}/{type}' self._gandi_api_call(url, method='DELETE') diff --git a/plugins/module_utils/gconftool2.py b/plugins/module_utils/gconftool2.py new file mode 100644 index 0000000000..7d11078edf --- /dev/null +++ b/plugins/module_utils/gconftool2.py @@ -0,0 +1,31 @@ +# Copyright (c) 2022, Alexei Znamensky +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + +from ansible_collections.community.general.plugins.module_utils.cmd_runner import CmdRunner, cmd_runner_fmt + + +_state_map = { + "present": "--set", + "absent": "--unset", + "get": "--get", +} + + +def gconftool2_runner(module, **kwargs): + return CmdRunner( + module, + command='gconftool-2', + arg_formats=dict( + state=cmd_runner_fmt.as_map(_state_map), + key=cmd_runner_fmt.as_list(), + 
value_type=cmd_runner_fmt.as_opt_val("--type"), + value=cmd_runner_fmt.as_list(), + direct=cmd_runner_fmt.as_bool("--direct"), + config_source=cmd_runner_fmt.as_opt_val("--config-source"), + version=cmd_runner_fmt.as_fixed("--version"), + ), + **kwargs + ) diff --git a/plugins/module_utils/gio_mime.py b/plugins/module_utils/gio_mime.py new file mode 100644 index 0000000000..15122b1ef1 --- /dev/null +++ b/plugins/module_utils/gio_mime.py @@ -0,0 +1,32 @@ +# Copyright (c) 2022, Alexei Znamensky +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + +from ansible_collections.community.general.plugins.module_utils.cmd_runner import CmdRunner, cmd_runner_fmt + + +def gio_mime_runner(module, **kwargs): + return CmdRunner( + module, + command=['gio'], + arg_formats=dict( + mime=cmd_runner_fmt.as_fixed('mime'), + mime_type=cmd_runner_fmt.as_list(), + handler=cmd_runner_fmt.as_list(), + version=cmd_runner_fmt.as_fixed('--version'), + ), + **kwargs + ) + + +def gio_mime_get(runner, mime_type): + def process(rc, out, err): + if err.startswith("No default applications for"): + return None + out = out.splitlines()[0] + return out.split()[-1] + + with runner("mime mime_type", output_process=process) as ctx: + return ctx.run(mime_type=mime_type) diff --git a/plugins/module_utils/gitlab.py b/plugins/module_utils/gitlab.py index 21af10b5cd..7ad11ab5a2 100644 --- a/plugins/module_utils/gitlab.py +++ b/plugins/module_utils/gitlab.py @@ -1,37 +1,48 @@ -# -*- coding: utf-8 -*- -# Copyright: (c) 2019, Guillaume Martinez (lunik@tiwabbit.fr) -# Copyright: (c) 2018, Marcus Watkins -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# Copyright (c) 2019, Guillaume Martinez (lunik@tiwabbit.fr) +# Copyright (c) 2018, Marcus Watkins +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or 
https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations from ansible.module_utils.basic import missing_required_lib -from ansible.module_utils.common.text.converters import to_native from ansible_collections.community.general.plugins.module_utils.version import LooseVersion -try: - from urllib import quote_plus # Python 2.X - from urlparse import urljoin -except ImportError: - from urllib.parse import quote_plus, urljoin # Python 3+ +from urllib.parse import urljoin import traceback + +def _determine_list_all_kwargs(version): + gitlab_version = LooseVersion(version) + if gitlab_version >= LooseVersion('4.0.0'): + # 4.0.0 removed 'as_list' + return {'iterator': True, 'per_page': 100} + elif gitlab_version >= LooseVersion('3.7.0'): + # 3.7.0 added 'get_all' + return {'as_list': False, 'get_all': True, 'per_page': 100} + else: + return {'as_list': False, 'all': True, 'per_page': 100} + + GITLAB_IMP_ERR = None try: import gitlab import requests HAS_GITLAB_PACKAGE = True + list_all_kwargs = _determine_list_all_kwargs(gitlab.__version__) except Exception: + gitlab = None GITLAB_IMP_ERR = traceback.format_exc() HAS_GITLAB_PACKAGE = False + list_all_kwargs = {} def auth_argument_spec(spec=None): arg_spec = (dict( + ca_path=dict(type='str'), api_token=dict(type='str', no_log=True), api_oauth_token=dict(type='str', no_log=True), api_job_token=dict(type='str', no_log=True), @@ -47,7 +58,7 @@ def find_project(gitlab_instance, identifier): except Exception as e: current_user = gitlab_instance.user try: - project = gitlab_instance.projects.get(current_user.username + '/' + identifier) + project = gitlab_instance.projects.get(f"{current_user.username}/{identifier}") except Exception as e: return None @@ -56,49 +67,111 @@ def find_project(gitlab_instance, identifier): def find_group(gitlab_instance, identifier): try: - project 
= gitlab_instance.groups.get(identifier) + group = gitlab_instance.groups.get(identifier) except Exception as e: return None - return project + return group -def gitlab_authentication(module): +def ensure_gitlab_package(module, min_version=None): + if not HAS_GITLAB_PACKAGE: + module.fail_json( + msg=missing_required_lib("python-gitlab", url='https://python-gitlab.readthedocs.io/en/stable/'), + exception=GITLAB_IMP_ERR + ) + gitlab_version = gitlab.__version__ + if min_version is not None and LooseVersion(gitlab_version) < LooseVersion(min_version): + module.fail_json(msg=( + f"This module requires python-gitlab Python module >= {min_version} (installed version: " + f"{gitlab_version}). Please upgrade python-gitlab to version {min_version} or above." + )) + + +def gitlab_authentication(module, min_version=None): + ensure_gitlab_package(module, min_version=min_version) + gitlab_url = module.params['api_url'] validate_certs = module.params['validate_certs'] + ca_path = module.params['ca_path'] gitlab_user = module.params['api_username'] gitlab_password = module.params['api_password'] gitlab_token = module.params['api_token'] gitlab_oauth_token = module.params['api_oauth_token'] gitlab_job_token = module.params['api_job_token'] - if not HAS_GITLAB_PACKAGE: - module.fail_json(msg=missing_required_lib("python-gitlab"), exception=GITLAB_IMP_ERR) + verify = ca_path if validate_certs and ca_path else validate_certs try: - # python-gitlab library remove support for username/password authentication since 1.13.0 - # Changelog : https://github.com/python-gitlab/python-gitlab/releases/tag/v1.13.0 - # This condition allow to still support older version of the python-gitlab library - if LooseVersion(gitlab.__version__) < LooseVersion("1.13.0"): - gitlab_instance = gitlab.Gitlab(url=gitlab_url, ssl_verify=validate_certs, email=gitlab_user, password=gitlab_password, - private_token=gitlab_token, api_version=4) - else: - # We can create an oauth_token using a username and password - 
# https://docs.gitlab.com/ee/api/oauth2.html#authorization-code-flow - if gitlab_user: - data = {'grant_type': 'password', 'username': gitlab_user, 'password': gitlab_password} - resp = requests.post(urljoin(gitlab_url, "oauth/token"), data=data, verify=validate_certs) - resp_data = resp.json() - gitlab_oauth_token = resp_data["access_token"] - - gitlab_instance = gitlab.Gitlab(url=gitlab_url, ssl_verify=validate_certs, private_token=gitlab_token, - oauth_token=gitlab_oauth_token, job_token=gitlab_job_token, api_version=4) + # We can create an oauth_token using a username and password + # https://docs.gitlab.com/ee/api/oauth2.html#authorization-code-flow + if gitlab_user: + data = {'grant_type': 'password', 'username': gitlab_user, 'password': gitlab_password} + resp = requests.post(urljoin(gitlab_url, "oauth/token"), data=data, verify=verify) + resp_data = resp.json() + gitlab_oauth_token = resp_data["access_token"] + gitlab_instance = gitlab.Gitlab(url=gitlab_url, ssl_verify=verify, private_token=gitlab_token, + oauth_token=gitlab_oauth_token, job_token=gitlab_job_token, api_version=4) gitlab_instance.auth() except (gitlab.exceptions.GitlabAuthenticationError, gitlab.exceptions.GitlabGetError) as e: - module.fail_json(msg="Failed to connect to GitLab server: %s" % to_native(e)) + module.fail_json(msg=f"Failed to connect to GitLab server: {e}") except (gitlab.exceptions.GitlabHttpError) as e: - module.fail_json(msg="Failed to connect to GitLab server: %s. \ - GitLab remove Session API now that private tokens are removed from user API endpoints since version 10.2." % to_native(e)) + module.fail_json(msg=( + f"Failed to connect to GitLab server: {e}. GitLab remove Session API now " + "that private tokens are removed from user API endpoints since version 10.2." 
+ )) return gitlab_instance + + +def filter_returned_variables(gitlab_variables): + # pop properties we don't know + existing_variables = [dict(x.attributes) for x in gitlab_variables] + KNOWN = ['key', 'value', 'description', 'masked', 'hidden', 'protected', 'variable_type', 'environment_scope', 'raw'] + for item in existing_variables: + for key in list(item.keys()): + if key not in KNOWN: + item.pop(key) + return existing_variables + + +def vars_to_variables(vars, module): + # transform old vars to new variables structure + variables = list() + for item, value in vars.items(): + if isinstance(value, (str, int, float)): + variables.append( + { + "name": item, + "value": str(value), + "description": None, + "masked": False, + "protected": False, + "hidden": False, + "raw": False, + "variable_type": "env_var", + } + ) + + elif isinstance(value, dict): + new_item = { + "name": item, + "value": value.get('value'), + "description": value.get('description'), + "masked": value.get('masked'), + "hidden": value.get('hidden'), + "protected": value.get('protected'), + "raw": value.get('raw'), + "variable_type": value.get('variable_type'), + } + + if value.get('environment_scope'): + new_item['environment_scope'] = value.get('environment_scope') + + variables.append(new_item) + + else: + module.fail_json(msg="value must be of type string, integer, float or dict") + + return variables diff --git a/plugins/module_utils/heroku.py b/plugins/module_utils/heroku.py index 70b144c077..149e11162e 100644 --- a/plugins/module_utils/heroku.py +++ b/plugins/module_utils/heroku.py @@ -1,9 +1,8 @@ -# -*- coding: utf-8 -*- -# Copyright: (c) 2018, Ansible Project -# Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause) +# Copyright (c) 2018, Ansible Project +# Simplified BSD License (see LICENSES/BSD-2-Clause.txt or https://opensource.org/licenses/BSD-2-Clause) +# SPDX-License-Identifier: BSD-2-Clause -from __future__ import absolute_import, 
division, print_function -__metaclass__ = type +from __future__ import annotations import traceback diff --git a/plugins/module_utils/homebrew.py b/plugins/module_utils/homebrew.py new file mode 100644 index 0000000000..88e92461c3 --- /dev/null +++ b/plugins/module_utils/homebrew.py @@ -0,0 +1,135 @@ +# Copyright (c) Ansible project +# Simplified BSD License (see LICENSES/BSD-2-Clause.txt or https://opensource.org/licenses/BSD-2-Clause) +# SPDX-License-Identifier: BSD-2-Clause + +from __future__ import annotations + + +import os +import re + + +def _create_regex_group_complement(s): + lines = (line.strip() for line in s.split("\n") if line.strip()) + chars = [_f for _f in (line.split("#")[0].strip() for line in lines) if _f] + group = rf"[^{''.join(chars)}]" + return re.compile(group) + + +class HomebrewValidate(object): + # class regexes ------------------------------------------------ {{{ + VALID_PATH_CHARS = rf""" + \w # alphanumeric characters (i.e., [a-zA-Z0-9_]) + \s # spaces + : # colons + {os.path.sep} # the OS-specific path separator + . # dots + \- # dashes + """ + + VALID_BREW_PATH_CHARS = rf""" + \w # alphanumeric characters (i.e., [a-zA-Z0-9_]) + \s # spaces + {os.path.sep} # the OS-specific path separator + . # dots + \- # dashes + """ + + VALID_PACKAGE_CHARS = r""" + \w # alphanumeric characters (i.e., [a-zA-Z0-9_]) + . 
# dots + / # slash (for taps) + \+ # plusses + \- # dashes + : # colons (for URLs) + @ # at-sign + """ + + INVALID_PATH_REGEX = _create_regex_group_complement(VALID_PATH_CHARS) + INVALID_BREW_PATH_REGEX = _create_regex_group_complement(VALID_BREW_PATH_CHARS) + INVALID_PACKAGE_REGEX = _create_regex_group_complement(VALID_PACKAGE_CHARS) + # /class regexes ----------------------------------------------- }}} + + # class validations -------------------------------------------- {{{ + @classmethod + def valid_path(cls, path): + """ + `path` must be one of: + - list of paths + - a string containing only: + - alphanumeric characters + - dashes + - dots + - spaces + - colons + - os.path.sep + """ + + if isinstance(path, str): + return not cls.INVALID_PATH_REGEX.search(path) + + try: + iter(path) + except TypeError: + return False + else: + paths = path + return all(cls.valid_brew_path(path_) for path_ in paths) + + @classmethod + def valid_brew_path(cls, brew_path): + """ + `brew_path` must be one of: + - None + - a string containing only: + - alphanumeric characters + - dashes + - dots + - spaces + - os.path.sep + """ + + if brew_path is None: + return True + + return isinstance( + brew_path, str + ) and not cls.INVALID_BREW_PATH_REGEX.search(brew_path) + + @classmethod + def valid_package(cls, package): + """A valid package is either None or alphanumeric.""" + + if package is None: + return True + + return isinstance( + package, str + ) and not cls.INVALID_PACKAGE_REGEX.search(package) + + +def parse_brew_path(module): + # type: (...) -> str + """Attempt to find the Homebrew executable path. + + Requires: + - module has a `path` parameter + - path is a valid path string for the target OS. Otherwise, module.fail_json() + is called with msg="Invalid_path: ". 
+ """ + path = module.params["path"] + if not HomebrewValidate.valid_path(path): + module.fail_json(msg=f"Invalid path: {path}") + + if isinstance(path, str): + paths = path.split(":") + elif isinstance(path, list): + paths = path + else: + module.fail_json(msg=f"Invalid path: {path}") + + brew_path = module.get_bin_path("brew", required=True, opt_dirs=paths) + if not HomebrewValidate.valid_brew_path(brew_path): + module.fail_json(msg=f"Invalid brew path: {brew_path}") + + return brew_path diff --git a/plugins/module_utils/hwc_utils.py b/plugins/module_utils/hwc_utils.py index 489e90dd3c..596303527c 100644 --- a/plugins/module_utils/hwc_utils.py +++ b/plugins/module_utils/hwc_utils.py @@ -1,10 +1,8 @@ -# -*- coding: utf-8 -*- # Copyright (c), Google Inc, 2017 -# Simplified BSD License (see licenses/simplified_bsd.txt or -# https://opensource.org/licenses/BSD-2-Clause) +# Simplified BSD License (see LICENSES/BSD-2-Clause.txt or https://opensource.org/licenses/BSD-2-Clause) +# SPDX-License-Identifier: BSD-2-Clause -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations import re import time @@ -32,7 +30,7 @@ class HwcModuleException(Exception): self._message = message def __str__(self): - return "[HwcClientException] message=%s" % self._message + return f"[HwcClientException] message={self._message}" class HwcClientException(Exception): @@ -43,9 +41,8 @@ class HwcClientException(Exception): self._message = message def __str__(self): - msg = " code=%s," % str(self._code) if self._code != 0 else "" - return "[HwcClientException]%s message=%s" % ( - msg, self._message) + msg = f" code={self._code}," if self._code != 0 else "" + return f"[HwcClientException]{msg} message={self._message}" class HwcClientException404(HwcClientException): @@ -53,7 +50,7 @@ class HwcClientException404(HwcClientException): super(HwcClientException404, self).__init__(404, message) def __str__(self): - return 
"[HwcClientException404] message=%s" % self._message + return f"[HwcClientException404] message={self._message}" def session_method_wrapper(f): @@ -63,7 +60,7 @@ def session_method_wrapper(f): r = f(self, url, *args, **kwargs) except Exception as ex: raise HwcClientException( - 0, "Sending request failed, error=%s" % ex) + 0, f"Sending request failed, error={ex}") result = None if r.content: @@ -71,7 +68,7 @@ def session_method_wrapper(f): result = r.json() except Exception as ex: raise HwcClientException( - 0, "Parsing response to json failed, error: %s" % ex) + 0, f"Parsing response to json failed, error: {ex}") code = r.status_code if code not in [200, 201, 202, 203, 204, 205, 206, 207, 208, 226]: @@ -100,7 +97,7 @@ class _ServiceClient(object): self._client = client self._endpoint = endpoint self._default_header = { - 'User-Agent': "Huawei-Ansible-MM-%s" % product, + 'User-Agent': f"Huawei-Ansible-MM-{product}", 'Accept': 'application/json', } @@ -188,7 +185,7 @@ class Config(object): raise_exc=False) def _get_service_endpoint(self, client, service_type, region): - k = "%s.%s" % (service_type, region if region else "") + k = f"{service_type}.{region if region else ''}" if k in self._endpoints: return self._endpoints.get(k) @@ -199,11 +196,11 @@ class Config(object): region_name=region, interface="public") except Exception as ex: raise HwcClientException( - 0, "Getting endpoint failed, error=%s" % ex) + 0, f"Getting endpoint failed, error={ex}") if url == "": raise HwcClientException( - 0, "Can not find the enpoint for %s" % service_type) + 0, f"Cannot find the endpoint for {service_type}") if url[-1] != "/": url += "/" @@ -342,7 +339,7 @@ def wait_to_finish(target, pending, refresh, timeout, min_interval=1, delay=3): if not_found_times > 10: raise HwcModuleException( - "not found the object for %d times" % not_found_times) + f"not found the object for {not_found_times} times") else: not_found_times = 0 @@ -351,7 +348,7 @@ def wait_to_finish(target, pending, 
refresh, timeout, min_interval=1, delay=3): if pending and status not in pending: raise HwcModuleException( - "unexpect status(%s) occured" % status) + f"unexpected status({status}) occurred") if not is_last_time: wait *= 2 @@ -362,7 +359,7 @@ def wait_to_finish(target, pending, refresh, timeout, min_interval=1, delay=3): time.sleep(wait) - raise HwcModuleException("asycn wait timeout after %d seconds" % timeout) + raise HwcModuleException(f"async wait timeout after {timeout} seconds") def navigate_value(data, index, array_index=None): @@ -381,7 +378,7 @@ def navigate_value(data, index, array_index=None): i = index[n] if i not in d: raise HwcModuleException( - "navigate value failed: key(%s) is not exist in dict" % i) + f"navigate value failed: key({i}) is not exist in dict") d = d[i] if not array_index: diff --git a/plugins/module_utils/ibm_sa_utils.py b/plugins/module_utils/ibm_sa_utils.py index 4f70f844cd..0c8f3d274d 100644 --- a/plugins/module_utils/ibm_sa_utils.py +++ b/plugins/module_utils/ibm_sa_utils.py @@ -1,11 +1,10 @@ -# -*- coding: utf-8 -*- # Copyright (C) 2018 IBM CORPORATION # Author(s): Tzur Eliyahu # -# Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause) +# Simplified BSD License (see LICENSES/BSD-2-Clause.txt or https://opensource.org/licenses/BSD-2-Clause) +# SPDX-License-Identifier: BSD-2-Clause -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations import traceback diff --git a/plugins/module_utils/identity/keycloak/keycloak.py b/plugins/module_utils/identity/keycloak/keycloak.py index 8051c946e2..a54a911c20 100644 --- a/plugins/module_utils/identity/keycloak/keycloak.py +++ b/plugins/module_utils/identity/keycloak/keycloak.py @@ -1,46 +1,23 @@ -# -*- coding: utf-8 -*- # Copyright (c) 2017, Eike Frost -# -# This code is part of Ansible, but is an independent component. 
-# This particular file snippet, and this file snippet only, is BSD licensed. -# Modules you write using this snippet, which is embedded dynamically by Ansible -# still belong to the author of the module, and may assign their own license -# to the complete work. -# -# Redistribution and use in source and binary forms, with or without modification, -# are permitted provided that the following conditions are met: -# -# * Redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer. -# * Redistributions in binary form must reproduce the above copyright notice, -# this list of conditions and the following disclaimer in the documentation -# and/or other materials provided with the distribution. -# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND -# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. -# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, -# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, -# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS -# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT -# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE -# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+# BSD 2-Clause license (see LICENSES/BSD-2-Clause.txt) +# SPDX-License-Identifier: BSD-2-Clause -from __future__ import absolute_import, division, print_function +from __future__ import annotations -__metaclass__ = type import json import traceback +import copy +from urllib.parse import urlencode, quote +from urllib.error import HTTPError from ansible.module_utils.urls import open_url -from ansible.module_utils.six.moves.urllib.parse import urlencode, quote -from ansible.module_utils.six.moves.urllib.error import HTTPError from ansible.module_utils.common.text.converters import to_native, to_text URL_REALM_INFO = "{url}/realms/{realm}" URL_REALMS = "{url}/admin/realms" URL_REALM = "{url}/admin/realms/{realm}" +URL_REALM_KEYS_METADATA = "{url}/admin/realms/{realm}/keys" URL_TOKEN = "{url}/realms/{realm}/protocol/openid-connect/token" URL_CLIENT = "{url}/admin/realms/{realm}/clients/{id}" @@ -50,23 +27,62 @@ URL_CLIENT_ROLES = "{url}/admin/realms/{realm}/clients/{id}/roles" URL_CLIENT_ROLE = "{url}/admin/realms/{realm}/clients/{id}/roles/{name}" URL_CLIENT_ROLE_COMPOSITES = "{url}/admin/realms/{realm}/clients/{id}/roles/{name}/composites" +URL_CLIENT_ROLE_SCOPE_CLIENTS = "{url}/admin/realms/{realm}/clients/{id}/scope-mappings/clients/{scopeid}" +URL_CLIENT_ROLE_SCOPE_REALM = "{url}/admin/realms/{realm}/clients/{id}/scope-mappings/realm" + URL_REALM_ROLES = "{url}/admin/realms/{realm}/roles" URL_REALM_ROLE = "{url}/admin/realms/{realm}/roles/{name}" +URL_REALM_ROLEMAPPINGS = "{url}/admin/realms/{realm}/users/{id}/role-mappings/realm" +URL_REALM_ROLEMAPPINGS_AVAILABLE = "{url}/admin/realms/{realm}/users/{id}/role-mappings/realm/available" +URL_REALM_ROLEMAPPINGS_COMPOSITE = "{url}/admin/realms/{realm}/users/{id}/role-mappings/realm/composite" URL_REALM_ROLE_COMPOSITES = "{url}/admin/realms/{realm}/roles/{name}/composites" +URL_ROLES_BY_ID = "{url}/admin/realms/{realm}/roles-by-id/{id}" +URL_ROLES_BY_ID_COMPOSITES_CLIENTS = 
"{url}/admin/realms/{realm}/roles-by-id/{id}/composites/clients/{cid}" +URL_ROLES_BY_ID_COMPOSITES = "{url}/admin/realms/{realm}/roles-by-id/{id}/composites" + URL_CLIENTTEMPLATE = "{url}/admin/realms/{realm}/client-templates/{id}" URL_CLIENTTEMPLATES = "{url}/admin/realms/{realm}/client-templates" URL_GROUPS = "{url}/admin/realms/{realm}/groups" URL_GROUP = "{url}/admin/realms/{realm}/groups/{groupid}" +URL_GROUP_CHILDREN = "{url}/admin/realms/{realm}/groups/{groupid}/children" URL_CLIENTSCOPES = "{url}/admin/realms/{realm}/client-scopes" URL_CLIENTSCOPE = "{url}/admin/realms/{realm}/client-scopes/{id}" URL_CLIENTSCOPE_PROTOCOLMAPPERS = "{url}/admin/realms/{realm}/client-scopes/{id}/protocol-mappers/models" URL_CLIENTSCOPE_PROTOCOLMAPPER = "{url}/admin/realms/{realm}/client-scopes/{id}/protocol-mappers/models/{mapper_id}" -URL_CLIENT_ROLEMAPPINGS = "{url}/admin/realms/{realm}/groups/{id}/role-mappings/clients/{client}" -URL_CLIENT_ROLEMAPPINGS_AVAILABLE = "{url}/admin/realms/{realm}/groups/{id}/role-mappings/clients/{client}/available" -URL_CLIENT_ROLEMAPPINGS_COMPOSITE = "{url}/admin/realms/{realm}/groups/{id}/role-mappings/clients/{client}/composite" +URL_DEFAULT_CLIENTSCOPES = "{url}/admin/realms/{realm}/default-default-client-scopes" +URL_DEFAULT_CLIENTSCOPE = "{url}/admin/realms/{realm}/default-default-client-scopes/{id}" +URL_OPTIONAL_CLIENTSCOPES = "{url}/admin/realms/{realm}/default-optional-client-scopes" +URL_OPTIONAL_CLIENTSCOPE = "{url}/admin/realms/{realm}/default-optional-client-scopes/{id}" + +URL_CLIENT_DEFAULT_CLIENTSCOPES = "{url}/admin/realms/{realm}/clients/{cid}/default-client-scopes" +URL_CLIENT_DEFAULT_CLIENTSCOPE = "{url}/admin/realms/{realm}/clients/{cid}/default-client-scopes/{id}" +URL_CLIENT_OPTIONAL_CLIENTSCOPES = "{url}/admin/realms/{realm}/clients/{cid}/optional-client-scopes" +URL_CLIENT_OPTIONAL_CLIENTSCOPE = "{url}/admin/realms/{realm}/clients/{cid}/optional-client-scopes/{id}" + +URL_CLIENT_GROUP_ROLEMAPPINGS = 
"{url}/admin/realms/{realm}/groups/{id}/role-mappings/clients/{client}" +URL_CLIENT_GROUP_ROLEMAPPINGS_AVAILABLE = "{url}/admin/realms/{realm}/groups/{id}/role-mappings/clients/{client}/available" +URL_CLIENT_GROUP_ROLEMAPPINGS_COMPOSITE = "{url}/admin/realms/{realm}/groups/{id}/role-mappings/clients/{client}/composite" + +URL_USERS = "{url}/admin/realms/{realm}/users" +URL_USER = "{url}/admin/realms/{realm}/users/{id}" +URL_USER_ROLE_MAPPINGS = "{url}/admin/realms/{realm}/users/{id}/role-mappings" +URL_USER_REALM_ROLE_MAPPINGS = "{url}/admin/realms/{realm}/users/{id}/role-mappings/realm" +URL_USER_CLIENTS_ROLE_MAPPINGS = "{url}/admin/realms/{realm}/users/{id}/role-mappings/clients" +URL_USER_CLIENT_ROLE_MAPPINGS = "{url}/admin/realms/{realm}/users/{id}/role-mappings/clients/{client_id}" +URL_USER_GROUPS = "{url}/admin/realms/{realm}/users/{id}/groups" +URL_USER_GROUP = "{url}/admin/realms/{realm}/users/{id}/groups/{group_id}" + +URL_CLIENT_SERVICE_ACCOUNT_USER = "{url}/admin/realms/{realm}/clients/{id}/service-account-user" +URL_CLIENT_USER_ROLEMAPPINGS = "{url}/admin/realms/{realm}/users/{id}/role-mappings/clients/{client}" +URL_CLIENT_USER_ROLEMAPPINGS_AVAILABLE = "{url}/admin/realms/{realm}/users/{id}/role-mappings/clients/{client}/available" +URL_CLIENT_USER_ROLEMAPPINGS_COMPOSITE = "{url}/admin/realms/{realm}/users/{id}/role-mappings/clients/{client}/composite" + +URL_REALM_GROUP_ROLEMAPPINGS = "{url}/admin/realms/{realm}/groups/{group}/role-mappings/realm" + +URL_CLIENTSECRET = "{url}/admin/realms/{realm}/clients/{id}/client-secret" URL_AUTHENTICATION_FLOWS = "{url}/admin/realms/{realm}/authentication/flows" URL_AUTHENTICATION_FLOW = "{url}/admin/realms/{realm}/authentication/flows/{id}" @@ -78,15 +94,36 @@ URL_AUTHENTICATION_EXECUTION_CONFIG = "{url}/admin/realms/{realm}/authentication URL_AUTHENTICATION_EXECUTION_RAISE_PRIORITY = "{url}/admin/realms/{realm}/authentication/executions/{id}/raise-priority" URL_AUTHENTICATION_EXECUTION_LOWER_PRIORITY = 
"{url}/admin/realms/{realm}/authentication/executions/{id}/lower-priority" URL_AUTHENTICATION_CONFIG = "{url}/admin/realms/{realm}/authentication/config/{id}" +URL_AUTHENTICATION_REGISTER_REQUIRED_ACTION = "{url}/admin/realms/{realm}/authentication/register-required-action" +URL_AUTHENTICATION_REQUIRED_ACTIONS = "{url}/admin/realms/{realm}/authentication/required-actions" +URL_AUTHENTICATION_REQUIRED_ACTIONS_ALIAS = "{url}/admin/realms/{realm}/authentication/required-actions/{alias}" URL_IDENTITY_PROVIDERS = "{url}/admin/realms/{realm}/identity-provider/instances" URL_IDENTITY_PROVIDER = "{url}/admin/realms/{realm}/identity-provider/instances/{alias}" URL_IDENTITY_PROVIDER_MAPPERS = "{url}/admin/realms/{realm}/identity-provider/instances/{alias}/mappers" URL_IDENTITY_PROVIDER_MAPPER = "{url}/admin/realms/{realm}/identity-provider/instances/{alias}/mappers/{id}" +URL_IDENTITY_PROVIDER_IMPORT = "{url}/admin/realms/{realm}/identity-provider/import-config" URL_COMPONENTS = "{url}/admin/realms/{realm}/components" URL_COMPONENT = "{url}/admin/realms/{realm}/components/{id}" +URL_AUTHZ_AUTHORIZATION_SCOPE = "{url}/admin/realms/{realm}/clients/{client_id}/authz/resource-server/scope/{id}" +URL_AUTHZ_AUTHORIZATION_SCOPES = "{url}/admin/realms/{realm}/clients/{client_id}/authz/resource-server/scope" + +# This URL is used for: +# - Querying client authorization permissions +# - Removing client authorization permissions +URL_AUTHZ_POLICIES = "{url}/admin/realms/{realm}/clients/{client_id}/authz/resource-server/policy" +URL_AUTHZ_POLICY = "{url}/admin/realms/{realm}/clients/{client_id}/authz/resource-server/policy/{id}" + +URL_AUTHZ_PERMISSION = "{url}/admin/realms/{realm}/clients/{client_id}/authz/resource-server/permission/{permission_type}/{id}" +URL_AUTHZ_PERMISSIONS = "{url}/admin/realms/{realm}/clients/{client_id}/authz/resource-server/permission/{permission_type}" + +URL_AUTHZ_RESOURCES = "{url}/admin/realms/{realm}/clients/{client_id}/authz/resource-server/resource" + 
+URL_AUTHZ_CUSTOM_POLICY = "{url}/admin/realms/{realm}/clients/{client_id}/authz/resource-server/policy/{policy_type}" +URL_AUTHZ_CUSTOM_POLICIES = "{url}/admin/realms/{realm}/clients/{client_id}/authz/resource-server/policy" + def keycloak_argument_spec(): """ @@ -104,6 +141,8 @@ def keycloak_argument_spec(): validate_certs=dict(type='bool', default=True), connection_timeout=dict(type='int', default=10), token=dict(type='str', no_log=True), + refresh_token=dict(type='str', no_log=True), + http_agent=dict(type='str', default='Ansible'), ) @@ -112,60 +151,142 @@ def camel(words): class KeycloakError(Exception): - pass + def __init__(self, msg, authError=None): + self.msg = msg + self.authError = authError + + def __str__(self): + return str(self.msg) + + +def _token_request(module_params, payload): + """ Obtains connection header with token for the authentication, + using the provided auth_username/auth_password + :param module_params: parameters of the module + :param payload: + type: + dict + description: + Authentication request payload. Must contain at least + 'grant_type' and 'client_id', optionally 'client_secret', + along with parameters based on 'grant_type'; e.g., + 'username'/'password' for type 'password', + 'refresh_token' for type 'refresh_token'. 
+ :return: access token + """ + base_url = module_params.get('auth_keycloak_url') + if not base_url.lower().startswith(('http', 'https')): + raise KeycloakError(f"auth_url '{base_url}' should either start with 'http' or 'https'.") + auth_realm = module_params.get('auth_realm') + auth_url = URL_TOKEN.format(url=base_url, realm=auth_realm) + http_agent = module_params.get('http_agent') + validate_certs = module_params.get('validate_certs') + connection_timeout = module_params.get('connection_timeout') + + try: + r = json.loads(to_native(open_url(auth_url, method='POST', + validate_certs=validate_certs, http_agent=http_agent, timeout=connection_timeout, + data=urlencode(payload)).read())) + + return r['access_token'] + except ValueError as e: + raise KeycloakError( + f'API returned invalid JSON when trying to obtain access token from {auth_url}: {e}') + except KeyError: + raise KeycloakError( + f'API did not include access_token field in response from {auth_url}') + except Exception as e: + raise KeycloakError(f'Could not obtain access token from {auth_url}: {e}', authError=e) + + +def _request_token_using_credentials(module_params): + """ Obtains connection header with token for the authentication, + using the provided auth_username/auth_password + :param module_params: parameters of the module. Must include 'auth_username' and 'auth_password'. 
+ :return: access token + """ + client_id = module_params.get('auth_client_id') + auth_username = module_params.get('auth_username') + auth_password = module_params.get('auth_password') + client_secret = module_params.get('auth_client_secret') + + temp_payload = { + 'grant_type': 'password', + 'client_id': client_id, + 'client_secret': client_secret, + 'username': auth_username, + 'password': auth_password, + } + # Remove empty items, for instance missing client_secret + payload = {k: v for k, v in temp_payload.items() if v is not None} + + return _token_request(module_params, payload) + + +def _request_token_using_refresh_token(module_params): + """ Obtains connection header with token for the authentication, + using the provided refresh_token + :param module_params: parameters of the module. Must include 'refresh_token'. + :return: access token + """ + client_id = module_params.get('auth_client_id') + refresh_token = module_params.get('refresh_token') + client_secret = module_params.get('auth_client_secret') + + temp_payload = { + 'grant_type': 'refresh_token', + 'client_id': client_id, + 'client_secret': client_secret, + 'refresh_token': refresh_token, + } + # Remove empty items, for instance missing client_secret + payload = {k: v for k, v in temp_payload.items() if v is not None} + + return _token_request(module_params, payload) + + +def _request_token_using_client_credentials(module_params): + """ Obtains connection header with token for the authentication, + using the provided auth_client_id and auth_client_secret by grant_type + client_credentials. Ensure that the used client uses client authorization + with service account roles enabled and required service roles assigned. + :param module_params: parameters of the module. Must include 'auth_client_id' + and 'auth_client_secret'.
+ :return: access token + """ + client_id = module_params.get('auth_client_id') + client_secret = module_params.get('auth_client_secret') + + temp_payload = { + 'grant_type': 'client_credentials', + 'client_id': client_id, + 'client_secret': client_secret, + } + # Remove empty items, for instance missing client_secret + payload = {k: v for k, v in temp_payload.items() if v is not None} + + return _token_request(module_params, payload) def get_token(module_params): """ Obtains connection header with token for the authentication, - token already given or obtained from credentials - :param module_params: parameters of the module - :return: connection header + token already given or obtained from credentials + :param module_params: parameters of the module + :return: connection header """ token = module_params.get('token') - base_url = module_params.get('auth_keycloak_url') - - if not base_url.lower().startswith(('http', 'https')): - raise KeycloakError("auth_url '%s' should either start with 'http' or 'https'."
% base_url) if token is None: - base_url = module_params.get('auth_keycloak_url') - validate_certs = module_params.get('validate_certs') - auth_realm = module_params.get('auth_realm') - client_id = module_params.get('auth_client_id') + auth_client_id = module_params.get('auth_client_id') + auth_client_secret = module_params.get('auth_client_secret') auth_username = module_params.get('auth_username') - auth_password = module_params.get('auth_password') - client_secret = module_params.get('auth_client_secret') - connection_timeout = module_params.get('connection_timeout') - auth_url = URL_TOKEN.format(url=base_url, realm=auth_realm) - temp_payload = { - 'grant_type': 'password', - 'client_id': client_id, - 'client_secret': client_secret, - 'username': auth_username, - 'password': auth_password, - } - # Remove empty items, for instance missing client_secret - payload = dict( - (k, v) for k, v in temp_payload.items() if v is not None) - try: - r = json.loads(to_native(open_url(auth_url, method='POST', - validate_certs=validate_certs, timeout=connection_timeout, - data=urlencode(payload)).read())) - except ValueError as e: - raise KeycloakError( - 'API returned invalid JSON when trying to obtain access token from %s: %s' - % (auth_url, str(e))) - except Exception as e: - raise KeycloakError('Could not obtain access token from %s: %s' - % (auth_url, str(e))) + if auth_client_id is not None and auth_client_secret is not None and auth_username is None: + token = _request_token_using_client_credentials(module_params) + else: + token = _request_token_using_credentials(module_params) - try: - token = r['access_token'] - except KeyError: - raise KeycloakError( - 'Could not obtain access token from %s' % auth_url) return { - 'Authorization': 'Bearer ' + token, + 'Authorization': f"Bearer {token}", 'Content-Type': 'application/json' } @@ -199,24 +320,30 @@ def is_struct_included(struct1, struct2, exclude=None): Return True if all element of dict 1 are present in dict 2, return 
false otherwise. """ if isinstance(struct1, list) and isinstance(struct2, list): + if not struct1 and not struct2: + return True for item1 in struct1: if isinstance(item1, (list, dict)): for item2 in struct2: - if not is_struct_included(item1, item2, exclude): - return False + if is_struct_included(item1, item2, exclude): + break + else: + return False else: if item1 not in struct2: return False return True elif isinstance(struct1, dict) and isinstance(struct2, dict): + if not struct1 and not struct2: + return True try: for key in struct1: if not (exclude and key in exclude): if not is_struct_included(struct1[key], struct2[key], exclude): return False - return True except KeyError: return False + return True elif isinstance(struct1, bool) and isinstance(struct2, bool): return struct1 == struct2 else: @@ -227,12 +354,95 @@ class KeycloakAPI(object): """ Keycloak API access; Keycloak uses OAuth 2.0 to protect its API, an access token for which is obtained through OpenID connect """ + def __init__(self, module, connection_header): self.module = module self.baseurl = self.module.params.get('auth_keycloak_url') self.validate_certs = self.module.params.get('validate_certs') self.connection_timeout = self.module.params.get('connection_timeout') self.restheaders = connection_header + self.http_agent = self.module.params.get('http_agent') + + def _request(self, url, method, data=None): + """ Makes a request to Keycloak and returns the raw response. + If a 401 is returned, attempts to re-authenticate + using first the module's refresh_token (if provided) + and then the module's username/password (if provided). + On successful re-authentication, the new token is stored + in the restheaders for future requests. + + :param url: request path + :param method: request method (e.g., 'GET', 'POST', etc.) 
+ :param data: (optional) data for request + :return: raw API response + """ + def make_request_catching_401(): + try: + return open_url(url, method=method, data=data, + http_agent=self.http_agent, headers=self.restheaders, + timeout=self.connection_timeout, + validate_certs=self.validate_certs) + except HTTPError as e: + if e.code != 401: + raise e + return e + + r = make_request_catching_401() + + if isinstance(r, Exception): + # Try to refresh token and retry, if available + refresh_token = self.module.params.get('refresh_token') + if refresh_token is not None: + try: + token = _request_token_using_refresh_token(self.module.params) + self.restheaders['Authorization'] = f"Bearer {token}" + + r = make_request_catching_401() + except KeycloakError as e: + # Token refresh returns 400 if token is expired/invalid, so continue on if we get a 400 + if e.authError is not None and e.authError.code != 400: + raise e + + if isinstance(r, Exception): + # Try to re-auth with username/password, if available + auth_username = self.module.params.get('auth_username') + auth_password = self.module.params.get('auth_password') + if auth_username is not None and auth_password is not None: + token = _request_token_using_credentials(self.module.params) + self.restheaders['Authorization'] = f"Bearer {token}" + + r = make_request_catching_401() + + if isinstance(r, Exception): + # Try to re-auth with client_id and client_secret, if available + auth_client_id = self.module.params.get('auth_client_id') + auth_client_secret = self.module.params.get('auth_client_secret') + if auth_client_id is not None and auth_client_secret is not None: + try: + token = _request_token_using_client_credentials(self.module.params) + self.restheaders['Authorization'] = f"Bearer {token}" + + r = make_request_catching_401() + except KeycloakError as e: + # Token refresh returns 400 if token is expired/invalid, so continue on if we get a 400 + if e.authError is not None and e.authError.code != 400: + raise e + + 
 if isinstance(r, Exception): + # Either no re-auth options were available, or they all failed + raise r + + return r + + def _request_and_deserialize(self, url, method, data=None): + """ Wraps the _request method with JSON deserialization of the response. + + :param url: request path + :param method: request method (e.g., 'GET', 'POST', etc.) + :param data: (optional) data for request + :return: deserialized API response (parsed JSON) + """ + return json.loads(to_native(self._request(url, method, data).read())) def get_realm_info_by_id(self, realm='master'): """ Obtain realm public info by id @@ -243,22 +453,52 @@ class KeycloakAPI(object): realm_info_url = URL_REALM_INFO.format(url=self.baseurl, realm=realm) try: - return json.loads(to_native(open_url(realm_info_url, method='GET', headers=self.restheaders, timeout=self.connection_timeout, - validate_certs=self.validate_certs).read())) + return self._request_and_deserialize(realm_info_url, method='GET') except HTTPError as e: if e.code == 404: return None else: - self.module.fail_json(msg='Could not obtain realm %s: %s' % (realm, str(e)), - exception=traceback.format_exc()) + self.fail_request(e, msg=f'Could not obtain realm {realm}: {e}', + exception=traceback.format_exc()) except ValueError as e: - self.module.fail_json(msg='API returned incorrect JSON when trying to obtain realm %s: %s' % (realm, str(e)), + self.module.fail_json(msg=f'API returned incorrect JSON when trying to obtain realm {realm}: {e}', exception=traceback.format_exc()) except Exception as e: - self.module.fail_json(msg='Could not obtain realm %s: %s' % (realm, str(e)), + self.module.fail_json(msg=f'Could not obtain realm {realm}: {e}', exception=traceback.format_exc()) + def get_realm_keys_metadata_by_id(self, realm='master'): + """Obtain realm keys metadata by id + + :param realm: realm id + + :return: None, or a 'KeysMetadataRepresentation' + (https://www.keycloak.org/docs-api/latest/rest-api/index.html#KeysMetadataRepresentation) + -- a dict containing the keys
'active' and 'keys', the former containing a mapping + from algorithms to key-ids, the latter containing a list of dicts with key + information. + """ + realm_keys_metadata_url = URL_REALM_KEYS_METADATA.format(url=self.baseurl, realm=realm) + + try: + return self._request_and_deserialize(realm_keys_metadata_url, method="GET") + + except HTTPError as e: + if e.code == 404: + return None + else: + self.fail_request(e, msg=f'Could not obtain realm {realm}: {e}', + exception=traceback.format_exc()) + except ValueError as e: + self.module.fail_json(msg=f'API returned incorrect JSON when trying to obtain realm {realm}: {e}', + exception=traceback.format_exc()) + except Exception as e: + self.module.fail_json(msg=f'Could not obtain realm {realm}: {e}', + exception=traceback.format_exc()) + + # The Keycloak API expects the realm name (like `master`) not the ID when fetching the realm data. + # See the Keycloak API docs: https://www.keycloak.org/docs-api/latest/rest-api/#_realms_admin def get_realm_by_id(self, realm='master'): """ Obtain realm representation by id @@ -268,20 +508,19 @@ class KeycloakAPI(object): realm_url = URL_REALM.format(url=self.baseurl, realm=realm) try: - return json.loads(to_native(open_url(realm_url, method='GET', headers=self.restheaders, timeout=self.connection_timeout, - validate_certs=self.validate_certs).read())) + return self._request_and_deserialize(realm_url, method='GET') except HTTPError as e: if e.code == 404: return None else: - self.module.fail_json(msg='Could not obtain realm %s: %s' % (realm, str(e)), - exception=traceback.format_exc()) + self.fail_request(e, msg=f'Could not obtain realm {realm}: {e}', + exception=traceback.format_exc()) except ValueError as e: - self.module.fail_json(msg='API returned incorrect JSON when trying to obtain realm %s: %s' % (realm, str(e)), + self.module.fail_json(msg=f'API returned incorrect JSON when trying to obtain realm {realm}: {e}', exception=traceback.format_exc()) except Exception as e: - 
self.module.fail_json(msg='Could not obtain realm %s: %s' % (realm, str(e)), + self.module.fail_json(msg=f'Could not obtain realm {realm}: {e}', exception=traceback.format_exc()) def update_realm(self, realmrep, realm="master"): @@ -293,11 +532,10 @@ class KeycloakAPI(object): realm_url = URL_REALM.format(url=self.baseurl, realm=realm) try: - return open_url(realm_url, method='PUT', headers=self.restheaders, timeout=self.connection_timeout, - data=json.dumps(realmrep), validate_certs=self.validate_certs) + return self._request(realm_url, method='PUT', data=json.dumps(realmrep)) except Exception as e: - self.module.fail_json(msg='Could not update realm %s: %s' % (realm, str(e)), - exception=traceback.format_exc()) + self.fail_request(e, msg=f'Could not update realm {realm}: {e}', + exception=traceback.format_exc()) def create_realm(self, realmrep): """ Create a realm in keycloak @@ -307,11 +545,10 @@ class KeycloakAPI(object): realm_url = URL_REALMS.format(url=self.baseurl) try: - return open_url(realm_url, method='POST', headers=self.restheaders, timeout=self.connection_timeout, - data=json.dumps(realmrep), validate_certs=self.validate_certs) + return self._request(realm_url, method='POST', data=json.dumps(realmrep)) except Exception as e: - self.module.fail_json(msg='Could not create realm %s: %s' % (realmrep['id'], str(e)), - exception=traceback.format_exc()) + self.fail_request(e, msg=f"Could not create realm {realmrep['id']}: {e}", + exception=traceback.format_exc()) def delete_realm(self, realm="master"): """ Delete a realm from Keycloak @@ -322,11 +559,10 @@ class KeycloakAPI(object): realm_url = URL_REALM.format(url=self.baseurl, realm=realm) try: - return open_url(realm_url, method='DELETE', headers=self.restheaders, timeout=self.connection_timeout, - validate_certs=self.validate_certs) + return self._request(realm_url, method='DELETE') except Exception as e: - self.module.fail_json(msg='Could not delete realm %s: %s' % (realm, str(e)), - 
exception=traceback.format_exc()) + self.fail_request(e, msg=f'Could not delete realm {realm}: {e}', + exception=traceback.format_exc()) def get_clients(self, realm='master', filter=None): """ Obtains client representations for clients in a realm @@ -337,17 +573,14 @@ class KeycloakAPI(object): """ clientlist_url = URL_CLIENTS.format(url=self.baseurl, realm=realm) if filter is not None: - clientlist_url += '?clientId=%s' % filter + clientlist_url += f'?clientId={filter}' try: - return json.loads(to_native(open_url(clientlist_url, method='GET', headers=self.restheaders, timeout=self.connection_timeout, - validate_certs=self.validate_certs).read())) + return self._request_and_deserialize(clientlist_url, method='GET') except ValueError as e: - self.module.fail_json(msg='API returned incorrect JSON when trying to obtain list of clients for realm %s: %s' - % (realm, str(e))) + self.module.fail_json(msg=f'API returned incorrect JSON when trying to obtain list of clients for realm {realm}: {e}') except Exception as e: - self.module.fail_json(msg='Could not obtain list of clients for realm %s: %s' - % (realm, str(e))) + self.fail_request(e, msg=f'Could not obtain list of clients for realm {realm}: {e}') def get_client_by_clientid(self, client_id, realm='master'): """ Get client representation by clientId @@ -371,21 +604,17 @@ class KeycloakAPI(object): client_url = URL_CLIENT.format(url=self.baseurl, realm=realm, id=id) try: - return json.loads(to_native(open_url(client_url, method='GET', headers=self.restheaders, timeout=self.connection_timeout, - validate_certs=self.validate_certs).read())) + return self._request_and_deserialize(client_url, method='GET') except HTTPError as e: if e.code == 404: return None else: - self.module.fail_json(msg='Could not obtain client %s for realm %s: %s' - % (id, realm, str(e))) + self.fail_request(e, msg=f'Could not obtain client {id} for realm {realm}: {e}') except ValueError as e: - self.module.fail_json(msg='API returned incorrect JSON 
when trying to obtain client %s for realm %s: %s' - % (id, realm, str(e))) + self.module.fail_json(msg=f'API returned incorrect JSON when trying to obtain client {id} for realm {realm}: {e}') except Exception as e: - self.module.fail_json(msg='Could not obtain client %s for realm %s: %s' - % (id, realm, str(e))) + self.module.fail_json(msg=f'Could not obtain client {id} for realm {realm}: {e}') def get_client_id(self, client_id, realm='master'): """ Obtain id of client by client_id @@ -410,11 +639,9 @@ class KeycloakAPI(object): client_url = URL_CLIENT.format(url=self.baseurl, realm=realm, id=id) try: - return open_url(client_url, method='PUT', headers=self.restheaders, timeout=self.connection_timeout, - data=json.dumps(clientrep), validate_certs=self.validate_certs) + return self._request(client_url, method='PUT', data=json.dumps(clientrep)) except Exception as e: - self.module.fail_json(msg='Could not update client %s in realm %s: %s' - % (id, realm, str(e))) + self.fail_request(e, msg=f'Could not update client {id} in realm {realm}: {e}') def create_client(self, clientrep, realm="master"): """ Create a client in keycloak @@ -425,11 +652,9 @@ class KeycloakAPI(object): client_url = URL_CLIENTS.format(url=self.baseurl, realm=realm) try: - return open_url(client_url, method='POST', headers=self.restheaders, timeout=self.connection_timeout, - data=json.dumps(clientrep), validate_certs=self.validate_certs) + return self._request(client_url, method='POST', data=json.dumps(clientrep)) except Exception as e: - self.module.fail_json(msg='Could not create client %s in realm %s: %s' - % (clientrep['clientId'], realm, str(e))) + self.fail_request(e, msg=f"Could not create client {clientrep['clientId']} in realm {realm}: {e}") def delete_client(self, id, realm="master"): """ Delete a client from Keycloak @@ -441,11 +666,9 @@ class KeycloakAPI(object): client_url = URL_CLIENT.format(url=self.baseurl, realm=realm, id=id) try: - return open_url(client_url, method='DELETE', 
headers=self.restheaders, timeout=self.connection_timeout, - validate_certs=self.validate_certs) + return self._request(client_url, method='DELETE') except Exception as e: - self.module.fail_json(msg='Could not delete client %s in realm %s: %s' - % (id, realm, str(e))) + self.fail_request(e, msg=f'Could not delete client {id} in realm {realm}: {e}') def get_client_roles_by_id(self, cid, realm="master"): """ Fetch the roles of the a client on the Keycloak server. @@ -456,16 +679,13 @@ class KeycloakAPI(object): """ client_roles_url = URL_CLIENT_ROLES.format(url=self.baseurl, realm=realm, id=cid) try: - return json.loads(to_native(open_url(client_roles_url, method="GET", headers=self.restheaders, timeout=self.connection_timeout, - validate_certs=self.validate_certs).read())) + return self._request_and_deserialize(client_roles_url, method="GET") except Exception as e: - self.module.fail_json(msg="Could not fetch rolemappings for client %s in realm %s: %s" - % (cid, realm, str(e))) + self.fail_request(e, msg=f"Could not fetch rolemappings for client {cid} in realm {realm}: {e}") - def get_client_role_by_name(self, gid, cid, name, realm="master"): + def get_client_role_id_by_name(self, cid, name, realm="master"): """ Get the role ID of a client. - :param gid: ID of the group from which to obtain the rolemappings. :param cid: ID of the client from which to obtain the rolemappings. :param name: Name of the role. :param realm: Realm from which to obtain the rolemappings. @@ -477,7 +697,7 @@ class KeycloakAPI(object): return role['id'] return None - def get_client_rolemapping_by_id(self, gid, cid, rid, realm='master'): + def get_client_group_rolemapping_by_id(self, gid, cid, rid, realm='master'): """ Obtain client representation by id :param gid: ID of the group from which to obtain the rolemappings. 
@@ -486,35 +706,31 @@ class KeycloakAPI(object): :param realm: client from this realm :return: dict of rolemapping representation or None if none matching exist """ - rolemappings_url = URL_CLIENT_ROLEMAPPINGS.format(url=self.baseurl, realm=realm, id=gid, client=cid) + rolemappings_url = URL_CLIENT_GROUP_ROLEMAPPINGS.format(url=self.baseurl, realm=realm, id=gid, client=cid) try: - rolemappings = json.loads(to_native(open_url(rolemappings_url, method="GET", headers=self.restheaders, timeout=self.connection_timeout, - validate_certs=self.validate_certs).read())) + rolemappings = self._request_and_deserialize(rolemappings_url, method="GET") for role in rolemappings: if rid == role['id']: return role except Exception as e: - self.module.fail_json(msg="Could not fetch rolemappings for client %s in group %s, realm %s: %s" - % (cid, gid, realm, str(e))) + self.fail_request(e, msg=f"Could not fetch rolemappings for client {cid} in group {gid}, realm {realm}: {e}") return None - def get_client_available_rolemappings(self, gid, cid, realm="master"): - """ Fetch the available role of a client in a specified goup on the Keycloak server. + def get_client_group_available_rolemappings(self, gid, cid, realm="master"): + """ Fetch the available role of a client in a specified group on the Keycloak server. :param gid: ID of the group from which to obtain the rolemappings. :param cid: ID of the client from which to obtain the rolemappings. :param realm: Realm from which to obtain the rolemappings. :return: The rollemappings of specified group and client of the realm (default "master"). 
""" - available_rolemappings_url = URL_CLIENT_ROLEMAPPINGS_AVAILABLE.format(url=self.baseurl, realm=realm, id=gid, client=cid) + available_rolemappings_url = URL_CLIENT_GROUP_ROLEMAPPINGS_AVAILABLE.format(url=self.baseurl, realm=realm, id=gid, client=cid) try: - return json.loads(to_native(open_url(available_rolemappings_url, method="GET", headers=self.restheaders, timeout=self.connection_timeout, - validate_certs=self.validate_certs).read())) + return self._request_and_deserialize(available_rolemappings_url, method="GET") except Exception as e: - self.module.fail_json(msg="Could not fetch available rolemappings for client %s in group %s, realm %s: %s" - % (cid, gid, realm, str(e))) + self.fail_request(e, msg=f"Could not fetch available rolemappings for client {cid} in group {gid}, realm {realm}: {e}") - def get_client_composite_rolemappings(self, gid, cid, realm="master"): + def get_client_group_composite_rolemappings(self, gid, cid, realm="master"): """ Fetch the composite role of a client in a specified group on the Keycloak server. :param gid: ID of the group from which to obtain the rolemappings. @@ -522,16 +738,83 @@ class KeycloakAPI(object): :param realm: Realm from which to obtain the rolemappings. :return: The rollemappings of specified group and client of the realm (default "master"). 
""" - available_rolemappings_url = URL_CLIENT_ROLEMAPPINGS_COMPOSITE.format(url=self.baseurl, realm=realm, id=gid, client=cid) + composite_rolemappings_url = URL_CLIENT_GROUP_ROLEMAPPINGS_COMPOSITE.format(url=self.baseurl, realm=realm, id=gid, client=cid) try: - return json.loads(to_native(open_url(available_rolemappings_url, method="GET", headers=self.restheaders, timeout=self.connection_timeout, - validate_certs=self.validate_certs).read())) + return self._request_and_deserialize(composite_rolemappings_url, method="GET") except Exception as e: - self.module.fail_json(msg="Could not fetch available rolemappings for client %s in group %s, realm %s: %s" - % (cid, gid, realm, str(e))) + self.fail_request(e, msg=f"Could not fetch available rolemappings for client {cid} in group {gid}, realm {realm}: {e}") + + def get_role_by_id(self, rid, realm="master"): + """ Fetch a role by its id on the Keycloak server. + + :param rid: ID of the role. + :param realm: Realm from which to obtain the rolemappings. + :return: The role. + """ + client_roles_url = URL_ROLES_BY_ID.format(url=self.baseurl, realm=realm, id=rid) + try: + return self._request_and_deserialize(client_roles_url, method="GET") + except Exception as e: + self.fail_request(e, msg=f"Could not fetch role for id {rid} in realm {realm}: {e}") + + def get_client_roles_by_id_composite_rolemappings(self, rid, cid, realm="master"): + """ Fetch a role by its id on the Keycloak server. + + :param rid: ID of the composite role. + :param cid: ID of the client from which to obtain the rolemappings. + :param realm: Realm from which to obtain the rolemappings. + :return: The role. 
+ """ + client_roles_url = URL_ROLES_BY_ID_COMPOSITES_CLIENTS.format(url=self.baseurl, realm=realm, id=rid, cid=cid) + try: + return self._request_and_deserialize(client_roles_url, method="GET") + except Exception as e: + self.fail_request(e, msg=f"Could not fetch role for id {rid} and cid {cid} in realm {realm}: {e}") + + def add_client_roles_by_id_composite_rolemapping(self, rid, roles_rep, realm="master"): + """ Assign roles to composite role + + :param rid: ID of the composite role. + :param roles_rep: Representation of the roles to assign. + :param realm: Realm from which to obtain the rolemappings. + :return: None. + """ + available_rolemappings_url = URL_ROLES_BY_ID_COMPOSITES.format(url=self.baseurl, realm=realm, id=rid) + try: + self._request(available_rolemappings_url, method="POST", data=json.dumps(roles_rep)) + except Exception as e: + self.fail_request(e, msg=f"Could not assign roles to composite role {rid} and realm {realm}: {e}") + + def add_group_realm_rolemapping(self, gid, role_rep, realm="master"): + """ Add the specified realm role to specified group on the Keycloak server. + + :param gid: ID of the group to add the role mapping. + :param role_rep: Representation of the role to assign. + :param realm: Realm from which to obtain the rolemappings. + :return: None. + """ + url = URL_REALM_GROUP_ROLEMAPPINGS.format(url=self.baseurl, realm=realm, group=gid) + try: + self._request(url, method="POST", data=json.dumps(role_rep)) + except Exception as e: + self.fail_request(e, msg=f"Could add realm role mappings for group {gid}, realm {realm}: {e}") + + def delete_group_realm_rolemapping(self, gid, role_rep, realm="master"): + """ Delete the specified realm role from the specified group on the Keycloak server. + + :param gid: ID of the group from which to obtain the rolemappings. + :param role_rep: Representation of the role to assign. + :param realm: Realm from which to obtain the rolemappings. + :return: None. 
+ """ + url = URL_REALM_GROUP_ROLEMAPPINGS.format(url=self.baseurl, realm=realm, group=gid) + try: + self._request(url, method="DELETE", data=json.dumps(role_rep)) + except Exception as e: + self.fail_request(e, msg=f"Could not delete realm role mappings for group {gid}, realm {realm}: {e}") def add_group_rolemapping(self, gid, cid, role_rep, realm="master"): - """ Fetch the composite role of a client in a specified goup on the Keycloak server. + """ Fetch the composite role of a client in a specified group on the Keycloak server. :param gid: ID of the group from which to obtain the rolemappings. :param cid: ID of the client from which to obtain the rolemappings. @@ -539,13 +822,11 @@ class KeycloakAPI(object): :param realm: Realm from which to obtain the rolemappings. :return: None. """ - available_rolemappings_url = URL_CLIENT_ROLEMAPPINGS.format(url=self.baseurl, realm=realm, id=gid, client=cid) + available_rolemappings_url = URL_CLIENT_GROUP_ROLEMAPPINGS.format(url=self.baseurl, realm=realm, id=gid, client=cid) try: - open_url(available_rolemappings_url, method="POST", headers=self.restheaders, data=json.dumps(role_rep), - validate_certs=self.validate_certs, timeout=self.connection_timeout) + self._request(available_rolemappings_url, method="POST", data=json.dumps(role_rep)) except Exception as e: - self.module.fail_json(msg="Could not fetch available rolemappings for client %s in group %s, realm %s: %s" - % (cid, gid, realm, str(e))) + self.fail_request(e, msg=f"Could not fetch available rolemappings for client {cid} in group {gid}, realm {realm}: {e}") def delete_group_rolemapping(self, gid, cid, role_rep, realm="master"): """ Delete the rolemapping of a client in a specified group on the Keycloak server. @@ -556,13 +837,188 @@ class KeycloakAPI(object): :param realm: Realm from which to obtain the rolemappings. :return: None. 
""" - available_rolemappings_url = URL_CLIENT_ROLEMAPPINGS.format(url=self.baseurl, realm=realm, id=gid, client=cid) + available_rolemappings_url = URL_CLIENT_GROUP_ROLEMAPPINGS.format(url=self.baseurl, realm=realm, id=gid, client=cid) try: - open_url(available_rolemappings_url, method="DELETE", headers=self.restheaders, - validate_certs=self.validate_certs, timeout=self.connection_timeout) + self._request(available_rolemappings_url, method="DELETE", data=json.dumps(role_rep)) except Exception as e: - self.module.fail_json(msg="Could not delete available rolemappings for client %s in group %s, realm %s: %s" - % (cid, gid, realm, str(e))) + self.fail_request(e, msg=f"Could not delete available rolemappings for client {cid} in group {gid}, realm {realm}: {e}") + + def get_client_user_rolemapping_by_id(self, uid, cid, rid, realm='master'): + """ Obtain client representation by id + + :param uid: ID of the user from which to obtain the rolemappings. + :param cid: ID of the client from which to obtain the rolemappings. + :param rid: ID of the role. + :param realm: client from this realm + :return: dict of rolemapping representation or None if none matching exist + """ + rolemappings_url = URL_CLIENT_USER_ROLEMAPPINGS.format(url=self.baseurl, realm=realm, id=uid, client=cid) + try: + rolemappings = self._request_and_deserialize(rolemappings_url, method="GET") + for role in rolemappings: + if rid == role['id']: + return role + except Exception as e: + self.fail_request(e, msg=f"Could not fetch rolemappings for client {cid} and user {uid}, realm {realm}: {e}") + return None + + def get_client_user_available_rolemappings(self, uid, cid, realm="master"): + """ Fetch the available role of a client for a specified user on the Keycloak server. + + :param uid: ID of the user from which to obtain the rolemappings. + :param cid: ID of the client from which to obtain the rolemappings. + :param realm: Realm from which to obtain the rolemappings. 
+ :return: The effective rollemappings of specified client and user of the realm (default "master"). + """ + available_rolemappings_url = URL_CLIENT_USER_ROLEMAPPINGS_AVAILABLE.format(url=self.baseurl, realm=realm, id=uid, client=cid) + try: + return self._request_and_deserialize(available_rolemappings_url, method="GET") + except Exception as e: + self.fail_request(e, msg=f"Could not fetch effective rolemappings for client {cid} and user {uid}, realm {realm}: {e}") + + def get_client_user_composite_rolemappings(self, uid, cid, realm="master"): + """ Fetch the composite role of a client for a specified user on the Keycloak server. + + :param uid: ID of the user from which to obtain the rolemappings. + :param cid: ID of the client from which to obtain the rolemappings. + :param realm: Realm from which to obtain the rolemappings. + :return: The rollemappings of specified group and client of the realm (default "master"). + """ + composite_rolemappings_url = URL_CLIENT_USER_ROLEMAPPINGS_COMPOSITE.format(url=self.baseurl, realm=realm, id=uid, client=cid) + try: + return self._request_and_deserialize(composite_rolemappings_url, method="GET") + except Exception as e: + self.fail_request(e, msg=f"Could not fetch available rolemappings for user {uid} of realm {realm}: {e}") + + def get_realm_user_rolemapping_by_id(self, uid, rid, realm='master'): + """ Obtain role representation by id + + :param uid: ID of the user from which to obtain the rolemappings. + :param rid: ID of the role. 
+ :param realm: client from this realm + :return: dict of rolemapping representation or None if none matching exist + """ + rolemappings_url = URL_REALM_ROLEMAPPINGS.format(url=self.baseurl, realm=realm, id=uid) + try: + rolemappings = self._request_and_deserialize(rolemappings_url, method="GET") + for role in rolemappings: + if rid == role['id']: + return role + except Exception as e: + self.fail_request(e, msg=f"Could not fetch rolemappings for user {uid}, realm {realm}: {e}") + return None + + def get_realm_user_available_rolemappings(self, uid, realm="master"): + """ Fetch the available role of a realm for a specified user on the Keycloak server. + + :param uid: ID of the user from which to obtain the rolemappings. + :param realm: Realm from which to obtain the rolemappings. + :return: The rollemappings of specified group and client of the realm (default "master"). + """ + available_rolemappings_url = URL_REALM_ROLEMAPPINGS_AVAILABLE.format(url=self.baseurl, realm=realm, id=uid) + try: + return self._request_and_deserialize(available_rolemappings_url, method="GET") + except Exception as e: + self.fail_request(e, msg=f"Could not fetch available rolemappings for user {uid} of realm {realm}: {e}") + + def get_realm_user_composite_rolemappings(self, uid, realm="master"): + """ Fetch the composite role of a realm for a specified user on the Keycloak server. + + :param uid: ID of the user from which to obtain the rolemappings. + :param realm: Realm from which to obtain the rolemappings. + :return: The effective rollemappings of specified client and user of the realm (default "master"). 
+ """ + composite_rolemappings_url = URL_REALM_ROLEMAPPINGS_COMPOSITE.format(url=self.baseurl, realm=realm, id=uid) + try: + return self._request_and_deserialize(composite_rolemappings_url, method="GET") + except Exception as e: + self.fail_request(e, msg=f"Could not fetch effective rolemappings for user {uid}, realm {realm}: {e}") + + def get_user_by_username(self, username, realm="master"): + """ Fetch a keycloak user within a realm based on its username. + + If the user does not exist, None is returned. + :param username: Username of the user to fetch. + :param realm: Realm in which the user resides; default 'master' + """ + users_url = URL_USERS.format(url=self.baseurl, realm=realm) + users_url += f'?username={username}&exact=true' + try: + userrep = None + users = self._request_and_deserialize(users_url, method='GET') + for user in users: + if user['username'] == username: + userrep = user + break + return userrep + + except ValueError as e: + self.module.fail_json(msg=f'API returned incorrect JSON when trying to obtain the user for realm {realm} and username {username}: {e}') + except Exception as e: + self.fail_request(e, msg=f'Could not obtain the user for realm {realm} and username {username}: {e}') + + def get_service_account_user_by_client_id(self, client_id, realm="master"): + """ Fetch a keycloak service account user within a realm based on its client_id. + + If the user does not exist, None is returned. + :param client_id: clientId of the service account user to fetch. 
+ :param realm: Realm in which the user resides; default 'master' + """ + cid = self.get_client_id(client_id, realm=realm) + + service_account_user_url = URL_CLIENT_SERVICE_ACCOUNT_USER.format(url=self.baseurl, realm=realm, id=cid) + try: + return self._request_and_deserialize(service_account_user_url, method='GET') + except ValueError as e: + self.module.fail_json( + msg=f'API returned incorrect JSON when trying to obtain the service-account-user for realm {realm} and client_id {client_id}: {e}' + ) + except Exception as e: + self.fail_request(e, msg=f'Could not obtain the service-account-user for realm {realm} and client_id {client_id}: {e}') + + def add_user_rolemapping(self, uid, cid, role_rep, realm="master"): + """ Assign a realm or client role to a specified user on the Keycloak server. + + :param uid: ID of the user roles are assigned to. + :param cid: ID of the client from which to obtain the rolemappings. If empty, roles are from the realm + :param role_rep: Representation of the role to assign. + :param realm: Realm from which to obtain the rolemappings. + :return: None. 
+ """ + if cid is None: + user_realm_rolemappings_url = URL_REALM_ROLEMAPPINGS.format(url=self.baseurl, realm=realm, id=uid) + try: + self._request(user_realm_rolemappings_url, method="POST", data=json.dumps(role_rep)) + except Exception as e: + self.fail_request(e, msg=f"Could not map roles to userId {uid} for realm {realm} and roles {json.dumps(role_rep)}: {e}") + else: + user_client_rolemappings_url = URL_CLIENT_USER_ROLEMAPPINGS.format(url=self.baseurl, realm=realm, id=uid, client=cid) + try: + self._request(user_client_rolemappings_url, method="POST", data=json.dumps(role_rep)) + except Exception as e: + self.fail_request(e, msg=f"Could not map roles to userId {cid} for client {uid}, realm {realm} and roles {json.dumps(role_rep)}: {e}") + + def delete_user_rolemapping(self, uid, cid, role_rep, realm="master"): + """ Delete the rolemapping of a client in a specified user on the Keycloak server. + + :param uid: ID of the user from which to remove the rolemappings. + :param cid: ID of the client from which to remove the rolemappings. + :param role_rep: Representation of the role to remove from rolemappings. + :param realm: Realm from which to remove the rolemappings. + :return: None. 
+ """ + if cid is None: + user_realm_rolemappings_url = URL_REALM_ROLEMAPPINGS.format(url=self.baseurl, realm=realm, id=uid) + try: + self._request(user_realm_rolemappings_url, method="DELETE", data=json.dumps(role_rep)) + except Exception as e: + self.fail_request(e, msg=f"Could not remove roles {json.dumps(role_rep)} from userId {uid}, realm {realm}: {e}") + else: + user_client_rolemappings_url = URL_CLIENT_USER_ROLEMAPPINGS.format(url=self.baseurl, realm=realm, id=uid, client=cid) + try: + self._request(user_client_rolemappings_url, method="DELETE", data=json.dumps(role_rep)) + except Exception as e: + self.fail_request(e, msg=f"Could not remove roles {json.dumps(role_rep)} for client {cid} from userId {uid}, realm {realm}: {e}") def get_client_templates(self, realm='master'): """ Obtains client template representations for client templates in a realm @@ -573,14 +1029,11 @@ class KeycloakAPI(object): url = URL_CLIENTTEMPLATES.format(url=self.baseurl, realm=realm) try: - return json.loads(to_native(open_url(url, method='GET', headers=self.restheaders, timeout=self.connection_timeout, - validate_certs=self.validate_certs).read())) + return self._request_and_deserialize(url, method='GET') except ValueError as e: - self.module.fail_json(msg='API returned incorrect JSON when trying to obtain list of client templates for realm %s: %s' - % (realm, str(e))) + self.module.fail_json(msg=f'API returned incorrect JSON when trying to obtain list of client templates for realm {realm}: {e}') except Exception as e: - self.module.fail_json(msg='Could not obtain list of client templates for realm %s: %s' - % (realm, str(e))) + self.fail_request(e, msg=f'Could not obtain list of client templates for realm {realm}: {e}') def get_client_template_by_id(self, id, realm='master'): """ Obtain client template representation by id @@ -592,14 +1045,11 @@ class KeycloakAPI(object): url = URL_CLIENTTEMPLATE.format(url=self.baseurl, id=id, realm=realm) try: - return 
json.loads(to_native(open_url(url, method='GET', headers=self.restheaders, timeout=self.connection_timeout, - validate_certs=self.validate_certs).read())) + return self._request_and_deserialize(url, method='GET') except ValueError as e: - self.module.fail_json(msg='API returned incorrect JSON when trying to obtain client templates %s for realm %s: %s' - % (id, realm, str(e))) + self.module.fail_json(msg=f'API returned incorrect JSON when trying to obtain client templates {id} for realm {realm}: {e}') except Exception as e: - self.module.fail_json(msg='Could not obtain client template %s for realm %s: %s' - % (id, realm, str(e))) + self.fail_request(e, msg=f'Could not obtain client template {id} for realm {realm}: {e}') def get_client_template_by_name(self, name, realm='master'): """ Obtain client template representation by name @@ -638,11 +1088,9 @@ class KeycloakAPI(object): url = URL_CLIENTTEMPLATE.format(url=self.baseurl, realm=realm, id=id) try: - return open_url(url, method='PUT', headers=self.restheaders, timeout=self.connection_timeout, - data=json.dumps(clienttrep), validate_certs=self.validate_certs) + return self._request(url, method='PUT', data=json.dumps(clienttrep)) except Exception as e: - self.module.fail_json(msg='Could not update client template %s in realm %s: %s' - % (id, realm, str(e))) + self.fail_request(e, msg=f'Could not update client template {id} in realm {realm}: {e}') def create_client_template(self, clienttrep, realm="master"): """ Create a client in keycloak @@ -653,11 +1101,9 @@ class KeycloakAPI(object): url = URL_CLIENTTEMPLATES.format(url=self.baseurl, realm=realm) try: - return open_url(url, method='POST', headers=self.restheaders, timeout=self.connection_timeout, - data=json.dumps(clienttrep), validate_certs=self.validate_certs) + return self._request(url, method='POST', data=json.dumps(clienttrep)) except Exception as e: - self.module.fail_json(msg='Could not create client template %s in realm %s: %s' - % 
(clienttrep['clientId'], realm, str(e))) + self.fail_request(e, msg=f"Could not create client template {clienttrep['clientId']} in realm {realm}: {e}") def delete_client_template(self, id, realm="master"): """ Delete a client template from Keycloak @@ -669,11 +1115,9 @@ class KeycloakAPI(object): url = URL_CLIENTTEMPLATE.format(url=self.baseurl, realm=realm, id=id) try: - return open_url(url, method='DELETE', headers=self.restheaders, timeout=self.connection_timeout, - validate_certs=self.validate_certs) + return self._request(url, method='DELETE') except Exception as e: - self.module.fail_json(msg='Could not delete client template %s in realm %s: %s' - % (id, realm, str(e))) + self.fail_request(e, msg=f'Could not delete client template {id} in realm {realm}: {e}') def get_clientscopes(self, realm="master"): """ Fetch the name and ID of all clientscopes on the Keycloak server. @@ -686,11 +1130,9 @@ class KeycloakAPI(object): """ clientscopes_url = URL_CLIENTSCOPES.format(url=self.baseurl, realm=realm) try: - return json.loads(to_native(open_url(clientscopes_url, method="GET", headers=self.restheaders, timeout=self.connection_timeout, - validate_certs=self.validate_certs).read())) + return self._request_and_deserialize(clientscopes_url, method="GET") except Exception as e: - self.module.fail_json(msg="Could not fetch list of clientscopes in realm %s: %s" - % (realm, str(e))) + self.fail_request(e, msg=f"Could not fetch list of clientscopes in realm {realm}: {e}") def get_clientscope_by_clientscopeid(self, cid, realm="master"): """ Fetch a keycloak clientscope from the provided realm using the clientscope's unique ID. 
@@ -703,18 +1145,15 @@ class KeycloakAPI(object): """ clientscope_url = URL_CLIENTSCOPE.format(url=self.baseurl, realm=realm, id=cid) try: - return json.loads(to_native(open_url(clientscope_url, method="GET", headers=self.restheaders, timeout=self.connection_timeout, - validate_certs=self.validate_certs).read())) + return self._request_and_deserialize(clientscope_url, method="GET") except HTTPError as e: if e.code == 404: return None else: - self.module.fail_json(msg="Could not fetch clientscope %s in realm %s: %s" - % (cid, realm, str(e))) + self.fail_request(e, msg=f"Could not fetch clientscope {cid} in realm {realm}: {e}") except Exception as e: - self.module.fail_json(msg="Could not clientscope group %s in realm %s: %s" - % (cid, realm, str(e))) + self.module.fail_json(msg=f"Could not clientscope group {cid} in realm {realm}: {e}") def get_clientscope_by_name(self, name, realm="master"): """ Fetch a keycloak clientscope within a realm based on its name. @@ -737,8 +1176,7 @@ class KeycloakAPI(object): return None except Exception as e: - self.module.fail_json(msg="Could not fetch clientscope %s in realm %s: %s" - % (name, realm, str(e))) + self.module.fail_json(msg=f"Could not fetch clientscope {name} in realm {realm}: {e}") def create_clientscope(self, clientscoperep, realm="master"): """ Create a Keycloak clientscope. 
@@ -748,11 +1186,9 @@ class KeycloakAPI(object): """ clientscopes_url = URL_CLIENTSCOPES.format(url=self.baseurl, realm=realm) try: - return open_url(clientscopes_url, method='POST', headers=self.restheaders, timeout=self.connection_timeout, - data=json.dumps(clientscoperep), validate_certs=self.validate_certs) + return self._request(clientscopes_url, method='POST', data=json.dumps(clientscoperep)) except Exception as e: - self.module.fail_json(msg="Could not create clientscope %s in realm %s: %s" - % (clientscoperep['name'], realm, str(e))) + self.fail_request(e, msg=f"Could not create clientscope {clientscoperep['name']} in realm {realm}: {e}") def update_clientscope(self, clientscoperep, realm="master"): """ Update an existing clientscope. @@ -763,12 +1199,10 @@ class KeycloakAPI(object): clientscope_url = URL_CLIENTSCOPE.format(url=self.baseurl, realm=realm, id=clientscoperep['id']) try: - return open_url(clientscope_url, method='PUT', headers=self.restheaders, timeout=self.connection_timeout, - data=json.dumps(clientscoperep), validate_certs=self.validate_certs) + return self._request(clientscope_url, method='PUT', data=json.dumps(clientscoperep)) except Exception as e: - self.module.fail_json(msg='Could not update clientscope %s in realm %s: %s' - % (clientscoperep['name'], realm, str(e))) + self.fail_request(e, msg=f"Could not update clientscope {clientscoperep['name']} in realm {realm}: {e}") def delete_clientscope(self, name=None, cid=None, realm="master"): """ Delete a clientscope. One of name or cid must be provided. @@ -785,8 +1219,8 @@ class KeycloakAPI(object): # prefer an exception since this is almost certainly a programming error in the module itself. raise Exception("Unable to delete group - one of group ID or name must be provided.") - # only lookup the name if cid isn't provided. - # in the case that both are provided, prefer the ID, since it's one + # only lookup the name if cid is not provided. 
+ # in the case that both are provided, prefer the ID, since it is one # less lookup. if cid is None and name is not None: for clientscope in self.get_clientscopes(realm=realm): @@ -801,11 +1235,10 @@ class KeycloakAPI(object): # should have a good cid by here. clientscope_url = URL_CLIENTSCOPE.format(realm=realm, id=cid, url=self.baseurl) try: - return open_url(clientscope_url, method='DELETE', headers=self.restheaders, timeout=self.connection_timeout, - validate_certs=self.validate_certs) + return self._request(clientscope_url, method='DELETE') except Exception as e: - self.module.fail_json(msg="Unable to delete clientscope %s: %s" % (cid, str(e))) + self.fail_request(e, msg=f"Unable to delete clientscope {cid}: {e}") def get_clientscope_protocolmappers(self, cid, realm="master"): """ Fetch the name and ID of all clientscopes on the Keycloak server. @@ -819,11 +1252,9 @@ class KeycloakAPI(object): """ protocolmappers_url = URL_CLIENTSCOPE_PROTOCOLMAPPERS.format(id=cid, url=self.baseurl, realm=realm) try: - return json.loads(to_native(open_url(protocolmappers_url, method="GET", headers=self.restheaders, timeout=self.connection_timeout, - validate_certs=self.validate_certs).read())) + return self._request_and_deserialize(protocolmappers_url, method="GET") except Exception as e: - self.module.fail_json(msg="Could not fetch list of protocolmappers in realm %s: %s" - % (realm, str(e))) + self.fail_request(e, msg=f"Could not fetch list of protocolmappers in realm {realm}: {e}") def get_clientscope_protocolmapper_by_protocolmapperid(self, pid, cid, realm="master"): """ Fetch a keycloak clientscope from the provided realm using the clientscope's unique ID. 
@@ -838,18 +1269,15 @@ class KeycloakAPI(object): """ protocolmapper_url = URL_CLIENTSCOPE_PROTOCOLMAPPER.format(url=self.baseurl, realm=realm, id=cid, mapper_id=pid) try: - return json.loads(to_native(open_url(protocolmapper_url, method="GET", headers=self.restheaders, timeout=self.connection_timeout, - validate_certs=self.validate_certs).read())) + return self._request_and_deserialize(protocolmapper_url, method="GET") except HTTPError as e: if e.code == 404: return None else: - self.module.fail_json(msg="Could not fetch protocolmapper %s in realm %s: %s" - % (pid, realm, str(e))) + self.fail_request(e, msg=f"Could not fetch protocolmapper {pid} in realm {realm}: {e}") except Exception as e: - self.module.fail_json(msg="Could not fetch protocolmapper %s in realm %s: %s" - % (cid, realm, str(e))) + self.module.fail_json(msg=f"Could not fetch protocolmapper {cid} in realm {realm}: {e}") def get_clientscope_protocolmapper_by_name(self, cid, name, realm="master"): """ Fetch a keycloak clientscope within a realm based on its name. @@ -873,8 +1301,7 @@ class KeycloakAPI(object): return None except Exception as e: - self.module.fail_json(msg="Could not fetch protocolmapper %s in realm %s: %s" - % (name, realm, str(e))) + self.module.fail_json(msg=f"Could not fetch protocolmapper {name} in realm {realm}: {e}") def create_clientscope_protocolmapper(self, cid, mapper_rep, realm="master"): """ Create a Keycloak clientscope protocolmapper. 
@@ -885,11 +1312,9 @@ class KeycloakAPI(object): """ protocolmappers_url = URL_CLIENTSCOPE_PROTOCOLMAPPERS.format(url=self.baseurl, id=cid, realm=realm) try: - return open_url(protocolmappers_url, method='POST', headers=self.restheaders, timeout=self.connection_timeout, - data=json.dumps(mapper_rep), validate_certs=self.validate_certs) + return self._request(protocolmappers_url, method='POST', data=json.dumps(mapper_rep)) except Exception as e: - self.module.fail_json(msg="Could not create protocolmapper %s in realm %s: %s" - % (mapper_rep['name'], realm, str(e))) + self.fail_request(e, msg=f"Could not create protocolmapper {mapper_rep['name']} in realm {realm}: {e}") def update_clientscope_protocolmappers(self, cid, mapper_rep, realm="master"): """ Update an existing clientscope. @@ -901,12 +1326,172 @@ class KeycloakAPI(object): protocolmapper_url = URL_CLIENTSCOPE_PROTOCOLMAPPER.format(url=self.baseurl, realm=realm, id=cid, mapper_id=mapper_rep['id']) try: - return open_url(protocolmapper_url, method='PUT', headers=self.restheaders, timeout=self.connection_timeout, - data=json.dumps(mapper_rep), validate_certs=self.validate_certs) + return self._request(protocolmapper_url, method='PUT', data=json.dumps(mapper_rep)) except Exception as e: - self.module.fail_json(msg='Could not update protocolmappers for clientscope %s in realm %s: %s' - % (mapper_rep, realm, str(e))) + self.fail_request(e, msg=f'Could not update protocolmappers for clientscope {mapper_rep} in realm {realm}: {e}') + + def get_default_clientscopes(self, realm, client_id=None): + """Fetch the name and ID of all clientscopes on the Keycloak server. + + To fetch the full data of the client scope, make a subsequent call to + get_clientscope_by_clientscopeid, passing in the ID of the client scope you wish to return. + + :param realm: Realm in which the clientscope resides. + :param client_id: The client in which the clientscope resides. 
+ :return The default clientscopes of this realm or client + """ + url = URL_DEFAULT_CLIENTSCOPES if client_id is None else URL_CLIENT_DEFAULT_CLIENTSCOPES + return self._get_clientscopes_of_type(realm, url, 'default', client_id) + + def get_optional_clientscopes(self, realm, client_id=None): + """Fetch the name and ID of all clientscopes on the Keycloak server. + + To fetch the full data of the client scope, make a subsequent call to + get_clientscope_by_clientscopeid, passing in the ID of the client scope you wish to return. + + :param realm: Realm in which the clientscope resides. + :param client_id: The client in which the clientscope resides. + :return The optional clientscopes of this realm or client + """ + url = URL_OPTIONAL_CLIENTSCOPES if client_id is None else URL_CLIENT_OPTIONAL_CLIENTSCOPES + return self._get_clientscopes_of_type(realm, url, 'optional', client_id) + + def _get_clientscopes_of_type(self, realm, url_template, scope_type, client_id=None): + """Fetch the name and ID of all clientscopes on the Keycloak server. + + To fetch the full data of the client scope, make a subsequent call to + get_clientscope_by_clientscopeid, passing in the ID of the client scope you wish to return. + + :param realm: Realm in which the clientscope resides. + :param url_template the template for the right type + :param scope_type this can be either optional or default + :param client_id: The client in which the clientscope resides. 
+ :return The clientscopes of the specified type of this realm + """ + if client_id is None: + clientscopes_url = url_template.format(url=self.baseurl, realm=realm) + try: + return self._request_and_deserialize(clientscopes_url, method="GET") + except Exception as e: + self.fail_request(e, msg=f"Could not fetch list of {scope_type} clientscopes in realm {realm}: {e}") + else: + cid = self.get_client_id(client_id=client_id, realm=realm) + clientscopes_url = url_template.format(url=self.baseurl, realm=realm, cid=cid) + try: + return self._request_and_deserialize(clientscopes_url, method="GET") + except Exception as e: + self.fail_request(e, msg=f"Could not fetch list of {scope_type} clientscopes in client {client_id}: {clientscopes_url}") + + def _decide_url_type_clientscope(self, client_id=None, scope_type="default"): + """Decides which url to use. + :param scope_type this can be either optional or default + :param client_id: The client in which the clientscope resides. + """ + if client_id is None: + if scope_type == "default": + return URL_DEFAULT_CLIENTSCOPE + if scope_type == "optional": + return URL_OPTIONAL_CLIENTSCOPE + else: + if scope_type == "default": + return URL_CLIENT_DEFAULT_CLIENTSCOPE + if scope_type == "optional": + return URL_CLIENT_OPTIONAL_CLIENTSCOPE + + def add_default_clientscope(self, id, realm="master", client_id=None): + """Add a client scope as default either on realm or client level. + + :param id: Client scope Id. + :param realm: Realm in which the clientscope resides. + :param client_id: The client in which the clientscope resides. + """ + self._action_type_clientscope(id, client_id, "default", realm, 'add') + + def add_optional_clientscope(self, id, realm="master", client_id=None): + """Add a client scope as optional either on realm or client level. + + :param id: Client scope Id. + :param realm: Realm in which the clientscope resides. + :param client_id: The client in which the clientscope resides. 
+ """ + self._action_type_clientscope(id, client_id, "optional", realm, 'add') + + def delete_default_clientscope(self, id, realm="master", client_id=None): + """Remove a client scope as default either on realm or client level. + + :param id: Client scope Id. + :param realm: Realm in which the clientscope resides. + :param client_id: The client in which the clientscope resides. + """ + self._action_type_clientscope(id, client_id, "default", realm, 'delete') + + def delete_optional_clientscope(self, id, realm="master", client_id=None): + """Remove a client scope as optional either on realm or client level. + + :param id: Client scope Id. + :param realm: Realm in which the clientscope resides. + :param client_id: The client in which the clientscope resides. + """ + self._action_type_clientscope(id, client_id, "optional", realm, 'delete') + + def _action_type_clientscope(self, id=None, client_id=None, scope_type="default", realm="master", action='add'): + """ Delete or add a clientscope of type. + :param name: The name of the clientscope. A lookup will be performed to retrieve the clientscope ID. + :param client_id: The ID of the clientscope (preferred to name). + :param scope_type 'default' or 'optional' + :param realm: The realm in which this group resides, default "master". + """ + cid = None if client_id is None else self.get_client_id(client_id=client_id, realm=realm) + # should have a good cid by here. 
+ clientscope_type_url = self._decide_url_type_clientscope(client_id, scope_type).format(realm=realm, id=id, cid=cid, url=self.baseurl) + try: + method = 'PUT' if action == "add" else 'DELETE' + return self._request(clientscope_type_url, method=method) + + except Exception as e: + place = 'realm' if client_id is None else f"client {client_id}" + self.fail_request(e, msg=f"Unable to {action} {scope_type} clientscope {id} @ {place} : {e}") + + def create_clientsecret(self, id, realm="master"): + """ Generate a new client secret by id + + :param id: id (not clientId) of client to be queried + :param realm: client from this realm + :return: dict of credential representation + """ + clientsecret_url = URL_CLIENTSECRET.format(url=self.baseurl, realm=realm, id=id) + + try: + return self._request_and_deserialize(clientsecret_url, method='POST') + + except HTTPError as e: + if e.code == 404: + return None + else: + self.fail_request(e, msg=f'Could not obtain clientsecret of client {id} for realm {realm}: {e}') + except Exception as e: + self.module.fail_json(msg=f'Could not obtain clientsecret of client {id} for realm {realm}: {e}') + + def get_clientsecret(self, id, realm="master"): + """ Obtain client secret by id + + :param id: id (not clientId) of client to be queried + :param realm: client from this realm + :return: dict of credential representation + """ + clientsecret_url = URL_CLIENTSECRET.format(url=self.baseurl, realm=realm, id=id) + + try: + return self._request_and_deserialize(clientsecret_url, method='GET') + + except HTTPError as e: + if e.code == 404: + return None + else: + self.fail_request(e, msg=f'Could not obtain clientsecret of client {id} for realm {realm}: {e}') + except Exception as e: + self.module.fail_json(msg=f'Could not obtain clientsecret of client {id} for realm {realm}: {e}') def get_groups(self, realm="master"): """ Fetch the name and ID of all groups on the Keycloak server. 
@@ -918,11 +1503,9 @@ class KeycloakAPI(object): """ groups_url = URL_GROUPS.format(url=self.baseurl, realm=realm) try: - return json.loads(to_native(open_url(groups_url, method="GET", headers=self.restheaders, timeout=self.connection_timeout, - validate_certs=self.validate_certs).read())) + return self._request_and_deserialize(groups_url, method="GET") except Exception as e: - self.module.fail_json(msg="Could not fetch list of groups in realm %s: %s" - % (realm, str(e))) + self.fail_request(e, msg=f"Could not fetch list of groups in realm {realm}: {e}") def get_group_by_groupid(self, gid, realm="master"): """ Fetch a keycloak group from the provided realm using the group's unique ID. @@ -935,20 +1518,31 @@ class KeycloakAPI(object): """ groups_url = URL_GROUP.format(url=self.baseurl, realm=realm, groupid=gid) try: - return json.loads(to_native(open_url(groups_url, method="GET", headers=self.restheaders, timeout=self.connection_timeout, - validate_certs=self.validate_certs).read())) - + return self._request_and_deserialize(groups_url, method="GET") except HTTPError as e: if e.code == 404: return None else: - self.module.fail_json(msg="Could not fetch group %s in realm %s: %s" - % (gid, realm, str(e))) + self.fail_request(e, msg=f"Could not fetch group {gid} in realm {realm}: {e}") except Exception as e: - self.module.fail_json(msg="Could not fetch group %s in realm %s: %s" - % (gid, realm, str(e))) + self.module.fail_json(msg=f"Could not fetch group {gid} in realm {realm}: {e}") - def get_group_by_name(self, name, realm="master"): + def get_subgroups(self, parent, realm="master"): + if 'subGroupCount' in parent: + # Since version 23, when GETting a group Keycloak does not + # return subGroups but only a subGroupCount. + # Children must be fetched in a second request. 
+ if parent['subGroupCount'] == 0: + group_children = [] + else: + group_children_url = f"{URL_GROUP_CHILDREN.format(url=self.baseurl, realm=realm, groupid=parent['id'])}?max={parent['subGroupCount']}" + group_children = self._request_and_deserialize(group_children_url, method="GET") + subgroups = group_children + else: + subgroups = parent['subGroups'] + return subgroups + + def get_group_by_name(self, name, realm="master", parents=None): """ Fetch a keycloak group within a realm based on its name. The Keycloak API does not allow filtering of the Groups resource by name. @@ -958,10 +1552,18 @@ class KeycloakAPI(object): If the group does not exist, None is returned. :param name: Name of the group to fetch. :param realm: Realm in which the group resides; default 'master' + :param parents: Optional list of parents when group to look for is a subgroup """ - groups_url = URL_GROUPS.format(url=self.baseurl, realm=realm) try: - all_groups = self.get_groups(realm=realm) + if parents: + parent = self.get_subgroup_direct_parent(parents, realm) + + if not parent: + return None + + all_groups = self.get_subgroups(parent, realm) + else: + all_groups = self.get_groups(realm=realm) for group in all_groups: if group['name'] == name: @@ -970,8 +1572,103 @@ class KeycloakAPI(object): return None except Exception as e: - self.module.fail_json(msg="Could not fetch group %s in realm %s: %s" - % (name, realm, str(e))) + self.module.fail_json(msg=f"Could not fetch group {name} in realm {realm}: {e}") + + def _get_normed_group_parent(self, parent): + """ Converts parent dict information into a more easy to use form. + + :param parent: parent describing dict + """ + if parent['id']: + return (parent['id'], True) + + return (parent['name'], False) + + def get_subgroup_by_chain(self, name_chain, realm="master"): + """ Access a subgroup API object by walking down a given name/id chain. 
+ + Groups can be given either as by name or by ID, the first element + must either be a toplvl group or given as ID, all parents must exist. + + If the group cannot be found, None is returned. + :param name_chain: Topdown ordered list of subgroup parent (ids or names) + its own name at the end + :param realm: Realm in which the group resides; default 'master' + """ + cp = name_chain[0] + + # for 1st parent in chain we must query the server + cp, is_id = self._get_normed_group_parent(cp) + + if is_id: + tmp = self.get_group_by_groupid(cp, realm=realm) + else: + # given as name, assume toplvl group + tmp = self.get_group_by_name(cp, realm=realm) + + if not tmp: + return None + + for p in name_chain[1:]: + for sg in self.get_subgroups(tmp, realm): + pv, is_id = self._get_normed_group_parent(p) + + if is_id: + cmpkey = "id" + else: + cmpkey = "name" + + if pv == sg[cmpkey]: + tmp = sg + break + + if not tmp: + return None + + return tmp + + def get_subgroup_direct_parent(self, parents, realm="master", children_to_resolve=None): + """ Get keycloak direct parent group API object for a given chain of parents. + + To successfully work the API for subgroups we actually don't need + to "walk the whole tree" for nested groups but only need to know + the ID for the direct predecessor of current subgroup. This + method will guarantee us this information getting there with + as minimal work as possible. + + Note that given parent list can and might be incomplete at the + upper levels as long as it starts with an ID instead of a name + + If the group does not exist, None is returned. 
+ :param parents: Topdown ordered list of subgroup parents + :param realm: Realm in which the group resides; default 'master' + """ + if children_to_resolve is None: + # start recursion by reversing parents (in optimal cases + # we dont need to walk the whole tree upwarts) + parents = list(reversed(parents)) + children_to_resolve = [] + + if not parents: + # walk complete parents list to the top, all names, no id's, + # try to resolve it assuming list is complete and 1st + # element is a toplvl group + return self.get_subgroup_by_chain(list(reversed(children_to_resolve)), realm=realm) + + cp = parents[0] + unused, is_id = self._get_normed_group_parent(cp) + + if is_id: + # current parent is given as ID, we can stop walking + # upwards searching for an entry point + return self.get_subgroup_by_chain([cp] + list(reversed(children_to_resolve)), realm=realm) + else: + # current parent is given as name, it must be resolved + # later, try next parent (recurse) + children_to_resolve.append(cp) + return self.get_subgroup_direct_parent( + parents[1:], + realm=realm, children_to_resolve=children_to_resolve + ) def create_group(self, grouprep, realm="master"): """ Create a Keycloak group. @@ -981,11 +1678,35 @@ class KeycloakAPI(object): """ groups_url = URL_GROUPS.format(url=self.baseurl, realm=realm) try: - return open_url(groups_url, method='POST', headers=self.restheaders, timeout=self.connection_timeout, - data=json.dumps(grouprep), validate_certs=self.validate_certs) + return self._request(groups_url, method='POST', data=json.dumps(grouprep)) except Exception as e: - self.module.fail_json(msg="Could not create group %s in realm %s: %s" - % (grouprep['name'], realm, str(e))) + self.fail_request(e, msg=f"Could not create group {grouprep['name']} in realm {realm}: {e}") + + def create_subgroup(self, parents, grouprep, realm="master"): + """ Create a Keycloak subgroup. 
+ + :param parents: list of one or more parent groups + :param grouprep: a GroupRepresentation of the group to be created. Must contain at minimum the field name. + :return: HTTPResponse object on success + """ + parent_id = "---UNDETERMINED---" + try: + parent_id = self.get_subgroup_direct_parent(parents, realm) + + if not parent_id: + raise Exception( + "Could not determine subgroup parent ID for given" + f" parent chain {parents}. Assure that all parents exist" + " already and the list is complete and properly" + " ordered, starts with an ID or starts at the" + " top level" + ) + + parent_id = parent_id["id"] + url = URL_GROUP_CHILDREN.format(url=self.baseurl, realm=realm, groupid=parent_id) + return self._request(url, method='POST', data=json.dumps(grouprep)) + except Exception as e: + self.fail_request(e, msg=f"Could not create subgroup {grouprep['name']} for parent group {parent_id} in realm {realm}: {e}") def update_group(self, grouprep, realm="master"): """ Update an existing group. @@ -996,11 +1717,9 @@ class KeycloakAPI(object): group_url = URL_GROUP.format(url=self.baseurl, realm=realm, groupid=grouprep['id']) try: - return open_url(group_url, method='PUT', headers=self.restheaders, timeout=self.connection_timeout, - data=json.dumps(grouprep), validate_certs=self.validate_certs) + return self._request(group_url, method='PUT', data=json.dumps(grouprep)) except Exception as e: - self.module.fail_json(msg='Could not update group %s in realm %s: %s' - % (grouprep['name'], realm, str(e))) + self.fail_request(e, msg=f"Could not update group {grouprep['name']} in realm {realm}: {e}") def delete_group(self, name=None, groupid=None, realm="master"): """ Delete a group. One of name or groupid must be provided. @@ -1018,7 +1737,7 @@ class KeycloakAPI(object): raise Exception("Unable to delete group - one of group ID or name must be provided.") # only lookup the name if groupid isn't provided. 
- # in the case that both are provided, prefer the ID, since it's one + # in the case that both are provided, prefer the ID, since it is one # less lookup. if groupid is None and name is not None: for group in self.get_groups(realm=realm): @@ -1033,10 +1752,9 @@ class KeycloakAPI(object): # should have a good groupid by here. group_url = URL_GROUP.format(realm=realm, groupid=groupid, url=self.baseurl) try: - return open_url(group_url, method='DELETE', headers=self.restheaders, timeout=self.connection_timeout, - validate_certs=self.validate_certs) + return self._request(group_url, method='DELETE') except Exception as e: - self.module.fail_json(msg="Unable to delete group %s: %s" % (groupid, str(e))) + self.fail_request(e, msg=f"Unable to delete group {groupid}: {e}") def get_realm_roles(self, realm='master'): """ Obtains role representations for roles in a realm @@ -1046,14 +1764,11 @@ class KeycloakAPI(object): """ rolelist_url = URL_REALM_ROLES.format(url=self.baseurl, realm=realm) try: - return json.loads(to_native(open_url(rolelist_url, method='GET', headers=self.restheaders, timeout=self.connection_timeout, - validate_certs=self.validate_certs).read())) + return self._request_and_deserialize(rolelist_url, method='GET') except ValueError as e: - self.module.fail_json(msg='API returned incorrect JSON when trying to obtain list of roles for realm %s: %s' - % (realm, str(e))) + self.module.fail_json(msg=f'API returned incorrect JSON when trying to obtain list of roles for realm {realm}: {e}') except Exception as e: - self.module.fail_json(msg='Could not obtain list of roles for realm %s: %s' - % (realm, str(e))) + self.fail_request(e, msg=f'Could not obtain list of roles for realm {realm}: {e}') def get_realm_role(self, name, realm='master'): """ Fetch a keycloak role from the provided realm using the role's name. @@ -1062,19 +1777,16 @@ class KeycloakAPI(object): :param name: Name of the role to fetch. 
:param realm: Realm in which the role resides; default 'master'. """ - role_url = URL_REALM_ROLE.format(url=self.baseurl, realm=realm, name=quote(name)) + role_url = URL_REALM_ROLE.format(url=self.baseurl, realm=realm, name=quote(name, safe='')) try: - return json.loads(to_native(open_url(role_url, method="GET", headers=self.restheaders, timeout=self.connection_timeout, - validate_certs=self.validate_certs).read())) + return self._request_and_deserialize(role_url, method="GET") except HTTPError as e: if e.code == 404: return None else: - self.module.fail_json(msg='Could not fetch role %s in realm %s: %s' - % (name, realm, str(e))) + self.fail_request(e, msg=f'Could not fetch role {name} in realm {realm}: {e}') except Exception as e: - self.module.fail_json(msg='Could not fetch role %s in realm %s: %s' - % (name, realm, str(e))) + self.module.fail_json(msg=f'Could not fetch role {name} in realm {realm}: {e}') def create_realm_role(self, rolerep, realm='master'): """ Create a Keycloak realm role. @@ -1084,11 +1796,12 @@ class KeycloakAPI(object): """ roles_url = URL_REALM_ROLES.format(url=self.baseurl, realm=realm) try: - return open_url(roles_url, method='POST', headers=self.restheaders, timeout=self.connection_timeout, - data=json.dumps(rolerep), validate_certs=self.validate_certs) + if "composites" in rolerep: + keycloak_compatible_composites = self.convert_role_composites(rolerep["composites"]) + rolerep["composites"] = keycloak_compatible_composites + return self._request(roles_url, method='POST', data=json.dumps(rolerep)) except Exception as e: - self.module.fail_json(msg='Could not create role %s in realm %s: %s' - % (rolerep['name'], realm, str(e))) + self.fail_request(e, msg=f"Could not create role {rolerep['name']} in realm {realm}: {e}") def update_realm_role(self, rolerep, realm='master'): """ Update an existing realm role. @@ -1096,13 +1809,112 @@ class KeycloakAPI(object): :param rolerep: A RoleRepresentation of the updated role. 
:return HTTPResponse object on success """ - role_url = URL_REALM_ROLE.format(url=self.baseurl, realm=realm, name=quote(rolerep['name'])) + role_url = URL_REALM_ROLE.format(url=self.baseurl, realm=realm, name=quote(rolerep['name']), safe='') try: - return open_url(role_url, method='PUT', headers=self.restheaders, timeout=self.connection_timeout, - data=json.dumps(rolerep), validate_certs=self.validate_certs) + composites = None + if "composites" in rolerep: + composites = copy.deepcopy(rolerep["composites"]) + del rolerep["composites"] + role_response = self._request(role_url, method='PUT', data=json.dumps(rolerep)) + if composites is not None: + self.update_role_composites(rolerep=rolerep, composites=composites, realm=realm) + return role_response except Exception as e: - self.module.fail_json(msg='Could not update role %s in realm %s: %s' - % (rolerep['name'], realm, str(e))) + self.fail_request(e, msg=f"Could not update role {rolerep['name']} in realm {realm}: {e}") + + def get_role_composites(self, rolerep, clientid=None, realm='master'): + composite_url = '' + try: + if clientid is not None: + client = self.get_client_by_clientid(client_id=clientid, realm=realm) + cid = client['id'] + composite_url = URL_CLIENT_ROLE_COMPOSITES.format(url=self.baseurl, realm=realm, id=cid, name=quote(rolerep["name"], safe='')) + else: + composite_url = URL_REALM_ROLE_COMPOSITES.format(url=self.baseurl, realm=realm, name=quote(rolerep["name"], safe='')) + # Get existing composites + return self._request_and_deserialize(composite_url, method='GET') + except Exception as e: + self.fail_request(e, msg=f"Could not get role {rolerep['name']} composites in realm {realm}: {e}") + + def create_role_composites(self, rolerep, composites, clientid=None, realm='master'): + composite_url = '' + try: + if clientid is not None: + client = self.get_client_by_clientid(client_id=clientid, realm=realm) + cid = client['id'] + composite_url = URL_CLIENT_ROLE_COMPOSITES.format(url=self.baseurl, 
realm=realm, id=cid, name=quote(rolerep["name"], safe='')) + else: + composite_url = URL_REALM_ROLE_COMPOSITES.format(url=self.baseurl, realm=realm, name=quote(rolerep["name"], safe='')) + # Get existing composites + # create new composites + return self._request(composite_url, method='POST', data=json.dumps(composites)) + except Exception as e: + self.fail_request(e, msg=f"Could not create role {rolerep['name']} composites in realm {realm}: {e}") + + def delete_role_composites(self, rolerep, composites, clientid=None, realm='master'): + composite_url = '' + try: + if clientid is not None: + client = self.get_client_by_clientid(client_id=clientid, realm=realm) + cid = client['id'] + composite_url = URL_CLIENT_ROLE_COMPOSITES.format(url=self.baseurl, realm=realm, id=cid, name=quote(rolerep["name"], safe='')) + else: + composite_url = URL_REALM_ROLE_COMPOSITES.format(url=self.baseurl, realm=realm, name=quote(rolerep["name"], safe='')) + # Get existing composites + # create new composites + return self._request(composite_url, method='DELETE', data=json.dumps(composites)) + except Exception as e: + self.fail_request(e, msg=f"Could not create role {rolerep['name']} composites in realm {realm}: {e}") + + def update_role_composites(self, rolerep, composites, clientid=None, realm='master'): + # Get existing composites + existing_composites = self.get_role_composites(rolerep=rolerep, clientid=clientid, realm=realm) + composites_to_be_created = [] + composites_to_be_deleted = [] + for composite in composites: + composite_found = False + existing_composite_client = None + for existing_composite in existing_composites: + if existing_composite["clientRole"]: + existing_composite_client = self.get_client_by_id(existing_composite["containerId"], realm=realm) + if ("client_id" in composite + and composite['client_id'] is not None + and existing_composite_client["clientId"] == composite["client_id"] + and composite["name"] == existing_composite["name"]): + composite_found = True + 
break + else: + if (("client_id" not in composite or composite['client_id'] is None) + and composite["name"] == existing_composite["name"]): + composite_found = True + break + if not composite_found and ('state' not in composite or composite['state'] == 'present'): + if "client_id" in composite and composite['client_id'] is not None: + client_roles = self.get_client_roles(clientid=composite['client_id'], realm=realm) + for client_role in client_roles: + if client_role['name'] == composite['name']: + composites_to_be_created.append(client_role) + break + else: + realm_role = self.get_realm_role(name=composite["name"], realm=realm) + composites_to_be_created.append(realm_role) + elif composite_found and 'state' in composite and composite['state'] == 'absent': + if "client_id" in composite and composite['client_id'] is not None: + client_roles = self.get_client_roles(clientid=composite['client_id'], realm=realm) + for client_role in client_roles: + if client_role['name'] == composite['name']: + composites_to_be_deleted.append(client_role) + break + else: + realm_role = self.get_realm_role(name=composite["name"], realm=realm) + composites_to_be_deleted.append(realm_role) + + if len(composites_to_be_created) > 0: + # create new composites + self.create_role_composites(rolerep=rolerep, composites=composites_to_be_created, clientid=clientid, realm=realm) + if len(composites_to_be_deleted) > 0: + # delete new composites + self.delete_role_composites(rolerep=rolerep, composites=composites_to_be_deleted, clientid=clientid, realm=realm) def delete_realm_role(self, name, realm='master'): """ Delete a realm role. @@ -1110,13 +1922,11 @@ class KeycloakAPI(object): :param name: The name of the role. :param realm: The realm in which this role resides, default "master". 
""" - role_url = URL_REALM_ROLE.format(url=self.baseurl, realm=realm, name=quote(name)) + role_url = URL_REALM_ROLE.format(url=self.baseurl, realm=realm, name=quote(name, safe='')) try: - return open_url(role_url, method='DELETE', headers=self.restheaders, timeout=self.connection_timeout, - validate_certs=self.validate_certs) + return self._request(role_url, method='DELETE') except Exception as e: - self.module.fail_json(msg='Unable to delete role %s in realm %s: %s' - % (name, realm, str(e))) + self.fail_request(e, msg=f'Unable to delete role {name} in realm {realm}: {e}') def get_client_roles(self, clientid, realm='master'): """ Obtains role representations for client roles in a specific client @@ -1127,18 +1937,14 @@ class KeycloakAPI(object): """ cid = self.get_client_id(clientid, realm=realm) if cid is None: - self.module.fail_json(msg='Could not find client %s in realm %s' - % (clientid, realm)) + self.module.fail_json(msg=f'Could not find client {clientid} in realm {realm}') rolelist_url = URL_CLIENT_ROLES.format(url=self.baseurl, realm=realm, id=cid) try: - return json.loads(to_native(open_url(rolelist_url, method='GET', headers=self.restheaders, timeout=self.connection_timeout, - validate_certs=self.validate_certs).read())) + return self._request_and_deserialize(rolelist_url, method='GET') except ValueError as e: - self.module.fail_json(msg='API returned incorrect JSON when trying to obtain list of roles for client %s in realm %s: %s' - % (clientid, realm, str(e))) + self.module.fail_json(msg=f'API returned incorrect JSON when trying to obtain list of roles for client {clientid} in realm {realm}: {e}') except Exception as e: - self.module.fail_json(msg='Could not obtain list of roles for client %s in realm %s: %s' - % (clientid, realm, str(e))) + self.fail_request(e, msg=f'Could not obtain list of roles for client {clientid} in realm {realm}: {e}') def get_client_role(self, name, clientid, realm='master'): """ Fetch a keycloak client role from the provided 
realm using the role's name. @@ -1151,21 +1957,17 @@ class KeycloakAPI(object): """ cid = self.get_client_id(clientid, realm=realm) if cid is None: - self.module.fail_json(msg='Could not find client %s in realm %s' - % (clientid, realm)) - role_url = URL_CLIENT_ROLE.format(url=self.baseurl, realm=realm, id=cid, name=quote(name)) + self.module.fail_json(msg=f'Could not find client {clientid} in realm {realm}') + role_url = URL_CLIENT_ROLE.format(url=self.baseurl, realm=realm, id=cid, name=quote(name, safe='')) try: - return json.loads(to_native(open_url(role_url, method="GET", headers=self.restheaders, timeout=self.connection_timeout, - validate_certs=self.validate_certs).read())) + return self._request_and_deserialize(role_url, method="GET") except HTTPError as e: if e.code == 404: return None else: - self.module.fail_json(msg='Could not fetch role %s in client %s of realm %s: %s' - % (name, clientid, realm, str(e))) + self.fail_request(e, msg=f'Could not fetch role {name} in client {clientid} of realm {realm}: {e}') except Exception as e: - self.module.fail_json(msg='Could not fetch role %s for client %s in realm %s: %s' - % (name, clientid, realm, str(e))) + self.module.fail_json(msg=f'Could not fetch role {name} for client {clientid} in realm {realm}: {e}') def create_client_role(self, rolerep, clientid, realm='master'): """ Create a Keycloak client role. 
@@ -1177,15 +1979,30 @@ class KeycloakAPI(object): """ cid = self.get_client_id(clientid, realm=realm) if cid is None: - self.module.fail_json(msg='Could not find client %s in realm %s' - % (clientid, realm)) + self.module.fail_json(msg=f'Could not find client {clientid} in realm {realm}') roles_url = URL_CLIENT_ROLES.format(url=self.baseurl, realm=realm, id=cid) try: - return open_url(roles_url, method='POST', headers=self.restheaders, timeout=self.connection_timeout, - data=json.dumps(rolerep), validate_certs=self.validate_certs) + if "composites" in rolerep: + keycloak_compatible_composites = self.convert_role_composites(rolerep["composites"]) + rolerep["composites"] = keycloak_compatible_composites + return self._request(roles_url, method='POST', data=json.dumps(rolerep)) except Exception as e: - self.module.fail_json(msg='Could not create role %s for client %s in realm %s: %s' - % (rolerep['name'], clientid, realm, str(e))) + self.fail_request(e, msg=f"Could not create role {rolerep['name']} for client {clientid} in realm {realm}: {e}") + + def convert_role_composites(self, composites): + keycloak_compatible_composites = { + 'client': {}, + 'realm': [] + } + for composite in composites: + if 'state' not in composite or composite['state'] == 'present': + if "client_id" in composite and composite["client_id"] is not None: + if composite["client_id"] not in keycloak_compatible_composites["client"]: + keycloak_compatible_composites["client"][composite["client_id"]] = [] + keycloak_compatible_composites["client"][composite["client_id"]].append(composite["name"]) + else: + keycloak_compatible_composites["realm"].append(composite["name"]) + return keycloak_compatible_composites def update_client_role(self, rolerep, clientid, realm="master"): """ Update an existing client role. 
@@ -1197,15 +2014,19 @@ class KeycloakAPI(object): """ cid = self.get_client_id(clientid, realm=realm) if cid is None: - self.module.fail_json(msg='Could not find client %s in realm %s' - % (clientid, realm)) - role_url = URL_CLIENT_ROLE.format(url=self.baseurl, realm=realm, id=cid, name=quote(rolerep['name'])) + self.module.fail_json(msg=f'Could not find client {clientid} in realm {realm}') + role_url = URL_CLIENT_ROLE.format(url=self.baseurl, realm=realm, id=cid, name=quote(rolerep['name'], safe='')) try: - return open_url(role_url, method='PUT', headers=self.restheaders, timeout=self.connection_timeout, - data=json.dumps(rolerep), validate_certs=self.validate_certs) + composites = None + if "composites" in rolerep: + composites = copy.deepcopy(rolerep["composites"]) + del rolerep['composites'] + update_role_response = self._request(role_url, method='PUT', data=json.dumps(rolerep)) + if composites is not None: + self.update_role_composites(rolerep=rolerep, clientid=clientid, composites=composites, realm=realm) + return update_role_response except Exception as e: - self.module.fail_json(msg='Could not update role %s for client %s in realm %s: %s' - % (rolerep['name'], clientid, realm, str(e))) + self.fail_request(e, msg=f"Could not update role {rolerep['name']} for client {clientid} in realm {realm}: {e}") def delete_client_role(self, name, clientid, realm="master"): """ Delete a role. One of name or roleid must be provided. 
@@ -1216,19 +2037,16 @@ class KeycloakAPI(object): """ cid = self.get_client_id(clientid, realm=realm) if cid is None: - self.module.fail_json(msg='Could not find client %s in realm %s' - % (clientid, realm)) - role_url = URL_CLIENT_ROLE.format(url=self.baseurl, realm=realm, id=cid, name=quote(name)) + self.module.fail_json(msg=f'Could not find client {clientid} in realm {realm}') + role_url = URL_CLIENT_ROLE.format(url=self.baseurl, realm=realm, id=cid, name=quote(name, safe='')) try: - return open_url(role_url, method='DELETE', headers=self.restheaders, timeout=self.connection_timeout, - validate_certs=self.validate_certs) + return self._request(role_url, method='DELETE') except Exception as e: - self.module.fail_json(msg='Unable to delete role %s for client %s in realm %s: %s' - % (name, clientid, realm, str(e))) + self.fail_request(e, msg=f'Unable to delete role {name} for client {clientid} in realm {realm}: {e}') def get_authentication_flow_by_alias(self, alias, realm='master'): """ - Get an authentication flow by it's alias + Get an authentication flow by its alias :param alias: Alias of the authentication flow to get. :param realm: Realm. :return: Authentication flow representation. 
@@ -1236,15 +2054,14 @@ class KeycloakAPI(object): try: authentication_flow = {} # Check if the authentication flow exists on the Keycloak serveraders - authentications = json.load(open_url(URL_AUTHENTICATION_FLOWS.format(url=self.baseurl, realm=realm), method='GET', - headers=self.restheaders, timeout=self.connection_timeout, validate_certs=self.validate_certs)) + authentications = json.load(self._request(URL_AUTHENTICATION_FLOWS.format(url=self.baseurl, realm=realm), method='GET')) for authentication in authentications: if authentication["alias"] == alias: authentication_flow = authentication break return authentication_flow except Exception as e: - self.module.fail_json(msg="Unable get authentication flow %s: %s" % (alias, str(e))) + self.fail_request(e, msg=f"Unable get authentication flow {alias}: {e}") def delete_authentication_flow_by_id(self, id, realm='master'): """ @@ -1256,11 +2073,9 @@ class KeycloakAPI(object): flow_url = URL_AUTHENTICATION_FLOW.format(url=self.baseurl, realm=realm, id=id) try: - return open_url(flow_url, method='DELETE', headers=self.restheaders, timeout=self.connection_timeout, - validate_certs=self.validate_certs) + return self._request(flow_url, method='DELETE') except Exception as e: - self.module.fail_json(msg='Could not delete authentication flow %s in realm %s: %s' - % (id, realm, str(e))) + self.fail_request(e, msg=f'Could not delete authentication flow {id} in realm {realm}: {e}') def copy_auth_flow(self, config, realm='master'): """ @@ -1273,31 +2088,24 @@ class KeycloakAPI(object): new_name = dict( newName=config["alias"] ) - open_url( + self._request( URL_AUTHENTICATION_FLOW_COPY.format( url=self.baseurl, realm=realm, - copyfrom=quote(config["copyFrom"])), + copyfrom=quote(config["copyFrom"], safe='')), method='POST', - headers=self.restheaders, - data=json.dumps(new_name), - timeout=self.connection_timeout, - validate_certs=self.validate_certs) + data=json.dumps(new_name)) flow_list = json.load( - open_url( + 
self._request( URL_AUTHENTICATION_FLOWS.format(url=self.baseurl, realm=realm), - method='GET', - headers=self.restheaders, - timeout=self.connection_timeout, - validate_certs=self.validate_certs)) + method='GET')) for flow in flow_list: if flow["alias"] == config["alias"]: return flow return None except Exception as e: - self.module.fail_json(msg='Could not copy authentication flow %s in realm %s: %s' - % (config["alias"], realm, str(e))) + self.fail_request(e, msg=f"Could not copy authentication flow {config['alias']} in realm {realm}: {e}") def create_empty_auth_flow(self, config, realm='master'): """ @@ -1313,31 +2121,24 @@ class KeycloakAPI(object): description=config["description"], topLevel=True ) - open_url( + self._request( URL_AUTHENTICATION_FLOWS.format( url=self.baseurl, realm=realm), method='POST', - headers=self.restheaders, - data=json.dumps(new_flow), - timeout=self.connection_timeout, - validate_certs=self.validate_certs) + data=json.dumps(new_flow)) flow_list = json.load( - open_url( + self._request( URL_AUTHENTICATION_FLOWS.format( url=self.baseurl, realm=realm), - method='GET', - headers=self.restheaders, - timeout=self.connection_timeout, - validate_certs=self.validate_certs)) + method='GET')) for flow in flow_list: if flow["alias"] == config["alias"]: return flow return None except Exception as e: - self.module.fail_json(msg='Could not create empty authentication flow %s in realm %s: %s' - % (config["alias"], realm, str(e))) + self.fail_request(e, msg=f"Could not create empty authentication flow {config['alias']} in realm {realm}: {e}") def update_authentication_executions(self, flowAlias, updatedExec, realm='master'): """ Update authentication executions @@ -1347,18 +2148,17 @@ class KeycloakAPI(object): :return: HTTPResponse object on success """ try: - open_url( + self._request( URL_AUTHENTICATION_FLOW_EXECUTIONS.format( url=self.baseurl, realm=realm, - flowalias=quote(flowAlias)), + flowalias=quote(flowAlias, safe='')), method='PUT', - 
headers=self.restheaders, - data=json.dumps(updatedExec), - timeout=self.connection_timeout, - validate_certs=self.validate_certs) + data=json.dumps(updatedExec)) + except HTTPError as e: + self.fail_request(e, msg=f"Unable to update execution '{flowAlias}': {e!r}: {e.url};{e.msg};{e.code};{e.hdrs} {updatedExec}") except Exception as e: - self.module.fail_json(msg="Unable to update executions %s: %s" % (updatedExec, str(e))) + self.module.fail_json(msg=f"Unable to update executions {updatedExec}: {e}") def add_authenticationConfig_to_execution(self, executionId, authenticationConfig, realm='master'): """ Add autenticatorConfig to the execution @@ -1368,20 +2168,34 @@ class KeycloakAPI(object): :return: HTTPResponse object on success """ try: - open_url( + self._request( URL_AUTHENTICATION_EXECUTION_CONFIG.format( url=self.baseurl, realm=realm, id=executionId), method='POST', - headers=self.restheaders, - data=json.dumps(authenticationConfig), - timeout=self.connection_timeout, - validate_certs=self.validate_certs) + data=json.dumps(authenticationConfig)) except Exception as e: - self.module.fail_json(msg="Unable to add authenticationConfig %s: %s" % (executionId, str(e))) + self.fail_request(e, msg=f"Unable to add authenticationConfig {executionId}: {e}") - def create_subflow(self, subflowName, flowAlias, realm='master'): + def delete_authentication_config(self, configId, realm='master'): + """ Delete authenticator config + + :param configId: id of authentication config + :param realm: realm of authentication config to be deleted + """ + try: + # Send a DELETE request to remove the specified authentication config from the Keycloak server. 
+ self._request( + URL_AUTHENTICATION_CONFIG.format( + url=self.baseurl, + realm=realm, + id=configId), + method='DELETE') + except Exception as e: + self.fail_request(e, msg=f"Unable to delete authentication config {configId}: {e}") + + def create_subflow(self, subflowName, flowAlias, realm='master', flowType='basic-flow'): """ Create new sublow on the flow :param subflowName: name of the subflow to create @@ -1392,19 +2206,16 @@ class KeycloakAPI(object): newSubFlow = {} newSubFlow["alias"] = subflowName newSubFlow["provider"] = "registration-page-form" - newSubFlow["type"] = "basic-flow" - open_url( + newSubFlow["type"] = flowType + self._request( URL_AUTHENTICATION_FLOW_EXECUTIONS_FLOW.format( url=self.baseurl, realm=realm, - flowalias=quote(flowAlias)), + flowalias=quote(flowAlias, safe='')), method='POST', - headers=self.restheaders, - data=json.dumps(newSubFlow), - timeout=self.connection_timeout, - validate_certs=self.validate_certs) + data=json.dumps(newSubFlow)) except Exception as e: - self.module.fail_json(msg="Unable to create new subflow %s: %s" % (subflowName, str(e))) + self.fail_request(e, msg=f"Unable to create new subflow {subflowName}: {e}") def create_execution(self, execution, flowAlias, realm='master'): """ Create new execution on the flow @@ -1417,18 +2228,19 @@ class KeycloakAPI(object): newExec = {} newExec["provider"] = execution["providerId"] newExec["requirement"] = execution["requirement"] - open_url( + self._request( URL_AUTHENTICATION_FLOW_EXECUTIONS_EXECUTION.format( url=self.baseurl, realm=realm, - flowalias=quote(flowAlias)), + flowalias=quote(flowAlias, safe='')), method='POST', - headers=self.restheaders, - data=json.dumps(newExec), - timeout=self.connection_timeout, - validate_certs=self.validate_certs) + data=json.dumps(newExec)) + except HTTPError as e: + self.fail_request( + e, msg=f"Unable to create new execution '{flowAlias}' {execution['providerId']}: {e!r}: {e.url};{e.msg};{e.code};{e.hdrs} {newExec}" + ) except 
Exception as e: - self.module.fail_json(msg="Unable to create new execution %s: %s" % (execution["provider"], str(e))) + self.module.fail_json(msg=f"Unable to create new execution '{flowAlias}' {execution['providerId']}: {e}") def change_execution_priority(self, executionId, diff, realm='master'): """ Raise or lower execution priority of diff time @@ -1441,28 +2253,22 @@ class KeycloakAPI(object): try: if diff > 0: for i in range(diff): - open_url( + self._request( URL_AUTHENTICATION_EXECUTION_RAISE_PRIORITY.format( url=self.baseurl, realm=realm, id=executionId), - method='POST', - headers=self.restheaders, - timeout=self.connection_timeout, - validate_certs=self.validate_certs) + method='POST') elif diff < 0: for i in range(-diff): - open_url( + self._request( URL_AUTHENTICATION_EXECUTION_LOWER_PRIORITY.format( url=self.baseurl, realm=realm, id=executionId), - method='POST', - headers=self.restheaders, - timeout=self.connection_timeout, - validate_certs=self.validate_certs) + method='POST') except Exception as e: - self.module.fail_json(msg="Unable to change execution priority %s: %s" % (executionId, str(e))) + self.fail_request(e, msg=f"Unable to change execution priority {executionId}: {e}") def get_executions_representation(self, config, realm='master'): """ @@ -1474,33 +2280,124 @@ class KeycloakAPI(object): try: # Get executions created executions = json.load( - open_url( + self._request( URL_AUTHENTICATION_FLOW_EXECUTIONS.format( url=self.baseurl, realm=realm, - flowalias=quote(config["alias"])), - method='GET', - headers=self.restheaders, - timeout=self.connection_timeout, - validate_certs=self.validate_certs)) + flowalias=quote(config["alias"], safe='')), + method='GET')) for execution in executions: if "authenticationConfig" in execution: execConfigId = execution["authenticationConfig"] execConfig = json.load( - open_url( + self._request( URL_AUTHENTICATION_CONFIG.format( url=self.baseurl, realm=realm, id=execConfigId), - method='GET', - 
headers=self.restheaders, - timeout=self.connection_timeout, - validate_certs=self.validate_certs)) + method='GET')) execution["authenticationConfig"] = execConfig return executions except Exception as e: - self.module.fail_json(msg='Could not get executions for authentication flow %s in realm %s: %s' - % (config["alias"], realm, str(e))) + self.fail_request(e, msg=f"Could not get executions for authentication flow {config['alias']} in realm {realm}: {e}") + + def get_required_actions(self, realm='master'): + """ + Get required actions. + :param realm: Realm name (not id). + :return: List of representations of the required actions. + """ + + try: + required_actions = json.load( + self._request( + URL_AUTHENTICATION_REQUIRED_ACTIONS.format( + url=self.baseurl, + realm=realm + ), + method='GET' + ) + ) + + return required_actions + except Exception: + return None + + def register_required_action(self, rep, realm='master'): + """ + Register required action. + :param rep: JSON containing 'providerId', and 'name' attributes. + :param realm: Realm name (not id). + :return: Representation of the required action. + """ + + data = { + 'name': rep['name'], + 'providerId': rep['providerId'] + } + + try: + return self._request( + URL_AUTHENTICATION_REGISTER_REQUIRED_ACTION.format( + url=self.baseurl, + realm=realm + ), + method='POST', + data=json.dumps(data), + ) + except Exception as e: + self.fail_request( + e, + msg=f"Unable to register required action {rep['name']} in realm {realm}: {e}" + ) + + def update_required_action(self, alias, rep, realm='master'): + """ + Update required action. + :param alias: Alias of required action. + :param rep: JSON describing new state of required action. + :param realm: Realm name (not id). + :return: HTTPResponse object on success. 
+ """ + + try: + return self._request( + URL_AUTHENTICATION_REQUIRED_ACTIONS_ALIAS.format( + url=self.baseurl, + alias=quote(alias, safe=''), + realm=realm + ), + method='PUT', + data=json.dumps(rep), + ) + except Exception as e: + self.fail_request( + e, + msg=f'Unable to update required action {alias} in realm {realm}: {e}' + ) + + def delete_required_action(self, alias, realm='master'): + """ + Delete required action. + :param alias: Alias of required action. + :param realm: Realm name (not id). + :return: HTTPResponse object on success. + """ + + try: + return self._request( + URL_AUTHENTICATION_REQUIRED_ACTIONS_ALIAS.format( + url=self.baseurl, + alias=quote(alias, safe=''), + realm=realm + ), + method='DELETE', + ) + except Exception as e: + self.fail_request( + e, + msg=f'Unable to delete required action {alias} in realm {realm}: {e}' + ) def get_identity_providers(self, realm='master'): """ Fetch representations for identity providers in a realm @@ -1509,14 +2406,11 @@ class KeycloakAPI(object): """ idps_url = URL_IDENTITY_PROVIDERS.format(url=self.baseurl, realm=realm) try: - return json.loads(to_native(open_url(idps_url, method='GET', headers=self.restheaders, timeout=self.connection_timeout, - validate_certs=self.validate_certs).read())) + return self._request_and_deserialize(idps_url, method='GET') except ValueError as e: - self.module.fail_json(msg='API returned incorrect JSON when trying to obtain list of identity providers for realm %s: %s' - % (realm, str(e))) + self.module.fail_json(msg=f'API returned incorrect JSON when trying to obtain list of identity providers for realm {realm}: {e}') except Exception as e: - self.module.fail_json(msg='Could not obtain list of identity providers for realm %s: %s' - % (realm, str(e))) + self.fail_request(e, msg=f'Could not obtain list of identity providers for realm {realm}: {e}') def get_identity_provider(self, alias, realm='master'): """ Fetch identity provider representation from a realm using the idp's 
alias. @@ -1526,17 +2420,14 @@ class KeycloakAPI(object): """ idp_url = URL_IDENTITY_PROVIDER.format(url=self.baseurl, realm=realm, alias=alias) try: - return json.loads(to_native(open_url(idp_url, method="GET", headers=self.restheaders, timeout=self.connection_timeout, - validate_certs=self.validate_certs).read())) + return self._request_and_deserialize(idp_url, method="GET") except HTTPError as e: if e.code == 404: return None else: - self.module.fail_json(msg='Could not fetch identity provider %s in realm %s: %s' - % (alias, realm, str(e))) + self.fail_request(e, msg=f'Could not fetch identity provider {alias} in realm {realm}: {e}') except Exception as e: - self.module.fail_json(msg='Could not fetch identity provider %s in realm %s: %s' - % (alias, realm, str(e))) + self.module.fail_json(msg=f'Could not fetch identity provider {alias} in realm {realm}: {e}') def create_identity_provider(self, idprep, realm='master'): """ Create an identity provider. @@ -1546,11 +2437,9 @@ class KeycloakAPI(object): """ idps_url = URL_IDENTITY_PROVIDERS.format(url=self.baseurl, realm=realm) try: - return open_url(idps_url, method='POST', headers=self.restheaders, timeout=self.connection_timeout, - data=json.dumps(idprep), validate_certs=self.validate_certs) + return self._request(idps_url, method='POST', data=json.dumps(idprep)) except Exception as e: - self.module.fail_json(msg='Could not create identity provider %s in realm %s: %s' - % (idprep['alias'], realm, str(e))) + self.fail_request(e, msg=f"Could not create identity provider {idprep['alias']} in realm {realm}: {e}") def update_identity_provider(self, idprep, realm='master'): """ Update an existing identity provider. 
@@ -1560,11 +2449,9 @@ class KeycloakAPI(object): """ idp_url = URL_IDENTITY_PROVIDER.format(url=self.baseurl, realm=realm, alias=idprep['alias']) try: - return open_url(idp_url, method='PUT', headers=self.restheaders, timeout=self.connection_timeout, - data=json.dumps(idprep), validate_certs=self.validate_certs) + return self._request(idp_url, method='PUT', data=json.dumps(idprep)) except Exception as e: - self.module.fail_json(msg='Could not update identity provider %s in realm %s: %s' - % (idprep['alias'], realm, str(e))) + self.fail_request(e, msg=f"Could not update identity provider {idprep['alias']} in realm {realm}: {e}") def delete_identity_provider(self, alias, realm='master'): """ Delete an identity provider. @@ -1573,11 +2460,9 @@ class KeycloakAPI(object): """ idp_url = URL_IDENTITY_PROVIDER.format(url=self.baseurl, realm=realm, alias=alias) try: - return open_url(idp_url, method='DELETE', headers=self.restheaders, timeout=self.connection_timeout, - validate_certs=self.validate_certs) + return self._request(idp_url, method='DELETE') except Exception as e: - self.module.fail_json(msg='Unable to delete identity provider %s in realm %s: %s' - % (alias, realm, str(e))) + self.fail_request(e, msg=f'Unable to delete identity provider {alias} in realm {realm}: {e}') def get_identity_provider_mappers(self, alias, realm='master'): """ Fetch representations for identity provider mappers @@ -1587,14 +2472,30 @@ class KeycloakAPI(object): """ mappers_url = URL_IDENTITY_PROVIDER_MAPPERS.format(url=self.baseurl, realm=realm, alias=alias) try: - return json.loads(to_native(open_url(mappers_url, method='GET', headers=self.restheaders, timeout=self.connection_timeout, - validate_certs=self.validate_certs).read())) + return self._request_and_deserialize(mappers_url, method='GET') except ValueError as e: - self.module.fail_json(msg='API returned incorrect JSON when trying to obtain list of identity provider mappers for idp %s in realm %s: %s' - % (alias, realm, str(e))) + 
self.module.fail_json( + msg=f'API returned incorrect JSON when trying to obtain list of identity provider mappers for idp {alias} in realm {realm}: {e}' + ) except Exception as e: - self.module.fail_json(msg='Could not obtain list of identity provider mappers for idp %s in realm %s: %s' - % (alias, realm, str(e))) + self.fail_request(e, msg=f'Could not obtain list of identity provider mappers for idp {alias} in realm {realm}: {e}') + + def fetch_idp_endpoints_import_config_url(self, fromUrl, providerId='oidc', realm='master'): + """ Import an identity provider configuration through Keycloak server from a well-known URL. + :param fromUrl: URL to import the identity provider configuration from. + "param providerId: Provider ID of the identity provider to import, default 'oidc'. + :param realm: Realm + :return: IDP endpoins. + """ + try: + payload = { + "providerId": providerId, + "fromUrl": fromUrl + } + idps_url = URL_IDENTITY_PROVIDER_IMPORT.format(url=self.baseurl, realm=realm) + return self._request_and_deserialize(idps_url, method='POST', data=json.dumps(payload)) + except Exception as e: + self.fail_request(e, msg=f'Could not import the IdP config in realm {realm}: {e}') def get_identity_provider_mapper(self, mid, alias, realm='master'): """ Fetch identity provider representation from a realm using the idp's alias. 
@@ -1605,17 +2506,14 @@ class KeycloakAPI(object): """ mapper_url = URL_IDENTITY_PROVIDER_MAPPER.format(url=self.baseurl, realm=realm, alias=alias, id=mid) try: - return json.loads(to_native(open_url(mapper_url, method="GET", headers=self.restheaders, timeout=self.connection_timeout, - validate_certs=self.validate_certs).read())) + return self._request_and_deserialize(mapper_url, method="GET") except HTTPError as e: if e.code == 404: return None else: - self.module.fail_json(msg='Could not fetch mapper %s for identity provider %s in realm %s: %s' - % (mid, alias, realm, str(e))) + self.fail_request(e, msg=f'Could not fetch mapper {mid} for identity provider {alias} in realm {realm}: {e}') except Exception as e: - self.module.fail_json(msg='Could not fetch mapper %s for identity provider %s in realm %s: %s' - % (mid, alias, realm, str(e))) + self.module.fail_json(msg=f'Could not fetch mapper {mid} for identity provider {alias} in realm {realm}: {e}') def create_identity_provider_mapper(self, mapper, alias, realm='master'): """ Create an identity provider mapper. @@ -1626,11 +2524,9 @@ class KeycloakAPI(object): """ mappers_url = URL_IDENTITY_PROVIDER_MAPPERS.format(url=self.baseurl, realm=realm, alias=alias) try: - return open_url(mappers_url, method='POST', headers=self.restheaders, timeout=self.connection_timeout, - data=json.dumps(mapper), validate_certs=self.validate_certs) + return self._request(mappers_url, method='POST', data=json.dumps(mapper)) except Exception as e: - self.module.fail_json(msg='Could not create identity provider mapper %s for idp %s in realm %s: %s' - % (mapper['name'], alias, realm, str(e))) + self.fail_request(e, msg=f"Could not create identity provider mapper {mapper['name']} for idp {alias} in realm {realm}: {e}") def update_identity_provider_mapper(self, mapper, alias, realm='master'): """ Update an existing identity provider. 
@@ -1641,11 +2537,9 @@ class KeycloakAPI(object): """ mapper_url = URL_IDENTITY_PROVIDER_MAPPER.format(url=self.baseurl, realm=realm, alias=alias, id=mapper['id']) try: - return open_url(mapper_url, method='PUT', headers=self.restheaders, timeout=self.connection_timeout, - data=json.dumps(mapper), validate_certs=self.validate_certs) + return self._request(mapper_url, method='PUT', data=json.dumps(mapper)) except Exception as e: - self.module.fail_json(msg='Could not update mapper %s for identity provider %s in realm %s: %s' - % (mapper['id'], alias, realm, str(e))) + self.fail_request(e, msg=f"Could not update mapper {mapper['id']} for identity provider {alias} in realm {realm}: {e}") def delete_identity_provider_mapper(self, mid, alias, realm='master'): """ Delete an identity provider. @@ -1655,11 +2549,9 @@ class KeycloakAPI(object): """ mapper_url = URL_IDENTITY_PROVIDER_MAPPER.format(url=self.baseurl, realm=realm, alias=alias, id=mid) try: - return open_url(mapper_url, method='DELETE', headers=self.restheaders, timeout=self.connection_timeout, - validate_certs=self.validate_certs) + return self._request(mapper_url, method='DELETE') except Exception as e: - self.module.fail_json(msg='Unable to delete mapper %s for identity provider %s in realm %s: %s' - % (mid, alias, realm, str(e))) + self.fail_request(e, msg=f'Unable to delete mapper {mid} for identity provider {alias} in realm {realm}: {e}') def get_components(self, filter=None, realm='master'): """ Fetch representations for components in a realm @@ -1669,17 +2561,14 @@ class KeycloakAPI(object): """ comps_url = URL_COMPONENTS.format(url=self.baseurl, realm=realm) if filter is not None: - comps_url += '?%s' % filter + comps_url += f'?{filter}' try: - return json.loads(to_native(open_url(comps_url, method='GET', headers=self.restheaders, timeout=self.connection_timeout, - validate_certs=self.validate_certs).read())) + return self._request_and_deserialize(comps_url, method='GET') except ValueError as e: - 
self.module.fail_json(msg='API returned incorrect JSON when trying to obtain list of components for realm %s: %s' - % (realm, str(e))) + self.module.fail_json(msg=f'API returned incorrect JSON when trying to obtain list of components for realm {realm}: {e}') except Exception as e: - self.module.fail_json(msg='Could not obtain list of components for realm %s: %s' - % (realm, str(e))) + self.fail_request(e, msg=f'Could not obtain list of components for realm {realm}: {e}') def get_component(self, cid, realm='master'): """ Fetch component representation from a realm using its cid. @@ -1689,17 +2578,14 @@ class KeycloakAPI(object): """ comp_url = URL_COMPONENT.format(url=self.baseurl, realm=realm, id=cid) try: - return json.loads(to_native(open_url(comp_url, method="GET", headers=self.restheaders, timeout=self.connection_timeout, - validate_certs=self.validate_certs).read())) + return self._request_and_deserialize(comp_url, method="GET") except HTTPError as e: if e.code == 404: return None else: - self.module.fail_json(msg='Could not fetch component %s in realm %s: %s' - % (cid, realm, str(e))) + self.fail_request(e, msg=f'Could not fetch component {cid} in realm {realm}: {e}') except Exception as e: - self.module.fail_json(msg='Could not fetch component %s in realm %s: %s' - % (cid, realm, str(e))) + self.module.fail_json(msg=f'Could not fetch component {cid} in realm {realm}: {e}') def create_component(self, comprep, realm='master'): """ Create an component. 
@@ -1709,17 +2595,13 @@ class KeycloakAPI(object): """ comps_url = URL_COMPONENTS.format(url=self.baseurl, realm=realm) try: - resp = open_url(comps_url, method='POST', headers=self.restheaders, timeout=self.connection_timeout, - data=json.dumps(comprep), validate_certs=self.validate_certs) + resp = self._request(comps_url, method='POST', data=json.dumps(comprep)) comp_url = resp.getheader('Location') if comp_url is None: - self.module.fail_json(msg='Could not create component in realm %s: %s' - % (realm, 'unexpected response')) - return json.loads(to_native(open_url(comp_url, method="GET", headers=self.restheaders, timeout=self.connection_timeout, - validate_certs=self.validate_certs).read())) + self.module.fail_json(msg=f'Could not create component in realm {realm}: unexpected response') + return self._request_and_deserialize(comp_url, method="GET") except Exception as e: - self.module.fail_json(msg='Could not create component in realm %s: %s' - % (realm, str(e))) + self.fail_request(e, msg=f'Could not create component in realm {realm}: {e}') def update_component(self, comprep, realm='master'): """ Update an existing component. @@ -1732,11 +2614,9 @@ class KeycloakAPI(object): self.module.fail_json(msg='Cannot update component without id') comp_url = URL_COMPONENT.format(url=self.baseurl, realm=realm, id=cid) try: - return open_url(comp_url, method='PUT', headers=self.restheaders, timeout=self.connection_timeout, - data=json.dumps(comprep), validate_certs=self.validate_certs) + return self._request(comp_url, method='PUT', data=json.dumps(comprep)) except Exception as e: - self.module.fail_json(msg='Could not update component %s in realm %s: %s' - % (cid, realm, str(e))) + self.fail_request(e, msg=f'Could not update component {cid} in realm {realm}: {e}') def delete_component(self, cid, realm='master'): """ Delete an component. 
@@ -1745,8 +2625,496 @@ class KeycloakAPI(object): """ comp_url = URL_COMPONENT.format(url=self.baseurl, realm=realm, id=cid) try: - return open_url(comp_url, method='DELETE', headers=self.restheaders, timeout=self.connection_timeout, - validate_certs=self.validate_certs) + return self._request(comp_url, method='DELETE') except Exception as e: - self.module.fail_json(msg='Unable to delete component %s in realm %s: %s' - % (cid, realm, str(e))) + self.fail_request(e, msg=f'Unable to delete component {cid} in realm {realm}: {e}') + + def get_authz_authorization_scope_by_name(self, name, client_id, realm): + url = URL_AUTHZ_AUTHORIZATION_SCOPES.format(url=self.baseurl, client_id=client_id, realm=realm) + search_url = f"{url}/search?name={quote(name, safe='')}" + + try: + return self._request_and_deserialize(search_url, method='GET') + except Exception: + return False + + def create_authz_authorization_scope(self, payload, client_id, realm): + """Create an authorization scope for a Keycloak client""" + url = URL_AUTHZ_AUTHORIZATION_SCOPES.format(url=self.baseurl, client_id=client_id, realm=realm) + + try: + return self._request(url, method='POST', data=json.dumps(payload)) + except Exception as e: + self.fail_request(e, msg=f"Could not create authorization scope {payload['name']} for client {client_id} in realm {realm}: {e}") + + def update_authz_authorization_scope(self, payload, id, client_id, realm): + """Update an authorization scope for a Keycloak client""" + url = URL_AUTHZ_AUTHORIZATION_SCOPE.format(url=self.baseurl, id=id, client_id=client_id, realm=realm) + + try: + return self._request(url, method='PUT', data=json.dumps(payload)) + except Exception as e: + self.fail_request(e, msg=f"Could not create update scope {payload['name']} for client {client_id} in realm {realm}: {e}") + + def remove_authz_authorization_scope(self, id, client_id, realm): + """Remove an authorization scope from a Keycloak client""" + url = 
URL_AUTHZ_AUTHORIZATION_SCOPE.format(url=self.baseurl, id=id, client_id=client_id, realm=realm) + + try: + return self._request(url, method='DELETE') + except Exception as e: + self.fail_request(e, msg=f'Could not delete scope {id} for client {client_id} in realm {realm}: {e}') + + def get_user_by_id(self, user_id, realm='master'): + """ + Get a User by its ID. + :param user_id: ID of the user. + :param realm: Realm + :return: Representation of the user. + """ + try: + user_url = URL_USER.format( + url=self.baseurl, + realm=realm, + id=user_id) + userrep = json.load( + self._request( + user_url, + method='GET')) + return userrep + except Exception as e: + self.fail_request(e, msg=f'Could not get user {user_id} in realm {realm}: {e}') + + def create_user(self, userrep, realm='master'): + """ + Create a new User. + :param userrep: Representation of the user to create + :param realm: Realm + :return: Representation of the user created. + """ + try: + if 'attributes' in userrep and isinstance(userrep['attributes'], list): + attributes = copy.deepcopy(userrep['attributes']) + userrep['attributes'] = self.convert_user_attributes_to_keycloak_dict(attributes=attributes) + users_url = URL_USERS.format( + url=self.baseurl, + realm=realm) + self._request(users_url, + method='POST', + data=json.dumps(userrep)) + created_user = self.get_user_by_username( + username=userrep['username'], + realm=realm) + return created_user + except Exception as e: + self.fail_request(e, msg=f"Could not create user {userrep['username']} in realm {realm}: {e}") + + def convert_user_attributes_to_keycloak_dict(self, attributes): + keycloak_user_attributes_dict = {} + for attribute in attributes: + if ('state' not in attribute or attribute['state'] == 'present') and 'name' in attribute: + keycloak_user_attributes_dict[attribute['name']] = attribute['values'] if 'values' in attribute else [] + return keycloak_user_attributes_dict + + def convert_keycloak_user_attributes_dict_to_module_list(self, 
attributes): + module_attributes_list = [] + for key in attributes: + attr = {} + attr['name'] = key + attr['values'] = attributes[key] + module_attributes_list.append(attr) + return module_attributes_list + + def update_user(self, userrep, realm='master'): + """ + Update a User. + :param userrep: Representation of the user to update. This representation must include the ID of the user. + :param realm: Realm + :return: Representation of the updated user. + """ + try: + if 'attributes' in userrep and isinstance(userrep['attributes'], list): + attributes = copy.deepcopy(userrep['attributes']) + userrep['attributes'] = self.convert_user_attributes_to_keycloak_dict(attributes=attributes) + user_url = URL_USER.format( + url=self.baseurl, + realm=realm, + id=userrep["id"]) + self._request( + user_url, + method='PUT', + data=json.dumps(userrep)) + updated_user = self.get_user_by_id( + user_id=userrep['id'], + realm=realm) + return updated_user + except Exception as e: + self.fail_request(e, msg=f"Could not update user {userrep['username']} in realm {realm}: {e}") + + def delete_user(self, user_id, realm='master'): + """ + Delete a User. + :param user_id: ID of the user to be deleted + :param realm: Realm + :return: HTTP response. + """ + try: + user_url = URL_USER.format( + url=self.baseurl, + realm=realm, + id=user_id) + return self._request( + user_url, + method='DELETE') + except Exception as e: + self.fail_request(e, msg=f'Could not delete user {user_id} in realm {realm}: {e}') + + def get_user_groups(self, user_id, realm='master'): + """ + Get the group names for a user. + :param user_id: User ID + :param realm: Realm + :return: The client group names as a list of strings. + """ + user_groups = self.get_user_group_details(user_id, realm) + return [user_group['name'] for user_group in user_groups if 'name' in user_group] + + def get_user_group_details(self, user_id, realm='master'): + """ + Get the group details for a user. 
+ :param user_id: User ID + :param realm: Realm + :return: The client group details as a list of dictionaries. + """ + try: + user_groups_url = URL_USER_GROUPS.format(url=self.baseurl, realm=realm, id=user_id) + return self._request_and_deserialize(user_groups_url, method='GET') + except Exception as e: + self.fail_request(e, msg=f'Could not get groups for user {user_id} in realm {realm}: {e}') + + def add_user_in_group(self, user_id, group_id, realm='master'): + """DEPRECATED: Call add_user_to_group(...) instead. This method is scheduled for removal in community.general 13.0.0.""" + return self.add_user_to_group(user_id, group_id, realm) + + def add_user_to_group(self, user_id, group_id, realm='master'): + """ + Add a user to a group. + :param user_id: User ID + :param group_id: Group Id to add the user to. + :param realm: Realm + :return: HTTP Response + """ + try: + user_group_url = URL_USER_GROUP.format( + url=self.baseurl, + realm=realm, + id=user_id, + group_id=group_id) + return self._request( + user_group_url, + method='PUT') + except Exception as e: + self.fail_request(e, msg=f'Could not add user {user_id} to group {group_id} in realm {realm}: {e}') + + def remove_user_from_group(self, user_id, group_id, realm='master'): + """ + Remove a user from a group for a user. + :param user_id: User ID + :param group_id: Group Id to add the user to. + :param realm: Realm + :return: HTTP response + """ + try: + user_group_url = URL_USER_GROUP.format( + url=self.baseurl, + realm=realm, + id=user_id, + group_id=group_id) + return self._request( + user_group_url, + method='DELETE') + except Exception as e: + self.fail_request(e, msg=f'Could not remove user {user_id} from group {group_id} in realm {realm}: {e}') + + def update_user_groups_membership(self, userrep, groups, realm='master'): + """ + Update user's group membership + :param userrep: Representation of the user. This representation must include the ID. 
+ :param realm: Realm + :return: True if group membership has been changed. False Otherwise. + """ + try: + groups_to_add, groups_to_remove = self.extract_groups_to_add_to_and_remove_from_user(groups) + if not groups_to_add and not groups_to_remove: + return False + + user_groups = self.get_user_group_details(user_id=userrep['id'], realm=realm) + user_group_names = [user_group['name'] for user_group in user_groups if 'name' in user_group] + user_group_paths = [user_group['path'] for user_group in user_groups if 'path' in user_group] + + groups_to_add = [group_to_add for group_to_add in groups_to_add + if group_to_add not in user_group_names and group_to_add not in user_group_paths] + groups_to_remove = [group_to_remove for group_to_remove in groups_to_remove + if group_to_remove in user_group_names or group_to_remove in user_group_paths] + if not groups_to_add and not groups_to_remove: + return False + + for group_to_add in groups_to_add: + realm_group = self.find_group_by_path(group_to_add, realm=realm) + if realm_group: + self.add_user_to_group(user_id=userrep['id'], group_id=realm_group['id'], realm=realm) + + for group_to_remove in groups_to_remove: + realm_group = self.find_group_by_path(group_to_remove, realm=realm) + if realm_group: + self.remove_user_from_group(user_id=userrep['id'], group_id=realm_group['id'], realm=realm) + + return True + except Exception as e: + self.module.fail_json(msg=f"Could not update group membership for user {userrep['username']} in realm {realm}: {e}") + + def extract_groups_to_add_to_and_remove_from_user(self, groups): + groups_to_add = [] + groups_to_remove = [] + if isinstance(groups, list): + for group in groups: + group_name = group['name'] if isinstance(group, dict) and 'name' in group else group + if isinstance(group, dict): + if 'state' not in group or group['state'] == 'present': + groups_to_add.append(group_name) + else: + groups_to_remove.append(group_name) + return groups_to_add, groups_to_remove + + def 
find_group_by_path(self, target, realm='master'): + """ + Finds a realm group by path, e.g. '/my/group'. + The path is formed by prepending a '/' character to `target` unless it's already present. + This adds support for finding top level groups by name and subgroups by path. + """ + groups = self.get_groups(realm=realm) + path = target if target.startswith('/') else f"/{target}" + for segment in path.split('/'): + if not segment: + continue + abort = True + for group in groups: + if group['path'] == path: + return self.get_group_by_groupid(group['id'], realm=realm) + if group['name'] == segment: + groups = self.get_subgroups(group, realm=realm) + abort = False + break + if abort: + break + return None + + def convert_user_group_list_of_str_to_list_of_dict(self, groups): + list_of_groups = [] + if isinstance(groups, list) and len(groups) > 0: + for group in groups: + if isinstance(group, str): + group_dict = {} + group_dict['name'] = group + list_of_groups.append(group_dict) + return list_of_groups + + def create_authz_custom_policy(self, policy_type, payload, client_id, realm): + """Create a custom policy for a Keycloak client""" + url = URL_AUTHZ_CUSTOM_POLICY.format(url=self.baseurl, policy_type=policy_type, client_id=client_id, realm=realm) + + try: + return self._request(url, method='POST', data=json.dumps(payload)) + except Exception as e: + self.fail_request(e, msg=f"Could not create permission {payload['name']} for client {client_id} in realm {realm}: {e}") + + def remove_authz_custom_policy(self, policy_id, client_id, realm): + """Remove a custom policy from a Keycloak client""" + url = URL_AUTHZ_CUSTOM_POLICIES.format(url=self.baseurl, client_id=client_id, realm=realm) + delete_url = f"{url}/{policy_id}" + + try: + return self._request(delete_url, method='DELETE') + except Exception as e: + self.fail_request(e, msg=f'Could not delete custom policy {id} for client {client_id} in realm {realm}: {e}') + + def get_authz_permission_by_name(self, name, 
client_id, realm): + """Get authorization permission by name""" + url = URL_AUTHZ_POLICIES.format(url=self.baseurl, client_id=client_id, realm=realm) + search_url = "%s/search?name=%s" % (url, name.replace(' ', '%20')) + + try: + return self._request_and_deserialize(search_url, method='GET') + except Exception: + return False + + def create_authz_permission(self, payload, permission_type, client_id, realm): + """Create an authorization permission for a Keycloak client""" + url = URL_AUTHZ_PERMISSIONS.format(url=self.baseurl, permission_type=permission_type, client_id=client_id, realm=realm) + + try: + return self._request(url, method='POST', data=json.dumps(payload)) + except Exception as e: + self.fail_request(e, msg=f"Could not create permission {payload['name']} for client {client_id} in realm {realm}: {e}") + + def remove_authz_permission(self, id, client_id, realm): + """Create an authorization permission for a Keycloak client""" + url = URL_AUTHZ_POLICY.format(url=self.baseurl, id=id, client_id=client_id, realm=realm) + + try: + return self._request(url, method='DELETE') + except Exception as e: + self.fail_request(e, msg=f'Could not delete permission {id} for client {client_id} in realm {realm}: {e}') + + def update_authz_permission(self, payload, permission_type, id, client_id, realm): + """Update a permission for a Keycloak client""" + url = URL_AUTHZ_PERMISSION.format(url=self.baseurl, permission_type=permission_type, id=id, client_id=client_id, realm=realm) + + try: + return self._request(url, method='PUT', data=json.dumps(payload)) + except Exception as e: + self.fail_request(e, msg=f"Could not create update permission {payload['name']} for client {client_id} in realm {realm}: {e}") + + def get_authz_resource_by_name(self, name, client_id, realm): + """Get authorization resource by name""" + url = URL_AUTHZ_RESOURCES.format(url=self.baseurl, client_id=client_id, realm=realm) + search_url = "%s/search?name=%s" % (url, name.replace(' ', '%20')) + + try: + 
return self._request_and_deserialize(search_url, method='GET') + except Exception: + return False + + def get_authz_policy_by_name(self, name, client_id, realm): + """Get authorization policy by name""" + url = URL_AUTHZ_POLICIES.format(url=self.baseurl, client_id=client_id, realm=realm) + search_url = "%s/search?name=%s&permission=false" % (url, name.replace(' ', '%20')) + + try: + return self._request_and_deserialize(search_url, method='GET') + except Exception: + return False + + def get_client_role_scope_from_client(self, clientid, clientscopeid, realm="master"): + """ Fetch the roles associated with the client's scope for a specific client on the Keycloak server. + :param clientid: ID of the client from which to obtain the associated roles. + :param clientscopeid: ID of the client who owns the roles. + :param realm: Realm from which to obtain the scope. + :return: The client scope of roles from specified client. + """ + client_role_scope_url = URL_CLIENT_ROLE_SCOPE_CLIENTS.format(url=self.baseurl, realm=realm, id=clientid, scopeid=clientscopeid) + try: + return self._request_and_deserialize(client_role_scope_url, method='GET') + except Exception as e: + self.fail_request(e, msg=f'Could not fetch roles scope for client {clientid} in realm {realm}: {e}') + + def update_client_role_scope_from_client(self, payload, clientid, clientscopeid, realm="master"): + """ Update and fetch the roles associated with the client's scope on the Keycloak server. + :param payload: List of roles to be added to the scope. + :param clientid: ID of the client to update scope. + :param clientscopeid: ID of the client who owns the roles. + :param realm: Realm from which to obtain the clients. + :return: The client scope of roles from specified client. 
+ """ + client_role_scope_url = URL_CLIENT_ROLE_SCOPE_CLIENTS.format(url=self.baseurl, realm=realm, id=clientid, scopeid=clientscopeid) + try: + self._request(client_role_scope_url, method='POST', data=json.dumps(payload)) + + except Exception as e: + self.fail_request(e, msg=f'Could not update roles scope for client {clientid} in realm {realm}: {e}') + + return self.get_client_role_scope_from_client(clientid, clientscopeid, realm) + + def delete_client_role_scope_from_client(self, payload, clientid, clientscopeid, realm="master"): + """ Delete the roles contains in the payload from the client's scope on the Keycloak server. + :param payload: List of roles to be deleted. + :param clientid: ID of the client to delete roles from scope. + :param clientscopeid: ID of the client who owns the roles. + :param realm: Realm from which to obtain the clients. + :return: The client scope of roles from specified client. + """ + client_role_scope_url = URL_CLIENT_ROLE_SCOPE_CLIENTS.format(url=self.baseurl, realm=realm, id=clientid, scopeid=clientscopeid) + try: + self._request(client_role_scope_url, method='DELETE', data=json.dumps(payload)) + + except Exception as e: + self.fail_request(e, msg=f'Could not delete roles scope for client {clientid} in realm {realm}: {e}') + + return self.get_client_role_scope_from_client(clientid, clientscopeid, realm) + + def get_client_role_scope_from_realm(self, clientid, realm="master"): + """ Fetch the realm roles from the client's scope on the Keycloak server. + :param clientid: ID of the client from which to obtain the associated realm roles. + :param realm: Realm from which to obtain the clients. + :return: The client realm roles scope. 
+ """ + client_role_scope_url = URL_CLIENT_ROLE_SCOPE_REALM.format(url=self.baseurl, realm=realm, id=clientid) + try: + return self._request_and_deserialize(client_role_scope_url, method='GET') + except Exception as e: + self.fail_request(e, msg=f'Could not fetch roles scope for client {clientid} in realm {realm}: {e}') + + def update_client_role_scope_from_realm(self, payload, clientid, realm="master"): + """ Update and fetch the realm roles from the client's scope on the Keycloak server. + :param payload: List of realm roles to add. + :param clientid: ID of the client to update scope. + :param realm: Realm from which to obtain the clients. + :return: The client realm roles scope. + """ + client_role_scope_url = URL_CLIENT_ROLE_SCOPE_REALM.format(url=self.baseurl, realm=realm, id=clientid) + try: + self._request(client_role_scope_url, method='POST', data=json.dumps(payload)) + + except Exception as e: + self.fail_request(e, msg=f'Could not update roles scope for client {clientid} in realm {realm}: {e}') + + return self.get_client_role_scope_from_realm(clientid, realm) + + def delete_client_role_scope_from_realm(self, payload, clientid, realm="master"): + """ Delete the realm roles contains in the payload from the client's scope on the Keycloak server. + :param payload: List of realm roles to delete. + :param clientid: ID of the client to delete roles from scope. + :param realm: Realm from which to obtain the clients. + :return: The client realm roles scope. + """ + client_role_scope_url = URL_CLIENT_ROLE_SCOPE_REALM.format(url=self.baseurl, realm=realm, id=clientid) + try: + self._request(client_role_scope_url, method='DELETE', data=json.dumps(payload)) + + except Exception as e: + self.fail_request(e, msg=f'Could not delete roles scope for client {clientid} in realm {realm}: {e}') + + return self.get_client_role_scope_from_realm(clientid, realm) + + def fail_request(self, e, msg, **kwargs): + """ Triggers a module failure. 
This should be called + when an exception occurs during/after a request. + Attempts to parse the exception e as an HTTP error + and append it to msg. + + :param e: exception which triggered the failure + :param msg: error message to display to the user + :param kwargs: additional arguments to pass to module.fail_json + :return: None + """ + try: + if isinstance(e, HTTPError): + msg = f"{msg}: {to_native(e.read())}" + except Exception: + pass + self.module.fail_json(msg, **kwargs) + + def fail_open_url(self, e, msg, **kwargs): + """ DEPRECATED: Use fail_request instead. + + Triggers a module failure. This should be called + when an exception occurs during/after a request. + Attempts to parse the exception e as an HTTP error + and append it to msg. + + :param e: exception which triggered the failure + :param msg: error message to display to the user + :param kwargs: additional arguments to pass to module.fail_json + :return: None + """ + return self.fail_request(e, msg, **kwargs) diff --git a/plugins/module_utils/identity/keycloak/keycloak_clientsecret.py b/plugins/module_utils/identity/keycloak/keycloak_clientsecret.py new file mode 100644 index 0000000000..2118e8f6e2 --- /dev/null +++ b/plugins/module_utils/identity/keycloak/keycloak_clientsecret.py @@ -0,0 +1,75 @@ +#!/usr/bin/env python +# Copyright (c) 2022, John Cant +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + + +from ansible.module_utils.basic import AnsibleModule + +from ansible_collections.community.general.plugins.module_utils.identity.keycloak.keycloak import \ + keycloak_argument_spec + + +def keycloak_clientsecret_module(): + """ + Returns an AnsibleModule definition for modules that interact with a client + secret. 
+ + :return: argument_spec dict + """ + argument_spec = keycloak_argument_spec() + + meta_args = dict( + realm=dict(default='master'), + id=dict(type='str'), + client_id=dict(type='str', aliases=['clientId']), + ) + + argument_spec.update(meta_args) + + module = AnsibleModule( + argument_spec=argument_spec, + supports_check_mode=True, + required_one_of=([['id', 'client_id'], + ['token', 'auth_realm', 'auth_username', 'auth_password', 'auth_client_id', 'auth_client_secret']]), + required_together=([['auth_username', 'auth_password']]), + mutually_exclusive=[ + ['token', 'auth_realm'], + ['token', 'auth_username'], + ['token', 'auth_password'] + ]) + + return module + + +def keycloak_clientsecret_module_resolve_params(module, kc): + """ + Given an AnsibleModule definition for keycloak_clientsecret_*, and a + KeycloakAPI client, resolve the params needed to interact with the Keycloak + client secret, looking up the client by clientId if necessary via an API + call. + + :return: tuple of id, realm + """ + + realm = module.params.get('realm') + id = module.params.get('id') + client_id = module.params.get('client_id') + + # only lookup the client_id if id isn't provided. + # in the case that both are provided, prefer the ID, since it is one + # less lookup. + if id is None: + # Due to the required_one_of spec, client_id is guaranteed to not be None + client = kc.get_client_by_clientid(client_id, realm=realm) + + if client is None: + module.fail_json( + msg=f'Client does not exist {client_id}' + ) + + id = client['id'] + + return id, realm diff --git a/plugins/module_utils/ilo_redfish_utils.py b/plugins/module_utils/ilo_redfish_utils.py index 04b08ae52f..fd5b7fe64d 100644 --- a/plugins/module_utils/ilo_redfish_utils.py +++ b/plugins/module_utils/ilo_redfish_utils.py @@ -1,12 +1,12 @@ -# -*- coding: utf-8 -*- # Copyright (c) 2021-2022 Hewlett Packard Enterprise, Inc. All rights reserved. 
-# GNU General Public License v3.0+ (see LICENSE or https://www.gnu.org/licenses/gpl-3.0.txt) +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations from ansible_collections.community.general.plugins.module_utils.redfish_utils import RedfishUtils +import time class iLORedfishUtils(RedfishUtils): @@ -20,20 +20,20 @@ class iLORedfishUtils(RedfishUtils): properties = ['Description', 'Id', 'Name', 'UserName'] # Changed self.sessions_uri to Hardcoded string. - response = self.get_request( - self.root_uri + self.service_root + "SessionService/Sessions/") + response = self.get_request(f"{self.root_uri}{self.service_root}SessionService/Sessions/") if not response['ret']: return response result['ret'] = True data = response['data'] + current_session = None if 'Oem' in data: if data["Oem"]["Hpe"]["Links"]["MySession"]["@odata.id"]: current_session = data["Oem"]["Hpe"]["Links"]["MySession"]["@odata.id"] - for sessions in data[u'Members']: + for sessions in data['Members']: # session_list[] are URIs - session_list.append(sessions[u'@odata.id']) + session_list.append(sessions['@odata.id']) # for each session, get details for uri in session_list: session = {} @@ -82,19 +82,18 @@ class iLORedfishUtils(RedfishUtils): if not res_dhv6['ret']: return res_dhv6 - datetime_uri = self.manager_uri + "DateTime" + datetime_uri = f"{self.manager_uri}DateTime" - response = self.get_request(self.root_uri + datetime_uri) - if not response['ret']: - return response + listofips = mgr_attributes['mgr_attr_value'].split(" ") + if len(listofips) > 2: + return {'ret': False, 'changed': False, 'msg': "More than 2 NTP Servers mentioned"} - data = response['data'] + ntp_list = [] + for ips in listofips: + ntp_list.append(ips) - ntp_list = data[setkey] - if(len(ntp_list) == 2): 
- ntp_list.pop(0) - - ntp_list.append(mgr_attributes['mgr_attr_value']) + while len(ntp_list) < 2: + ntp_list.append("0.0.0.0") payload = {setkey: ntp_list} @@ -102,12 +101,12 @@ class iLORedfishUtils(RedfishUtils): if not response1['ret']: return response1 - return {'ret': True, 'changed': True, 'msg': "Modified %s" % mgr_attributes['mgr_attr_name']} + return {'ret': True, 'changed': True, 'msg': f"Modified {mgr_attributes['mgr_attr_name']}"} def set_time_zone(self, attr): key = attr['mgr_attr_name'] - uri = self.manager_uri + "DateTime/" + uri = f"{self.manager_uri}DateTime/" response = self.get_request(self.root_uri + uri) if not response['ret']: return response @@ -115,7 +114,7 @@ class iLORedfishUtils(RedfishUtils): data = response["data"] if key not in data: - return {'ret': False, 'changed': False, 'msg': "Key %s not found" % key} + return {'ret': False, 'changed': False, 'msg': f"Key {key} not found"} timezones = data["TimeZoneList"] index = "" @@ -129,25 +128,23 @@ class iLORedfishUtils(RedfishUtils): if not response['ret']: return response - return {'ret': True, 'changed': True, 'msg': "Modified %s" % attr['mgr_attr_name']} + return {'ret': True, 'changed': True, 'msg': f"Modified {attr['mgr_attr_name']}"} def set_dns_server(self, attr): key = attr['mgr_attr_name'] nic_info = self.get_manager_ethernet_uri() uri = nic_info["nic_addr"] - response = self.get_request(self.root_uri + uri) - if not response['ret']: - return response + listofips = attr['mgr_attr_value'].split(" ") + if len(listofips) > 3: + return {'ret': False, 'changed': False, 'msg': "More than 3 DNS Servers mentioned"} - data = response['data'] + dns_list = [] + for ips in listofips: + dns_list.append(ips) - dns_list = data["Oem"]["Hpe"]["IPv4"][key] - - if len(dns_list) == 3: - dns_list.pop(0) - - dns_list.append(attr['mgr_attr_value']) + while len(dns_list) < 3: + dns_list.append("0.0.0.0") payload = { "Oem": { @@ -163,7 +160,7 @@ class iLORedfishUtils(RedfishUtils): if not 
response['ret']: return response - return {'ret': True, 'changed': True, 'msg': "Modified %s" % attr['mgr_attr_name']} + return {'ret': True, 'changed': True, 'msg': f"Modified {attr['mgr_attr_name']}"} def set_domain_name(self, attr): key = attr['mgr_attr_name'] @@ -208,7 +205,7 @@ class iLORedfishUtils(RedfishUtils): response = self.patch_request(self.root_uri + ethuri, payload) if not response['ret']: return response - return {'ret': True, 'changed': True, 'msg': "Modified %s" % attr['mgr_attr_name']} + return {'ret': True, 'changed': True, 'msg': f"Modified {attr['mgr_attr_name']}"} def set_wins_registration(self, mgrattr): Key = mgrattr['mgr_attr_name'] @@ -229,4 +226,80 @@ class iLORedfishUtils(RedfishUtils): response = self.patch_request(self.root_uri + ethuri, payload) if not response['ret']: return response - return {'ret': True, 'changed': True, 'msg': "Modified %s" % mgrattr['mgr_attr_name']} + return {'ret': True, 'changed': True, 'msg': f"Modified {mgrattr['mgr_attr_name']}"} + + def get_server_poststate(self): + # Get server details + response = self.get_request(self.root_uri + self.systems_uri) + if not response["ret"]: + return response + server_data = response["data"] + + if "Hpe" in server_data["Oem"]: + return { + "ret": True, + "server_poststate": server_data["Oem"]["Hpe"]["PostState"] + } + else: + return { + "ret": True, + "server_poststate": server_data["Oem"]["Hp"]["PostState"] + } + + def wait_for_ilo_reboot_completion(self, polling_interval=60, max_polling_time=1800): + # This method checks if OOB controller reboot is completed + time.sleep(10) + + # Check server poststate + state = self.get_server_poststate() + if not state["ret"]: + return state + + count = int(max_polling_time / polling_interval) + times = 0 + + # When server is powered OFF + pcount = 0 + while state["server_poststate"] in ["PowerOff", "Off"] and pcount < 5: + time.sleep(10) + state = self.get_server_poststate() + if not state["ret"]: + return state + + if 
state["server_poststate"] not in ["PowerOff", "Off"]: + break + pcount = pcount + 1 + if state["server_poststate"] in ["PowerOff", "Off"]: + return { + "ret": False, + "changed": False, + "msg": "Server is powered OFF" + } + + # When server is not rebooting + if state["server_poststate"] in ["InPostDiscoveryComplete", "FinishedPost"]: + return { + "ret": True, + "changed": False, + "msg": "Server is not rebooting" + } + + while state["server_poststate"] not in ["InPostDiscoveryComplete", "FinishedPost"] and count > times: + state = self.get_server_poststate() + if not state["ret"]: + return state + + if state["server_poststate"] in ["InPostDiscoveryComplete", "FinishedPost"]: + return { + "ret": True, + "changed": True, + "msg": "Server reboot is completed" + } + time.sleep(polling_interval) + times = times + 1 + + return { + "ret": False, + "changed": False, + "msg": f"Server Reboot has failed, server state: {state} " + } diff --git a/plugins/module_utils/influxdb.py b/plugins/module_utils/influxdb.py index c171131a95..9eed90cfda 100644 --- a/plugins/module_utils/influxdb.py +++ b/plugins/module_utils/influxdb.py @@ -1,10 +1,9 @@ -# -*- coding: utf-8 -*- -# Copyright: (c) 2017, Ansible Project -# Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause) +# Copyright (c) 2017, Ansible Project +# Simplified BSD License (see LICENSES/BSD-2-Clause.txt or https://opensource.org/licenses/BSD-2-Clause) +# SPDX-License-Identifier: BSD-2-Clause -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations import traceback @@ -14,7 +13,7 @@ from ansible_collections.community.general.plugins.module_utils.version import L REQUESTS_IMP_ERR = None try: - import requests.exceptions + import requests.exceptions # noqa: F401, pylint: disable=unused-import HAS_REQUESTS = True except ImportError: REQUESTS_IMP_ERR = traceback.format_exc() @@ -24,7 +23,7 @@ INFLUXDB_IMP_ERR = 
None try: from influxdb import InfluxDBClient from influxdb import __version__ as influxdb_version - from influxdb import exceptions + from influxdb import exceptions # noqa: F401, pylint: disable=unused-import HAS_INFLUXDB = True except ImportError: INFLUXDB_IMP_ERR = traceback.format_exc() diff --git a/plugins/module_utils/ipa.py b/plugins/module_utils/ipa.py index 3d8c2580d8..96010d503b 100644 --- a/plugins/module_utils/ipa.py +++ b/plugins/module_utils/ipa.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # This code is part of Ansible, but is an independent component. # This particular file snippet, and this file snippet only, is BSD licensed. # Modules you write using this snippet, which is embedded dynamically by Ansible @@ -7,10 +6,10 @@ # # Copyright (c) 2016 Thomas Krahn (@Nosmoht) # -# Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause) +# Simplified BSD License (see LICENSES/BSD-2-Clause.txt or https://opensource.org/licenses/BSD-2-Clause) +# SPDX-License-Identifier: BSD-2-Clause -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations import json import os @@ -19,10 +18,9 @@ import uuid import re from ansible.module_utils.common.text.converters import to_bytes, to_native, to_text -from ansible.module_utils.six import PY3 -from ansible.module_utils.six.moves.urllib.parse import quote from ansible.module_utils.urls import fetch_url, HAS_GSSAPI from ansible.module_utils.basic import env_fallback, AnsibleFallbackNotFound +from urllib.parse import quote def _env_then_dns_fallback(*args, **kwargs): @@ -53,16 +51,16 @@ class IPAClient(object): self.use_gssapi = False def get_base_url(self): - return '%s://%s/ipa' % (self.protocol, self.host) + return f'{self.protocol}://{self.host}/ipa' def get_json_url(self): - return '%s/session/json' % self.get_base_url() + return f'{self.get_base_url()}/session/json' def login(self, username, password): if 
'KRB5CCNAME' in os.environ and HAS_GSSAPI: self.use_gssapi = True elif 'KRB5_CLIENT_KTNAME' in os.environ and HAS_GSSAPI: - ccache = "MEMORY:" + str(uuid.uuid4()) + ccache = f"MEMORY:{uuid.uuid4()!s}" os.environ['KRB5CCNAME'] = ccache self.use_gssapi = True else: @@ -73,8 +71,8 @@ class IPAClient(object): 'GSSAPI. To use GSSAPI, please set the ' 'KRB5_CLIENT_KTNAME or KRB5CCNAME (or both) ' ' environment variables.') - url = '%s/session/login_password' % self.get_base_url() - data = 'user=%s&password=%s' % (quote(username, safe=''), quote(password, safe='')) + url = f'{self.get_base_url()}/session/login_password' + data = f"user={quote(username, safe='')}&password={quote(password, safe='')}" headers = {'referer': self.get_base_url(), 'Content-Type': 'application/x-www-form-urlencoded', 'Accept': 'text/plain'} @@ -99,11 +97,11 @@ class IPAClient(object): err_string = e.get('message') else: err_string = e - self.module.fail_json(msg='%s: %s' % (msg, err_string)) + self.module.fail_json(msg=f'{msg}: {err_string}') def get_ipa_version(self): response = self.ping()['summary'] - ipa_ver_regex = re.compile(r'IPA server version (\d\.\d\.\d).*') + ipa_ver_regex = re.compile(r'IPA server version (\d+\.\d+\.\d+).*') version_match = ipa_ver_regex.match(response) ipa_version = None if version_match: @@ -116,7 +114,7 @@ class IPAClient(object): def _post_json(self, method, name, item=None): if item is None: item = {} - url = '%s/session/json' % self.get_base_url() + url = f'{self.get_base_url()}/session/json' data = dict(method=method) # TODO: We should probably handle this a little better. 
@@ -134,20 +132,13 @@ class IPAClient(object): if status_code not in [200, 201, 204]: self._fail(method, info['msg']) except Exception as e: - self._fail('post %s' % method, to_native(e)) + self._fail(f'post {method}', to_native(e)) - if PY3: - charset = resp.headers.get_content_charset('latin-1') - else: - response_charset = resp.headers.getparam('charset') - if response_charset: - charset = response_charset - else: - charset = 'latin-1' + charset = resp.headers.get_content_charset('latin-1') resp = json.loads(to_text(resp.read(), encoding=charset)) err = resp.get('error') if err is not None: - self._fail('response %s' % method, err) + self._fail(f'response {method}', err) if 'result' in resp: result = resp.get('result') diff --git a/plugins/module_utils/jenkins.py b/plugins/module_utils/jenkins.py new file mode 100644 index 0000000000..26334f89b8 --- /dev/null +++ b/plugins/module_utils/jenkins.py @@ -0,0 +1,33 @@ + +# Copyright (c) 2022, Alexei Znamensky +# +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + + +import os +import time + + +def download_updates_file(updates_expiration): + updates_filename = 'jenkins-plugin-cache.json' + updates_dir = os.path.expanduser('~/.ansible/tmp') + updates_file = os.path.join(updates_dir, updates_filename) + download_updates = True + + # Make sure the destination directory exists + if not os.path.isdir(updates_dir): + os.makedirs(updates_dir, 0o700) + + # Check if we need to download new updates file + if os.path.isfile(updates_file): + # Get timestamp when the file was changed last time + ts_file = os.stat(updates_file).st_mtime + ts_now = time.time() + + if ts_now - ts_file < updates_expiration: + download_updates = False + + return updates_file, download_updates diff --git a/plugins/module_utils/known_hosts.py b/plugins/module_utils/known_hosts.py index 
ea6c95b6e2..ec20b8d88b 100644 --- a/plugins/module_utils/known_hosts.py +++ b/plugins/module_utils/known_hosts.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # This code is part of Ansible, but is an independent component. # This particular file snippet, and this file snippet only, is BSD licensed. # Modules you write using this snippet, which is embedded dynamically by Ansible @@ -7,16 +6,16 @@ # # Copyright (c), Michael DeHaan , 2012-2013 # -# Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause) +# Simplified BSD License (see LICENSES/BSD-2-Clause.txt or https://opensource.org/licenses/BSD-2-Clause) +# SPDX-License-Identifier: BSD-2-Clause -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations import os import hmac import re -from ansible.module_utils.six.moves.urllib.parse import urlparse +from urllib.parse import urlparse try: from hashlib import sha1 @@ -59,17 +58,14 @@ def get_fqdn_and_port(repo_url): elif "://" in repo_url: # this should be something we can parse with urlparse parts = urlparse(repo_url) - # parts[1] will be empty on python2.4 on ssh:// or git:// urls, so - # ensure we actually have a parts[1] before continuing. 
- if parts[1] != '': - fqdn = parts[1] - if "@" in fqdn: - fqdn = fqdn.split("@", 1)[1] - match = ipv6_re.match(fqdn) - if match: - fqdn, port = match.groups() - elif ":" in fqdn: - fqdn, port = fqdn.split(":")[0:2] + fqdn = parts[1] + if "@" in fqdn: + fqdn = fqdn.split("@", 1)[1] + match = ipv6_re.match(fqdn) + if match: + fqdn, port = match.groups() + elif ":" in fqdn: + fqdn, port = fqdn.split(":")[0:2] return fqdn, port @@ -102,13 +98,11 @@ def not_in_host_file(self, host): continue try: - host_fh = open(hf) + with open(hf) as host_fh: + data = host_fh.read() except IOError: hfiles_not_found += 1 continue - else: - data = host_fh.read() - host_fh.close() for line in data.split("\n"): if line is None or " " not in line: @@ -152,28 +146,28 @@ def add_host_key(module, fqdn, port=22, key_type="rsa", create_dir=False): try: os.makedirs(user_ssh_dir, int('700', 8)) except Exception: - module.fail_json(msg="failed to create host key directory: %s" % user_ssh_dir) + module.fail_json(msg=f"failed to create host key directory: {user_ssh_dir}") else: - module.fail_json(msg="%s does not exist" % user_ssh_dir) + module.fail_json(msg=f"{user_ssh_dir} does not exist") elif not os.path.isdir(user_ssh_dir): - module.fail_json(msg="%s is not a directory" % user_ssh_dir) + module.fail_json(msg=f"{user_ssh_dir} is not a directory") if port: - this_cmd = "%s -t %s -p %s %s" % (keyscan_cmd, key_type, port, fqdn) + this_cmd = f"{keyscan_cmd} -t {key_type} -p {port} {fqdn}" else: - this_cmd = "%s -t %s %s" % (keyscan_cmd, key_type, fqdn) + this_cmd = f"{keyscan_cmd} -t {key_type} {fqdn}" rc, out, err = module.run_command(this_cmd) # ssh-keyscan gives a 0 exit code and prints nothing on timeout if rc != 0 or not out: msg = 'failed to retrieve hostkey' if not out: - msg += '. "%s" returned no matches.' % this_cmd + msg += f'. "{this_cmd}" returned no matches.' else: - msg += ' using command "%s". [stdout]: %s' % (this_cmd, out) + msg += f' using command "{this_cmd}". 
[stdout]: {out}' if err: - msg += ' [stderr]: %s' % err + msg += f' [stderr]: {err}' module.fail_json(msg=msg) diff --git a/plugins/module_utils/ldap.py b/plugins/module_utils/ldap.py index 30dbaf7640..e0ee5940e2 100644 --- a/plugins/module_utils/ldap.py +++ b/plugins/module_utils/ldap.py @@ -1,19 +1,21 @@ -# -*- coding: utf-8 -*- -# Copyright: (c) 2016, Peter Sagerson -# Copyright: (c) 2016, Jiri Tyr -# Copyright: (c) 2017-2018 Keller Fuchs (@KellerFuchs) +# Copyright (c) 2016, Peter Sagerson +# Copyright (c) 2016, Jiri Tyr +# Copyright (c) 2017-2018 Keller Fuchs (@KellerFuchs) # -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations +import re import traceback from ansible.module_utils.common.text.converters import to_native try: import ldap + import ldap.dn + import ldap.filter import ldap.sasl HAS_LDAP = True @@ -30,33 +32,50 @@ def gen_specs(**specs): specs.update({ 'bind_dn': dict(), 'bind_pw': dict(default='', no_log=True), + 'ca_path': dict(type='path'), 'dn': dict(required=True), 'referrals_chasing': dict(type='str', default='anonymous', choices=['disabled', 'anonymous']), 'server_uri': dict(default='ldapi:///'), 'start_tls': dict(default=False, type='bool'), 'validate_certs': dict(default=True, type='bool'), 'sasl_class': dict(choices=['external', 'gssapi'], default='external', type='str'), + 'xorder_discovery': dict(choices=['enable', 'auto', 'disable'], default='auto', type='str'), + 'client_cert': dict(default=None, type='path'), + 'client_key': dict(default=None, type='path'), }) return specs +def ldap_required_together(): + return [['client_cert', 'client_key']] + + class LdapGeneric(object): def __init__(self, module): # Shortcuts 
self.module = module self.bind_dn = self.module.params['bind_dn'] self.bind_pw = self.module.params['bind_pw'] - self.dn = self.module.params['dn'] + self.ca_path = self.module.params['ca_path'] self.referrals_chasing = self.module.params['referrals_chasing'] self.server_uri = self.module.params['server_uri'] self.start_tls = self.module.params['start_tls'] self.verify_cert = self.module.params['validate_certs'] self.sasl_class = self.module.params['sasl_class'] + self.xorder_discovery = self.module.params['xorder_discovery'] + self.client_cert = self.module.params['client_cert'] + self.client_key = self.module.params['client_key'] # Establish connection self.connection = self._connect_to_ldap() + if self.xorder_discovery == "enable" or (self.xorder_discovery == "auto" and not self._xorder_dn()): + # Try to find the X_ORDERed version of the DN + self.dn = self._find_dn() + else: + self.dn = self.module.params['dn'] + def fail(self, msg, exn): self.module.fail_json( msg=msg, @@ -64,10 +83,35 @@ class LdapGeneric(object): exception=traceback.format_exc() ) + def _find_dn(self): + dn = self.module.params['dn'] + + explode_dn = ldap.dn.explode_dn(dn) + + if len(explode_dn) > 1: + try: + escaped_value = ldap.filter.escape_filter_chars(explode_dn[0]) + filterstr = f"({escaped_value})" + dns = self.connection.search_s(','.join(explode_dn[1:]), + ldap.SCOPE_ONELEVEL, filterstr) + if len(dns) == 1: + dn, dummy = dns[0] + except Exception: + pass + + return dn + def _connect_to_ldap(self): if not self.verify_cert: ldap.set_option(ldap.OPT_X_TLS_REQUIRE_CERT, ldap.OPT_X_TLS_NEVER) + if self.ca_path: + ldap.set_option(ldap.OPT_X_TLS_CACERTFILE, self.ca_path) + + if self.client_cert and self.client_key: + ldap.set_option(ldap.OPT_X_TLS_CERTFILE, self.client_cert) + ldap.set_option(ldap.OPT_X_TLS_KEYFILE, self.client_key) + connection = ldap.initialize(self.server_uri) if self.referrals_chasing == 'disabled': @@ -90,3 +134,10 @@ class LdapGeneric(object): self.fail("Cannot bind 
to the server.", e) return connection + + def _xorder_dn(self): + # match X_ORDERed DNs + regex = r".+\{\d+\}.+" + explode_dn = ldap.dn.explode_dn(self.module.params['dn']) + + return re.match(regex, explode_dn[0]) is not None diff --git a/plugins/module_utils/linode.py b/plugins/module_utils/linode.py index 9d7c37e68d..3700082bd8 100644 --- a/plugins/module_utils/linode.py +++ b/plugins/module_utils/linode.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # This code is part of Ansible, but is an independent component. # This particular file snippet, and this file snippet only, is BSD licensed. # Modules you write using this snippet, which is embedded dynamically by Ansible @@ -7,16 +6,14 @@ # # Copyright (c), Luke Murphy @decentral1se # -# Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause) +# Simplified BSD License (see LICENSES/BSD-2-Clause.txt or https://opensource.org/licenses/BSD-2-Clause) +# SPDX-License-Identifier: BSD-2-Clause -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations + +from ansible.module_utils.ansible_release import __version__ as ansible_version def get_user_agent(module): """Retrieve a user-agent to send with LinodeClient requests.""" - try: - from ansible.module_utils.ansible_release import __version__ as ansible_version - except ImportError: - ansible_version = 'unknown' - return 'Ansible-%s/%s' % (module, ansible_version) + return f'Ansible-{module}/{ansible_version}' diff --git a/plugins/module_utils/locale_gen.py b/plugins/module_utils/locale_gen.py new file mode 100644 index 0000000000..b8a48d320b --- /dev/null +++ b/plugins/module_utils/locale_gen.py @@ -0,0 +1,29 @@ +# Copyright (c) 2023, Alexei Znamensky +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + +from 
ansible_collections.community.general.plugins.module_utils.cmd_runner import CmdRunner, cmd_runner_fmt + + +def locale_runner(module): + runner = CmdRunner( + module, + command=["locale", "-a"], + check_rc=True, + ) + return runner + + +def locale_gen_runner(module): + runner = CmdRunner( + module, + command="locale-gen", + arg_formats=dict( + name=cmd_runner_fmt.as_list(), + purge=cmd_runner_fmt.as_fixed('--purge'), + ), + check_rc=True, + ) + return runner diff --git a/plugins/module_utils/lxd.py b/plugins/module_utils/lxd.py index e25caf11f3..cc8e05c0f0 100644 --- a/plugins/module_utils/lxd.py +++ b/plugins/module_utils/lxd.py @@ -1,33 +1,25 @@ -# -*- coding: utf-8 -*- -# (c) 2016, Hiroaki Nakamura -# -# This code is part of Ansible, but is an independent component. -# This particular file snippet, and this file snippet only, is BSD licensed. -# Modules you write using this snippet, which is embedded dynamically by Ansible -# still belong to the author of the module, and may assign their own license -# to the complete work. 
-# -# Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause) +# Copyright (c) 2016, Hiroaki Nakamura +# Simplified BSD License (see LICENSES/BSD-2-Clause.txt or https://opensource.org/licenses/BSD-2-Clause) +# SPDX-License-Identifier: BSD-2-Clause -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations +import http.client as http_client +import os import socket import ssl +import json +from urllib.parse import urlparse from ansible.module_utils.urls import generic_urlparse -from ansible.module_utils.six.moves.urllib.parse import urlparse -from ansible.module_utils.six.moves import http_client from ansible.module_utils.common.text.converters import to_text # httplib/http.client connection using unix domain socket HTTPConnection = http_client.HTTPConnection HTTPSConnection = http_client.HTTPSConnection -import json - class UnixHTTPConnection(HTTPConnection): def __init__(self, path): @@ -47,7 +39,7 @@ class LXDClientException(Exception): class LXDClient(object): - def __init__(self, url, key_file=None, cert_file=None, debug=False): + def __init__(self, url, key_file=None, cert_file=None, debug=False, server_cert_file=None, server_check_hostname=True): """LXD Client. :param url: The URL of the LXD server. (e.g. unix:/var/lib/lxd/unix.socket or https://127.0.0.1) @@ -58,6 +50,10 @@ class LXDClient(object): :type cert_file: ``str`` :param debug: The debug flag. The request and response are stored in logs when debug is true. :type debug: ``bool`` + :param server_cert_file: The path of the server certificate file. + :type server_cert_file: ``str`` + :param server_check_hostname: Whether to check the server's hostname as part of TLS verification. 
+ :type debug: ``bool`` """ self.url = url self.debug = debug @@ -66,7 +62,11 @@ class LXDClient(object): self.cert_file = cert_file self.key_file = key_file parts = generic_urlparse(urlparse(self.url)) - ctx = ssl.create_default_context(ssl.Purpose.CLIENT_AUTH) + ctx = ssl.create_default_context(ssl.Purpose.SERVER_AUTH) + if server_cert_file: + # Check that the received cert is signed by the provided server_cert_file + ctx.load_verify_locations(cafile=server_cert_file) + ctx.check_hostname = server_check_hostname ctx.load_cert_chain(cert_file, keyfile=key_file) self.connection = HTTPSConnection(parts.get('netloc'), context=ctx) elif url.startswith('unix:'): @@ -78,7 +78,7 @@ class LXDClient(object): def do(self, method, url, body_json=None, ok_error_codes=None, timeout=None, wait_for_container=None): resp_json = self._send_request(method, url, body_json=body_json, ok_error_codes=ok_error_codes, timeout=timeout) if resp_json['type'] == 'async': - url = '{0}/wait'.format(resp_json['operation']) + url = f"{resp_json['operation']}/wait" resp_json = self._send_request('GET', url) if wait_for_container: while resp_json['metadata']['status'] == 'Running': @@ -130,3 +130,11 @@ class LXDClient(object): if err is None: err = resp_json.get('error', None) return err + + +def default_key_file(): + return os.path.expanduser('~/.config/lxc/client.key') + + +def default_cert_file(): + return os.path.expanduser('~/.config/lxc/client.crt') diff --git a/plugins/module_utils/manageiq.py b/plugins/module_utils/manageiq.py index 98e5590cc6..4b89fd78cc 100644 --- a/plugins/module_utils/manageiq.py +++ b/plugins/module_utils/manageiq.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # # Copyright (c) 2017, Daniel Korn # @@ -8,10 +7,10 @@ # still belong to the author of the module, and may assign their own license # to the complete work. 
# -# Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause) +# Simplified BSD License (see LICENSES/BSD-2-Clause.txt or https://opensource.org/licenses/BSD-2-Clause) +# SPDX-License-Identifier: BSD-2-Clause -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations import os @@ -94,12 +93,12 @@ class ManageIQ(object): ca_bundle_path = params['ca_cert'] self._module = module - self._api_url = url + '/api' + self._api_url = f"{url}/api" self._auth = dict(user=username, password=password, token=token) try: self._client = ManageIQClient(self._api_url, self._auth, verify_ssl=verify_ssl, ca_bundle_path=ca_bundle_path) except Exception as e: - self.module.fail_json(msg="failed to open connection (%s): %s" % (url, str(e))) + self.module.fail_json(msg=f"failed to open connection ({url}): {e}") @property def module(self): @@ -139,7 +138,7 @@ class ManageIQ(object): except ValueError: return None except Exception as e: - self.module.fail_json(msg="failed to find resource {error}".format(error=e)) + self.module.fail_json(msg=f"failed to find resource {e}") return vars(entity) def find_collection_resource_or_fail(self, collection_name, **params): @@ -152,6 +151,290 @@ class ManageIQ(object): if resource: return resource else: - msg = "{collection_name} where {params} does not exist in manageiq".format( - collection_name=collection_name, params=str(params)) + msg = f"{collection_name} where {params} does not exist in manageiq" self.module.fail_json(msg=msg) + + def policies(self, resource_id, resource_type, resource_name): + manageiq = ManageIQ(self.module) + + # query resource id, fail if resource does not exist + if resource_id is None: + resource_id = manageiq.find_collection_resource_or_fail(resource_type, name=resource_name)['id'] + + return ManageIQPolicies(manageiq, resource_type, resource_id) + + def query_resource_id(self, resource_type, resource_name): + 
""" Query the resource name in ManageIQ. + + Returns: + the resource ID if it exists in ManageIQ, Fail otherwise. + """ + resource = self.find_collection_resource_by(resource_type, name=resource_name) + if resource: + return resource["id"] + else: + msg = f"{resource_name} {resource_type} does not exist in manageiq" + self.module.fail_json(msg=msg) + + +class ManageIQPolicies(object): + """ + Object to execute policies management operations of manageiq resources. + """ + + def __init__(self, manageiq, resource_type, resource_id): + self.manageiq = manageiq + + self.module = self.manageiq.module + self.api_url = self.manageiq.api_url + self.client = self.manageiq.client + + self.resource_type = resource_type + self.resource_id = resource_id + self.resource_url = f'{self.api_url}/{resource_type}/{resource_id}' + + def query_profile_href(self, profile): + """ Add or Update the policy_profile href field + + Example: + {name: STR, ...} => {name: STR, href: STR} + """ + resource = self.manageiq.find_collection_resource_or_fail( + "policy_profiles", **profile) + return dict(name=profile['name'], href=resource['href']) + + def query_resource_profiles(self): + """ Returns a set of the profile objects objects assigned to the resource + """ + url = '{resource_url}/policy_profiles?expand=resources' + try: + response = self.client.get(url.format(resource_url=self.resource_url)) + except Exception as e: + msg = f"Failed to query {self.resource_type} policies: {e}" + self.module.fail_json(msg=msg) + + resources = response.get('resources', []) + + # clean the returned rest api profile object to look like: + # {profile_name: STR, profile_description: STR, policies: ARR} + profiles = [self.clean_profile_object(profile) for profile in resources] + + return profiles + + def query_profile_policies(self, profile_id): + """ Returns a set of the policy objects assigned to the resource + """ + url = '{api_url}/policy_profiles/{profile_id}?expand=policies' + try: + response = 
self.client.get(url.format(api_url=self.api_url, profile_id=profile_id)) + except Exception as e: + msg = f"Failed to query {self.resource_type} policies: {e}" + self.module.fail_json(msg=msg) + + resources = response.get('policies', []) + + # clean the returned rest api policy object to look like: + # {name: STR, description: STR, active: BOOL} + policies = [self.clean_policy_object(policy) for policy in resources] + + return policies + + def clean_policy_object(self, policy): + """ Clean a policy object to have human readable form of: + { + name: STR, + description: STR, + active: BOOL + } + """ + name = policy.get('name') + description = policy.get('description') + active = policy.get('active') + + return dict( + name=name, + description=description, + active=active) + + def clean_profile_object(self, profile): + """ Clean a profile object to have human readable form of: + { + profile_name: STR, + profile_description: STR, + policies: ARR + } + """ + profile_id = profile['id'] + name = profile.get('name') + description = profile.get('description') + policies = self.query_profile_policies(profile_id) + + return dict( + profile_name=name, + profile_description=description, + policies=policies) + + def profiles_to_update(self, profiles, action): + """ Create a list of policies we need to update in ManageIQ. + + Returns: + Whether or not a change took place and a message describing the + operation executed. + """ + profiles_to_post = [] + assigned_profiles = self.query_resource_profiles() + + # make a list of assigned full profile names strings + # e.g. ['openscap profile', ...] 
+ assigned_profiles_set = set(profile['profile_name'] for profile in assigned_profiles) + + for profile in profiles: + assigned = profile.get('name') in assigned_profiles_set + + if (action == 'unassign' and assigned) or (action == 'assign' and not assigned): + # add/update the policy profile href field + # {name: STR, ...} => {name: STR, href: STR} + profile = self.query_profile_href(profile) + profiles_to_post.append(profile) + + return profiles_to_post + + def assign_or_unassign_profiles(self, profiles, action): + """ Perform assign/unassign action + """ + # get a list of profiles needed to be changed + profiles_to_post = self.profiles_to_update(profiles, action) + if not profiles_to_post: + return dict( + changed=False, + msg=f"Profiles {profiles} already {action}ed, nothing to do") + + # try to assign or unassign profiles to resource + url = f'{self.resource_url}/policy_profiles' + try: + response = self.client.post(url, action=action, resources=profiles_to_post) + except Exception as e: + msg = f"Failed to {action} profile: {e}" + self.module.fail_json(msg=msg) + + # check all entities in result to be successful + for result in response['results']: + if not result['success']: + msg = f"Failed to {action}: {result['message']}" + self.module.fail_json(msg=msg) + + # successfully changed all needed profiles + return dict( + changed=True, + msg=f"Successfully {action}ed profiles: {profiles}") + + +class ManageIQTags(object): + """ + Object to execute tags management operations of manageiq resources. 
+ """ + + def __init__(self, manageiq, resource_type, resource_id): + self.manageiq = manageiq + + self.module = self.manageiq.module + self.api_url = self.manageiq.api_url + self.client = self.manageiq.client + + self.resource_type = resource_type + self.resource_id = resource_id + self.resource_url = f'{self.api_url}/{resource_type}/{resource_id}' + + def full_tag_name(self, tag): + """ Returns the full tag name in manageiq + """ + return f"/managed/{tag['category']}/{tag['name']}" + + def clean_tag_object(self, tag): + """ Clean a tag object to have human readable form of: + { + full_name: STR, + name: STR, + display_name: STR, + category: STR + } + """ + full_name = tag.get('name') + categorization = tag.get('categorization', {}) + + return dict( + full_name=full_name, + name=categorization.get('name'), + display_name=categorization.get('display_name'), + category=categorization.get('category', {}).get('name')) + + def query_resource_tags(self): + """ Returns a set of the tag objects assigned to the resource + """ + url = '{resource_url}/tags?expand=resources&attributes=categorization' + try: + response = self.client.get(url.format(resource_url=self.resource_url)) + except Exception as e: + msg = f"Failed to query {self.resource_type} tags: {e}" + self.module.fail_json(msg=msg) + + resources = response.get('resources', []) + + # clean the returned rest api tag object to look like: + # {full_name: STR, name: STR, display_name: STR, category: STR} + tags = [self.clean_tag_object(tag) for tag in resources] + + return tags + + def tags_to_update(self, tags, action): + """ Create a list of tags we need to update in ManageIQ. + + Returns: + Whether or not a change took place and a message describing the + operation executed. + """ + tags_to_post = [] + assigned_tags = self.query_resource_tags() + + # make a list of assigned full tag names strings + # e.g. ['/managed/environment/prod', ...] 
+ assigned_tags_set = set(tag['full_name'] for tag in assigned_tags) + + for tag in tags: + assigned = self.full_tag_name(tag) in assigned_tags_set + + if assigned and action == 'unassign': + tags_to_post.append(tag) + elif (not assigned) and action == 'assign': + tags_to_post.append(tag) + + return tags_to_post + + def assign_or_unassign_tags(self, tags, action): + """ Perform assign/unassign action + """ + # get a list of tags needed to be changed + tags_to_post = self.tags_to_update(tags, action) + if not tags_to_post: + return dict( + changed=False, + msg=f"Tags already {action}ed, nothing to do") + + # try to assign or unassign tags to resource + url = f'{self.resource_url}/tags' + try: + response = self.client.post(url, action=action, resources=tags) + except Exception as e: + msg = f"Failed to {action} tag: {e}" + self.module.fail_json(msg=msg) + + # check all entities in result to be successful + for result in response['results']: + if not result['success']: + msg = f"Failed to {action}: {result['message']}" + self.module.fail_json(msg=msg) + + # successfully changed all needed tags + return dict( + changed=True, + msg=f"Successfully {action}ed tags") diff --git a/plugins/module_utils/memset.py b/plugins/module_utils/memset.py index 7813290a72..cbfbc9108a 100644 --- a/plugins/module_utils/memset.py +++ b/plugins/module_utils/memset.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # This code is part of Ansible, but is an independent component. # This particular file snippet, and this file snippet only, is BSD licensed. 
# Modules you write using this snippet, which is embedded dynamically by Ansible @@ -7,14 +6,15 @@ # # Copyright (c) 2018, Simon Weald # -# Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause) +# Simplified BSD License (see LICENSES/BSD-2-Clause.txt or https://opensource.org/licenses/BSD-2-Clause) +# SPDX-License-Identifier: BSD-2-Clause -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations -from ansible.module_utils.six.moves.urllib.parse import urlencode -from ansible.module_utils.urls import open_url, urllib_error +from urllib.parse import urlencode +from ansible.module_utils.urls import open_url from ansible.module_utils.basic import json +import urllib.error as urllib_error class Response(object): @@ -25,6 +25,7 @@ class Response(object): def __init__(self): self.content = None self.status_code = None + self.stderr = None def json(self): return json.loads(self.content) @@ -54,7 +55,7 @@ def memset_api_call(api_key, api_method, payload=None): data = urlencode(payload) headers = {'Content-Type': 'application/x-www-form-urlencoded'} api_uri_base = 'https://api.memset.com/v1/json/' - api_uri = '{0}{1}/' . format(api_uri_base, api_method) + api_uri = f'{api_uri_base}{api_method}/' try: resp = open_url(api_uri, data=data, headers=headers, method="POST", force_basic_auth=True, url_username=api_key) @@ -71,14 +72,18 @@ def memset_api_call(api_key, api_method, payload=None): response.status_code = errorcode if response.status_code is not None: - msg = "Memset API returned a {0} response ({1}, {2})." . format(response.status_code, response.json()['error_type'], response.json()['error']) + msg = f"Memset API returned a {response.status_code} response ({response.json()['error_type']}, {response.json()['error']})." else: - msg = "Memset API returned an error ({0}, {1})." . 
format(response.json()['error_type'], response.json()['error']) + msg = f"Memset API returned an error ({response.json()['error_type']}, {response.json()['error']})." + except urllib_error.URLError as e: + has_failed = True + msg = f"An URLError occurred ({type(e)})." + response.stderr = f"{e}" if msg is None: msg = response.json() - return(has_failed, msg, response) + return has_failed, msg, response def check_zone_domain(data, domain): @@ -92,7 +97,7 @@ def check_zone_domain(data, domain): if zone_domain['domain'] == domain: exists = True - return(exists) + return exists def check_zone(data, name): @@ -109,7 +114,7 @@ def check_zone(data, name): if counter == 1: exists = True - return(exists, counter) + return exists, counter def get_zone_id(zone_name, current_zones): @@ -135,4 +140,4 @@ def get_zone_id(zone_name, current_zones): zone_id = None msg = 'Zone ID could not be returned as duplicate zone names were detected' - return(zone_exists, msg, counter, zone_id) + return zone_exists, msg, counter, zone_id diff --git a/plugins/module_utils/mh/base.py b/plugins/module_utils/mh/base.py index 0871a527be..688d65fc35 100644 --- a/plugins/module_utils/mh/base.py +++ b/plugins/module_utils/mh/base.py @@ -1,10 +1,9 @@ -# -*- coding: utf-8 -*- # (c) 2020, Alexei Znamensky -# Copyright: (c) 2020, Ansible Project -# Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause) +# Copyright (c) 2020, Ansible Project +# Simplified BSD License (see LICENSES/BSD-2-Clause.txt or https://opensource.org/licenses/BSD-2-Clause) +# SPDX-License-Identifier: BSD-2-Clause -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations from ansible.module_utils.basic import AnsibleModule from ansible_collections.community.general.plugins.module_utils.mh.exceptions import ModuleHelperException as _MHE @@ -15,7 +14,7 @@ class ModuleHelperBase(object): module = None ModuleHelperException = 
_MHE _delegated_to_module = ( - 'check_mode', 'get_bin_path', 'warn', 'deprecate', + 'check_mode', 'get_bin_path', 'warn', 'deprecate', 'debug', ) def __init__(self, module=None): @@ -31,13 +30,17 @@ class ModuleHelperBase(object): def diff_mode(self): return self.module._diff + @property + def verbosity(self): + return self.module._verbosity + def do_raise(self, *args, **kwargs): raise _MHE(*args, **kwargs) def __getattr__(self, attr): if attr in self._delegated_to_module: return getattr(self.module, attr) - raise AttributeError("ModuleHelperBase has no attribute '%s'" % (attr, )) + raise AttributeError(f"ModuleHelperBase has no attribute '{attr}'") def __init_module__(self): pass diff --git a/plugins/module_utils/mh/deco.py b/plugins/module_utils/mh/deco.py index 62d460b4e6..0be576ccfa 100644 --- a/plugins/module_utils/mh/deco.py +++ b/plugins/module_utils/mh/deco.py @@ -1,10 +1,9 @@ -# -*- coding: utf-8 -*- # (c) 2020, Alexei Znamensky -# Copyright: (c) 2020, Ansible Project -# Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause) +# Copyright (c) 2020, Ansible Project +# Simplified BSD License (see LICENSES/BSD-2-Clause.txt or https://opensource.org/licenses/BSD-2-Clause) +# SPDX-License-Identifier: BSD-2-Clause -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations import traceback from functools import wraps @@ -12,23 +11,21 @@ from functools import wraps from ansible_collections.community.general.plugins.module_utils.mh.exceptions import ModuleHelperException -def cause_changes(on_success=None, on_failure=None): - +def cause_changes(when=None): def deco(func): - if on_success is None and on_failure is None: - return func - @wraps(func) - def wrapper(*args, **kwargs): + def wrapper(self, *args, **kwargs): try: - self = args[0] - func(*args, **kwargs) - if on_success is not None: - self.changed = on_success + func(self, *args, **kwargs) + if 
when == "success": + self.changed = True except Exception: - if on_failure is not None: - self.changed = on_failure + if when == "failure": + self.changed = True raise + finally: + if when == "always": + self.changed = True return wrapper @@ -36,21 +33,32 @@ def cause_changes(on_success=None, on_failure=None): def module_fails_on_exception(func): + conflict_list = ('msg', 'exception', 'output', 'vars', 'changed') + @wraps(func) def wrapper(self, *args, **kwargs): + def fix_key(k): + return k if k not in conflict_list else f"_{k}" + + def fix_var_conflicts(output): + result = {fix_key(k): v for k, v in output.items()} + return result + try: func(self, *args, **kwargs) - except SystemExit: - raise except ModuleHelperException as e: if e.update_output: self.update_output(e.update_output) + # patchy solution to resolve conflict with output variables + output = fix_var_conflicts(self.output) self.module.fail_json(msg=e.msg, exception=traceback.format_exc(), - output=self.output, vars=self.vars.output(), **self.output) + output=self.output, vars=self.vars.output(), **output) except Exception as e: - msg = "Module failed with exception: {0}".format(str(e).strip()) + # patchy solution to resolve conflict with output variables + output = fix_var_conflicts(self.output) + msg = f"Module failed with exception: {str(e).strip()}" self.module.fail_json(msg=msg, exception=traceback.format_exc(), - output=self.output, vars=self.vars.output(), **self.output) + output=self.output, vars=self.vars.output(), **output) return wrapper @@ -59,6 +67,7 @@ def check_mode_skip(func): def wrapper(self, *args, **kwargs): if not self.module.check_mode: return func(self, *args, **kwargs) + return wrapper @@ -73,7 +82,7 @@ def check_mode_skip_returns(callable=None, value=None): return func(self, *args, **kwargs) return wrapper_callable - if value is not None: + else: @wraps(func) def wrapper_value(self, *args, **kwargs): if self.module.check_mode: @@ -81,7 +90,4 @@ def 
check_mode_skip_returns(callable=None, value=None): return func(self, *args, **kwargs) return wrapper_value - if callable is None and value is None: - return check_mode_skip - return deco diff --git a/plugins/module_utils/mh/exceptions.py b/plugins/module_utils/mh/exceptions.py index 558dcca05f..94bb7d7fff 100644 --- a/plugins/module_utils/mh/exceptions.py +++ b/plugins/module_utils/mh/exceptions.py @@ -1,22 +1,17 @@ -# -*- coding: utf-8 -*- # (c) 2020, Alexei Znamensky -# Copyright: (c) 2020, Ansible Project -# Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause) +# Copyright (c) 2020, Ansible Project +# Simplified BSD License (see LICENSES/BSD-2-Clause.txt or https://opensource.org/licenses/BSD-2-Clause) +# SPDX-License-Identifier: BSD-2-Clause -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations + +from ansible.module_utils.common.text.converters import to_native class ModuleHelperException(Exception): - @staticmethod - def _get_remove(key, kwargs): - if key in kwargs: - result = kwargs[key] - del kwargs[key] - return result - return None - - def __init__(self, *args, **kwargs): - self.msg = self._get_remove('msg', kwargs) or "Module failed with exception: {0}".format(self) - self.update_output = self._get_remove('update_output', kwargs) or {} + def __init__(self, msg, update_output=None, *args, **kwargs): + self.msg = to_native(msg or f"Module failed with exception: {self}") + if update_output is None: + update_output = {} + self.update_output = update_output super(ModuleHelperException, self).__init__(*args) diff --git a/plugins/module_utils/mh/mixins/cmd.py b/plugins/module_utils/mh/mixins/cmd.py deleted file mode 100644 index 58d50fbdf8..0000000000 --- a/plugins/module_utils/mh/mixins/cmd.py +++ /dev/null @@ -1,188 +0,0 @@ -# -*- coding: utf-8 -*- -# (c) 2020, Alexei Znamensky -# Copyright: (c) 2020, Ansible Project -# Simplified BSD 
License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - -from functools import partial - - -class ArgFormat(object): - """ - Argument formatter for use as a command line parameter. Used in CmdMixin. - """ - BOOLEAN = 0 - PRINTF = 1 - FORMAT = 2 - BOOLEAN_NOT = 3 - - @staticmethod - def stars_deco(num): - if num == 1: - def deco(f): - return lambda v: f(*v) - return deco - elif num == 2: - def deco(f): - return lambda v: f(**v) - return deco - - return lambda f: f - - def __init__(self, name, fmt=None, style=FORMAT, stars=0): - """ - Creates a CLI-formatter for one specific argument. The argument may be a module parameter or just a named parameter for - the CLI command execution. - :param name: Name of the argument to be formatted - :param fmt: Either a str to be formatted (using or not printf-style) or a callable that does that - :param style: Whether arg_format (as str) should use printf-style formatting. - Ignored if arg_format is None or not a str (should be callable). 
- :param stars: A int with 0, 1 or 2 value, indicating to formatting the value as: value, *value or **value - """ - def printf_fmt(_fmt, v): - try: - return [_fmt % v] - except TypeError as e: - if e.args[0] != 'not all arguments converted during string formatting': - raise - return [_fmt] - - _fmts = { - ArgFormat.BOOLEAN: lambda _fmt, v: ([_fmt] if bool(v) else []), - ArgFormat.BOOLEAN_NOT: lambda _fmt, v: ([] if bool(v) else [_fmt]), - ArgFormat.PRINTF: printf_fmt, - ArgFormat.FORMAT: lambda _fmt, v: [_fmt.format(v)], - } - - self.name = name - self.stars = stars - self.style = style - - if fmt is None: - fmt = "{0}" - style = ArgFormat.FORMAT - - if isinstance(fmt, str): - func = _fmts[style] - self.arg_format = partial(func, fmt) - elif isinstance(fmt, list) or isinstance(fmt, tuple): - self.arg_format = lambda v: [_fmts[style](f, v)[0] for f in fmt] - elif hasattr(fmt, '__call__'): - self.arg_format = fmt - else: - raise TypeError('Parameter fmt must be either: a string, a list/tuple of ' - 'strings or a function: type={0}, value={1}'.format(type(fmt), fmt)) - - if stars: - self.arg_format = (self.stars_deco(stars))(self.arg_format) - - def to_text(self, value): - if value is None and self.style != ArgFormat.BOOLEAN_NOT: - return [] - func = self.arg_format - return [str(p) for p in func(value)] - - -class CmdMixin(object): - """ - Mixin for mapping module options to running a CLI command with its arguments. 
- """ - command = None - command_args_formats = {} - run_command_fixed_options = {} - check_rc = False - force_lang = "C" - - @property - def module_formats(self): - result = {} - for param in self.module.params.keys(): - result[param] = ArgFormat(param) - return result - - @property - def custom_formats(self): - result = {} - for param, fmt_spec in self.command_args_formats.items(): - result[param] = ArgFormat(param, **fmt_spec) - return result - - def _calculate_args(self, extra_params=None, params=None): - def add_arg_formatted_param(_cmd_args, arg_format, _value): - args = list(arg_format.to_text(_value)) - return _cmd_args + args - - def find_format(_param): - return self.custom_formats.get(_param, self.module_formats.get(_param)) - - extra_params = extra_params or dict() - cmd_args = list([self.command]) if isinstance(self.command, str) else list(self.command) - try: - cmd_args[0] = self.module.get_bin_path(cmd_args[0], required=True) - except ValueError: - pass - param_list = params if params else self.vars.keys() - - for param in param_list: - if isinstance(param, dict): - if len(param) != 1: - self.do_raise("run_command parameter as a dict must contain only one key: {0}".format(param)) - _param = list(param.keys())[0] - fmt = find_format(_param) - value = param[_param] - elif isinstance(param, str): - if param in self.vars.keys(): - fmt = find_format(param) - value = self.vars[param] - elif param in extra_params: - fmt = find_format(param) - value = extra_params[param] - else: - self.do_raise('Cannot determine value for parameter: {0}'.format(param)) - else: - self.do_raise("run_command parameter must be either a str or a dict: {0}".format(param)) - cmd_args = add_arg_formatted_param(cmd_args, fmt, value) - - return cmd_args - - def process_command_output(self, rc, out, err): - return rc, out, err - - def run_command(self, - extra_params=None, - params=None, - process_output=None, - publish_rc=True, - publish_out=True, - publish_err=True, - 
publish_cmd=True, - *args, **kwargs): - cmd_args = self._calculate_args(extra_params, params) - options = dict(self.run_command_fixed_options) - options['check_rc'] = options.get('check_rc', self.check_rc) - options.update(kwargs) - env_update = dict(options.get('environ_update', {})) - if self.force_lang: - env_update.update({ - 'LANGUAGE': self.force_lang, - 'LC_ALL': self.force_lang, - }) - self.update_output(force_lang=self.force_lang) - options['environ_update'] = env_update - rc, out, err = self.module.run_command(cmd_args, *args, **options) - if publish_rc: - self.update_output(rc=rc) - if publish_out: - self.update_output(stdout=out) - if publish_err: - self.update_output(stderr=err) - if publish_cmd: - self.update_output(cmd_args=cmd_args) - if process_output is None: - _process = self.process_command_output - else: - _process = process_output - - return _process(rc, out, err) diff --git a/plugins/module_utils/mh/mixins/deprecate_attrs.py b/plugins/module_utils/mh/mixins/deprecate_attrs.py index fb440aba4c..166e365782 100644 --- a/plugins/module_utils/mh/mixins/deprecate_attrs.py +++ b/plugins/module_utils/mh/mixins/deprecate_attrs.py @@ -1,10 +1,9 @@ -# -*- coding: utf-8 -*- # (c) 2020, Alexei Znamensky -# Copyright: (c) 2020, Ansible Project -# Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause) +# Copyright (c) 2020, Ansible Project +# Simplified BSD License (see LICENSES/BSD-2-Clause.txt or https://opensource.org/licenses/BSD-2-Clause) +# SPDX-License-Identifier: BSD-2-Clause -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations from ansible.module_utils.basic import AnsibleModule @@ -16,7 +15,7 @@ class DeprecateAttrsMixin(object): if target is None: target = self if not hasattr(target, attr): - raise ValueError("Target {0} has no attribute {1}".format(target, attr)) + raise ValueError(f"Target {target} has no attribute {attr}") 
if module is None: if isinstance(target, AnsibleModule): module = target @@ -58,4 +57,4 @@ class DeprecateAttrsMixin(object): # override attribute prop = property(_getter) setattr(target, attr, prop) - setattr(target, "_{0}_setter".format(attr), prop.setter(_setter)) + setattr(target, f"_{attr}_setter", prop.setter(_setter)) diff --git a/plugins/module_utils/mh/mixins/deps.py b/plugins/module_utils/mh/mixins/deps.py deleted file mode 100644 index 1c6c9ae484..0000000000 --- a/plugins/module_utils/mh/mixins/deps.py +++ /dev/null @@ -1,58 +0,0 @@ -# -*- coding: utf-8 -*- -# (c) 2020, Alexei Znamensky -# Copyright: (c) 2020, Ansible Project -# Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - -import traceback - -from ansible_collections.community.general.plugins.module_utils.mh.base import ModuleHelperBase -from ansible_collections.community.general.plugins.module_utils.mh.deco import module_fails_on_exception - - -class DependencyCtxMgr(object): - def __init__(self, name, msg=None): - self.name = name - self.msg = msg - self.has_it = False - self.exc_type = None - self.exc_val = None - self.exc_tb = None - - def __enter__(self): - return self - - def __exit__(self, exc_type, exc_val, exc_tb): - self.has_it = exc_type is None - self.exc_type = exc_type - self.exc_val = exc_val - self.exc_tb = exc_tb - return not self.has_it - - @property - def text(self): - return self.msg or str(self.exc_val) - - -class DependencyMixin(ModuleHelperBase): - _dependencies = [] - - @classmethod - def dependency(cls, name, msg): - cls._dependencies.append(DependencyCtxMgr(name, msg)) - return cls._dependencies[-1] - - def fail_on_missing_deps(self): - for d in self._dependencies: - if not d.has_it: - self.module.fail_json(changed=False, - exception="\n".join(traceback.format_exception(d.exc_type, d.exc_val, d.exc_tb)), - msg=d.text, - 
**self.output) - - @module_fails_on_exception - def run(self): - self.fail_on_missing_deps() - super(DependencyMixin, self).run() diff --git a/plugins/module_utils/mh/mixins/state.py b/plugins/module_utils/mh/mixins/state.py index b946090ac9..a04c3b1386 100644 --- a/plugins/module_utils/mh/mixins/state.py +++ b/plugins/module_utils/mh/mixins/state.py @@ -1,10 +1,9 @@ -# -*- coding: utf-8 -*- # (c) 2020, Alexei Znamensky -# Copyright: (c) 2020, Ansible Project -# Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause) +# Copyright (c) 2020, Ansible Project +# Simplified BSD License (see LICENSES/BSD-2-Clause.txt or https://opensource.org/licenses/BSD-2-Clause) +# SPDX-License-Identifier: BSD-2-Clause -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations class StateMixin(object): @@ -16,7 +15,7 @@ class StateMixin(object): return self.default_state if state is None else state def _method(self, state): - return "{0}_{1}".format(self.state_param, state) + return f"{self.state_param}_{state}" def __run__(self): state = self._state() @@ -36,4 +35,4 @@ class StateMixin(object): return func() def __state_fallback__(self): - raise ValueError("Cannot find method: {0}".format(self._method(self._state()))) + raise ValueError(f"Cannot find method: {self._method(self._state())}") diff --git a/plugins/module_utils/mh/mixins/vars.py b/plugins/module_utils/mh/mixins/vars.py deleted file mode 100644 index a11110ed60..0000000000 --- a/plugins/module_utils/mh/mixins/vars.py +++ /dev/null @@ -1,134 +0,0 @@ -# -*- coding: utf-8 -*- -# (c) 2020, Alexei Znamensky -# Copyright: (c) 2020, Ansible Project -# Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - -import copy - - -class VarMeta(object): - NOTHING = object() - - def 
__init__(self, diff=False, output=True, change=None, fact=False): - self.init = False - self.initial_value = None - self.value = None - - self.diff = diff - self.change = diff if change is None else change - self.output = output - self.fact = fact - - def set(self, diff=None, output=None, change=None, fact=None, initial_value=NOTHING): - if diff is not None: - self.diff = diff - if output is not None: - self.output = output - if change is not None: - self.change = change - if fact is not None: - self.fact = fact - if initial_value is not self.NOTHING: - self.initial_value = copy.deepcopy(initial_value) - - def set_value(self, value): - if not self.init: - self.initial_value = copy.deepcopy(value) - self.init = True - self.value = value - return self - - @property - def has_changed(self): - return self.change and (self.initial_value != self.value) - - @property - def diff_result(self): - return None if not (self.diff and self.has_changed) else { - 'before': self.initial_value, - 'after': self.value, - } - - def __str__(self): - return "".format( - self.value, self.initial_value, self.diff, self.output, self.change - ) - - -class VarDict(object): - def __init__(self): - self._data = dict() - self._meta = dict() - - def __getitem__(self, item): - return self._data[item] - - def __setitem__(self, key, value): - self.set(key, value) - - def __getattr__(self, item): - try: - return self._data[item] - except KeyError: - return getattr(self._data, item) - - def __setattr__(self, key, value): - if key in ('_data', '_meta'): - super(VarDict, self).__setattr__(key, value) - else: - self.set(key, value) - - def meta(self, name): - return self._meta[name] - - def set_meta(self, name, **kwargs): - self.meta(name).set(**kwargs) - - def set(self, name, value, **kwargs): - if name in ('_data', '_meta'): - raise ValueError("Names _data and _meta are reserved for use by ModuleHelper") - self._data[name] = value - if name in self._meta: - meta = self.meta(name) - else: - meta = 
VarMeta(**kwargs) - meta.set_value(value) - self._meta[name] = meta - - def output(self): - return dict((k, v) for k, v in self._data.items() if self.meta(k).output) - - def diff(self): - diff_results = [(k, self.meta(k).diff_result) for k in self._data] - diff_results = [dr for dr in diff_results if dr[1] is not None] - if diff_results: - before = dict((dr[0], dr[1]['before']) for dr in diff_results) - after = dict((dr[0], dr[1]['after']) for dr in diff_results) - return {'before': before, 'after': after} - return None - - def facts(self): - facts_result = dict((k, v) for k, v in self._data.items() if self._meta[k].fact) - return facts_result if facts_result else None - - def change_vars(self): - return [v for v in self._data if self.meta(v).change] - - def has_changed(self, v): - return self._meta[v].has_changed - - -class VarsMixin(object): - - def __init__(self, module=None): - self.vars = VarDict() - super(VarsMixin, self).__init__(module) - - def update_vars(self, meta=None, **kwargs): - if meta is None: - meta = {} - for k, v in kwargs.items(): - self.vars.set(k, v, **meta) diff --git a/plugins/module_utils/mh/module_helper.py b/plugins/module_utils/mh/module_helper.py index 71731411e0..fdce99045c 100644 --- a/plugins/module_utils/mh/module_helper.py +++ b/plugins/module_utils/mh/module_helper.py @@ -1,33 +1,30 @@ -# -*- coding: utf-8 -*- -# (c) 2020, Alexei Znamensky -# Copyright: (c) 2020, Ansible Project -# Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause) +# (c) 2020-2024, Alexei Znamensky +# Copyright (c) 2020-2024, Ansible Project +# Simplified BSD License (see LICENSES/BSD-2-Clause.txt or https://opensource.org/licenses/BSD-2-Clause) +# SPDX-License-Identifier: BSD-2-Clause + +from __future__ import annotations -from __future__ import absolute_import, division, print_function -__metaclass__ = type from ansible.module_utils.common.dict_transformations import dict_merge -from 
ansible_collections.community.general.plugins.module_utils.mh.base import ModuleHelperBase, AnsibleModule -from ansible_collections.community.general.plugins.module_utils.mh.mixins.cmd import CmdMixin +from ansible_collections.community.general.plugins.module_utils.vardict import VarDict +from ansible_collections.community.general.plugins.module_utils.mh.base import ModuleHelperBase from ansible_collections.community.general.plugins.module_utils.mh.mixins.state import StateMixin -from ansible_collections.community.general.plugins.module_utils.mh.mixins.deps import DependencyMixin -from ansible_collections.community.general.plugins.module_utils.mh.mixins.vars import VarsMixin, VarDict as _VD from ansible_collections.community.general.plugins.module_utils.mh.mixins.deprecate_attrs import DeprecateAttrsMixin -class ModuleHelper(DeprecateAttrsMixin, VarsMixin, DependencyMixin, ModuleHelperBase): - _output_conflict_list = ('msg', 'exception', 'output', 'vars', 'changed') +class ModuleHelper(DeprecateAttrsMixin, ModuleHelperBase): facts_name = None output_params = () diff_params = () change_params = () facts_params = () - VarDict = _VD # for backward compatibility, will be deprecated at some point - def __init__(self, module=None): super(ModuleHelper, self).__init__(module) + + self.vars = VarDict() for name, value in self.module.params.items(): self.vars.set( name, value, @@ -37,15 +34,11 @@ class ModuleHelper(DeprecateAttrsMixin, VarsMixin, DependencyMixin, ModuleHelper fact=name in self.facts_params, ) - self._deprecate_attr( - attr="VarDict", - msg="ModuleHelper.VarDict attribute is deprecated, use VarDict from " - "the ansible_collections.community.general.plugins.module_utils.mh.mixins.vars module instead", - version="6.0.0", - collection_name="community.general", - target=ModuleHelper, - module=self.module, - ) + def update_vars(self, meta=None, **kwargs): + if meta is None: + meta = {} + for k, v in kwargs.items(): + self.vars.set(k, v, **meta) def 
update_output(self, **kwargs): self.update_vars(meta={"output": True}, **kwargs) @@ -54,7 +47,7 @@ class ModuleHelper(DeprecateAttrsMixin, VarsMixin, DependencyMixin, ModuleHelper self.update_vars(meta={"fact": True}, **kwargs) def _vars_changed(self): - return any(self.vars.has_changed(v) for v in self.vars.change_vars()) + return self.vars.has_changed def has_changed(self): return self.changed or self._vars_changed() @@ -71,20 +64,8 @@ class ModuleHelper(DeprecateAttrsMixin, VarsMixin, DependencyMixin, ModuleHelper vars_diff = self.vars.diff() or {} result['diff'] = dict_merge(dict(diff), vars_diff) - for varname in result: - if varname in self._output_conflict_list: - result["_" + varname] = result[varname] - del result[varname] return result class StateModuleHelper(StateMixin, ModuleHelper): pass - - -class CmdModuleHelper(CmdMixin, ModuleHelper): - pass - - -class CmdStateModuleHelper(CmdMixin, StateMixin, ModuleHelper): - pass diff --git a/plugins/module_utils/module_helper.py b/plugins/module_utils/module_helper.py index a6b35bdd33..f5c6275741 100644 --- a/plugins/module_utils/module_helper.py +++ b/plugins/module_utils/module_helper.py @@ -1,18 +1,16 @@ -# -*- coding: utf-8 -*- # (c) 2020, Alexei Znamensky -# Copyright: (c) 2020, Ansible Project -# Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause) +# Copyright (c) 2020, Ansible Project +# Simplified BSD License (see LICENSES/BSD-2-Clause.txt or https://opensource.org/licenses/BSD-2-Clause) +# SPDX-License-Identifier: BSD-2-Clause -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations +# pylint: disable=unused-import from ansible_collections.community.general.plugins.module_utils.mh.module_helper import ( - ModuleHelper, StateModuleHelper, CmdModuleHelper, CmdStateModuleHelper, AnsibleModule + ModuleHelper, StateModuleHelper, +) +from 
ansible_collections.community.general.plugins.module_utils.mh.exceptions import ModuleHelperException # noqa: F401 +from ansible_collections.community.general.plugins.module_utils.mh.deco import ( + cause_changes, module_fails_on_exception, check_mode_skip, check_mode_skip_returns, ) -from ansible_collections.community.general.plugins.module_utils.mh.mixins.cmd import CmdMixin, ArgFormat -from ansible_collections.community.general.plugins.module_utils.mh.mixins.state import StateMixin -from ansible_collections.community.general.plugins.module_utils.mh.mixins.deps import DependencyCtxMgr -from ansible_collections.community.general.plugins.module_utils.mh.exceptions import ModuleHelperException -from ansible_collections.community.general.plugins.module_utils.mh.deco import cause_changes, module_fails_on_exception -from ansible_collections.community.general.plugins.module_utils.mh.mixins.vars import VarMeta, VarDict diff --git a/plugins/module_utils/net_tools/pritunl/api.py b/plugins/module_utils/net_tools/pritunl/api.py index 91f97ecc96..7d6bd7fe86 100644 --- a/plugins/module_utils/net_tools/pritunl/api.py +++ b/plugins/module_utils/net_tools/pritunl/api.py @@ -1,12 +1,12 @@ -# -*- coding: utf-8 -*- -# Copyright: (c) 2021, Florian Dambrine -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# Copyright (c) 2021, Florian Dambrine +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later """ Pritunl API that offers CRUD operations on Pritunl Organizations and Users """ -from __future__ import absolute_import, division, print_function +from __future__ import annotations import base64 import hashlib @@ -15,11 +15,8 @@ import json import time import uuid -from ansible.module_utils.six import iteritems from ansible.module_utils.urls import open_url -__metaclass__ = type - class PritunlException(Exception): pass @@ -65,7 +62,7 @@ 
def _delete_pritunl_organization( api_token=api_token, api_secret=api_secret, method="DELETE", - path="/organization/%s" % (organization_id), + path=f"/organization/{organization_id}", validate_certs=validate_certs, ) @@ -78,7 +75,7 @@ def _post_pritunl_organization( api_secret=api_secret, base_url=base_url, method="POST", - path="/organization/%s", + path="/organization", headers={"Content-Type": "application/json"}, data=json.dumps(organization_data), validate_certs=validate_certs, @@ -93,7 +90,7 @@ def _get_pritunl_users( api_secret=api_secret, base_url=base_url, method="GET", - path="/user/%s" % organization_id, + path=f"/user/{organization_id}", validate_certs=validate_certs, ) @@ -106,7 +103,7 @@ def _delete_pritunl_user( api_secret=api_secret, base_url=base_url, method="DELETE", - path="/user/%s/%s" % (organization_id, user_id), + path=f"/user/{organization_id}/{user_id}", validate_certs=validate_certs, ) @@ -119,7 +116,7 @@ def _post_pritunl_user( api_secret=api_secret, base_url=base_url, method="POST", - path="/user/%s" % organization_id, + path=f"/user/{organization_id}", headers={"Content-Type": "application/json"}, data=json.dumps(user_data), validate_certs=validate_certs, @@ -140,7 +137,7 @@ def _put_pritunl_user( api_secret=api_secret, base_url=base_url, method="PUT", - path="/user/%s/%s" % (organization_id, user_id), + path=f"/user/{organization_id}/{user_id}", headers={"Content-Type": "application/json"}, data=json.dumps(user_data), validate_certs=validate_certs, @@ -169,7 +166,7 @@ def list_pritunl_organizations( else: if not any( filter_val != org[filter_key] - for filter_key, filter_val in iteritems(filters) + for filter_key, filter_val in filters.items() ): orgs.append(org) @@ -200,7 +197,7 @@ def list_pritunl_users( else: if not any( filter_val != user[filter_key] - for filter_key, filter_val in iteritems(filters) + for filter_key, filter_val in filters.items() ): users.append(user) @@ -219,12 +216,12 @@ def post_pritunl_organization( 
api_secret=api_secret, base_url=base_url, organization_data={"name": organization_name}, - validate_certs=True, + validate_certs=validate_certs, ) if response.getcode() != 200: raise PritunlException( - "Could not add organization %s to Pritunl" % (organization_name) + f"Could not add organization {organization_name} to Pritunl" ) # The user PUT request returns the updated user object return json.loads(response.read()) @@ -247,13 +244,12 @@ def post_pritunl_user( base_url=base_url, organization_id=organization_id, user_data=user_data, - validate_certs=True, + validate_certs=validate_certs, ) if response.getcode() != 200: raise PritunlException( - "Could not remove user %s from organization %s from Pritunl" - % (user_id, organization_id) + f"Could not remove user {user_id} from organization {organization_id} from Pritunl" ) # user POST request returns an array of a single item, # so return this item instead of the list @@ -266,13 +262,12 @@ def post_pritunl_user( organization_id=organization_id, user_data=user_data, user_id=user_id, - validate_certs=True, + validate_certs=validate_certs, ) if response.getcode() != 200: raise PritunlException( - "Could not update user %s from organization %s from Pritunl" - % (user_id, organization_id) + f"Could not update user {user_id} from organization {organization_id} from Pritunl" ) # The user PUT request returns the updated user object return json.loads(response.read()) @@ -286,12 +281,12 @@ def delete_pritunl_organization( api_secret=api_secret, base_url=base_url, organization_id=organization_id, - validate_certs=True, + validate_certs=validate_certs, ) if response.getcode() != 200: raise PritunlException( - "Could not remove organization %s from Pritunl" % (organization_id) + f"Could not remove organization {organization_id} from Pritunl" ) return json.loads(response.read()) @@ -306,13 +301,12 @@ def delete_pritunl_user( base_url=base_url, organization_id=organization_id, user_id=user_id, - validate_certs=True, + 
validate_certs=validate_certs, ) if response.getcode() != 200: raise PritunlException( - "Could not remove user %s from organization %s from Pritunl" - % (user_id, organization_id) + f"Could not remove user {user_id} from organization {organization_id} from Pritunl" ) return json.loads(response.read()) @@ -330,14 +324,12 @@ def pritunl_auth_request( ): """ Send an API call to a Pritunl server. - Taken from https://pritunl.com/api and adaped work with Ansible open_url + Taken from https://pritunl.com/api and adapted to work with Ansible open_url """ auth_timestamp = str(int(time.time())) auth_nonce = uuid.uuid4().hex - auth_string = "&".join( - [api_token, auth_timestamp, auth_nonce, method.upper(), path] - ) + auth_string = f"{api_token}&{auth_timestamp}&{auth_nonce}&{method.upper()}&{path}" auth_signature = base64.b64encode( hmac.new( @@ -356,7 +348,7 @@ def pritunl_auth_request( auth_headers.update(headers) try: - uri = "%s%s" % (base_url, path) + uri = f"{base_url}{path}" return open_url( uri, diff --git a/plugins/module_utils/ocapi_utils.py b/plugins/module_utils/ocapi_utils.py new file mode 100644 index 0000000000..fd606d9bcc --- /dev/null +++ b/plugins/module_utils/ocapi_utils.py @@ -0,0 +1,491 @@ +# Copyright (c) 2022 Western Digital Corporation +# GNU General Public License v3.0+ (see LICENSE or https://www.gnu.org/licenses/gpl-3.0.txt) +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + +import json +import os +import uuid +from urllib.error import URLError, HTTPError +from urllib.parse import urlparse + +from ansible.module_utils.urls import open_url +from ansible.module_utils.common.text.converters import to_native + + +GET_HEADERS = {'accept': 'application/json'} +PUT_HEADERS = {'content-type': 'application/json', 'accept': 'application/json'} +POST_HEADERS = {'content-type': 'application/json', 'accept': 
'application/json'} +DELETE_HEADERS = {'accept': 'application/json'} + +HEALTH_OK = 5 + + +class OcapiUtils(object): + + def __init__(self, creds, base_uri, proxy_slot_number, timeout, module): + self.root_uri = base_uri + self.proxy_slot_number = proxy_slot_number + self.creds = creds + self.timeout = timeout + self.module = module + + def _auth_params(self): + """ + Return tuple of required authentication params based on the username and password. + + :return: tuple of username, password + """ + username = self.creds['user'] + password = self.creds['pswd'] + force_basic_auth = True + return username, password, force_basic_auth + + def get_request(self, uri): + req_headers = dict(GET_HEADERS) + username, password, basic_auth = self._auth_params() + try: + resp = open_url(uri, method="GET", headers=req_headers, + url_username=username, url_password=password, + force_basic_auth=basic_auth, validate_certs=False, + follow_redirects='all', + use_proxy=True, timeout=self.timeout) + data = json.loads(to_native(resp.read())) + headers = {k.lower(): v for (k, v) in resp.info().items()} + except HTTPError as e: + return {'ret': False, + 'msg': f"HTTP Error {e.code} on GET request to '{uri}'", + 'status': e.code} + except URLError as e: + return {'ret': False, 'msg': f"URL Error on GET request to '{uri}': '{e.reason}'"} + # Almost all errors should be caught above, but just in case + except Exception as e: + return {'ret': False, + 'msg': f"Failed GET request to '{uri}': '{e}'"} + return {'ret': True, 'data': data, 'headers': headers} + + def delete_request(self, uri, etag=None): + req_headers = dict(DELETE_HEADERS) + if etag is not None: + req_headers['If-Match'] = etag + username, password, basic_auth = self._auth_params() + try: + resp = open_url(uri, method="DELETE", headers=req_headers, + url_username=username, url_password=password, + force_basic_auth=basic_auth, validate_certs=False, + follow_redirects='all', + use_proxy=True, timeout=self.timeout) + if resp.status != 
204: + data = json.loads(to_native(resp.read())) + else: + data = "" + headers = {k.lower(): v for (k, v) in resp.info().items()} + except HTTPError as e: + return {'ret': False, + 'msg': f"HTTP Error {e.code} on DELETE request to '{uri}'", + 'status': e.code} + except URLError as e: + return {'ret': False, 'msg': f"URL Error on DELETE request to '{uri}': '{e.reason}'"} + # Almost all errors should be caught above, but just in case + except Exception as e: + return {'ret': False, + 'msg': f"Failed DELETE request to '{uri}': '{e}'"} + return {'ret': True, 'data': data, 'headers': headers} + + def put_request(self, uri, payload, etag=None): + req_headers = dict(PUT_HEADERS) + if etag is not None: + req_headers['If-Match'] = etag + username, password, basic_auth = self._auth_params() + try: + resp = open_url(uri, data=json.dumps(payload), + headers=req_headers, method="PUT", + url_username=username, url_password=password, + force_basic_auth=basic_auth, validate_certs=False, + follow_redirects='all', + use_proxy=True, timeout=self.timeout) + headers = {k.lower(): v for (k, v) in resp.info().items()} + except HTTPError as e: + return {'ret': False, + 'msg': f"HTTP Error {e.code} on PUT request to '{uri}'", + 'status': e.code} + except URLError as e: + return {'ret': False, 'msg': f"URL Error on PUT request to '{uri}': '{e.reason}'"} + # Almost all errors should be caught above, but just in case + except Exception as e: + return {'ret': False, + 'msg': f"Failed PUT request to '{uri}': '{e}'"} + return {'ret': True, 'headers': headers, 'resp': resp} + + def post_request(self, uri, payload, content_type="application/json", timeout=None): + req_headers = dict(POST_HEADERS) + if content_type != "application/json": + req_headers["content-type"] = content_type + username, password, basic_auth = self._auth_params() + if content_type == "application/json": + request_data = json.dumps(payload) + else: + request_data = payload + try: + resp = open_url(uri, data=request_data, + 
headers=req_headers, method="POST", + url_username=username, url_password=password, + force_basic_auth=basic_auth, validate_certs=False, + follow_redirects='all', + use_proxy=True, timeout=self.timeout if timeout is None else timeout) + headers = {k.lower(): v for (k, v) in resp.info().items()} + except HTTPError as e: + return {'ret': False, + 'msg': f"HTTP Error {e.code} on POST request to '{uri}'", + 'status': e.code} + except URLError as e: + return {'ret': False, 'msg': f"URL Error on POST request to '{uri}': '{e.reason}'"} + # Almost all errors should be caught above, but just in case + except Exception as e: + return {'ret': False, + 'msg': f"Failed POST request to '{uri}': '{e}'"} + return {'ret': True, 'headers': headers, 'resp': resp} + + def get_uri_with_slot_number_query_param(self, uri): + """Return the URI with proxy slot number added as a query param, if there is one. + + If a proxy slot number is provided, to access it, we must append it as a query parameter. + This method returns the given URI with the slotnumber query param added, if there is one. + If there is not a proxy slot number, it just returns the URI as it was passed in. + """ + if self.proxy_slot_number is not None: + parsed_url = urlparse(uri) + return parsed_url._replace(query=f"slotnumber={self.proxy_slot_number}").geturl() + else: + return uri + + def manage_system_power(self, command): + """Process a command to manage the system power. + + :param str command: The Ansible command being processed. 
+ """ + if command == "PowerGracefulRestart": + resource_uri = self.root_uri + resource_uri = self.get_uri_with_slot_number_query_param(resource_uri) + + # Get the resource so that we have the Etag + response = self.get_request(resource_uri) + if 'etag' not in response['headers']: + return {'ret': False, 'msg': 'Etag not found in response.'} + etag = response['headers']['etag'] + if response['ret'] is False: + return response + + # Issue the PUT to do the reboot (unless we are in check mode) + if self.module.check_mode: + return { + 'ret': True, + 'changed': True, + 'msg': 'Update not performed in check mode.' + } + payload = {'Reboot': True} + response = self.put_request(resource_uri, payload, etag) + if response['ret'] is False: + return response + elif command.startswith("PowerMode"): + return self.manage_power_mode(command) + else: + return {'ret': False, 'msg': f"Invalid command: {command}"} + + return {'ret': True} + + def manage_chassis_indicator_led(self, command): + """Process a command to manage the chassis indicator LED. + + :param string command: The Ansible command being processed. + """ + return self.manage_indicator_led(command, self.root_uri) + + def manage_indicator_led(self, command, resource_uri=None): + """Process a command to manage an indicator LED. + + :param string command: The Ansible command being processed. + :param string resource_uri: URI of the resource whose indicator LED is being managed. 
+ """ + key = "IndicatorLED" + if resource_uri is None: + resource_uri = self.root_uri + resource_uri = self.get_uri_with_slot_number_query_param(resource_uri) + + payloads = { + 'IndicatorLedOn': { + 'ID': 2 + }, + 'IndicatorLedOff': { + 'ID': 4 + } + } + + response = self.get_request(resource_uri) + if 'etag' not in response['headers']: + return {'ret': False, 'msg': 'Etag not found in response.'} + etag = response['headers']['etag'] + if response['ret'] is False: + return response + data = response['data'] + if key not in data: + return {'ret': False, 'msg': f"Key {key} not found"} + if 'ID' not in data[key]: + return {'ret': False, 'msg': 'IndicatorLED for resource has no ID.'} + + if command in payloads.keys(): + # See if the LED is already set as requested. + current_led_status = data[key]['ID'] + if current_led_status == payloads[command]['ID']: + return {'ret': True, 'changed': False} + + # Set the LED (unless we are in check mode) + if self.module.check_mode: + return { + 'ret': True, + 'changed': True, + 'msg': 'Update not performed in check mode.' 
+ } + payload = {'IndicatorLED': payloads[command]} + response = self.put_request(resource_uri, payload, etag) + if response['ret'] is False: + return response + else: + return {'ret': False, 'msg': 'Invalid command'} + + return {'ret': True} + + def manage_power_mode(self, command): + key = "PowerState" + resource_uri = self.get_uri_with_slot_number_query_param(self.root_uri) + + payloads = { + "PowerModeNormal": 2, + "PowerModeLow": 4 + } + + response = self.get_request(resource_uri) + if 'etag' not in response['headers']: + return {'ret': False, 'msg': 'Etag not found in response.'} + etag = response['headers']['etag'] + if response['ret'] is False: + return response + data = response['data'] + if key not in data: + return {'ret': False, 'msg': f"Key {key} not found"} + if 'ID' not in data[key]: + return {'ret': False, 'msg': 'PowerState for resource has no ID.'} + + if command in payloads.keys(): + # See if the PowerState is already set as requested. + current_power_state = data[key]['ID'] + if current_power_state == payloads[command]: + return {'ret': True, 'changed': False} + + # Set the Power State (unless we are in check mode) + if self.module.check_mode: + return { + 'ret': True, + 'changed': True, + 'msg': 'Update not performed in check mode.' + } + payload = {'PowerState': {"ID": payloads[command]}} + response = self.put_request(resource_uri, payload, etag) + if response['ret'] is False: + return response + else: + return {'ret': False, 'msg': f"Invalid command: {command}"} + + return {'ret': True} + + def prepare_multipart_firmware_upload(self, filename): + """Prepare a multipart/form-data body for OCAPI firmware upload. + + :arg filename: The name of the file to upload. + :returns: tuple of (content_type, body) where ``content_type`` is + the ``multipart/form-data`` ``Content-Type`` header including + ``boundary`` and ``body`` is the prepared bytestring body + + Prepares the body to include "FirmwareFile" field with the contents of the file. 
+ Because some OCAPI targets do not support Base-64 encoding for multipart/form-data, + this method sends the file as binary. + """ + boundary = str(uuid.uuid4()) # Generate a random boundary + body = f"--{boundary}\r\n" + body += f'Content-Disposition: form-data; name="FirmwareFile"; filename="{to_native(os.path.basename(filename))}"\r\n' + body += 'Content-Type: application/octet-stream\r\n\r\n' + body_bytes = bytearray(body, 'utf-8') + with open(filename, 'rb') as f: + body_bytes += f.read() + body_bytes += bytearray(f"\r\n--{boundary}--", 'utf-8') + return (f"multipart/form-data; boundary={boundary}", + body_bytes) + + def upload_firmware_image(self, update_image_path): + """Perform Firmware Upload to the OCAPI storage device. + + :param str update_image_path: The path/filename of the firmware image, on the local filesystem. + """ + if not (os.path.exists(update_image_path) and os.path.isfile(update_image_path)): + return {'ret': False, 'msg': 'File does not exist.'} + url = f"{self.root_uri}OperatingSystem" + url = self.get_uri_with_slot_number_query_param(url) + content_type, b_form_data = self.prepare_multipart_firmware_upload(update_image_path) + + # Post the firmware (unless we are in check mode) + if self.module.check_mode: + return { + 'ret': True, + 'changed': True, + 'msg': 'Update not performed in check mode.' + } + result = self.post_request(url, b_form_data, content_type=content_type, timeout=300) + if result['ret'] is False: + return result + return {'ret': True} + + def update_firmware_image(self): + """Perform a Firmware Update on the OCAPI storage device.""" + resource_uri = self.root_uri + resource_uri = self.get_uri_with_slot_number_query_param(resource_uri) + # We have to do a GET to obtain the Etag. It's required on the PUT. 
+ response = self.get_request(resource_uri) + if response['ret'] is False: + return response + if 'etag' not in response['headers']: + return {'ret': False, 'msg': 'Etag not found in response.'} + etag = response['headers']['etag'] + + # Issue the PUT (unless we are in check mode) + if self.module.check_mode: + return { + 'ret': True, + 'changed': True, + 'msg': 'Update not performed in check mode.' + } + payload = {'FirmwareUpdate': True} + response = self.put_request(resource_uri, payload, etag) + if response['ret'] is False: + return response + + return {'ret': True, 'jobUri': response["headers"]["location"]} + + def activate_firmware_image(self): + """Perform a Firmware Activate on the OCAPI storage device.""" + resource_uri = self.root_uri + resource_uri = self.get_uri_with_slot_number_query_param(resource_uri) + # We have to do a GET to obtain the Etag. It's required on the PUT. + response = self.get_request(resource_uri) + if 'etag' not in response['headers']: + return {'ret': False, 'msg': 'Etag not found in response.'} + etag = response['headers']['etag'] + if response['ret'] is False: + return response + + # Issue the PUT (unless we are in check mode) + if self.module.check_mode: + return { + 'ret': True, + 'changed': True, + 'msg': 'Update not performed in check mode.' + } + payload = {'FirmwareActivate': True} + response = self.put_request(resource_uri, payload, etag) + if response['ret'] is False: + return response + + return {'ret': True, 'jobUri': response["headers"]["location"]} + + def get_job_status(self, job_uri): + """Get the status of a job. + + :param str job_uri: The URI of the job's status monitor. 
+ """ + job_uri = self.get_uri_with_slot_number_query_param(job_uri) + response = self.get_request(job_uri) + if response['ret'] is False: + if response.get('status') == 404: + # Job not found -- assume 0% + return { + "ret": True, + "percentComplete": 0, + "operationStatus": "Not Available", + "operationStatusId": 1, + "operationHealth": None, + "operationHealthId": None, + "details": "Job does not exist.", + "jobExists": False + } + else: + return response + details = response["data"]["Status"].get("Details") + if isinstance(details, str): + details = [details] + health_list = response["data"]["Status"]["Health"] + return_value = { + "ret": True, + "percentComplete": response["data"]["PercentComplete"], + "operationStatus": response["data"]["Status"]["State"]["Name"], + "operationStatusId": response["data"]["Status"]["State"]["ID"], + "operationHealth": health_list[0]["Name"] if len(health_list) > 0 else None, + "operationHealthId": health_list[0]["ID"] if len(health_list) > 0 else None, + "details": details, + "jobExists": True + } + return return_value + + def delete_job(self, job_uri): + """Delete the OCAPI job referenced by the specified job_uri.""" + job_uri = self.get_uri_with_slot_number_query_param(job_uri) + # We have to do a GET to obtain the Etag. It's required on the DELETE. + response = self.get_request(job_uri) + + if response['ret'] is True: + if 'etag' not in response['headers']: + return {'ret': False, 'msg': 'Etag not found in response.'} + else: + etag = response['headers']['etag'] + + if response['data']['PercentComplete'] != 100: + return { + 'ret': False, + 'changed': False, + 'msg': 'Cannot delete job because it is in progress.' + } + + if response['ret'] is False: + if response['status'] == 404: + return { + 'ret': True, + 'changed': False, + 'msg': 'Job already deleted.' + } + return response + if self.module.check_mode: + return { + 'ret': True, + 'changed': True, + 'msg': 'Update not performed in check mode.' 
+ } + + # Do the DELETE (unless we are in check mode) + response = self.delete_request(job_uri, etag) + if response['ret'] is False: + if response['status'] == 404: + return { + 'ret': True, + 'changed': False + } + elif response['status'] == 409: + return { + 'ret': False, + 'changed': False, + 'msg': 'Cannot delete job because it is in progress.' + } + return response + return { + 'ret': True, + 'changed': True + } diff --git a/plugins/module_utils/oneandone.py b/plugins/module_utils/oneandone.py index 5f65b670f3..1c9cb73d73 100644 --- a/plugins/module_utils/oneandone.py +++ b/plugins/module_utils/oneandone.py @@ -1,14 +1,8 @@ -# -*- coding: utf-8 -*- -# This code is part of Ansible, but is an independent component. -# This particular file snippet, and this file snippet only, is BSD licensed. -# Modules you write using this snippet, which is embedded dynamically by Ansible -# still belong to the author of the module, and may assign their own license -# to the complete work. -# -# Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause) +# Copyright (c) Ansible project +# Simplified BSD License (see LICENSES/BSD-2-Clause.txt or https://opensource.org/licenses/BSD-2-Clause) +# SPDX-License-Identifier: BSD-2-Clause -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations import time @@ -215,7 +209,7 @@ def wait_for_resource_creation_completion(oneandone_conn, (resource_type != OneAndOneResources.server and resource_state.lower() == 'active')): return elif resource_state.lower() == 'failed': - raise Exception('%s creation failed for %s' % (resource_type, resource_id)) + raise Exception(f'{resource_type} creation failed for {resource_id}') elif resource_state.lower() in ('active', 'enabled', 'deploying', @@ -223,10 +217,10 @@ def wait_for_resource_creation_completion(oneandone_conn, continue else: raise Exception( - 'Unknown %s state %s' % 
(resource_type, resource_state)) + f'Unknown {resource_type} state {resource_state}') raise Exception( - 'Timed out waiting for %s completion for %s' % (resource_type, resource_id)) + f'Timed out waiting for {resource_type} completion for {resource_id}') def wait_for_resource_deletion_completion(oneandone_conn, @@ -252,7 +246,7 @@ def wait_for_resource_deletion_completion(oneandone_conn, _type = 'PRIVATENETWORK' else: raise Exception( - 'Unsupported wait_for delete operation for %s resource' % resource_type) + f'Unsupported wait_for delete operation for {resource_type} resource') for log in logs: if (log['resource']['id'] == resource_id and @@ -261,4 +255,4 @@ def wait_for_resource_deletion_completion(oneandone_conn, log['status']['state'] == 'OK'): return raise Exception( - 'Timed out waiting for %s deletion for %s' % (resource_type, resource_id)) + f'Timed out waiting for {resource_type} deletion for {resource_id}') diff --git a/plugins/module_utils/onepassword.py b/plugins/module_utils/onepassword.py index 3a86e22e16..5e52a9af41 100644 --- a/plugins/module_utils/onepassword.py +++ b/plugins/module_utils/onepassword.py @@ -1,8 +1,8 @@ -# -*- coding: utf-8 -*- -# Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause) +# Copyright (c) Ansible project +# Simplified BSD License (see LICENSES/BSD-2-Clause.txt or https://opensource.org/licenses/BSD-2-Clause) +# SPDX-License-Identifier: BSD-2-Clause -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations import os diff --git a/plugins/module_utils/oneview.py b/plugins/module_utils/oneview.py index 6d786b0b80..1f57355f58 100644 --- a/plugins/module_utils/oneview.py +++ b/plugins/module_utils/oneview.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # This code is part of Ansible, but is an independent component. # This particular file snippet, and this file snippet only, is BSD licensed. 
# Modules you write using this snippet, which is embedded dynamically by Ansible @@ -7,16 +6,16 @@ # # Copyright (2016-2017) Hewlett Packard Enterprise Development LP # -# Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause) +# Simplified BSD License (see LICENSES/BSD-2-Clause.txt or https://opensource.org/licenses/BSD-2-Clause) +# SPDX-License-Identifier: BSD-2-Clause -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations import abc import collections import json -import os import traceback +from collections.abc import Mapping HPE_ONEVIEW_IMP_ERR = None try: @@ -26,10 +25,8 @@ except ImportError: HPE_ONEVIEW_IMP_ERR = traceback.format_exc() HAS_HPE_ONEVIEW = False -from ansible.module_utils import six from ansible.module_utils.basic import AnsibleModule, missing_required_lib from ansible.module_utils.common.text.converters import to_native -from ansible.module_utils.common._collections_compat import Mapping def transform_list_to_dict(list_): @@ -130,7 +127,7 @@ class OneViewModuleException(Exception): self.msg = None self.oneview_response = None - if isinstance(data, six.string_types): + if isinstance(data, str): self.msg = data else: self.oneview_response = data @@ -180,8 +177,7 @@ class OneViewModuleResourceNotFound(OneViewModuleException): pass -@six.add_metaclass(abc.ABCMeta) -class OneViewModuleBase(object): +class OneViewModuleBase(object, metaclass=abc.ABCMeta): MSG_CREATED = 'Resource created successfully.' MSG_UPDATED = 'Resource updated successfully.' MSG_DELETED = 'Resource deleted successfully.' 
@@ -399,11 +395,11 @@ class OneViewModuleBase(object): resource1 = first_resource resource2 = second_resource - debug_resources = "resource1 = {0}, resource2 = {1}".format(resource1, resource2) + debug_resources = f"resource1 = {resource1}, resource2 = {resource2}" # The first resource is True / Not Null and the second resource is False / Null if resource1 and not resource2: - self.module.log("resource1 and not resource2. " + debug_resources) + self.module.log(f"resource1 and not resource2. {debug_resources}") return False # Checks all keys in first dict against the second dict @@ -453,15 +449,15 @@ class OneViewModuleBase(object): resource1 = first_resource resource2 = second_resource - debug_resources = "resource1 = {0}, resource2 = {1}".format(resource1, resource2) + debug_resources = f"resource1 = {resource1}, resource2 = {resource2}" # The second list is null / empty / False if not resource2: - self.module.log("resource 2 is null. " + debug_resources) + self.module.log(f"resource 2 is null. {debug_resources}") return False if len(resource1) != len(resource2): - self.module.log("resources have different length. " + debug_resources) + self.module.log(f"resources have different length. {debug_resources}") return False resource1 = sorted(resource1, key=_str_sorted) @@ -471,15 +467,15 @@ class OneViewModuleBase(object): if isinstance(val, Mapping): # change comparison function to compare dictionaries if not self.compare(val, resource2[i]): - self.module.log("resources are different. " + debug_resources) + self.module.log(f"resources are different. {debug_resources}") return False elif isinstance(val, list): # recursive call if not self.compare_list(val, resource2[i]): - self.module.log("lists are different. " + debug_resources) + self.module.log(f"lists are different. {debug_resources}") return False elif _standardize_value(val) != _standardize_value(resource2[i]): - self.module.log("values are different. 
" + debug_resources) + self.module.log(f"values are different. {debug_resources}") return False # no differences found diff --git a/plugins/module_utils/online.py b/plugins/module_utils/online.py index b5acbcc017..303abffab2 100644 --- a/plugins/module_utils/online.py +++ b/plugins/module_utils/online.py @@ -1,8 +1,8 @@ -# -*- coding: utf-8 -*- -# Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause) +# Copyright (c) Ansible project +# Simplified BSD License (see LICENSES/BSD-2-Clause.txt or https://opensource.org/licenses/BSD-2-Clause) +# SPDX-License-Identifier: BSD-2-Clause -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations import json import sys diff --git a/plugins/module_utils/opennebula.py b/plugins/module_utils/opennebula.py index c896a9c6fa..ce9ec76b0d 100644 --- a/plugins/module_utils/opennebula.py +++ b/plugins/module_utils/opennebula.py @@ -1,20 +1,19 @@ -# -*- coding: utf-8 -*- # # Copyright 2018 www.privaz.io Valletech AB # -# Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause) +# Simplified BSD License (see LICENSES/BSD-2-Clause.txt or https://opensource.org/licenses/BSD-2-Clause) +# SPDX-License-Identifier: BSD-2-Clause -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations import time import ssl from os import environ -from ansible.module_utils.six import string_types from ansible.module_utils.basic import AnsibleModule +IMAGE_STATES = ['INIT', 'READY', 'USED', 'DISABLED', 'LOCKED', 'ERROR', 'CLONE', 'DELETE', 'USED_PERS', 'LOCKED_USED', 'LOCKED_USED_PERS'] HAS_PYONE = True try: @@ -25,6 +24,41 @@ except ImportError: HAS_PYONE = False +# A helper function to mitigate https://github.com/OpenNebula/one/issues/6064. 
+# It allows for easily handling lists like "NIC" or "DISK" in the JSON-like template representation. +# There are either lists of dictionaries (length > 1) or just dictionaries. +def flatten(to_flatten, extract=False): + """Flattens nested lists (with optional value extraction).""" + def recurse(to_flatten): + return sum(map(recurse, to_flatten), []) if isinstance(to_flatten, list) else [to_flatten] + value = recurse(to_flatten) + if extract and len(value) == 1: + return value[0] + return value + + +# A helper function to mitigate https://github.com/OpenNebula/one/issues/6064. +# It renders JSON-like template representation into OpenNebula's template syntax (string). +def render(to_render): + """Converts dictionary to OpenNebula template.""" + def recurse(to_render): + for key, value in sorted(to_render.items()): + if value is None: + continue + if isinstance(value, dict): + yield f"{key}=[{','.join(recurse(value))}]" + continue + if isinstance(value, list): + for item in value: + yield f"{key}=[{','.join(recurse(item))}]" + continue + if isinstance(value, str): + yield '{0:}="{1:}"'.format(key, value.replace('\\', '\\\\').replace('"', '\\"')) + continue + yield f'{key}="{value}"' + return '\n'.join(recurse(to_render)) + + class OpenNebulaModule: """ Base class for all OpenNebula Ansible Modules. 
@@ -83,14 +117,14 @@ class OpenNebulaModule: if self.module.params.get("api_username"): username = self.module.params.get("api_username") else: - self.fail("Either api_username or the environment vairable ONE_USERNAME must be provided") + self.fail("Either api_username or the environment variable ONE_USERNAME must be provided") if self.module.params.get("api_password"): password = self.module.params.get("api_password") else: - self.fail("Either api_password or the environment vairable ONE_PASSWORD must be provided") + self.fail("Either api_password or the environment variable ONE_PASSWORD must be provided") - session = "%s:%s" % (username, password) + session = f"{username}:{password}" if not self.module.params.get("validate_certs") and "PYTHONHTTPSVERIFY" not in environ: return OneServer(url, session=session, context=no_ssl_validation_context) @@ -228,7 +262,7 @@ class OpenNebulaModule: self.cast_template(template[key]) elif isinstance(value, list): template[key] = ', '.join(value) - elif not isinstance(value, string_types): + elif not isinstance(value, str): template[key] = str(value) def requires_template_update(self, current, desired): @@ -278,11 +312,11 @@ class OpenNebulaModule: current_state = state() if current_state in invalid_states: - self.fail('invalid %s state %s' % (element_name, state_name(current_state))) + self.fail(f'invalid {element_name} state {state_name(current_state)}') if transition_states: if current_state not in transition_states: - self.fail('invalid %s transition state %s' % (element_name, state_name(current_state))) + self.fail(f'invalid {element_name} transition state {state_name(current_state)}') if current_state in target_states: return True @@ -300,7 +334,7 @@ class OpenNebulaModule: try: self.run(self.one, self.module, self.result) except OneException as e: - self.fail(msg="OpenNebula Exception: %s" % e) + self.fail(msg=f"OpenNebula Exception: {e}") def run(self, one, module, result): """ @@ -311,3 +345,90 @@ class 
OpenNebulaModule: result: the Ansible result """ raise NotImplementedError("Method requires implementation") + + def get_image_list_id(self, image, element): + """ + This is a helper function for get_image_info to iterate over a simple list of objects + """ + list_of_id = [] + + if element == 'VMS': + image_list = image.VMS + if element == 'CLONES': + image_list = image.CLONES + if element == 'APP_CLONES': + image_list = image.APP_CLONES + + for iter in image_list.ID: + list_of_id.append( + # These are optional so firstly check for presence + getattr(iter, 'ID', 'Null'), + ) + return list_of_id + + def get_image_snapshots_list(self, image): + """ + This is a helper function for get_image_info to iterate over a dictionary + """ + list_of_snapshots = [] + + for iter in image.SNAPSHOTS.SNAPSHOT: + list_of_snapshots.append({ + 'date': iter['DATE'], + 'parent': iter['PARENT'], + 'size': iter['SIZE'], + # These are optional so firstly check for presence + 'allow_orhans': getattr(image.SNAPSHOTS, 'ALLOW_ORPHANS', 'Null'), + 'children': getattr(iter, 'CHILDREN', 'Null'), + 'active': getattr(iter, 'ACTIVE', 'Null'), + 'name': getattr(iter, 'NAME', 'Null'), + }) + return list_of_snapshots + + def get_image_info(self, image): + """ + This method is used by one_image and one_image_info modules to retrieve + information from XSD scheme of an image + Returns: a copy of the parameters that includes the resolved parameters. 
+ """ + info = { + 'id': image.ID, + 'name': image.NAME, + 'state': IMAGE_STATES[image.STATE], + 'running_vms': image.RUNNING_VMS, + 'used': bool(image.RUNNING_VMS), + 'user_name': image.UNAME, + 'user_id': image.UID, + 'group_name': image.GNAME, + 'group_id': image.GID, + 'permissions': { + 'owner_u': image.PERMISSIONS.OWNER_U, + 'owner_m': image.PERMISSIONS.OWNER_M, + 'owner_a': image.PERMISSIONS.OWNER_A, + 'group_u': image.PERMISSIONS.GROUP_U, + 'group_m': image.PERMISSIONS.GROUP_M, + 'group_a': image.PERMISSIONS.GROUP_A, + 'other_u': image.PERMISSIONS.OTHER_U, + 'other_m': image.PERMISSIONS.OTHER_M, + 'other_a': image.PERMISSIONS.OTHER_A + }, + 'type': image.TYPE, + 'disk_type': image.DISK_TYPE, + 'persistent': image.PERSISTENT, + 'regtime': image.REGTIME, + 'source': image.SOURCE, + 'path': image.PATH, + 'fstype': getattr(image, 'FSTYPE', 'Null'), + 'size': image.SIZE, + 'cloning_ops': image.CLONING_OPS, + 'cloning_id': image.CLONING_ID, + 'target_snapshot': image.TARGET_SNAPSHOT, + 'datastore_id': image.DATASTORE_ID, + 'datastore': image.DATASTORE, + 'vms': self.get_image_list_id(image, 'VMS'), + 'clones': self.get_image_list_id(image, 'CLONES'), + 'app_clones': self.get_image_list_id(image, 'APP_CLONES'), + 'snapshots': self.get_image_snapshots_list(image), + 'template': image.TEMPLATE, + } + return info diff --git a/plugins/module_utils/oracle/oci_utils.py b/plugins/module_utils/oracle/oci_utils.py index 88e577af5c..0910d24cae 100644 --- a/plugins/module_utils/oracle/oci_utils.py +++ b/plugins/module_utils/oracle/oci_utils.py @@ -1,21 +1,27 @@ -# -*- coding: utf-8 -*- # Copyright (c) 2017, 2018, 2019 Oracle and/or its affiliates. 
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations + +# +# DEPRECATED +# +# This module utils is deprecated and will be removed in community.general 13.0.0 +# import logging import logging.config import os import tempfile -from datetime import datetime +# (TODO: remove next line!) +from datetime import datetime # noqa: F401, pylint: disable=unused-import from operator import eq import time try: - import yaml + import yaml # noqa: F401, pylint: disable=unused-import import oci from oci.constants import HEADER_NEXT_PAGE @@ -40,7 +46,6 @@ except ImportError: from ansible.module_utils.common.text.converters import to_bytes -from ansible.module_utils.six import iteritems __version__ = "1.6.0-dev" @@ -432,7 +437,7 @@ def check_and_update_attributes( target_instance, attr_name, input_value, existing_value, changed ): """ - This function checks the difference between two resource attributes of literal types and sets the attrbute + This function checks the difference between two resource attributes of literal types and sets the attribute value in the target instance type holding the attribute. 
:param target_instance: The instance which contains the attribute whose values to be compared :param attr_name: Name of the attribute whose value required to be compared @@ -559,7 +564,7 @@ def are_lists_equal(s, t): if s is None and t is None: return True - if (s is None and len(t) >= 0) or (t is None and len(s) >= 0) or (len(s) != len(t)): + if s is None or t is None or (len(s) != len(t)): return False if len(s) == 0: @@ -568,7 +573,7 @@ def are_lists_equal(s, t): s = to_dict(s) t = to_dict(t) - if type(s[0]) == dict: + if isinstance(s[0], dict): # Handle list of dicts. Dictionary returned by the API may have additional keys. For example, a get call on # service gateway has an attribute `services` which is a list of `ServiceIdResponseDetails`. This has a key # `service_name` which is not provided in the list of `services` by a user while making an update call; only @@ -602,9 +607,9 @@ def get_attr_to_update(get_fn, kwargs_get, module, update_attributes): user_provided_attr_value = module.params.get(attr, None) unequal_list_attr = ( - type(resources_attr_value) == list or type(user_provided_attr_value) == list + isinstance(resources_attr_value, list) or isinstance(user_provided_attr_value, list) ) and not are_lists_equal(user_provided_attr_value, resources_attr_value) - unequal_attr = type(resources_attr_value) != list and to_dict( + unequal_attr = not isinstance(resources_attr_value, list) and to_dict( resources_attr_value ) != to_dict(user_provided_attr_value) if unequal_list_attr or unequal_attr: @@ -691,7 +696,7 @@ def check_and_create_resource( :param model: Model used to create a resource. :param exclude_attributes: The attributes which should not be used to distinguish the resource. e.g. display_name, dns_label. - :param dead_states: List of states which can't transition to any of the usable states of the resource. This deafults + :param dead_states: List of states which can't transition to any of the usable states of the resource. 
This defaults to ["TERMINATING", "TERMINATED", "FAULTY", "FAILED", "DELETING", "DELETED", "UNKNOWN_ENUM_VALUE"] :param default_attribute_values: A dictionary containing default values for attributes. :return: A dictionary containing the resource & the "changed" status. e.g. {"vcn":{x:y}, "changed":True} @@ -783,7 +788,7 @@ def _get_attributes_to_consider(exclude_attributes, model, module): attributes_to_consider = list(model.attribute_map) if "freeform_tags" in attributes_to_consider: attributes_to_consider.remove("freeform_tags") - # Temporarily removing node_count as the exisiting resource does not reflect it + # Temporarily removing node_count as the existing resource does not reflect it if "node_count" in attributes_to_consider: attributes_to_consider.remove("node_count") _debug("attributes to consider: {0}".format(attributes_to_consider)) @@ -814,7 +819,7 @@ def is_attr_assigned_default(default_attribute_values, attr, assigned_value): # this is to ensure forward compatibility when the API returns new keys that are not known during # the time when the module author provided default values for the attribute keys = {} - for k, v in iteritems(assigned_value.items()): + for k, v in assigned_value.items().items(): if k in default_val_for_attr: keys[k] = v @@ -934,9 +939,9 @@ def tuplize(d): list_of_tuples = [] key_list = sorted(list(d.keys())) for key in key_list: - if type(d[key]) == list: + if isinstance(d[key], list): # Convert a value which is itself a list of dict to a list of tuples. 
- if d[key] and type(d[key][0]) == dict: + if d[key] and isinstance(d[key][0], dict): sub_tuples = [] for sub_dict in d[key]: sub_tuples.append(tuplize(sub_dict)) @@ -946,7 +951,7 @@ def tuplize(d): list_of_tuples.append((sub_tuples is None, key, sub_tuples)) else: list_of_tuples.append((d[key] is None, key, d[key])) - elif type(d[key]) == dict: + elif isinstance(d[key], dict): tupled_value = tuplize(d[key]) list_of_tuples.append((tupled_value is None, key, tupled_value)) else: @@ -967,13 +972,13 @@ def sort_dictionary(d): """ sorted_d = {} for key in d: - if type(d[key]) == list: - if d[key] and type(d[key][0]) == dict: + if isinstance(d[key], list): + if d[key] and isinstance(d[key][0], dict): sorted_value = sort_list_of_dictionary(d[key]) sorted_d[key] = sorted_value else: sorted_d[key] = sorted(d[key]) - elif type(d[key]) == dict: + elif isinstance(d[key], dict): sorted_d[key] = sort_dictionary(d[key]) else: sorted_d[key] = d[key] @@ -1024,10 +1029,7 @@ def check_if_user_value_matches_resources_attr( return if ( - resources_value_for_attr is None - and len(user_provided_value_for_attr) >= 0 - or user_provided_value_for_attr is None - and len(resources_value_for_attr) >= 0 + resources_value_for_attr is None or user_provided_value_for_attr is None ): res[0] = False return @@ -1042,7 +1044,7 @@ def check_if_user_value_matches_resources_attr( if ( user_provided_value_for_attr - and type(user_provided_value_for_attr[0]) == dict + and isinstance(user_provided_value_for_attr[0], dict) ): # Process a list of dict sorted_user_provided_value_for_attr = sort_list_of_dictionary( @@ -1189,7 +1191,7 @@ def are_dicts_equal( def should_dict_attr_be_excluded(map_option_name, option_key, exclude_list): - """An entry for the Exclude list for excluding a map's key is specifed as a dict with the map option name as the + """An entry for the Exclude list for excluding a map's key is specified as a dict with the map option name as the key, and the value as a list of keys to be 
excluded within that map. For example, if the keys "k1" and "k2" of a map option named "m1" needs to be excluded, the exclude list must have an entry {'m1': ['k1','k2']} """ for exclude_item in exclude_list: @@ -1530,7 +1532,7 @@ def delete_and_wait( result[resource_type] = resource return result # oci.wait_until() returns an instance of oci.util.Sentinel in case the resource is not found. - if type(wait_response) is not Sentinel: + if not isinstance(wait_response, Sentinel): resource = to_dict(wait_response.data) else: resource["lifecycle_state"] = "DELETED" @@ -1545,7 +1547,7 @@ def delete_and_wait( except ServiceError as ex: # DNS API throws a 400 InvalidParameter when a zone id is provided for zone_name_or_id and if the zone # resource is not available, instead of the expected 404. So working around this for now. - if type(client) == oci.dns.DnsClient: + if isinstance(client, oci.dns.DnsClient): if ex.status == 400 and ex.code == "InvalidParameter": _debug( "Resource {0} with {1} already deleted. So returning changed=False".format( @@ -1772,7 +1774,7 @@ def update_class_type_attr_difference( ): """ Checks the difference and updates an attribute which is represented by a class - instance. Not aplicable if the attribute type is a primitive value. + instance. Not applicable if the attribute type is a primitive value. For example, if a class name is A with an attribute x, then if A.x = X(), then only this method works. :param update_class_details The instance which should be updated if there is change in @@ -1934,7 +1936,7 @@ def get_target_resource_from_list( module, list_resource_fn, target_resource_id=None, **kwargs ): """ - Returns a resource filtered by identifer from a list of resources. This method should be + Returns a resource filtered by identifier from a list of resources. This method should be used as an alternative of 'get resource' method when 'get resource' is nor provided by resource api. 
This method returns a wrapper of response object but that should not be used as an input to 'wait_until' utility as this is only a partial wrapper of response object. diff --git a/plugins/module_utils/pacemaker.py b/plugins/module_utils/pacemaker.py new file mode 100644 index 0000000000..355fd55cc2 --- /dev/null +++ b/plugins/module_utils/pacemaker.py @@ -0,0 +1,79 @@ +# Copyright (c) 2025, Dexter Le +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + +import re + +from ansible_collections.community.general.plugins.module_utils.cmd_runner import CmdRunner, cmd_runner_fmt + +_state_map = { + "present": "create", + "absent": "remove", + "cloned": "clone", + "status": "status", + "enabled": "enable", + "disabled": "disable", + "online": "start", + "offline": "stop", + "maintenance": "set", + "config": "config", + "cleanup": "cleanup", +} + + +def fmt_resource_type(value): + return [":".join(value[k] for k in ['resource_standard', 'resource_provider', 'resource_name'] if value.get(k) is not None)] + + +def fmt_resource_operation(value): + cmd = [] + for op in value: + cmd.append("op") + cmd.append(op.get('operation_action')) + for operation_option in op.get('operation_option'): + cmd.append(operation_option) + + return cmd + + +def fmt_resource_argument(value): + return ['--group' if value['argument_action'] == 'group' else value['argument_action']] + value['argument_option'] + + +def get_pacemaker_maintenance_mode(runner): + with runner("cli_action config") as ctx: + rc, out, err = ctx.run(cli_action="property") + maint_mode_re = re.compile(r"maintenance-mode.*true", re.IGNORECASE) + maintenance_mode_output = [line for line in out.splitlines() if maint_mode_re.search(line)] + return bool(maintenance_mode_output) + + +def pacemaker_runner(module, **kwargs): + runner_command = ['pcs'] + runner = CmdRunner( + module, + 
command=runner_command, + arg_formats=dict( + cli_action=cmd_runner_fmt.as_list(), + state=cmd_runner_fmt.as_map(_state_map), + name=cmd_runner_fmt.as_list(), + resource_type=cmd_runner_fmt.as_func(fmt_resource_type), + resource_option=cmd_runner_fmt.as_list(), + resource_operation=cmd_runner_fmt.as_func(fmt_resource_operation), + resource_meta=cmd_runner_fmt.stack(cmd_runner_fmt.as_opt_val)("meta"), + resource_argument=cmd_runner_fmt.as_func(fmt_resource_argument), + resource_clone_ids=cmd_runner_fmt.as_list(), + resource_clone_meta=cmd_runner_fmt.as_list(), + apply_all=cmd_runner_fmt.as_bool("--all"), + agent_validation=cmd_runner_fmt.as_bool("--agent-validation"), + wait=cmd_runner_fmt.as_opt_eq_val("--wait"), + config=cmd_runner_fmt.as_fixed("config"), + force=cmd_runner_fmt.as_bool("--force"), + version=cmd_runner_fmt.as_fixed("--version"), + output_format=cmd_runner_fmt.as_opt_eq_val("--output-format"), + ), + **kwargs + ) + return runner diff --git a/plugins/module_utils/pipx.py b/plugins/module_utils/pipx.py new file mode 100644 index 0000000000..3d81a6c5f2 --- /dev/null +++ b/plugins/module_utils/pipx.py @@ -0,0 +1,119 @@ +# Copyright (c) 2022, Alexei Znamensky +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + + +import json + + +from ansible_collections.community.general.plugins.module_utils.cmd_runner import CmdRunner, cmd_runner_fmt + + +pipx_common_argspec = { + "global": dict(type='bool', default=False), + "executable": dict(type='path'), +} + + +_state_map = dict( + install='install', + install_all='install-all', + present='install', + uninstall='uninstall', + absent='uninstall', + uninstall_all='uninstall-all', + inject='inject', + uninject='uninject', + upgrade='upgrade', + upgrade_shared='upgrade-shared', + upgrade_all='upgrade-all', + reinstall='reinstall', + reinstall_all='reinstall-all', + 
pin='pin', + unpin='unpin', +) + + +def pipx_runner(module, command, **kwargs): + arg_formats = dict( + state=cmd_runner_fmt.as_map(_state_map), + name=cmd_runner_fmt.as_list(), + name_source=cmd_runner_fmt.as_func(cmd_runner_fmt.unpack_args(lambda n, s: [s] if s else [n])), + install_apps=cmd_runner_fmt.as_bool("--include-apps"), + install_deps=cmd_runner_fmt.as_bool("--include-deps"), + inject_packages=cmd_runner_fmt.as_list(), + force=cmd_runner_fmt.as_bool("--force"), + include_injected=cmd_runner_fmt.as_bool("--include-injected"), + index_url=cmd_runner_fmt.as_opt_val('--index-url'), + python=cmd_runner_fmt.as_opt_val('--python'), + system_site_packages=cmd_runner_fmt.as_bool("--system-site-packages"), + _list=cmd_runner_fmt.as_fixed(['list', '--include-injected', '--json']), + editable=cmd_runner_fmt.as_bool("--editable"), + pip_args=cmd_runner_fmt.as_opt_eq_val('--pip-args'), + suffix=cmd_runner_fmt.as_opt_val('--suffix'), + spec_metadata=cmd_runner_fmt.as_list(), + version=cmd_runner_fmt.as_fixed('--version'), + ) + arg_formats["global"] = cmd_runner_fmt.as_bool("--global") + + runner = CmdRunner( + module, + command=command, + arg_formats=arg_formats, + environ_update={'USE_EMOJI': '0', 'PIPX_USE_EMOJI': '0'}, + check_rc=True, + **kwargs + ) + return runner + + +def _make_entry(venv_name, venv, include_injected, include_deps): + entry = { + 'name': venv_name, + 'version': venv['metadata']['main_package']['package_version'], + 'pinned': venv['metadata']['main_package'].get('pinned'), + } + if include_injected: + entry['injected'] = {k: v['package_version'] for k, v in venv['metadata']['injected_packages'].items()} + if include_deps: + entry['dependencies'] = list(venv['metadata']['main_package']['app_paths_of_dependencies']) + return entry + + +def make_process_dict(include_injected, include_deps=False): + def process_dict(rc, out, err): + if not out: + return {} + + results = {} + raw_data = json.loads(out) + for venv_name, venv in 
raw_data['venvs'].items(): + results[venv_name] = _make_entry(venv_name, venv, include_injected, include_deps) + + return results, raw_data + + return process_dict + + +def make_process_list(mod_helper, **kwargs): + # + # ATTENTION! + # + # The function `make_process_list()` is deprecated and will be removed in community.general 13.0.0 + # + process_dict = make_process_dict(mod_helper, **kwargs) + + def process_list(rc, out, err): + res_dict, raw_data = process_dict(rc, out, err) + + if kwargs.get("include_raw"): + mod_helper.vars.raw_output = raw_data + + return [ + entry + for name, entry in res_dict.items() + if name == kwargs.get("name") + ] + return process_list diff --git a/plugins/module_utils/pkg_req.py b/plugins/module_utils/pkg_req.py new file mode 100644 index 0000000000..13c824440f --- /dev/null +++ b/plugins/module_utils/pkg_req.py @@ -0,0 +1,71 @@ +# Copyright (c) 2025, Alexei Znamensky +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + +from ansible_collections.community.general.plugins.module_utils import deps + + +with deps.declare("packaging"): + from packaging.requirements import Requirement + from packaging.version import parse as parse_version, InvalidVersion + + +class PackageRequirement: + def __init__(self, module, name): + self.module = module + self.parsed_name, self.requirement = self._parse_spec(name) + + def _parse_spec(self, name): + """ + Parse a package name that may include version specifiers using PEP 508. + Returns a tuple of (name, requirement) where requirement is of type packaging.requirements.Requirement and it may be None. 
+ + Example inputs: + "package" + "package>=1.0" + "package>=1.0,<2.0" + "package[extra]>=1.0" + "package[foo,bar]>=1.0,!=1.5" + + :param name: Package name with optional version specifiers and extras + :return: Tuple of (name, requirement) + :raises ValueError: If the package specification is invalid + """ + if not name: + return name, None + + # Quick check for simple package names + if not any(c in name for c in '>= -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - -import atexit -import time -import re -import traceback - -PROXMOXER_IMP_ERR = None -try: - from proxmoxer import ProxmoxAPI - HAS_PROXMOXER = True -except ImportError: - HAS_PROXMOXER = False - PROXMOXER_IMP_ERR = traceback.format_exc() - - -from ansible.module_utils.basic import env_fallback, missing_required_lib -from ansible.module_utils.common.text.converters import to_native -from ansible_collections.community.general.plugins.module_utils.version import LooseVersion - - -def proxmox_auth_argument_spec(): - return dict( - api_host=dict(type='str', - required=True, - fallback=(env_fallback, ['PROXMOX_HOST']) - ), - api_user=dict(type='str', - required=True, - fallback=(env_fallback, ['PROXMOX_USER']) - ), - api_password=dict(type='str', - no_log=True, - fallback=(env_fallback, ['PROXMOX_PASSWORD']) - ), - api_token_id=dict(type='str', - no_log=False - ), - api_token_secret=dict(type='str', - no_log=True - ), - validate_certs=dict(type='bool', - default=False - ), - ) - - -def proxmox_to_ansible_bool(value): - '''Convert Proxmox representation of a boolean to be ansible-friendly''' - return True if value == 1 else False - - -def ansible_to_proxmox_bool(value): - '''Convert Ansible representation of a boolean to be proxmox-friendly''' - if value is None: - return None - - if not isinstance(value, bool): - raise ValueError("%s must be of type bool not %s" % (value, 
type(value))) - - return 1 if value else 0 - - -class ProxmoxAnsible(object): - """Base class for Proxmox modules""" - def __init__(self, module): - if not HAS_PROXMOXER: - module.fail_json(msg=missing_required_lib('proxmoxer'), exception=PROXMOXER_IMP_ERR) - - self.module = module - self.proxmox_api = self._connect() - # Test token validity - try: - self.proxmox_api.version.get() - except Exception as e: - module.fail_json(msg='%s' % e, exception=traceback.format_exc()) - - def _connect(self): - api_host = self.module.params['api_host'] - api_user = self.module.params['api_user'] - api_password = self.module.params['api_password'] - api_token_id = self.module.params['api_token_id'] - api_token_secret = self.module.params['api_token_secret'] - validate_certs = self.module.params['validate_certs'] - - auth_args = {'user': api_user} - if api_password: - auth_args['password'] = api_password - else: - auth_args['token_name'] = api_token_id - auth_args['token_value'] = api_token_secret - - try: - return ProxmoxAPI(api_host, verify_ssl=validate_certs, **auth_args) - except Exception as e: - self.module.fail_json(msg='%s' % e, exception=traceback.format_exc()) - - def version(self): - apireturn = self.proxmox_api.version.get() - return LooseVersion(apireturn['version']) - - def get_node(self, node): - nodes = [n for n in self.proxmox_api.nodes.get() if n['node'] == node] - return nodes[0] if nodes else None - - def get_nextvmid(self): - vmid = self.proxmox_api.cluster.nextid.get() - return vmid - - def get_vmid(self, name, ignore_missing=False, choose_first_if_multiple=False): - vms = [vm['vmid'] for vm in self.proxmox_api.cluster.resources.get(type='vm') if vm.get('name') == name] - - if not vms: - if ignore_missing: - return None - - self.module.fail_json(msg='No VM with name %s found' % name) - elif len(vms) > 1: - self.module.fail_json(msg='Multiple VMs with name %s found, provide vmid instead' % name) - - return vms[0] - - def get_vm(self, vmid, 
ignore_missing=False): - vms = [vm for vm in self.proxmox_api.cluster.resources.get(type='vm') if vm['vmid'] == int(vmid)] - - if vms: - return vms[0] - else: - if ignore_missing: - return None - - self.module.fail_json(msg='VM with vmid %s does not exist in cluster' % vmid) diff --git a/plugins/module_utils/puppet.py b/plugins/module_utils/puppet.py new file mode 100644 index 0000000000..3b093d8c9d --- /dev/null +++ b/plugins/module_utils/puppet.py @@ -0,0 +1,108 @@ +# Copyright (c) 2022, Alexei Znamensky +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + + +import os + +from ansible_collections.community.general.plugins.module_utils.cmd_runner import CmdRunner, cmd_runner_fmt + + +_PUPPET_PATH_PREFIX = ["/opt/puppetlabs/bin"] + + +def get_facter_dir(): + if os.getuid() == 0: + return '/etc/facter/facts.d' + else: + return os.path.expanduser('~/.facter/facts.d') + + +def _puppet_cmd(module): + return module.get_bin_path("puppet", False, _PUPPET_PATH_PREFIX) + + +# If the `timeout` CLI command feature is removed, +# Then we could add this as a fixed param to `puppet_runner` +def ensure_agent_enabled(module): + runner = CmdRunner( + module, + command="puppet", + path_prefix=_PUPPET_PATH_PREFIX, + arg_formats=dict( + _agent_disabled=cmd_runner_fmt.as_fixed(['config', 'print', 'agent_disabled_lockfile']), + ), + check_rc=False, + ) + + rc, stdout, stderr = runner("_agent_disabled").run() + if os.path.exists(stdout.strip()): + module.fail_json( + msg="Puppet agent is administratively disabled.", + disabled=True) + elif rc != 0: + module.fail_json( + msg="Puppet agent state could not be determined.") + + +def puppet_runner(module): + + # Keeping backward compatibility, allow for running with the `timeout` CLI command. 
+ # If this can be replaced with ansible `timeout` parameter in playbook, + # then this function could be removed. + def _prepare_base_cmd(): + _tout_cmd = module.get_bin_path("timeout", False) + if _tout_cmd: + cmd = ["timeout", "-s", "9", module.params["timeout"], _puppet_cmd(module)] + else: + cmd = ["puppet"] + return cmd + + def noop_func(v): + return ["--noop"] if module.check_mode or v else ["--no-noop"] + + _logdest_map = { + "syslog": ["--logdest", "syslog"], + "all": ["--logdest", "syslog", "--logdest", "console"], + } + + @cmd_runner_fmt.unpack_args + def execute_func(execute, manifest): + if execute: + return ["--execute", execute] + else: + return [manifest] + + runner = CmdRunner( + module, + command=_prepare_base_cmd(), + path_prefix=_PUPPET_PATH_PREFIX, + arg_formats=dict( + _agent_fixed=cmd_runner_fmt.as_fixed([ + "agent", "--onetime", "--no-daemonize", "--no-usecacheonfailure", + "--no-splay", "--detailed-exitcodes", "--verbose", "--color", "0", + ]), + _apply_fixed=cmd_runner_fmt.as_fixed(["apply", "--detailed-exitcodes"]), + puppetmaster=cmd_runner_fmt.as_opt_val("--server"), + show_diff=cmd_runner_fmt.as_bool("--show-diff"), + confdir=cmd_runner_fmt.as_opt_val("--confdir"), + environment=cmd_runner_fmt.as_opt_val("--environment"), + tags=cmd_runner_fmt.as_func(lambda v: ["--tags", ",".join(v)]), + skip_tags=cmd_runner_fmt.as_func(lambda v: ["--skip_tags", ",".join(v)]), + certname=cmd_runner_fmt.as_opt_eq_val("--certname"), + noop=cmd_runner_fmt.as_func(noop_func), + use_srv_records=cmd_runner_fmt.as_bool("--usr_srv_records", "--no-usr_srv_records", ignore_none=True), + logdest=cmd_runner_fmt.as_map(_logdest_map, default=[]), + modulepath=cmd_runner_fmt.as_opt_eq_val("--modulepath"), + _execute=cmd_runner_fmt.as_func(execute_func), + summarize=cmd_runner_fmt.as_bool("--summarize"), + waitforlock=cmd_runner_fmt.as_opt_val("--waitforlock"), + debug=cmd_runner_fmt.as_bool("--debug"), + verbose=cmd_runner_fmt.as_bool("--verbose"), + ), + 
check_rc=False, + force_lang=module.params["environment_lang"], + ) + return runner diff --git a/plugins/module_utils/pure.py b/plugins/module_utils/pure.py deleted file mode 100644 index ebd41b1ce5..0000000000 --- a/plugins/module_utils/pure.py +++ /dev/null @@ -1,112 +0,0 @@ -# -*- coding: utf-8 -*- - -# This code is part of Ansible, but is an independent component. -# This particular file snippet, and this file snippet only, is BSD licensed. -# Modules you write using this snippet, which is embedded dynamically by Ansible -# still belong to the author of the module, and may assign their own license -# to the complete work. -# -# Copyright (c), Simon Dodsley ,2017 -# Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause) - -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - -HAS_PURESTORAGE = True -try: - from purestorage import purestorage -except ImportError: - HAS_PURESTORAGE = False - -HAS_PURITY_FB = True -try: - from purity_fb import PurityFb, FileSystem, FileSystemSnapshot, SnapshotSuffix, rest -except ImportError: - HAS_PURITY_FB = False - -from functools import wraps -from os import environ -from os import path -import platform - -VERSION = 1.2 -USER_AGENT_BASE = 'Ansible' -API_AGENT_VERSION = 1.5 - - -def get_system(module): - """Return System Object or Fail""" - user_agent = '%(base)s %(class)s/%(version)s (%(platform)s)' % { - 'base': USER_AGENT_BASE, - 'class': __name__, - 'version': VERSION, - 'platform': platform.platform() - } - array_name = module.params['fa_url'] - api = module.params['api_token'] - - if array_name and api: - system = purestorage.FlashArray(array_name, api_token=api, user_agent=user_agent) - elif environ.get('PUREFA_URL') and environ.get('PUREFA_API'): - system = purestorage.FlashArray(environ.get('PUREFA_URL'), api_token=(environ.get('PUREFA_API')), user_agent=user_agent) - else: - module.fail_json(msg="You must set PUREFA_URL and 
PUREFA_API environment variables or the fa_url and api_token module arguments") - try: - system.get() - except Exception: - module.fail_json(msg="Pure Storage FlashArray authentication failed. Check your credentials") - return system - - -def get_blade(module): - """Return System Object or Fail""" - user_agent = '%(base)s %(class)s/%(version)s (%(platform)s)' % { - 'base': USER_AGENT_BASE, - 'class': __name__, - 'version': VERSION, - 'platform': platform.platform() - } - blade_name = module.params['fb_url'] - api = module.params['api_token'] - - if blade_name and api: - blade = PurityFb(blade_name) - blade.disable_verify_ssl() - try: - blade.login(api) - versions = blade.api_version.list_versions().versions - if API_AGENT_VERSION in versions: - blade._api_client.user_agent = user_agent - except rest.ApiException as e: - module.fail_json(msg="Pure Storage FlashBlade authentication failed. Check your credentials") - elif environ.get('PUREFB_URL') and environ.get('PUREFB_API'): - blade = PurityFb(environ.get('PUREFB_URL')) - blade.disable_verify_ssl() - try: - blade.login(environ.get('PUREFB_API')) - versions = blade.api_version.list_versions().versions - if API_AGENT_VERSION in versions: - blade._api_client.user_agent = user_agent - except rest.ApiException as e: - module.fail_json(msg="Pure Storage FlashBlade authentication failed. 
Check your credentials") - else: - module.fail_json(msg="You must set PUREFB_URL and PUREFB_API environment variables or the fb_url and api_token module arguments") - return blade - - -def purefa_argument_spec(): - """Return standard base dictionary used for the argument_spec argument in AnsibleModule""" - - return dict( - fa_url=dict(), - api_token=dict(no_log=True), - ) - - -def purefb_argument_spec(): - """Return standard base dictionary used for the argument_spec argument in AnsibleModule""" - - return dict( - fb_url=dict(), - api_token=dict(no_log=True), - ) diff --git a/plugins/module_utils/python_runner.py b/plugins/module_utils/python_runner.py new file mode 100644 index 0000000000..7d9b94f50e --- /dev/null +++ b/plugins/module_utils/python_runner.py @@ -0,0 +1,34 @@ +# Copyright (c) 2024, Alexei Znamensky +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + +import os + +from ansible_collections.community.general.plugins.module_utils.cmd_runner import CmdRunner, _ensure_list + + +class PythonRunner(CmdRunner): + def __init__(self, module, command, arg_formats=None, default_args_order=(), + check_rc=False, force_lang="C", path_prefix=None, environ_update=None, + python="python", venv=None): + self.python = python + self.venv = venv + self.has_venv = venv is not None + + if os.path.isabs(python) or '/' in python: + self.python = python + elif self.has_venv: + if path_prefix is None: + path_prefix = [] + path_prefix.append(os.path.join(venv, "bin")) + if environ_update is None: + environ_update = {} + environ_update["PATH"] = f"{':'.join(path_prefix)}:{os.environ['PATH']}" + environ_update["VIRTUAL_ENV"] = venv + + python_cmd = [self.python] + _ensure_list(command) + + super(PythonRunner, self).__init__(module, python_cmd, arg_formats, default_args_order, + check_rc, force_lang, path_prefix, environ_update) diff 
--git a/plugins/module_utils/rax.py b/plugins/module_utils/rax.py deleted file mode 100644 index 84effee97c..0000000000 --- a/plugins/module_utils/rax.py +++ /dev/null @@ -1,316 +0,0 @@ -# -*- coding: utf-8 -*- -# This code is part of Ansible, but is an independent component. -# This particular file snippet, and this file snippet only, is BSD licensed. -# Modules you write using this snippet, which is embedded dynamically by -# Ansible still belong to the author of the module, and may assign their own -# license to the complete work. -# -# Copyright (c), Michael DeHaan , 2012-2013 -# -# Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause) - -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - - -import os -import re -from uuid import UUID - -from ansible.module_utils.six import text_type, binary_type - -FINAL_STATUSES = ('ACTIVE', 'ERROR') -VOLUME_STATUS = ('available', 'attaching', 'creating', 'deleting', 'in-use', - 'error', 'error_deleting') - -CLB_ALGORITHMS = ['RANDOM', 'LEAST_CONNECTIONS', 'ROUND_ROBIN', - 'WEIGHTED_LEAST_CONNECTIONS', 'WEIGHTED_ROUND_ROBIN'] -CLB_PROTOCOLS = ['DNS_TCP', 'DNS_UDP', 'FTP', 'HTTP', 'HTTPS', 'IMAPS', - 'IMAPv4', 'LDAP', 'LDAPS', 'MYSQL', 'POP3', 'POP3S', 'SMTP', - 'TCP', 'TCP_CLIENT_FIRST', 'UDP', 'UDP_STREAM', 'SFTP'] - -NON_CALLABLES = (text_type, binary_type, bool, dict, int, list, type(None)) -PUBLIC_NET_ID = "00000000-0000-0000-0000-000000000000" -SERVICE_NET_ID = "11111111-1111-1111-1111-111111111111" - - -def rax_slugify(value): - """Prepend a key with rax_ and normalize the key name""" - return 'rax_%s' % (re.sub(r'[^\w-]', '_', value).lower().lstrip('_')) - - -def rax_clb_node_to_dict(obj): - """Function to convert a CLB Node object to a dict""" - if not obj: - return {} - node = obj.to_dict() - node['id'] = obj.id - node['weight'] = obj.weight - return node - - -def rax_to_dict(obj, obj_type='standard'): - """Generic function to 
convert a pyrax object to a dict - - obj_type values: - standard - clb - server - - """ - instance = {} - for key in dir(obj): - value = getattr(obj, key) - if obj_type == 'clb' and key == 'nodes': - instance[key] = [] - for node in value: - instance[key].append(rax_clb_node_to_dict(node)) - elif (isinstance(value, list) and len(value) > 0 and - not isinstance(value[0], NON_CALLABLES)): - instance[key] = [] - for item in value: - instance[key].append(rax_to_dict(item)) - elif (isinstance(value, NON_CALLABLES) and not key.startswith('_')): - if obj_type == 'server': - if key == 'image': - if not value: - instance['rax_boot_source'] = 'volume' - else: - instance['rax_boot_source'] = 'local' - key = rax_slugify(key) - instance[key] = value - - if obj_type == 'server': - for attr in ['id', 'accessIPv4', 'name', 'status']: - instance[attr] = instance.get(rax_slugify(attr)) - - return instance - - -def rax_find_bootable_volume(module, rax_module, server, exit=True): - """Find a servers bootable volume""" - cs = rax_module.cloudservers - cbs = rax_module.cloud_blockstorage - server_id = rax_module.utils.get_id(server) - volumes = cs.volumes.get_server_volumes(server_id) - bootable_volumes = [] - for volume in volumes: - vol = cbs.get(volume) - if module.boolean(vol.bootable): - bootable_volumes.append(vol) - if not bootable_volumes: - if exit: - module.fail_json(msg='No bootable volumes could be found for ' - 'server %s' % server_id) - else: - return False - elif len(bootable_volumes) > 1: - if exit: - module.fail_json(msg='Multiple bootable volumes found for server ' - '%s' % server_id) - else: - return False - - return bootable_volumes[0] - - -def rax_find_image(module, rax_module, image, exit=True): - """Find a server image by ID or Name""" - cs = rax_module.cloudservers - try: - UUID(image) - except ValueError: - try: - image = cs.images.find(human_id=image) - except(cs.exceptions.NotFound, - cs.exceptions.NoUniqueMatch): - try: - image = cs.images.find(name=image) - 
except (cs.exceptions.NotFound, - cs.exceptions.NoUniqueMatch): - if exit: - module.fail_json(msg='No matching image found (%s)' % - image) - else: - return False - - return rax_module.utils.get_id(image) - - -def rax_find_volume(module, rax_module, name): - """Find a Block storage volume by ID or name""" - cbs = rax_module.cloud_blockstorage - try: - UUID(name) - volume = cbs.get(name) - except ValueError: - try: - volume = cbs.find(name=name) - except rax_module.exc.NotFound: - volume = None - except Exception as e: - module.fail_json(msg='%s' % e) - return volume - - -def rax_find_network(module, rax_module, network): - """Find a cloud network by ID or name""" - cnw = rax_module.cloud_networks - try: - UUID(network) - except ValueError: - if network.lower() == 'public': - return cnw.get_server_networks(PUBLIC_NET_ID) - elif network.lower() == 'private': - return cnw.get_server_networks(SERVICE_NET_ID) - else: - try: - network_obj = cnw.find_network_by_label(network) - except (rax_module.exceptions.NetworkNotFound, - rax_module.exceptions.NetworkLabelNotUnique): - module.fail_json(msg='No matching network found (%s)' % - network) - else: - return cnw.get_server_networks(network_obj) - else: - return cnw.get_server_networks(network) - - -def rax_find_server(module, rax_module, server): - """Find a Cloud Server by ID or name""" - cs = rax_module.cloudservers - try: - UUID(server) - server = cs.servers.get(server) - except ValueError: - servers = cs.servers.list(search_opts=dict(name='^%s$' % server)) - if not servers: - module.fail_json(msg='No Server was matched by name, ' - 'try using the Server ID instead') - if len(servers) > 1: - module.fail_json(msg='Multiple servers matched by name, ' - 'try using the Server ID instead') - - # We made it this far, grab the first and hopefully only server - # in the list - server = servers[0] - return server - - -def rax_find_loadbalancer(module, rax_module, loadbalancer): - """Find a Cloud Load Balancer by ID or name""" - 
clb = rax_module.cloud_loadbalancers - try: - found = clb.get(loadbalancer) - except Exception: - found = [] - for lb in clb.list(): - if loadbalancer == lb.name: - found.append(lb) - - if not found: - module.fail_json(msg='No loadbalancer was matched') - - if len(found) > 1: - module.fail_json(msg='Multiple loadbalancers matched') - - # We made it this far, grab the first and hopefully only item - # in the list - found = found[0] - - return found - - -def rax_argument_spec(): - """Return standard base dictionary used for the argument_spec - argument in AnsibleModule - - """ - return dict( - api_key=dict(type='str', aliases=['password'], no_log=True), - auth_endpoint=dict(type='str'), - credentials=dict(type='path', aliases=['creds_file']), - env=dict(type='str'), - identity_type=dict(type='str', default='rackspace'), - region=dict(type='str'), - tenant_id=dict(type='str'), - tenant_name=dict(type='str'), - username=dict(type='str'), - validate_certs=dict(type='bool', aliases=['verify_ssl']), - ) - - -def rax_required_together(): - """Return the default list used for the required_together argument to - AnsibleModule""" - return [['api_key', 'username']] - - -def setup_rax_module(module, rax_module, region_required=True): - """Set up pyrax in a standard way for all modules""" - rax_module.USER_AGENT = 'ansible/%s %s' % (module.ansible_version, - rax_module.USER_AGENT) - - api_key = module.params.get('api_key') - auth_endpoint = module.params.get('auth_endpoint') - credentials = module.params.get('credentials') - env = module.params.get('env') - identity_type = module.params.get('identity_type') - region = module.params.get('region') - tenant_id = module.params.get('tenant_id') - tenant_name = module.params.get('tenant_name') - username = module.params.get('username') - verify_ssl = module.params.get('validate_certs') - - if env is not None: - rax_module.set_environment(env) - - rax_module.set_setting('identity_type', identity_type) - if verify_ssl is not None: - 
rax_module.set_setting('verify_ssl', verify_ssl) - if auth_endpoint is not None: - rax_module.set_setting('auth_endpoint', auth_endpoint) - if tenant_id is not None: - rax_module.set_setting('tenant_id', tenant_id) - if tenant_name is not None: - rax_module.set_setting('tenant_name', tenant_name) - - try: - username = username or os.environ.get('RAX_USERNAME') - if not username: - username = rax_module.get_setting('keyring_username') - if username: - api_key = 'USE_KEYRING' - if not api_key: - api_key = os.environ.get('RAX_API_KEY') - credentials = (credentials or os.environ.get('RAX_CREDENTIALS') or - os.environ.get('RAX_CREDS_FILE')) - region = (region or os.environ.get('RAX_REGION') or - rax_module.get_setting('region')) - except KeyError as e: - module.fail_json(msg='Unable to load %s' % e.message) - - try: - if api_key and username: - if api_key == 'USE_KEYRING': - rax_module.keyring_auth(username, region=region) - else: - rax_module.set_credentials(username, api_key=api_key, - region=region) - elif credentials: - credentials = os.path.expanduser(credentials) - rax_module.set_credential_file(credentials, region=region) - else: - raise Exception('No credentials supplied!') - except Exception as e: - if e.message: - msg = str(e.message) - else: - msg = repr(e) - module.fail_json(msg=msg) - - if region_required and region not in rax_module.regions: - module.fail_json(msg='%s is not a valid region, must be one of: %s' % - (region, ','.join(rax_module.regions))) - - return rax_module diff --git a/plugins/module_utils/redfish_utils.py b/plugins/module_utils/redfish_utils.py index 31750861f7..55332e46c6 100644 --- a/plugins/module_utils/redfish_utils.py +++ b/plugins/module_utils/redfish_utils.py @@ -1,23 +1,29 @@ -# -*- coding: utf-8 -*- # Copyright (c) 2017-2018 Dell EMC Inc. 
-# GNU General Public License v3.0+ (see LICENSE or https://www.gnu.org/licenses/gpl-3.0.txt) +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations +import http.client as http_client import json +import os +import random +import string +import time from ansible.module_utils.urls import open_url from ansible.module_utils.common.text.converters import to_native from ansible.module_utils.common.text.converters import to_text -from ansible.module_utils.six.moves import http_client -from ansible.module_utils.six.moves.urllib.error import URLError, HTTPError -from ansible.module_utils.six.moves.urllib.parse import urlparse +from ansible.module_utils.common.text.converters import to_bytes +from urllib.error import URLError, HTTPError +from urllib.parse import urlparse GET_HEADERS = {'accept': 'application/json', 'OData-Version': '4.0'} POST_HEADERS = {'content-type': 'application/json', 'accept': 'application/json', 'OData-Version': '4.0'} PATCH_HEADERS = {'content-type': 'application/json', 'accept': 'application/json', 'OData-Version': '4.0'} +PUT_HEADERS = {'content-type': 'application/json', 'accept': 'application/json', + 'OData-Version': '4.0'} DELETE_HEADERS = {'accept': 'application/json', 'OData-Version': '4.0'} FAIL_MSG = 'Issuing a data modification command without specifying the '\ @@ -25,20 +31,40 @@ FAIL_MSG = 'Issuing a data modification command without specifying the '\ 'than one %(resource)s is no longer allowed. Use the `resource_id` '\ 'option to specify the target %(resource)s ID.' 
+# Use together with the community.general.redfish docs fragment +REDFISH_COMMON_ARGUMENT_SPEC = { + "validate_certs": { + "type": "bool", + "default": False, + }, + "ca_path": { + "type": "path", + }, + "ciphers": { + "type": "list", + "elements": "str", + }, +} + class RedfishUtils(object): def __init__(self, creds, root_uri, timeout, module, resource_id=None, - data_modification=False, strip_etag_quotes=False): + data_modification=False, strip_etag_quotes=False, ciphers=None): self.root_uri = root_uri self.creds = creds self.timeout = timeout self.module = module self.service_root = '/redfish/v1/' + self.session_service_uri = '/redfish/v1/SessionService' + self.sessions_uri = '/redfish/v1/SessionService/Sessions' self.resource_id = resource_id self.data_modification = data_modification self.strip_etag_quotes = strip_etag_quotes - self._init_session() + self.ciphers = ciphers if ciphers is not None else module.params.get("ciphers") + self._vendor = None + self.validate_certs = module.params.get("validate_certs", False) + self.ca_path = module.params.get("ca_path") def _auth_params(self, headers): """ @@ -60,62 +86,211 @@ class RedfishUtils(object): force_basic_auth = True return username, password, force_basic_auth + def _check_request_payload(self, req_pyld, cur_pyld, uri): + """ + Checks the request payload with the values currently held by the + service. Will check if changes are needed and if properties are + supported by the service. 
+ + :param req_pyld: dict containing the properties to apply + :param cur_pyld: dict containing the properties currently set + :param uri: string containing the URI being modified + :return: dict containing response information + """ + + change_required = False + for prop in req_pyld: + # Check if the property is supported by the service + if prop not in cur_pyld: + return {'ret': False, + 'changed': False, + 'msg': f'{uri} does not support the property {prop}', + 'changes_required': False} + + # Perform additional checks based on the type of property + if isinstance(req_pyld[prop], dict) and isinstance(cur_pyld[prop], dict): + # If the property is a dictionary, check the nested properties + sub_resp = self._check_request_payload(req_pyld[prop], cur_pyld[prop], uri) + if not sub_resp['ret']: + # Unsupported property or other error condition; no change + return sub_resp + if sub_resp['changes_required']: + # Subordinate dictionary requires changes + change_required = True + + else: + # For other properties, just compare the values + + # Note: This is also a fallthrough for cases where the request + # payload and current settings do not match in their data type. + # There are cases where this can be expected, such as when a + # property is always 'null' in responses, so we want to attempt + # the PATCH request. + + # Note: This is also a fallthrough for properties that are + # arrays of objects. Some services erroneously omit properties + # within arrays of objects when not configured, and it is + # expecting the client to provide them anyway. 
+ + if req_pyld[prop] != cur_pyld[prop]: + change_required = True + + resp = {'ret': True, 'changes_required': change_required} + if not change_required: + # No changes required; all properties set + resp['changed'] = False + resp['msg'] = f'Properties in {uri} are already set' + return resp + + def _request(self, uri, **kwargs): + kwargs.setdefault("validate_certs", self.validate_certs) + kwargs.setdefault("follow_redirects", "all") + kwargs.setdefault("use_proxy", True) + kwargs.setdefault("timeout", self.timeout) + kwargs.setdefault("ciphers", self.ciphers) + kwargs.setdefault("ca_path", self.ca_path) + resp = open_url(uri, **kwargs) + headers = {k.lower(): v for (k, v) in resp.info().items()} + return resp, headers + # The following functions are to send GET/POST/PATCH/DELETE requests - def get_request(self, uri): + def get_request(self, uri, override_headers=None, allow_no_resp=False, timeout=None): req_headers = dict(GET_HEADERS) + if override_headers: + req_headers.update(override_headers) username, password, basic_auth = self._auth_params(req_headers) + if timeout is None: + timeout = self.timeout try: - resp = open_url(uri, method="GET", headers=req_headers, - url_username=username, url_password=password, - force_basic_auth=basic_auth, validate_certs=False, - follow_redirects='all', - use_proxy=True, timeout=self.timeout) - data = json.loads(to_native(resp.read())) - headers = dict((k.lower(), v) for (k, v) in resp.info().items()) + # Service root is an unauthenticated resource; remove credentials + # in case the caller will be using sessions later. 
+ if uri == (self.root_uri + self.service_root): + basic_auth = False + resp, headers = self._request( + uri, + method="GET", + headers=req_headers, + url_username=username, + url_password=password, + force_basic_auth=basic_auth, + timeout=timeout, + ) + try: + data = json.loads(to_native(resp.read())) + except Exception as e: + # No response data; this is okay in certain cases + data = None + if not allow_no_resp: + raise except HTTPError as e: - msg = self._get_extended_message(e) + msg, data = self._get_extended_message(e) return {'ret': False, - 'msg': "HTTP Error %s on GET request to '%s', extended message: '%s'" - % (e.code, uri, msg), - 'status': e.code} + 'msg': f"HTTP Error {e.code} on GET request to '{uri}', extended message: '{msg}'", + 'status': e.code, 'data': data} except URLError as e: - return {'ret': False, 'msg': "URL Error on GET request to '%s': '%s'" - % (uri, e.reason)} + return {'ret': False, 'msg': f"URL Error on GET request to '{uri}': '{e.reason}'"} # Almost all errors should be caught above, but just in case except Exception as e: return {'ret': False, - 'msg': "Failed GET request to '%s': '%s'" % (uri, to_text(e))} - return {'ret': True, 'data': data, 'headers': headers} + 'msg': f"Failed GET request to '{uri}': '{e}'"} + return {'ret': True, 'data': data, 'headers': headers, 'resp': resp} - def post_request(self, uri, pyld): + def post_request(self, uri, pyld, multipart=False): req_headers = dict(POST_HEADERS) username, password, basic_auth = self._auth_params(req_headers) try: - resp = open_url(uri, data=json.dumps(pyld), - headers=req_headers, method="POST", - url_username=username, url_password=password, - force_basic_auth=basic_auth, validate_certs=False, - follow_redirects='all', - use_proxy=True, timeout=self.timeout) - headers = dict((k.lower(), v) for (k, v) in resp.info().items()) + # When performing a POST to the session collection, credentials are + # provided in the request body. 
Do not provide the basic auth + # header since this can cause conflicts with some services + if self.sessions_uri is not None and uri == (self.root_uri + self.sessions_uri): + basic_auth = False + if multipart: + # Multipart requests require special handling to encode the request body + multipart_encoder = self._prepare_multipart(pyld) + data = multipart_encoder[0] + req_headers['content-type'] = multipart_encoder[1] + else: + data = json.dumps(pyld) + resp, headers = self._request( + uri, + data=data, + headers=req_headers, + method="POST", + url_username=username, + url_password=password, + force_basic_auth=basic_auth, + ) + try: + data = json.loads(to_native(resp.read())) + except Exception as e: + # No response data; this is okay in many cases + data = None except HTTPError as e: - msg = self._get_extended_message(e) + msg, data = self._get_extended_message(e) return {'ret': False, - 'msg': "HTTP Error %s on POST request to '%s', extended message: '%s'" - % (e.code, uri, msg), - 'status': e.code} + 'msg': f"HTTP Error {e.code} on POST request to '{uri}', extended message: '{msg}'", + 'status': e.code, 'data': data} except URLError as e: - return {'ret': False, 'msg': "URL Error on POST request to '%s': '%s'" - % (uri, e.reason)} + return {'ret': False, 'msg': f"URL Error on POST request to '{uri}': '{e.reason}'"} # Almost all errors should be caught above, but just in case except Exception as e: return {'ret': False, - 'msg': "Failed POST request to '%s': '%s'" % (uri, to_text(e))} - return {'ret': True, 'headers': headers, 'resp': resp} + 'msg': f"Failed POST request to '{uri}': '{e}'"} + return {'ret': True, 'data': data, 'headers': headers, 'resp': resp} - def patch_request(self, uri, pyld): + def patch_request(self, uri, pyld, check_pyld=False): req_headers = dict(PATCH_HEADERS) r = self.get_request(uri) + if r['ret']: + # Get etag from etag header or @odata.etag property + etag = r['headers'].get('etag') + if not etag: + etag = r['data'].get('@odata.etag') 
+ if etag: + if self.strip_etag_quotes: + etag = etag.strip('"') + req_headers['If-Match'] = etag + + if check_pyld: + # Check the payload with the current settings to see if changes + # are needed or if there are unsupported properties + if r['ret']: + check_resp = self._check_request_payload(pyld, r['data'], uri) + if not check_resp.pop('changes_required'): + check_resp['changed'] = False + return check_resp + else: + r['changed'] = False + return r + + username, password, basic_auth = self._auth_params(req_headers) + try: + resp, dummy = self._request( + uri, + data=json.dumps(pyld), + headers=req_headers, + method="PATCH", + url_username=username, + url_password=password, + force_basic_auth=basic_auth, + ) + except HTTPError as e: + msg, data = self._get_extended_message(e) + return {'ret': False, 'changed': False, + 'msg': f"HTTP Error {e.code} on PATCH request to '{uri}', extended message: '{msg}'", + 'status': e.code, 'data': data} + except URLError as e: + return {'ret': False, 'changed': False, + 'msg': f"URL Error on PATCH request to '{uri}': '{e.reason}'"} + # Almost all errors should be caught above, but just in case + except Exception as e: + return {'ret': False, 'changed': False, + 'msg': f"Failed PATCH request to '{uri}': '{e}'"} + return {'ret': True, 'changed': True, 'resp': resp, 'msg': f'Modified {uri}'} + + def put_request(self, uri, pyld): + req_headers = dict(PUT_HEADERS) + r = self.get_request(uri) if r['ret']: # Get etag from etag header or @odata.etag property etag = r['headers'].get('etag') @@ -127,25 +302,26 @@ class RedfishUtils(object): req_headers['If-Match'] = etag username, password, basic_auth = self._auth_params(req_headers) try: - resp = open_url(uri, data=json.dumps(pyld), - headers=req_headers, method="PATCH", - url_username=username, url_password=password, - force_basic_auth=basic_auth, validate_certs=False, - follow_redirects='all', - use_proxy=True, timeout=self.timeout) + resp, dummy = self._request( + uri, + 
data=json.dumps(pyld), + headers=req_headers, + method="PUT", + url_username=username, + url_password=password, + force_basic_auth=basic_auth, + ) except HTTPError as e: - msg = self._get_extended_message(e) + msg, data = self._get_extended_message(e) return {'ret': False, - 'msg': "HTTP Error %s on PATCH request to '%s', extended message: '%s'" - % (e.code, uri, msg), - 'status': e.code} + 'msg': f"HTTP Error {e.code} on PUT request to '{uri}', extended message: '{msg}'", + 'status': e.code, 'data': data} except URLError as e: - return {'ret': False, 'msg': "URL Error on PATCH request to '%s': '%s'" - % (uri, e.reason)} + return {'ret': False, 'msg': f"URL Error on PUT request to '{uri}': '{e.reason}'"} # Almost all errors should be caught above, but just in case except Exception as e: return {'ret': False, - 'msg': "Failed PATCH request to '%s': '%s'" % (uri, to_text(e))} + 'msg': f"Failed PUT request to '{uri}': '{e}'"} return {'ret': True, 'resp': resp} def delete_request(self, uri, pyld=None): @@ -153,27 +329,81 @@ class RedfishUtils(object): username, password, basic_auth = self._auth_params(req_headers) try: data = json.dumps(pyld) if pyld else None - resp = open_url(uri, data=data, - headers=req_headers, method="DELETE", - url_username=username, url_password=password, - force_basic_auth=basic_auth, validate_certs=False, - follow_redirects='all', - use_proxy=True, timeout=self.timeout) + resp, dummy = self._request( + uri, + data=data, + headers=req_headers, + method="DELETE", + url_username=username, + url_password=password, + force_basic_auth=basic_auth, + ) except HTTPError as e: - msg = self._get_extended_message(e) + msg, data = self._get_extended_message(e) return {'ret': False, - 'msg': "HTTP Error %s on DELETE request to '%s', extended message: '%s'" - % (e.code, uri, msg), - 'status': e.code} + 'msg': f"HTTP Error {e.code} on DELETE request to '{uri}', extended message: '{msg}'", + 'status': e.code, 'data': data} except URLError as e: - return 
{'ret': False, 'msg': "URL Error on DELETE request to '%s': '%s'" - % (uri, e.reason)} + return {'ret': False, 'msg': f"URL Error on DELETE request to '{uri}': '{e.reason}'"} # Almost all errors should be caught above, but just in case except Exception as e: return {'ret': False, - 'msg': "Failed DELETE request to '%s': '%s'" % (uri, to_text(e))} + 'msg': f"Failed DELETE request to '{uri}': '{e}'"} return {'ret': True, 'resp': resp} + @staticmethod + def _prepare_multipart(fields): + """Prepares a multipart body based on a set of fields provided. + + Ideally it would have been good to use the existing 'prepare_multipart' + found in ansible.module_utils.urls, but it takes files and encodes them + as Base64 strings, which is not expected by Redfish services. It also + adds escaping of certain bytes in the payload, such as inserting '\r' + any time it finds a standalone '\n', which corrupts the image payload + send to the service. This implementation is simplified to Redfish's + usage and doesn't necessarily represent an exhaustive method of + building multipart requests. 
+ """ + + def write_buffer(body, line): + # Adds to the multipart body based on the provided data type + # At this time there is only support for strings, dictionaries, and bytes (default) + if isinstance(line, str): + body.append(to_bytes(line, encoding='utf-8')) + elif isinstance(line, dict): + body.append(to_bytes(json.dumps(line), encoding='utf-8')) + else: + body.append(line) + return + + # Generate a random boundary marker; may need to consider probing the + # payload for potential conflicts in the future + boundary = ''.join(random.choice(string.digits + string.ascii_letters) for i in range(30)) + body = [] + for form in fields: + # Fill in the form details + write_buffer(body, f"--{boundary}") + + # Insert the headers (Content-Disposition and Content-Type) + if 'filename' in fields[form]: + name = os.path.basename(fields[form]['filename']).replace('"', '\\"') + write_buffer(body, f'Content-Disposition: form-data; name="{to_text(form)}"; filename="{to_text(name)}"') + else: + write_buffer(body, f'Content-Disposition: form-data; name="{form}"') + write_buffer(body, f"Content-Type: {fields[form]['mime_type']}") + write_buffer(body, '') + + # Insert the payload; read from the file if not given by the caller + if 'content' not in fields[form]: + with open(to_bytes(fields[form]['filename'], errors='surrogate_or_strict'), 'rb') as f: + fields[form]['content'] = f.read() + write_buffer(body, fields[form]['content']) + + # Finalize the entire request + write_buffer(body, f"--{boundary}--") + write_buffer(body, '') + return (b'\r\n'.join(body), f"multipart/form-data; boundary={boundary}") + @staticmethod def _get_extended_message(error): """ @@ -181,8 +411,10 @@ class RedfishUtils(object): :param error: an HTTPError exception :type error: HTTPError :return: the ExtendedInfo message if present, else standard HTTP error + :return: the JSON data of the response if present """ msg = http_client.responses.get(error.code, '') + data = None if error.code >= 400: try: body = 
error.read().decode('utf-8') @@ -196,10 +428,36 @@ class RedfishUtils(object): msg = str(data['error']['@Message.ExtendedInfo']) except Exception: pass - return msg + return msg, data - def _init_session(self): - pass + def _get_vendor(self): + # If we got the vendor info once, don't get it again + if self._vendor is not None: + return {'ret': 'True', 'Vendor': self._vendor} + + # Find the vendor info from the service root + response = self.get_request(self.root_uri + self.service_root) + if response['ret'] is False: + return {'ret': False, 'Vendor': ''} + data = response['data'] + + if 'Vendor' in data: + # Extract the vendor string from the Vendor property + self._vendor = data["Vendor"] + return {'ret': True, 'Vendor': data["Vendor"]} + elif 'Oem' in data and len(data['Oem']) > 0: + # Determine the vendor from the OEM object if needed + vendor = list(data['Oem'].keys())[0] + if vendor == 'Hpe' or vendor == 'Hp': + # HPE uses Pascal-casing for their OEM object + # Older systems reported 'Hp' (pre-split) + vendor = 'HPE' + self._vendor = vendor + return {'ret': True, 'Vendor': vendor} + else: + # Could not determine; use an empty string + self._vendor = '' + return {'ret': True, 'Vendor': ''} def _find_accountservice_resource(self): response = self.get_request(self.root_uri + self.service_root) @@ -221,22 +479,23 @@ class RedfishUtils(object): return {'ret': True} def _find_sessionservice_resource(self): + # Get the service root response = self.get_request(self.root_uri + self.service_root) if response['ret'] is False: return response data = response['data'] - if 'SessionService' not in data: + + # Check for the session service and session collection. Well-known + # defaults are provided in the constructor, but services that predate + # Redfish 1.6.0 might contain different values. 
+ self.session_service_uri = data.get('SessionService', {}).get('@odata.id') + self.sessions_uri = data.get('Links', {}).get('Sessions', {}).get('@odata.id') + + # If one isn't found, return an error + if self.session_service_uri is None: return {'ret': False, 'msg': "SessionService resource not found"} - else: - session_service = data["SessionService"]["@odata.id"] - response = self.get_request(self.root_uri + session_service) - if response['ret'] is False: - return response - data = response['data'] - sessions = data['Sessions']['@odata.id'] - if sessions[-1:] == '/': - sessions = sessions[:-1] - self.sessions_uri = sessions + if self.sessions_uri is None: + return {'ret': False, 'msg': "SessionCollection resource not found"} return {'ret': True} def _get_resource_uri_by_id(self, uris, id_prop): @@ -273,7 +532,7 @@ class RedfishUtils(object): if not self.systems_uri: return { 'ret': False, - 'msg': "System resource %s not found" % self.resource_id} + 'msg': f"System resource {self.resource_id} not found"} elif len(self.systems_uris) > 1: self.module.fail_json(msg=FAIL_MSG % {'resource': 'System'}) return {'ret': True} @@ -294,9 +553,9 @@ class RedfishUtils(object): data = response['data'] self.firmware_uri = self.software_uri = None if 'FirmwareInventory' in data: - self.firmware_uri = data['FirmwareInventory'][u'@odata.id'] + self.firmware_uri = data['FirmwareInventory']['@odata.id'] if 'SoftwareInventory' in data: - self.software_uri = data['SoftwareInventory'][u'@odata.id'] + self.software_uri = data['SoftwareInventory']['@odata.id'] return {'ret': True} def _find_chassis_resource(self): @@ -323,7 +582,7 @@ class RedfishUtils(object): if not self.chassis_uri: return { 'ret': False, - 'msg': "Chassis resource %s not found" % self.resource_id} + 'msg': f"Chassis resource {self.resource_id} not found"} elif len(self.chassis_uris) > 1: self.module.fail_json(msg=FAIL_MSG % {'resource': 'Chassis'}) return {'ret': True} @@ -352,7 +611,7 @@ class RedfishUtils(object): 
if not self.manager_uri: return { 'ret': False, - 'msg': "Manager resource %s not found" % self.resource_id} + 'msg': f"Manager resource {self.resource_id} not found"} elif len(self.manager_uris) > 1: self.module.fail_json(msg=FAIL_MSG % {'resource': 'Manager'}) return {'ret': True} @@ -370,12 +629,13 @@ class RedfishUtils(object): data = response['data'] if 'Parameters' in data: params = data['Parameters'] - ai = dict((p['Name'], p) - for p in params if 'Name' in p) + ai = {p['Name']: p for p in params if 'Name' in p} if not ai: - ai = dict((k[:-24], - {'AllowableValues': v}) for k, v in action.items() - if k.endswith('@Redfish.AllowableValues')) + ai = { + k[:-24]: {'AllowableValues': v} + for k, v in action.items() + if k.endswith('@Redfish.AllowableValues') + } return ai def _get_allowable_values(self, action, name, default_values=None): @@ -388,6 +648,24 @@ class RedfishUtils(object): allowable_values = default_values return allowable_values + def check_service_availability(self): + """ + Checks if the service is accessible. + + :return: dict containing the status of the service + """ + + # Get the service root + # Override the timeout since the service root is expected to be readily + # available. 
+ service_root = self.get_request(self.root_uri + self.service_root, timeout=10) + if service_root['ret'] is False: + # Failed, either due to a timeout or HTTP error; not available + return {'ret': True, 'available': False} + + # Successfully accessed the service root; available + return {'ret': True, 'available': True} + def get_logs(self): log_svcs_uri_list = [] list_of_logs = [] @@ -409,12 +687,12 @@ class RedfishUtils(object): return response data = response['data'] for log_svcs_entry in data.get('Members', []): - response = self.get_request(self.root_uri + log_svcs_entry[u'@odata.id']) + response = self.get_request(self.root_uri + log_svcs_entry['@odata.id']) if response['ret'] is False: return response _data = response['data'] if 'Entries' in _data: - log_svcs_uri_list.append(_data['Entries'][u'@odata.id']) + log_svcs_uri_list.append(_data['Entries']['@odata.id']) # For each entry in LogServices, get log name and all log entries for log_svcs_uri in log_svcs_uri_list: @@ -434,7 +712,7 @@ class RedfishUtils(object): entry[prop] = logEntry.get(prop) if entry: list_of_log_entries.append(entry) - log_name = log_svcs_uri.split('/')[-1] + log_name = log_svcs_uri.rstrip('/').split('/')[-1] logs[log_name] = list_of_log_entries list_of_logs.append(logs) @@ -457,15 +735,15 @@ class RedfishUtils(object): return response data = response['data'] - for log_svcs_entry in data[u'Members']: + for log_svcs_entry in data['Members']: response = self.get_request(self.root_uri + log_svcs_entry["@odata.id"]) if response['ret'] is False: return response _data = response['data'] # Check to make sure option is available, otherwise error is ugly if "Actions" in _data: - if "#LogService.ClearLog" in _data[u"Actions"]: - self.post_request(self.root_uri + _data[u"Actions"]["#LogService.ClearLog"]["target"], {}) + if "#LogService.ClearLog" in _data["Actions"]: + self.post_request(self.root_uri + _data["Actions"]["#LogService.ClearLog"]["target"], {}) if response['ret'] is False: return 
response return {'ret': True} @@ -498,7 +776,8 @@ class RedfishUtils(object): properties = ['CacheSummary', 'FirmwareVersion', 'Identifiers', 'Location', 'Manufacturer', 'Model', 'Name', 'Id', 'PartNumber', 'SerialNumber', 'SpeedGbps', 'Status'] - key = "StorageControllers" + key = "Controllers" + deprecated_key = "StorageControllers" # Find Storage service response = self.get_request(self.root_uri + systems_uri) @@ -519,14 +798,37 @@ class RedfishUtils(object): # Loop through Members and their StorageControllers # and gather properties from each StorageController - if data[u'Members']: - for storage_member in data[u'Members']: - storage_member_uri = storage_member[u'@odata.id'] + if data['Members']: + for storage_member in data['Members']: + storage_member_uri = storage_member['@odata.id'] response = self.get_request(self.root_uri + storage_member_uri) data = response['data'] if key in data: - controller_list = data[key] + controllers_uri = data[key]['@odata.id'] + + response = self.get_request(self.root_uri + controllers_uri) + if response['ret'] is False: + return response + result['ret'] = True + data = response['data'] + + if data['Members']: + for controller_member in data['Members']: + controller_member_uri = controller_member['@odata.id'] + response = self.get_request(self.root_uri + controller_member_uri) + if response['ret'] is False: + return response + result['ret'] = True + data = response['data'] + + controller_result = {} + for property in properties: + if property in data: + controller_result[property] = data[property] + controller_results.append(controller_result) + elif deprecated_key in data: + controller_list = data[deprecated_key] for controller in controller_list: controller_result = {} for property in properties: @@ -548,7 +850,7 @@ class RedfishUtils(object): properties = ['BlockSizeBytes', 'CapableSpeedGbs', 'CapacityBytes', 'EncryptionAbility', 'EncryptionStatus', 'FailurePredicted', 'HotspareType', 'Id', 'Identifiers', - 'Manufacturer', 
'MediaType', 'Model', 'Name', + 'Links', 'Manufacturer', 'MediaType', 'Model', 'Name', 'PartNumber', 'PhysicalLocation', 'Protocol', 'Revision', 'RotationSpeedRPM', 'SerialNumber', 'Status'] @@ -564,16 +866,16 @@ class RedfishUtils(object): if 'Storage' in data: # Get a list of all storage controllers and build respective URIs - storage_uri = data[u'Storage'][u'@odata.id'] + storage_uri = data['Storage']['@odata.id'] response = self.get_request(self.root_uri + storage_uri) if response['ret'] is False: return response result['ret'] = True data = response['data'] - if data[u'Members']: - for controller in data[u'Members']: - controller_list.append(controller[u'@odata.id']) + if data['Members']: + for controller in data['Members']: + controller_list.append(controller['@odata.id']) for c in controller_list: uri = self.root_uri + c response = self.get_request(uri) @@ -581,28 +883,54 @@ class RedfishUtils(object): return response data = response['data'] controller_name = 'Controller 1' - if 'StorageControllers' in data: + storage_id = data['Id'] + if 'Controllers' in data: + controllers_uri = data['Controllers']['@odata.id'] + + response = self.get_request(self.root_uri + controllers_uri) + if response['ret'] is False: + return response + result['ret'] = True + cdata = response['data'] + + if cdata['Members']: + controller_member_uri = cdata['Members'][0]['@odata.id'] + + response = self.get_request(self.root_uri + controller_member_uri) + if response['ret'] is False: + return response + result['ret'] = True + cdata = response['data'] + controller_name = cdata['Name'] + elif 'StorageControllers' in data: sc = data['StorageControllers'] if sc: if 'Name' in sc[0]: controller_name = sc[0]['Name'] else: sc_id = sc[0].get('Id', '1') - controller_name = 'Controller %s' % sc_id + controller_name = f'Controller {sc_id}' drive_results = [] if 'Drives' in data: - for device in data[u'Drives']: - disk_uri = self.root_uri + device[u'@odata.id'] + for device in data['Drives']: + 
disk_uri = self.root_uri + device['@odata.id'] response = self.get_request(disk_uri) data = response['data'] drive_result = {} + drive_result['RedfishURI'] = data['@odata.id'] for property in properties: if property in data: if data[property] is not None: - drive_result[property] = data[property] + if property == "Links": + if "Volumes" in data["Links"].keys(): + volumes = [v["@odata.id"] for v in data["Links"]["Volumes"]] + drive_result["Volumes"] = volumes + else: + drive_result[property] = data[property] drive_results.append(drive_result) drives = {'Controller': controller_name, + 'StorageId': storage_id, 'Drives': drive_results} result["entries"].append(drives) @@ -615,8 +943,8 @@ class RedfishUtils(object): result['ret'] = True data = response['data'] - for controller in data[u'Members']: - controller_list.append(controller[u'@odata.id']) + for controller in data['Members']: + controller_list.append(controller['@odata.id']) for c in controller_list: uri = self.root_uri + c @@ -628,9 +956,9 @@ class RedfishUtils(object): controller_name = data['Name'] else: sc_id = data.get('Id', '1') - controller_name = 'Controller %s' % sc_id + controller_name = f'Controller {sc_id}' drive_results = [] - for device in data[u'Devices']: + for device in data['Devices']: drive_result = {} for property in properties: if property in device: @@ -668,7 +996,7 @@ class RedfishUtils(object): if 'Storage' in data: # Get a list of all storage controllers and build respective URIs - storage_uri = data[u'Storage'][u'@odata.id'] + storage_uri = data['Storage']['@odata.id'] response = self.get_request(self.root_uri + storage_uri) if response['ret'] is False: return response @@ -676,33 +1004,52 @@ class RedfishUtils(object): data = response['data'] if data.get('Members'): - for controller in data[u'Members']: - controller_list.append(controller[u'@odata.id']) - for c in controller_list: + for controller in data['Members']: + controller_list.append(controller['@odata.id']) + for idx, c in 
enumerate(controller_list): uri = self.root_uri + c response = self.get_request(uri) if response['ret'] is False: return response data = response['data'] - controller_name = 'Controller 1' - if 'StorageControllers' in data: + controller_name = f'Controller {idx}' + if 'Controllers' in data: + response = self.get_request(self.root_uri + data['Controllers']['@odata.id']) + if response['ret'] is False: + return response + c_data = response['data'] + + if c_data.get('Members') and c_data['Members']: + response = self.get_request(self.root_uri + c_data['Members'][0]['@odata.id']) + if response['ret'] is False: + return response + member_data = response['data'] + + if member_data: + if 'Name' in member_data: + controller_name = member_data['Name'] + else: + controller_id = member_data.get('Id', '1') + controller_name = f'Controller {controller_id}' + elif 'StorageControllers' in data: sc = data['StorageControllers'] if sc: if 'Name' in sc[0]: controller_name = sc[0]['Name'] else: sc_id = sc[0].get('Id', '1') - controller_name = 'Controller %s' % sc_id + controller_name = f'Controller {sc_id}' volume_results = [] + volume_list = [] if 'Volumes' in data: # Get a list of all volumes and build respective URIs - volumes_uri = data[u'Volumes'][u'@odata.id'] + volumes_uri = data['Volumes']['@odata.id'] response = self.get_request(self.root_uri + volumes_uri) data = response['data'] if data.get('Members'): - for volume in data[u'Members']: - volume_list.append(volume[u'@odata.id']) + for volume in data['Members']: + volume_list.append(volume['@odata.id']) for v in volume_list: uri = self.root_uri + v response = self.get_request(uri) @@ -719,10 +1066,10 @@ class RedfishUtils(object): # Get related Drives Id drive_id_list = [] if 'Links' in data: - if 'Drives' in data[u'Links']: - for link in data[u'Links'][u'Drives']: - drive_id_link = link[u'@odata.id'] - drive_id = drive_id_link.split("/")[-1] + if 'Drives' in data['Links']: + for link in data['Links']['Drives']: + 
drive_id_link = link['@odata.id'] + drive_id = drive_id_link.rstrip('/').split('/')[-1] drive_id_list.append({'Id': drive_id}) volume_result['Linked_drives'] = drive_id_list volume_results.append(volume_result) @@ -744,31 +1091,19 @@ class RedfishUtils(object): return self.manage_indicator_led(command, self.chassis_uri) def manage_indicator_led(self, command, resource_uri=None): - result = {} - key = 'IndicatorLED' + # If no resource is specified; default to the Chassis resource if resource_uri is None: resource_uri = self.chassis_uri + # Perform a PATCH on the IndicatorLED property based on the requested command payloads = {'IndicatorLedOn': 'Lit', 'IndicatorLedOff': 'Off', "IndicatorLedBlink": 'Blinking'} - - result = {} - response = self.get_request(self.root_uri + resource_uri) - if response['ret'] is False: - return response - result['ret'] = True - data = response['data'] - if key not in data: - return {'ret': False, 'msg': "Key %s not found" % key} - - if command in payloads.keys(): - payload = {'IndicatorLED': payloads[command]} - response = self.patch_request(self.root_uri + resource_uri, payload) - if response['ret'] is False: - return response - else: - return {'ret': False, 'msg': 'Invalid command'} - - return result + if command not in payloads.keys(): + return {'ret': False, 'msg': f'Invalid command ({command})'} + payload = {'IndicatorLED': payloads[command]} + resp = self.patch_request(self.root_uri + resource_uri, payload, check_pyld=True) + if resp['ret'] and resp['changed']: + resp['msg'] = f'Set IndicatorLED to {payloads[command]}' + return resp def _map_reset_type(self, reset_type, allowable_values): equiv_types = { @@ -793,27 +1128,34 @@ class RedfishUtils(object): return self.manage_power(command, self.systems_uri, '#ComputerSystem.Reset') - def manage_manager_power(self, command): + def manage_manager_power(self, command, wait=False, wait_timeout=120): return self.manage_power(command, self.manager_uri, - '#Manager.Reset') + 
'#Manager.Reset', wait, wait_timeout) - def manage_power(self, command, resource_uri, action_name): + def manage_power(self, command, resource_uri, action_name, wait=False, + wait_timeout=120): key = "Actions" reset_type_values = ['On', 'ForceOff', 'GracefulShutdown', 'GracefulRestart', 'ForceRestart', 'Nmi', - 'ForceOn', 'PushPowerButton', 'PowerCycle'] + 'ForceOn', 'PushPowerButton', 'PowerCycle', + 'FullPowerCycle'] # command should be PowerOn, PowerForceOff, etc. if not command.startswith('Power'): - return {'ret': False, 'msg': 'Invalid Command (%s)' % command} - reset_type = command[5:] + return {'ret': False, 'msg': f'Invalid Command ({command})'} + + # Commands (except PowerCycle) will be stripped of the 'Power' prefix + if command == 'PowerCycle': + reset_type = command + else: + reset_type = command[5:] # map Reboot to a ResetType that does a reboot if reset_type == 'Reboot': reset_type = 'GracefulRestart' if reset_type not in reset_type_values: - return {'ret': False, 'msg': 'Invalid Command (%s)' % command} + return {'ret': False, 'msg': f'Invalid Command ({command})'} # read the resource and get the current power state response = self.get_request(self.root_uri + resource_uri) @@ -830,11 +1172,11 @@ class RedfishUtils(object): # get the reset Action and target URI if key not in data or action_name not in data[key]: - return {'ret': False, 'msg': 'Action %s not found' % action_name} + return {'ret': False, 'msg': f'Action {action_name} not found'} reset_action = data[key][action_name] if 'target' not in reset_action: return {'ret': False, - 'msg': 'target URI missing from Action %s' % action_name} + 'msg': f'target URI missing from Action {action_name}'} action_uri = reset_action['target'] # get AllowableValues @@ -852,34 +1194,121 @@ class RedfishUtils(object): response = self.post_request(self.root_uri + action_uri, payload) if response['ret'] is False: return response + + # If requested to wait for the service to be available again, block + # until it 
is ready + if wait: + elapsed_time = 0 + start_time = time.time() + # Start with a large enough sleep. Some services will process new + # requests while in the middle of shutting down, thus breaking out + # early. + time.sleep(30) + + # Periodically check for the service's availability. + while elapsed_time <= wait_timeout: + status = self.check_service_availability() + if status['available']: + # It is available; we are done + break + time.sleep(5) + elapsed_time = time.time() - start_time + + if elapsed_time > wait_timeout: + # Exhausted the wait timer; error + return {'ret': False, 'changed': True, + 'msg': f'The service did not become available after {int(wait_timeout)} seconds'} return {'ret': True, 'changed': True} - def _find_account_uri(self, username=None, acct_id=None): - if not any((username, acct_id)): - return {'ret': False, 'msg': - 'Must provide either account_id or account_username'} + def manager_reset_to_defaults(self, command): + return self.reset_to_defaults(command, self.manager_uri, + '#Manager.ResetToDefaults') - response = self.get_request(self.root_uri + self.accounts_uri) + def reset_to_defaults(self, command, resource_uri, action_name): + key = "Actions" + reset_type_values = ['ResetAll', + 'PreserveNetworkAndUsers', + 'PreserveNetwork'] + + if command not in reset_type_values: + return {'ret': False, 'msg': f'Invalid Command ({command})'} + + # read the resource and get the current power state + response = self.get_request(self.root_uri + resource_uri) if response['ret'] is False: return response data = response['data'] - uris = [a.get('@odata.id') for a in data.get('Members', []) if - a.get('@odata.id')] - for uri in uris: - response = self.get_request(self.root_uri + uri) + # get the reset Action and target URI + if key not in data or action_name not in data[key]: + return {'ret': False, 'msg': f'Action {action_name} not found'} + reset_action = data[key][action_name] + if 'target' not in reset_action: + return {'ret': False, + 'msg': 
f'target URI missing from Action {action_name}'} + action_uri = reset_action['target'] + + # get AllowableValues + ai = self._get_all_action_info_values(reset_action) + allowable_values = ai.get('ResetType', {}).get('AllowableValues', []) + + # map ResetType to an allowable value if needed + if allowable_values and command not in allowable_values: + return {'ret': False, + 'msg': f'Specified reset type ({command}) not supported by service. Supported types: {allowable_values}'} + + # define payload + payload = {'ResetType': command} + + # POST to Action URI + response = self.post_request(self.root_uri + action_uri, payload) + if response['ret'] is False: + return response + return {'ret': True, 'changed': True} + + def _find_account_uri(self, username=None, acct_id=None, password_change_uri=None): + if not any((username, acct_id)): + return {'ret': False, 'msg': + 'Must provide either account_id or account_username'} + + if password_change_uri: + # Password change required; go directly to the specified URI + response = self.get_request(self.root_uri + password_change_uri) if response['ret'] is False: - continue + return response data = response['data'] headers = response['headers'] if username: if username == data.get('UserName'): return {'ret': True, 'data': data, - 'headers': headers, 'uri': uri} + 'headers': headers, 'uri': password_change_uri} if acct_id: if acct_id == data.get('Id'): return {'ret': True, 'data': data, - 'headers': headers, 'uri': uri} + 'headers': headers, 'uri': password_change_uri} + else: + # Walk the accounts collection to find the desired user + response = self.get_request(self.root_uri + self.accounts_uri) + if response['ret'] is False: + return response + data = response['data'] + + uris = [a.get('@odata.id') for a in data.get('Members', []) if + a.get('@odata.id')] + for uri in uris: + response = self.get_request(self.root_uri + uri) + if response['ret'] is False: + continue + data = response['data'] + headers = response['headers'] + if 
username: + if username == data.get('UserName'): + return {'ret': True, 'data': data, + 'headers': headers, 'uri': uri} + if acct_id: + if acct_id == data.get('Id'): + return {'ret': True, 'data': data, + 'headers': headers, 'uri': uri} return {'ret': False, 'no_match': True, 'msg': 'No account with the given account_id or account_username found'} @@ -914,7 +1343,8 @@ class RedfishUtils(object): user_list = [] users_results = [] # Get these entries, but does not fail if not found - properties = ['Id', 'Name', 'UserName', 'RoleId', 'Locked', 'Enabled'] + properties = ['Id', 'Name', 'UserName', 'RoleId', 'Locked', 'Enabled', + 'AccountTypes', 'OEMAccountTypes'] response = self.get_request(self.root_uri + self.accounts_uri) if response['ret'] is False: @@ -923,7 +1353,7 @@ class RedfishUtils(object): data = response['data'] for users in data.get('Members', []): - user_list.append(users[u'@odata.id']) # user_list[] are URIs + user_list.append(users['@odata.id']) # user_list[] are URIs # for each user, get details for uri in user_list: @@ -937,6 +1367,12 @@ class RedfishUtils(object): if property in data: user[property] = data[property] + # Filter out empty account slots + # An empty account slot can be detected if the username is an empty + # string and if the account is disabled + if user.get('UserName', '') == '' and not user.get('Enabled', False): + continue + users_results.append(user) result["entries"] = users_results return result @@ -959,10 +1395,11 @@ class RedfishUtils(object): payload['Password'] = user.get('account_password') if user.get('account_roleid'): payload['RoleId'] = user.get('account_roleid') - response = self.patch_request(self.root_uri + uri, payload) - if response['ret'] is False: - return response - return {'ret': True} + if user.get('account_accounttypes'): + payload['AccountTypes'] = user.get('account_accounttypes') + if user.get('account_oemaccounttypes'): + payload['OEMAccountTypes'] = user.get('account_oemaccounttypes') + return 
self.patch_request(self.root_uri + uri, payload, check_pyld=True) def add_user(self, user): if not user.get('account_username'): @@ -992,6 +1429,10 @@ class RedfishUtils(object): payload['Password'] = user.get('account_password') if user.get('account_roleid'): payload['RoleId'] = user.get('account_roleid') + if user.get('account_accounttypes'): + payload['AccountTypes'] = user.get('account_accounttypes') + if user.get('account_oemaccounttypes'): + payload['OEMAccountTypes'] = user.get('account_oemaccounttypes') if user.get('account_id'): payload['Id'] = user.get('account_id') @@ -1010,17 +1451,9 @@ class RedfishUtils(object): if not response['ret']: return response uri = response['uri'] - data = response['data'] - - if data.get('Enabled', True): - # account already enabled, nothing to do - return {'ret': True, 'changed': False} payload = {'Enabled': True} - response = self.patch_request(self.root_uri + uri, payload) - if response['ret'] is False: - return response - return {'ret': True} + return self.patch_request(self.root_uri + uri, payload, check_pyld=True) def delete_user_via_patch(self, user, uri=None, data=None): if not uri: @@ -1031,17 +1464,10 @@ class RedfishUtils(object): uri = response['uri'] data = response['data'] - if data and data.get('UserName') == '' and not data.get('Enabled', False): - # account UserName already cleared, nothing to do - return {'ret': True, 'changed': False} - payload = {'UserName': ''} if data.get('Enabled', False): payload['Enabled'] = False - response = self.patch_request(self.root_uri + uri, payload) - if response['ret'] is False: - return response - return {'ret': True} + return self.patch_request(self.root_uri + uri, payload, check_pyld=True) def delete_user(self, user): response = self._find_account_uri(username=user.get('account_username'), @@ -1078,18 +1504,10 @@ class RedfishUtils(object): acct_id=user.get('account_id')) if not response['ret']: return response + uri = response['uri'] - data = response['data'] - - if not 
data.get('Enabled'): - # account already disabled, nothing to do - return {'ret': True, 'changed': False} - payload = {'Enabled': False} - response = self.patch_request(self.root_uri + uri, payload) - if response['ret'] is False: - return response - return {'ret': True} + return self.patch_request(self.root_uri + uri, payload, check_pyld=True) def update_user_role(self, user): if not user.get('account_roleid'): @@ -1100,30 +1518,25 @@ class RedfishUtils(object): acct_id=user.get('account_id')) if not response['ret']: return response + uri = response['uri'] - data = response['data'] - - if data.get('RoleId') == user.get('account_roleid'): - # account already has RoleId , nothing to do - return {'ret': True, 'changed': False} - - payload = {'RoleId': user.get('account_roleid')} - response = self.patch_request(self.root_uri + uri, payload) - if response['ret'] is False: - return response - return {'ret': True} + payload = {'RoleId': user['account_roleid']} + return self.patch_request(self.root_uri + uri, payload, check_pyld=True) def update_user_password(self, user): + if not user.get('account_password'): + return {'ret': False, 'msg': + 'Must provide account_password for UpdateUserPassword command'} + response = self._find_account_uri(username=user.get('account_username'), - acct_id=user.get('account_id')) + acct_id=user.get('account_id'), + password_change_uri=user.get('account_passwordchangerequired')) if not response['ret']: return response + uri = response['uri'] payload = {'Password': user['account_password']} - response = self.patch_request(self.root_uri + uri, payload) - if response['ret'] is False: - return response - return {'ret': True} + return self.patch_request(self.root_uri + uri, payload, check_pyld=True) def update_user_name(self, user): if not user.get('account_updatename'): @@ -1134,53 +1547,77 @@ class RedfishUtils(object): acct_id=user.get('account_id')) if not response['ret']: return response + uri = response['uri'] payload = {'UserName': 
user['account_updatename']} - response = self.patch_request(self.root_uri + uri, payload) - if response['ret'] is False: - return response - return {'ret': True} + return self.patch_request(self.root_uri + uri, payload, check_pyld=True) def update_accountservice_properties(self, user): - if user.get('account_properties') is None: + account_properties = user.get('account_properties') + if account_properties is None: return {'ret': False, 'msg': 'Must provide account_properties for UpdateAccountServiceProperties command'} - account_properties = user.get('account_properties') - # Find AccountService + # Find the AccountService resource response = self.get_request(self.root_uri + self.service_root) if response['ret'] is False: return response data = response['data'] - if 'AccountService' not in data: + accountservice_uri = data.get("AccountService", {}).get("@odata.id") + if accountservice_uri is None: return {'ret': False, 'msg': "AccountService resource not found"} - accountservice_uri = data["AccountService"]["@odata.id"] - # Check support or not - response = self.get_request(self.root_uri + accountservice_uri) - if response['ret'] is False: + # Perform a PATCH on the AccountService resource with the requested properties + resp = self.patch_request(self.root_uri + accountservice_uri, account_properties, check_pyld=True) + if resp['ret'] and resp['changed']: + resp['msg'] = 'Modified account service' + return resp + + def update_user_accounttypes(self, user): + account_types = user.get('account_accounttypes') + oemaccount_types = user.get('account_oemaccounttypes') + if account_types is None and oemaccount_types is None: + return {'ret': False, 'msg': + 'Must provide account_accounttypes or account_oemaccounttypes for UpdateUserAccountTypes command'} + + response = self._find_account_uri(username=user.get('account_username'), + acct_id=user.get('account_id')) + if not response['ret']: return response - data = response['data'] - for property_name in 
account_properties.keys(): - if property_name not in data: - return {'ret': False, 'msg': - 'property %s not supported' % property_name} - # if properties is already matched, nothing to do - need_change = False - for property_name in account_properties.keys(): - if account_properties[property_name] != data[property_name]: - need_change = True - break + uri = response['uri'] + payload = {} + if user.get('account_accounttypes'): + payload['AccountTypes'] = user.get('account_accounttypes') + if user.get('account_oemaccounttypes'): + payload['OEMAccountTypes'] = user.get('account_oemaccounttypes') - if not need_change: - return {'ret': True, 'changed': False, 'msg': "AccountService properties already set"} + return self.patch_request(self.root_uri + uri, payload, check_pyld=True) - payload = account_properties - response = self.patch_request(self.root_uri + accountservice_uri, payload) - if response['ret'] is False: - return response - return {'ret': True, 'changed': True, 'msg': "Modified AccountService properties"} + def check_password_change_required(self, return_data): + """ + Checks a response if a user needs to change their password + + :param return_data: The return data for a failed request + :return: None or the URI of the account to update + """ + uri = None + if 'data' in return_data: + # Find the extended messages in the response payload + extended_messages = return_data['data'].get('error', {}).get('@Message.ExtendedInfo', []) + if len(extended_messages) == 0: + extended_messages = return_data['data'].get('@Message.ExtendedInfo', []) + # Go through each message and look for Base.1.X.PasswordChangeRequired + for message in extended_messages: + message_id = message.get('MessageId') + if message_id is None: + # While this is invalid, treat the lack of a MessageId as "no message" + continue + if message_id.startswith('Base.1.') and message_id.endswith('.PasswordChangeRequired'): + # Password change required; get the URI of the user account + uri = 
message['MessageArgs'][0] + break + return uri def get_sessions(self): result = {} @@ -1196,8 +1633,8 @@ class RedfishUtils(object): result['ret'] = True data = response['data'] - for sessions in data[u'Members']: - session_list.append(sessions[u'@odata.id']) # session_list[] are URIs + for sessions in data['Members']: + session_list.append(sessions['@odata.id']) # session_list[] are URIs # for each session, get details for uri in session_list: @@ -1223,15 +1660,15 @@ class RedfishUtils(object): # if no active sessions, return as success if data['Members@odata.count'] == 0: - return {'ret': True, 'changed': False, 'msg': "There is no active sessions"} + return {'ret': True, 'changed': False, 'msg': "There are no active sessions"} # loop to delete every active session - for session in data[u'Members']: - response = self.delete_request(self.root_uri + session[u'@odata.id']) + for session in data['Members']: + response = self.delete_request(self.root_uri + session['@odata.id']) if response['ret'] is False: return response - return {'ret': True, 'changed': True, 'msg': "Clear all sessions successfully"} + return {'ret': True, 'changed': True, 'msg': "Cleared all sessions successfully"} def create_session(self): if not self.creds.get('user') or not self.creds.get('pswd'): @@ -1293,6 +1730,8 @@ class RedfishUtils(object): data = response['data'] + result['multipart_supported'] = 'MultipartHttpPushUri' in data + if "Actions" in data: actions = data['Actions'] if len(actions) > 0: @@ -1312,29 +1751,37 @@ class RedfishUtils(object): def _software_inventory(self, uri): result = {} - response = self.get_request(self.root_uri + uri) - if response['ret'] is False: - return response - result['ret'] = True - data = response['data'] - result['entries'] = [] - for member in data[u'Members']: - uri = self.root_uri + member[u'@odata.id'] - # Get details for each software or firmware member - response = self.get_request(uri) + + while uri: + response = self.get_request(self.root_uri + 
uri) if response['ret'] is False: return response result['ret'] = True + data = response['data'] - software = {} - # Get these standard properties if present - for key in ['Name', 'Id', 'Status', 'Version', 'Updateable', - 'SoftwareId', 'LowestSupportedVersion', 'Manufacturer', - 'ReleaseDate']: - if key in data: - software[key] = data.get(key) - result['entries'].append(software) + if data.get('Members@odata.nextLink'): + uri = data.get('Members@odata.nextLink') + else: + uri = None + + for member in data['Members']: + fw_uri = self.root_uri + member['@odata.id'] + # Get details for each software or firmware member + response = self.get_request(fw_uri) + if response['ret'] is False: + return response + result['ret'] = True + data = response['data'] + software = {} + # Get these standard properties if present + for key in ['Name', 'Id', 'Status', 'Version', 'Updateable', + 'SoftwareId', 'LowestSupportedVersion', 'Manufacturer', + 'ReleaseDate']: + if key in data: + software[key] = data.get(key) + result['entries'].append(software) + return result def get_firmware_inventory(self): @@ -1349,11 +1796,85 @@ class RedfishUtils(object): else: return self._software_inventory(self.software_uri) + def _operation_results(self, response, data, handle=None): + """ + Builds the results for an operation from task, job, or action response. 
+ + :param response: HTTP response object + :param data: HTTP response data + :param handle: The task or job handle that was last used + :return: dict containing operation results + """ + + operation_results = {'status': None, 'messages': [], 'handle': None, 'ret': True, + 'resets_requested': []} + + if response.status == 204: + # No content; successful, but nothing to return + # Use the Redfish "Completed" enum from TaskState for the operation status + operation_results['status'] = 'Completed' + else: + # Parse the response body for details + + # Determine the next handle, if any + operation_results['handle'] = handle + if response.status == 202: + # Task generated; get the task monitor URI + operation_results['handle'] = response.getheader('Location', handle) + + # Pull out the status and messages based on the body format + if data is not None: + response_type = data.get('@odata.type', '') + if response_type.startswith('#Task.') or response_type.startswith('#Job.'): + # Task and Job have similar enough structures to treat the same + operation_results['status'] = data.get('TaskState', data.get('JobState')) + operation_results['messages'] = data.get('Messages', []) + else: + # Error response body, which is a bit of a misnomer since it is used in successful action responses + operation_results['status'] = 'Completed' + if response.status >= 400: + operation_results['status'] = 'Exception' + operation_results['messages'] = data.get('error', {}).get('@Message.ExtendedInfo', []) + else: + # No response body (or malformed); build based on status code + operation_results['status'] = 'Completed' + if response.status == 202: + operation_results['status'] = 'New' + elif response.status >= 400: + operation_results['status'] = 'Exception' + + # Clear out the handle if the operation is complete + if operation_results['status'] in ['Completed', 'Cancelled', 'Exception', 'Killed']: + operation_results['handle'] = None + + # Scan the messages to see if next steps are needed + for 
message in operation_results['messages']: + message_id = message.get('MessageId') + if message_id is None: + # While this is invalid, treat the lack of a MessageId as "no message" + continue + + if message_id.startswith('Update.1.') and message_id.endswith('.OperationTransitionedToJob'): + # Operation rerouted to a job; update the status and handle + operation_results['status'] = 'New' + operation_results['handle'] = message['MessageArgs'][0] + operation_results['resets_requested'] = [] + # No need to process other messages in this case + break + + if message_id.startswith('Base.1.') and message_id.endswith('.ResetRequired'): + # A reset to some device is needed to continue the update + reset = {'uri': message['MessageArgs'][0], 'type': message['MessageArgs'][1]} + operation_results['resets_requested'].append(reset) + + return operation_results + def simple_update(self, update_opts): image_uri = update_opts.get('update_image_uri') protocol = update_opts.get('update_protocol') targets = update_opts.get('update_targets') creds = update_opts.get('update_creds') + apply_time = update_opts.get('update_apply_time') if not image_uri: return {'ret': False, 'msg': @@ -1379,18 +1900,14 @@ class RedfishUtils(object): default_values) if protocol not in allowable_values: return {'ret': False, - 'msg': 'Specified update_protocol (%s) not supported ' - 'by service. Supported protocols: %s' % - (protocol, allowable_values)} + 'msg': f'Specified update_protocol ({protocol}) not supported by service. Supported protocols: {allowable_values}'} if targets: allowable_values = self._get_allowable_values(action, 'Targets') if allowable_values: for target in targets: if target not in allowable_values: return {'ret': False, - 'msg': 'Specified target (%s) not supported ' - 'by service. Supported targets: %s' % - (target, allowable_values)} + 'msg': f'Specified target ({target}) not supported by service. 
Supported targets: {allowable_values}'} payload = { 'ImageURI': image_uri @@ -1404,11 +1921,130 @@ class RedfishUtils(object): payload["Username"] = creds.get('username') if creds.get('password'): payload["Password"] = creds.get('password') + if apply_time: + payload["@Redfish.OperationApplyTime"] = apply_time response = self.post_request(self.root_uri + update_uri, payload) if response['ret'] is False: return response return {'ret': True, 'changed': True, - 'msg': "SimpleUpdate requested"} + 'msg': "SimpleUpdate requested", + 'update_status': self._operation_results(response['resp'], response['data'])} + + def multipath_http_push_update(self, update_opts): + """ + Provides a software update via the URI specified by the + MultipartHttpPushUri property. Callers should adjust the 'timeout' + variable in the base object to accommodate the size of the image and + speed of the transfer. For example, a 200MB image will likely take + more than the default 10 second timeout. + + :param update_opts: The parameters for the update operation + :return: dict containing the response of the update request + """ + image_file = update_opts.get('update_image_file') + targets = update_opts.get('update_targets') + apply_time = update_opts.get('update_apply_time') + oem_params = update_opts.get('update_oem_params') + custom_oem_header = update_opts.get('update_custom_oem_header') + custom_oem_mime_type = update_opts.get('update_custom_oem_mime_type') + custom_oem_params = update_opts.get('update_custom_oem_params') + + # Ensure the image file is provided + if not image_file: + return {'ret': False, 'msg': + 'Must specify update_image_file for the MultipartHTTPPushUpdate command'} + if not os.path.isfile(image_file): + return {'ret': False, 'msg': + 'Must specify a valid file for the MultipartHTTPPushUpdate command'} + try: + with open(image_file, 'rb') as f: + image_payload = f.read() + except Exception as e: + return {'ret': False, 'msg': f'Could not read file {image_file}'} + + # 
Check that multipart HTTP push updates are supported + response = self.get_request(self.root_uri + self.update_uri) + if response['ret'] is False: + return response + data = response['data'] + if 'MultipartHttpPushUri' not in data: + return {'ret': False, 'msg': 'Service does not support MultipartHttpPushUri'} + update_uri = data['MultipartHttpPushUri'] + + # Assemble the JSON payload portion of the request + payload = {} + if targets: + payload["Targets"] = targets + if apply_time: + payload["@Redfish.OperationApplyTime"] = apply_time + if oem_params: + payload["Oem"] = oem_params + multipart_payload = { + 'UpdateParameters': {'content': json.dumps(payload), 'mime_type': 'application/json'}, + 'UpdateFile': {'filename': image_file, 'content': image_payload, 'mime_type': 'application/octet-stream'} + } + if custom_oem_params: + multipart_payload[custom_oem_header] = {'content': custom_oem_params} + if custom_oem_mime_type: + multipart_payload[custom_oem_header]['mime_type'] = custom_oem_mime_type + + response = self.post_request(self.root_uri + update_uri, multipart_payload, multipart=True) + if response['ret'] is False: + return response + return {'ret': True, 'changed': True, + 'msg': "MultipartHTTPPushUpdate requested", + 'update_status': self._operation_results(response['resp'], response['data'])} + + def get_update_status(self, update_handle): + """ + Gets the status of an update operation. 
+ + :param handle: The task or job handle tracking the update + :return: dict containing the response of the update status + """ + + if not update_handle: + return {'ret': False, 'msg': 'Must provide a handle tracking the update.'} + + # Get the task or job tracking the update + response = self.get_request(self.root_uri + update_handle, allow_no_resp=True) + if response['ret'] is False: + return response + + # Inspect the response to build the update status + return self._operation_results(response['resp'], response['data'], update_handle) + + def perform_requested_update_operations(self, update_handle): + """ + Performs requested operations to allow the update to continue. + + :param handle: The task or job handle tracking the update + :return: dict containing the result of the operations + """ + + # Get the current update status + update_status = self.get_update_status(update_handle) + if update_status['ret'] is False: + return update_status + + changed = False + + # Perform any requested updates + for reset in update_status['resets_requested']: + resp = self.post_request(self.root_uri + reset['uri'], {'ResetType': reset['type']}) + if resp['ret'] is False: + # Override the 'changed' indicator since other resets may have + # been successful + resp['changed'] = changed + return resp + changed = True + + msg = 'No operations required for the update' + if changed: + # Will need to consider finetuning this message if the scope of the + # requested operations grow over time + msg = 'One or more components reset to continue the update' + return {'ret': True, 'changed': changed, 'msg': msg} def get_bios_attributes(self, systems_uri): result = {} @@ -1423,7 +2059,7 @@ class RedfishUtils(object): data = response['data'] if key not in data: - return {'ret': False, 'msg': "Key %s not found" % key} + return {'ret': False, 'msg': f"Key {key} not found"} bios_uri = data[key]["@odata.id"] @@ -1432,7 +2068,7 @@ class RedfishUtils(object): return response result['ret'] = True 
data = response['data'] - for attribute in data[u'Attributes'].items(): + for attribute in data['Attributes'].items(): bios_attributes[attribute[0]] = attribute[1] result["entries"] = bios_attributes return result @@ -1548,89 +2184,72 @@ class RedfishUtils(object): return self.aggregate_systems(self.get_boot_override) def set_bios_default_settings(self): - result = {} - key = "Bios" - - # Search for 'key' entry and extract URI from it + # Find the Bios resource from the requested ComputerSystem resource response = self.get_request(self.root_uri + self.systems_uri) if response['ret'] is False: return response - result['ret'] = True data = response['data'] + bios_uri = data.get('Bios', {}).get('@odata.id') + if bios_uri is None: + return {'ret': False, 'msg': 'Bios resource not found'} - if key not in data: - return {'ret': False, 'msg': "Key %s not found" % key} - - bios_uri = data[key]["@odata.id"] - - # Extract proper URI + # Find the URI of the ResetBios action response = self.get_request(self.root_uri + bios_uri) if response['ret'] is False: return response - result['ret'] = True data = response['data'] - reset_bios_settings_uri = data["Actions"]["#Bios.ResetBios"]["target"] + reset_bios_uri = data.get('Actions', {}).get('#Bios.ResetBios', {}).get('target') + if reset_bios_uri is None: + return {'ret': False, 'msg': 'ResetBios action not found'} - response = self.post_request(self.root_uri + reset_bios_settings_uri, {}) + # Perform the ResetBios action + response = self.post_request(self.root_uri + reset_bios_uri, {}) if response['ret'] is False: return response - return {'ret': True, 'changed': True, 'msg': "Set BIOS to default settings"} + return {'ret': True, 'changed': True, 'msg': "BIOS set to default settings"} def set_boot_override(self, boot_opts): - result = {} - key = "Boot" - + # Extract the requested boot override options bootdevice = boot_opts.get('bootdevice') uefi_target = boot_opts.get('uefi_target') boot_next = boot_opts.get('boot_next') 
override_enabled = boot_opts.get('override_enabled') boot_override_mode = boot_opts.get('boot_override_mode') - if not bootdevice and override_enabled != 'Disabled': return {'ret': False, 'msg': "bootdevice option required for temporary boot override"} - # Search for 'key' entry and extract URI from it + # Get the current boot override options from the Boot property response = self.get_request(self.root_uri + self.systems_uri) if response['ret'] is False: return response - result['ret'] = True data = response['data'] + boot = data.get('Boot') + if boot is None: + return {'ret': False, 'msg': "Boot property not found"} + cur_override_mode = boot.get('BootSourceOverrideMode') - if key not in data: - return {'ret': False, 'msg': "Key %s not found" % key} - - boot = data[key] - + # Check if the requested target is supported by the system if override_enabled != 'Disabled': annotation = 'BootSourceOverrideTarget@Redfish.AllowableValues' if annotation in boot: allowable_values = boot[annotation] if isinstance(allowable_values, list) and bootdevice not in allowable_values: return {'ret': False, - 'msg': "Boot device %s not in list of allowable values (%s)" % - (bootdevice, allowable_values)} - - # read existing values - cur_enabled = boot.get('BootSourceOverrideEnabled') - target = boot.get('BootSourceOverrideTarget') - cur_uefi_target = boot.get('UefiTargetBootSourceOverride') - cur_boot_next = boot.get('BootNext') - cur_override_mode = boot.get('BootSourceOverrideMode') + 'msg': f"Boot device {bootdevice} not in list of allowable values ({allowable_values})"} + # Build the request payload based on the desired boot override options if override_enabled == 'Disabled': payload = { 'Boot': { - 'BootSourceOverrideEnabled': override_enabled + 'BootSourceOverrideEnabled': override_enabled, + 'BootSourceOverrideTarget': 'None' } } elif bootdevice == 'UefiTarget': if not uefi_target: return {'ret': False, 'msg': "uefi_target option required to SetOneTimeBoot for UefiTarget"} - if 
override_enabled == cur_enabled and target == bootdevice and uefi_target == cur_uefi_target: - # If properties are already set, no changes needed - return {'ret': True, 'changed': False} payload = { 'Boot': { 'BootSourceOverrideEnabled': override_enabled, @@ -1638,13 +2257,13 @@ class RedfishUtils(object): 'UefiTargetBootSourceOverride': uefi_target } } + # If needed, also specify UEFI mode + if cur_override_mode == 'Legacy': + payload['Boot']['BootSourceOverrideMode'] = 'UEFI' elif bootdevice == 'UefiBootNext': if not boot_next: return {'ret': False, 'msg': "boot_next option required to SetOneTimeBoot for UefiBootNext"} - if cur_enabled == override_enabled and target == bootdevice and boot_next == cur_boot_next: - # If properties are already set, no changes needed - return {'ret': True, 'changed': False} payload = { 'Boot': { 'BootSourceOverrideEnabled': override_enabled, @@ -1652,11 +2271,10 @@ class RedfishUtils(object): 'BootNext': boot_next } } + # If needed, also specify UEFI mode + if cur_override_mode == 'Legacy': + payload['Boot']['BootSourceOverrideMode'] = 'UEFI' else: - if (cur_enabled == override_enabled and target == bootdevice and - (cur_override_mode == boot_override_mode or not boot_override_mode)): - # If properties are already set, no changes needed - return {'ret': True, 'changed': False} payload = { 'Boot': { 'BootSourceOverrideEnabled': override_enabled, @@ -1666,32 +2284,35 @@ class RedfishUtils(object): if boot_override_mode: payload['Boot']['BootSourceOverrideMode'] = boot_override_mode - response = self.patch_request(self.root_uri + self.systems_uri, payload) - if response['ret'] is False: - return response - return {'ret': True, 'changed': True} + # Apply the requested boot override request + resp = self.patch_request(self.root_uri + self.systems_uri, payload, check_pyld=True) + if resp['ret'] is False: + # WORKAROUND + # Older Dell systems do not allow BootSourceOverrideEnabled to be + # specified with UefiTarget as the target device + 
vendor = self._get_vendor()['Vendor'] + if vendor == 'Dell': + if bootdevice == 'UefiTarget' and override_enabled != 'Disabled': + payload['Boot'].pop('BootSourceOverrideEnabled', None) + resp = self.patch_request(self.root_uri + self.systems_uri, payload, check_pyld=True) + if resp['ret'] and resp['changed']: + resp['msg'] = 'Updated the boot override settings' + return resp def set_bios_attributes(self, attributes): - result = {} - key = "Bios" - - # Search for 'key' entry and extract URI from it + # Find the Bios resource from the requested ComputerSystem resource response = self.get_request(self.root_uri + self.systems_uri) if response['ret'] is False: return response - result['ret'] = True data = response['data'] + bios_uri = data.get('Bios', {}).get('@odata.id') + if bios_uri is None: + return {'ret': False, 'msg': 'Bios resource not found'} - if key not in data: - return {'ret': False, 'msg': "Key %s not found" % key} - - bios_uri = data[key]["@odata.id"] - - # Extract proper URI + # Get the current BIOS settings response = self.get_request(self.root_uri + bios_uri) if response['ret'] is False: return response - result['ret'] = True data = response['data'] # Make a copy of the attributes dict @@ -1702,19 +2323,19 @@ class RedfishUtils(object): # Check the attributes for attr_name, attr_value in attributes.items(): # Check if attribute exists - if attr_name not in data[u'Attributes']: + if attr_name not in data['Attributes']: # Remove and proceed to next attribute if this isn't valid attrs_bad.update({attr_name: attr_value}) del attrs_to_patch[attr_name] continue # If already set to requested value, remove it from PATCH payload - if data[u'Attributes'][attr_name] == attributes[attr_name]: + if data['Attributes'][attr_name] == attr_value: del attrs_to_patch[attr_name] warning = "" if attrs_bad: - warning = "Incorrect attributes %s" % (attrs_bad) + warning = f"Unsupported attributes {attrs_bad}" # Return success w/ changed=False if no attrs need to be changed 
if not attrs_to_patch: @@ -1722,16 +2343,26 @@ class RedfishUtils(object): 'msg': "BIOS attributes already set", 'warning': warning} - # Get the SettingsObject URI - set_bios_attr_uri = data["@Redfish.Settings"]["SettingsObject"]["@odata.id"] + # Get the SettingsObject URI to apply the attributes + set_bios_attr_uri = data.get("@Redfish.Settings", {}).get("SettingsObject", {}).get("@odata.id") + if set_bios_attr_uri is None: + return {'ret': False, 'msg': "Settings resource for BIOS attributes not found."} # Construct payload and issue PATCH command payload = {"Attributes": attrs_to_patch} + + # WORKAROUND + # Dell systems require manually setting the apply time to "OnReset" + # to spawn a proprietary job to apply the BIOS settings + vendor = self._get_vendor()['Vendor'] + if vendor == 'Dell': + payload.update({"@Redfish.SettingsApplyTime": {"ApplyTime": "OnReset"}}) + response = self.patch_request(self.root_uri + set_bios_attr_uri, payload) if response['ret'] is False: return response return {'ret': True, 'changed': True, - 'msg': "Modified BIOS attributes %s" % (attrs_to_patch), + 'msg': f"Modified BIOS attributes {attrs_to_patch}. 
A reboot is required", 'warning': warning} def set_boot_order(self, boot_list): @@ -1753,28 +2384,24 @@ class RedfishUtils(object): boot_order = boot['BootOrder'] boot_options_dict = self._get_boot_options_dict(boot) - # validate boot_list against BootOptionReferences if available + # Verify the requested boot options are valid if boot_options_dict: boot_option_references = boot_options_dict.keys() for ref in boot_list: if ref not in boot_option_references: return {'ret': False, - 'msg': "BootOptionReference %s not found in BootOptions" % ref} - - # If requested BootOrder is already set, nothing to do - if boot_order == boot_list: - return {'ret': True, 'changed': False, - 'msg': "BootOrder already set to %s" % boot_list} + 'msg': f"BootOptionReference {ref} not found in BootOptions"} + # Apply the boot order payload = { 'Boot': { 'BootOrder': boot_list } } - response = self.patch_request(self.root_uri + systems_uri, payload) - if response['ret'] is False: - return response - return {'ret': True, 'changed': True, 'msg': "BootOrder set"} + resp = self.patch_request(self.root_uri + systems_uri, payload, check_pyld=True) + if resp['ret'] and resp['changed']: + resp['msg'] = 'Modified the boot order' + return resp def set_default_boot_order(self): systems_uri = self.systems_uri @@ -1786,10 +2413,9 @@ class RedfishUtils(object): # get the #ComputerSystem.SetDefaultBootOrder Action and target URI action = '#ComputerSystem.SetDefaultBootOrder' if 'Actions' not in data or action not in data['Actions']: - return {'ret': False, 'msg': 'Action %s not found' % action} + return {'ret': False, 'msg': f'Action {action} not found'} if 'target' not in data['Actions'][action]: - return {'ret': False, - 'msg': 'target URI missing from Action %s' % action} + return {'ret': False, 'msg': f'target URI missing from Action {action}'} action_uri = data['Actions'][action]['target'] # POST to Action URI @@ -1848,8 +2474,8 @@ class RedfishUtils(object): data = response['data'] # Checking if 
fans are present - if u'Fans' in data: - for device in data[u'Fans']: + if 'Fans' in data: + for device in data['Fans']: fan = {} for property in properties: if property in device: @@ -1888,14 +2514,13 @@ class RedfishUtils(object): for property in properties: if property in data: chassis_power_result[property] = data[property] - else: - return {'ret': False, 'msg': 'Key PowerControl not found.'} chassis_power_results.append(chassis_power_result) - else: - return {'ret': False, 'msg': 'Key Power not found.'} - result['entries'] = chassis_power_results - return result + if len(chassis_power_results) > 0: + result['entries'] = chassis_power_results + return result + else: + return {'ret': False, 'msg': 'Power information not found.'} def get_chassis_thermals(self): result = {} @@ -1925,7 +2550,7 @@ class RedfishUtils(object): result['ret'] = True data = response['data'] if "Temperatures" in data: - for sensor in data[u'Temperatures']: + for sensor in data['Temperatures']: sensor_result = {} for property in properties: if property in sensor: @@ -1946,7 +2571,7 @@ class RedfishUtils(object): key = "Processors" # Get these entries, but does not fail if not found properties = ['Id', 'Name', 'Manufacturer', 'Model', 'MaxSpeedMHz', - 'TotalCores', 'TotalThreads', 'Status'] + 'ProcessorArchitecture', 'TotalCores', 'TotalThreads', 'Status'] # Search for 'key' entry and extract URI from it response = self.get_request(self.root_uri + systems_uri) @@ -1956,7 +2581,7 @@ class RedfishUtils(object): data = response['data'] if key not in data: - return {'ret': False, 'msg': "Key %s not found" % key} + return {'ret': False, 'msg': f"Key {key} not found"} processors_uri = data[key]["@odata.id"] @@ -1967,8 +2592,8 @@ class RedfishUtils(object): result['ret'] = True data = response['data'] - for cpu in data[u'Members']: - cpu_list.append(cpu[u'@odata.id']) + for cpu in data['Members']: + cpu_list.append(cpu['@odata.id']) for c in cpu_list: cpu = {} @@ -2006,7 +2631,7 @@ class 
RedfishUtils(object): data = response['data'] if key not in data: - return {'ret': False, 'msg': "Key %s not found" % key} + return {'ret': False, 'msg': f"Key {key} not found"} memory_uri = data[key]["@odata.id"] @@ -2017,8 +2642,8 @@ class RedfishUtils(object): result['ret'] = True data = response['data'] - for dimm in data[u'Members']: - memory_list.append(dimm[u'@odata.id']) + for dimm in data['Members']: + memory_list.append(dimm['@odata.id']) for m in memory_list: dimm = {} @@ -2050,7 +2675,7 @@ class RedfishUtils(object): result = {} properties = ['Name', 'Id', 'Description', 'FQDN', 'IPv4Addresses', 'IPv6Addresses', 'NameServers', 'MACAddress', 'PermanentMACAddress', - 'SpeedMbps', 'MTUSize', 'AutoNeg', 'Status'] + 'SpeedMbps', 'MTUSize', 'AutoNeg', 'Status', 'LinkStatus'] response = self.get_request(self.root_uri + resource_uri) if response['ret'] is False: return response @@ -2061,7 +2686,7 @@ class RedfishUtils(object): if property in data: nic[property] = data[property] result['entries'] = nic - return(result) + return result def get_nic_inventory(self, resource_uri): result = {} @@ -2076,7 +2701,7 @@ class RedfishUtils(object): data = response['data'] if key not in data: - return {'ret': False, 'msg': "Key %s not found" % key} + return {'ret': False, 'msg': f"Key {key} not found"} ethernetinterfaces_uri = data[key]["@odata.id"] @@ -2087,8 +2712,8 @@ class RedfishUtils(object): result['ret'] = True data = response['data'] - for nic in data[u'Members']: - nic_list.append(nic[u'@odata.id']) + for nic in data['Members']: + nic_list.append(nic['@odata.id']) for n in nic_list: nic = self.get_nic(n) @@ -2132,7 +2757,7 @@ class RedfishUtils(object): data = response['data'] if key not in data: - return {'ret': False, 'msg': "Key %s not found" % key} + return {'ret': False, 'msg': f"Key {key} not found"} virtualmedia_uri = data[key]["@odata.id"] @@ -2143,8 +2768,8 @@ class RedfishUtils(object): result['ret'] = True data = response['data'] - for virtualmedia in 
data[u'Members']: - virtualmedia_list.append(virtualmedia[u'@odata.id']) + for virtualmedia in data['Members']: + virtualmedia_list.append(virtualmedia['@odata.id']) for n in virtualmedia_list: virtualmedia = {} @@ -2162,11 +2787,15 @@ class RedfishUtils(object): result["entries"] = virtualmedia_results return result - def get_multi_virtualmedia(self): + def get_multi_virtualmedia(self, resource_type='Manager'): ret = True entries = [] - resource_uris = self.manager_uris + # Given resource_type, use the proper URI + if resource_type == 'Systems': + resource_uris = self.systems_uris + elif resource_type == 'Manager': + resource_uris = self.manager_uris for resource_uri in resource_uris: virtualmedia = self.get_virtualmedia(resource_uri) @@ -2178,7 +2807,7 @@ class RedfishUtils(object): @staticmethod def _find_empty_virt_media_slot(resources, media_types, - media_match_strict=True): + media_match_strict=True, vendor=''): for uri, data in resources.items(): # check MediaTypes if 'MediaTypes' in data and media_types: @@ -2187,6 +2816,9 @@ class RedfishUtils(object): else: if media_match_strict: continue + # Base on current Lenovo server capability, filter out slot RDOC1/2 and Remote1/2/3/4 which are not supported to Insert/Eject. 
+ if vendor == 'Lenovo' and ('RDOC' in uri or 'Remote' in uri): + continue # if ejected, 'Inserted' should be False and 'ImageName' cleared if (not data.get('Inserted', False) and not data.get('ImageName')): @@ -2234,36 +2866,39 @@ class RedfishUtils(object): allowable = ai.get(param, {}).get('AllowableValues', []) if allowable and options.get(option) not in allowable: return {'ret': False, - 'msg': "Value '%s' specified for option '%s' not " - "in list of AllowableValues %s" % ( - options.get(option), option, - allowable)} + 'msg': f"Value '{options.get(option)}' specified for option '{option}' not in list of AllowableValues {allowable}"} payload[param] = options.get(option) return payload def virtual_media_insert_via_patch(self, options, param_map, uri, data, image_only=False): # get AllowableValues - ai = dict((k[:-24], - {'AllowableValues': v}) for k, v in data.items() - if k.endswith('@Redfish.AllowableValues')) + ai = { + k[:-24]: {'AllowableValues': v} + for k, v in data.items() + if k.endswith('@Redfish.AllowableValues') + } # construct payload payload = self._insert_virt_media_payload(options, param_map, data, ai) - if 'Inserted' not in payload: + if 'Inserted' not in payload and not image_only: + # Add Inserted to the payload if needed payload['Inserted'] = True - # Some hardware (such as iLO 4) only supports the Image property on the PATCH operation - # Inserted and WriteProtected are not writable - if image_only: - del payload['Inserted'] - del payload['WriteProtected'] - # PATCH the resource - response = self.patch_request(self.root_uri + uri, payload) - if response['ret'] is False: - return response - return {'ret': True, 'changed': True, 'msg': "VirtualMedia inserted"} + resp = self.patch_request(self.root_uri + uri, payload, check_pyld=True) + if resp['ret'] is False: + # WORKAROUND + # Older HPE systems with iLO 4 and Supermicro do not support + # specifying Inserted or WriteProtected + vendor = self._get_vendor()['Vendor'] + if vendor == 'HPE' or 
vendor == 'Supermicro': + payload.pop('Inserted', None) + payload.pop('WriteProtected', None) + resp = self.patch_request(self.root_uri + uri, payload, check_pyld=True) + if resp['ret'] and resp['changed']: + resp['msg'] = 'VirtualMedia inserted' + return resp - def virtual_media_insert(self, options): + def virtual_media_insert(self, options, resource_type='Manager'): param_map = { 'Inserted': 'inserted', 'WriteProtected': 'write_protected', @@ -2272,7 +2907,6 @@ class RedfishUtils(object): 'TransferProtocolType': 'transfer_protocol_type', 'TransferMethod': 'transfer_method' } - image_only = False image_url = options.get('image_url') if not image_url: return {'ret': False, @@ -2280,46 +2914,45 @@ class RedfishUtils(object): media_types = options.get('media_types') # locate and read the VirtualMedia resources - response = self.get_request(self.root_uri + self.manager_uri) + # Given resource_type, use the proper URI + if resource_type == 'Systems': + resource_uri = self.systems_uri + elif resource_type == 'Manager': + resource_uri = self.manager_uri + response = self.get_request(self.root_uri + resource_uri) if response['ret'] is False: return response data = response['data'] if 'VirtualMedia' not in data: return {'ret': False, 'msg': "VirtualMedia resource not found"} - # Some hardware (such as iLO 4) only supports the Image property on the PATCH operation - # Inserted and WriteProtected are not writable - if data["FirmwareVersion"].startswith("iLO 4"): - image_only = True - virt_media_uri = data["VirtualMedia"]["@odata.id"] response = self.get_request(self.root_uri + virt_media_uri) if response['ret'] is False: return response data = response['data'] virt_media_list = [] - for member in data[u'Members']: - virt_media_list.append(member[u'@odata.id']) + for member in data['Members']: + virt_media_list.append(member['@odata.id']) resources, headers = self._read_virt_media_resources(virt_media_list) # see if image already inserted; if so, nothing to do if 
self._virt_media_image_inserted(resources, image_url): return {'ret': True, 'changed': False, - 'msg': "VirtualMedia '%s' already inserted" % image_url} + 'msg': f"VirtualMedia '{image_url}' already inserted"} # find an empty slot to insert the media # try first with strict media_type matching + vendor = self._get_vendor()['Vendor'] uri, data = self._find_empty_virt_media_slot( - resources, media_types, media_match_strict=True) + resources, media_types, media_match_strict=True, vendor=vendor) if not uri: # if not found, try without strict media_type matching uri, data = self._find_empty_virt_media_slot( - resources, media_types, media_match_strict=False) + resources, media_types, media_match_strict=False, vendor=vendor) if not uri: return {'ret': False, - 'msg': "Unable to find an available VirtualMedia resource " - "%s" % ('supporting ' + str(media_types) - if media_types else '')} + 'msg': f"Unable to find an available VirtualMedia resource {('supporting ' + str(media_types)) if media_types else ''}"} # confirm InsertMedia action found if ('Actions' not in data or @@ -2331,10 +2964,9 @@ class RedfishUtils(object): if 'PATCH' not in methods: # if Allow header present and PATCH missing, return error return {'ret': False, - 'msg': "%s action not found and PATCH not allowed" - % '#VirtualMedia.InsertMedia'} + 'msg': "#VirtualMedia.InsertMedia action not found and PATCH not allowed"} return self.virtual_media_insert_via_patch(options, param_map, - uri, data, image_only) + uri, data) # get the action property action = data['Actions']['#VirtualMedia.InsertMedia'] @@ -2349,6 +2981,15 @@ class RedfishUtils(object): payload = self._insert_virt_media_payload(options, param_map, data, ai) # POST to action response = self.post_request(self.root_uri + action_uri, payload) + if response['ret'] is False and ('Inserted' in payload or 'WriteProtected' in payload): + # WORKAROUND + # Older HPE systems with iLO 4 and Supermicro do not support + # specifying Inserted or 
WriteProtected + vendor = self._get_vendor()['Vendor'] + if vendor == 'HPE' or vendor == 'Supermicro': + payload.pop('Inserted', None) + payload.pop('WriteProtected', None) + response = self.post_request(self.root_uri + action_uri, payload) if response['ret'] is False: return response return {'ret': True, 'changed': True, 'msg': "VirtualMedia inserted"} @@ -2360,46 +3001,51 @@ class RedfishUtils(object): 'Image': None } - # Some hardware (such as iLO 4) only supports the Image property on the PATCH operation # Inserted is not writable if image_only: del payload['Inserted'] # PATCH resource - response = self.patch_request(self.root_uri + uri, payload) - if response['ret'] is False: - return response - return {'ret': True, 'changed': True, - 'msg': "VirtualMedia ejected"} + resp = self.patch_request(self.root_uri + uri, payload, check_pyld=True) + if resp['ret'] is False and 'Inserted' in payload: + # WORKAROUND + # Older HPE systems with iLO 4 and Supermicro do not support + # specifying Inserted + vendor = self._get_vendor()['Vendor'] + if vendor == 'HPE' or vendor == 'Supermicro': + payload.pop('Inserted', None) + resp = self.patch_request(self.root_uri + uri, payload, check_pyld=True) + if resp['ret'] and resp['changed']: + resp['msg'] = 'VirtualMedia ejected' + return resp - def virtual_media_eject(self, options): + def virtual_media_eject(self, options, resource_type='Manager'): image_url = options.get('image_url') if not image_url: return {'ret': False, 'msg': "image_url option required for VirtualMediaEject"} # locate and read the VirtualMedia resources - response = self.get_request(self.root_uri + self.manager_uri) + # Given resource_type, use the proper URI + if resource_type == 'Systems': + resource_uri = self.systems_uri + elif resource_type == 'Manager': + resource_uri = self.manager_uri + response = self.get_request(self.root_uri + resource_uri) if response['ret'] is False: return response data = response['data'] if 'VirtualMedia' not in data: return 
{'ret': False, 'msg': "VirtualMedia resource not found"} - # Some hardware (such as iLO 4) only supports the Image property on the PATCH operation - # Inserted is not writable - image_only = False - if data["FirmwareVersion"].startswith("iLO 4"): - image_only = True - virt_media_uri = data["VirtualMedia"]["@odata.id"] response = self.get_request(self.root_uri + virt_media_uri) if response['ret'] is False: return response data = response['data'] virt_media_list = [] - for member in data[u'Members']: - virt_media_list.append(member[u'@odata.id']) + for member in data['Members']: + virt_media_list.append(member['@odata.id']) resources, headers = self._read_virt_media_resources(virt_media_list) # find the VirtualMedia resource to eject @@ -2414,9 +3060,8 @@ class RedfishUtils(object): if 'PATCH' not in methods: # if Allow header present and PATCH missing, return error return {'ret': False, - 'msg': "%s action not found and PATCH not allowed" - % '#VirtualMedia.EjectMedia'} - return self.virtual_media_eject_via_patch(uri, image_only) + 'msg': "#VirtualMedia.EjectMedia action not found and PATCH not allowed"} + return self.virtual_media_eject_via_patch(uri) else: # POST to the EjectMedia Action action = data['Actions']['#VirtualMedia.EjectMedia'] @@ -2437,13 +3082,11 @@ class RedfishUtils(object): elif uri and not eject: # already ejected: return success but changed=False return {'ret': True, 'changed': False, - 'msg': "VirtualMedia image '%s' already ejected" % - image_url} + 'msg': f"VirtualMedia image '{image_url}' already ejected"} else: # return failure (no resources matching image_url found) return {'ret': False, 'changed': False, - 'msg': "No VirtualMedia resource found with image '%s' " - "inserted" % image_url} + 'msg': f"No VirtualMedia resource found with image '{image_url}' inserted"} def get_psu_inventory(self): result = {} @@ -2457,8 +3100,7 @@ class RedfishUtils(object): # Get a list of all Chassis and build URIs, then get all PowerSupplies # from each 
Power entry in the Chassis - chassis_uri_list = self.chassis_uris - for chassis_uri in chassis_uri_list: + for chassis_uri in self.chassis_uris: response = self.get_request(self.root_uri + chassis_uri) if response['ret'] is False: return response @@ -2467,7 +3109,7 @@ class RedfishUtils(object): data = response['data'] if 'Power' in data: - power_uri = data[u'Power'][u'@odata.id'] + power_uri = data['Power']['@odata.id'] else: continue @@ -2475,7 +3117,7 @@ class RedfishUtils(object): data = response['data'] if key not in data: - return {'ret': False, 'msg': "Key %s not found" % key} + return {'ret': False, 'msg': f"Key {key} not found"} psu_list = data[key] for psu in psu_list: @@ -2505,7 +3147,7 @@ class RedfishUtils(object): result = {} inventory = {} # Get these entries, but does not fail if not found - properties = ['Status', 'HostName', 'PowerState', 'Model', 'Manufacturer', + properties = ['Status', 'HostName', 'PowerState', 'BootProgress', 'Model', 'Manufacturer', 'PartNumber', 'SystemType', 'AssetTag', 'ServiceTag', 'SerialNumber', 'SKU', 'BiosVersion', 'MemorySummary', 'ProcessorSummary', 'TrustedModules', 'Name', 'Id'] @@ -2563,7 +3205,7 @@ class RedfishUtils(object): payload = {} for service_name in manager_services.keys(): if service_name not in protocol_services: - return {'ret': False, 'msg': "Service name %s is invalid" % service_name} + return {'ret': False, 'msg': f"Service name {service_name} is invalid"} payload[service_name] = {} for service_property in manager_services[service_name].keys(): value = manager_services[service_name][service_property] @@ -2573,59 +3215,36 @@ class RedfishUtils(object): elif value in protocol_state_offlist: payload[service_name]['ProtocolEnabled'] = False else: - return {'ret': False, 'msg': "Value of property %s is invalid" % service_property} + return {'ret': False, 'msg': f"Value of property {service_property} is invalid"} elif service_property in ['port', 'Port']: if isinstance(value, int): 
payload[service_name]['Port'] = value elif isinstance(value, str) and value.isdigit(): payload[service_name]['Port'] = int(value) else: - return {'ret': False, 'msg': "Value of property %s is invalid" % service_property} + return {'ret': False, 'msg': f"Value of property {service_property} is invalid"} else: payload[service_name][service_property] = value - # Find NetworkProtocol + # Find the ManagerNetworkProtocol resource response = self.get_request(self.root_uri + self.manager_uri) if response['ret'] is False: return response data = response['data'] - if 'NetworkProtocol' not in data: + networkprotocol_uri = data.get("NetworkProtocol", {}).get("@odata.id") + if networkprotocol_uri is None: return {'ret': False, 'msg': "NetworkProtocol resource not found"} - networkprotocol_uri = data["NetworkProtocol"]["@odata.id"] - # Check service property support or not - response = self.get_request(self.root_uri + networkprotocol_uri) - if response['ret'] is False: - return response - data = response['data'] - for service_name in payload.keys(): - if service_name not in data: - return {'ret': False, 'msg': "%s service not supported" % service_name} - for service_property in payload[service_name].keys(): - if service_property not in data[service_name]: - return {'ret': False, 'msg': "%s property for %s service not supported" % (service_property, service_name)} - - # if the protocol is already set, nothing to do - need_change = False - for service_name in payload.keys(): - for service_property in payload[service_name].keys(): - value = payload[service_name][service_property] - if value != data[service_name][service_property]: - need_change = True - break - - if not need_change: - return {'ret': True, 'changed': False, 'msg': "Manager NetworkProtocol services already set"} - - response = self.patch_request(self.root_uri + networkprotocol_uri, payload) - if response['ret'] is False: - return response - return {'ret': True, 'changed': True, 'msg': "Modified Manager 
NetworkProtocol services"} + # Modify the ManagerNetworkProtocol resource + resp = self.patch_request(self.root_uri + networkprotocol_uri, payload, check_pyld=True) + if resp['ret'] and resp['changed']: + resp['msg'] = 'Modified manager network protocol settings' + return resp @staticmethod def to_singular(resource_name): if resource_name.endswith('ies'): - resource_name = resource_name[:-3] + 'y' + resource_name = f"{resource_name[:-3]}y" elif resource_name.endswith('s'): resource_name = resource_name[:-1] return resource_name @@ -2649,12 +3268,12 @@ class RedfishUtils(object): if r.get('ret'): p = r.get('data') if p: - e = {self.to_singular(subsystem.lower()) + '_uri': u, + e = {f"{self.to_singular(subsystem.lower())}_uri": u, status: p.get(status, "Status not available")} health[subsystem].append(e) else: # non-collections case - e = {self.to_singular(subsystem.lower()) + '_uri': uri, + e = {f"{self.to_singular(subsystem.lower())}_uri": uri, status: d.get(status, "Status not available")} health[subsystem].append(e) @@ -2755,66 +3374,27 @@ class RedfishUtils(object): target_ethernet_current_setting = nic_info['ethernet_setting'] # Convert input to payload and check validity + # Note: Some properties in the EthernetInterface resource are arrays of + # objects. The call into this module expects a flattened view, meaning + # the user specifies exactly one object for an array property. For + # example, if a user provides IPv4StaticAddresses in the request to this + # module, it will turn that into an array of one member. This pattern + # should be avoided for future commands in this module, but needs to be + # preserved here for backwards compatibility. 
payload = {} for property in nic_config.keys(): value = nic_config[property] - if property not in target_ethernet_current_setting: - return {'ret': False, 'msg': "Property %s in nic_config is invalid" % property} - if isinstance(value, dict): - if isinstance(target_ethernet_current_setting[property], dict): - payload[property] = value - elif isinstance(target_ethernet_current_setting[property], list): - payload[property] = list() - payload[property].append(value) - else: - return {'ret': False, 'msg': "Value of property %s in nic_config is invalid" % property} + if property in target_ethernet_current_setting and isinstance(value, dict) and isinstance(target_ethernet_current_setting[property], list): + payload[property] = list() + payload[property].append(value) else: payload[property] = value - # If no need change, nothing to do. If error detected, report it - need_change = False - for property in payload.keys(): - set_value = payload[property] - cur_value = target_ethernet_current_setting[property] - # type is simple(not dict/list) - if not isinstance(set_value, dict) and not isinstance(set_value, list): - if set_value != cur_value: - need_change = True - # type is dict - if isinstance(set_value, dict): - for subprop in payload[property].keys(): - if subprop not in target_ethernet_current_setting[property]: - # Not configured already; need to apply the request - need_change = True - break - sub_set_value = payload[property][subprop] - sub_cur_value = target_ethernet_current_setting[property][subprop] - if sub_set_value != sub_cur_value: - need_change = True - # type is list - if isinstance(set_value, list): - if len(set_value) != len(cur_value): - # if arrays are not the same len, no need to check each element - need_change = True - continue - for i in range(len(set_value)): - for subprop in payload[property][i].keys(): - if subprop not in target_ethernet_current_setting[property][i]: - # Not configured already; need to apply the request - need_change = True - 
break - sub_set_value = payload[property][i][subprop] - sub_cur_value = target_ethernet_current_setting[property][i][subprop] - if sub_set_value != sub_cur_value: - need_change = True - - if not need_change: - return {'ret': True, 'changed': False, 'msg': "Manager NIC already set"} - - response = self.patch_request(self.root_uri + target_ethernet_uri, payload) - if response['ret'] is False: - return response - return {'ret': True, 'changed': True, 'msg': "Modified Manager NIC"} + # Modify the EthernetInterface resource + resp = self.patch_request(self.root_uri + target_ethernet_uri, payload, check_pyld=True) + if resp['ret'] and resp['changed']: + resp['msg'] = 'Modified manager NIC' + return resp # A helper function to get the EthernetInterface URI def get_manager_ethernet_uri(self, nic_addr='null'): @@ -2861,88 +3441,41 @@ class RedfishUtils(object): return nic_info def set_hostinterface_attributes(self, hostinterface_config, hostinterface_id=None): + if hostinterface_config is None: + return {'ret': False, 'msg': + 'Must provide hostinterface_config for SetHostInterface command'} + + # Find the HostInterfaceCollection resource response = self.get_request(self.root_uri + self.manager_uri) if response['ret'] is False: return response data = response['data'] - if 'HostInterfaces' not in data: - return {'ret': False, 'msg': "HostInterfaces resource not found"} - - hostinterfaces_uri = data["HostInterfaces"]["@odata.id"] + hostinterfaces_uri = data.get("HostInterfaces", {}).get("@odata.id") + if hostinterfaces_uri is None: + return {'ret': False, 'msg': "HostInterface resource not found"} response = self.get_request(self.root_uri + hostinterfaces_uri) if response['ret'] is False: return response data = response['data'] uris = [a.get('@odata.id') for a in data.get('Members', []) if a.get('@odata.id')] - # Capture list of URIs that match a specified HostInterface resource ID - if hostinterface_id: - matching_hostinterface_uris = [uri for uri in uris if hostinterface_id 
in uri.split('/')[-1]] + # Capture list of URIs that match a specified HostInterface resource Id + if hostinterface_id: + matching_hostinterface_uris = [uri for uri in uris if hostinterface_id in uri.rstrip('/').split('/')[-1]] if hostinterface_id and matching_hostinterface_uris: hostinterface_uri = list.pop(matching_hostinterface_uris) elif hostinterface_id and not matching_hostinterface_uris: - return {'ret': False, 'msg': "HostInterface ID %s not present." % hostinterface_id} + return {'ret': False, 'msg': f"HostInterface ID {hostinterface_id} not present."} elif len(uris) == 1: hostinterface_uri = list.pop(uris) else: return {'ret': False, 'msg': "HostInterface ID not defined and multiple interfaces detected."} - response = self.get_request(self.root_uri + hostinterface_uri) - if response['ret'] is False: - return response - current_hostinterface_config = response['data'] - payload = {} - for property in hostinterface_config.keys(): - value = hostinterface_config[property] - if property not in current_hostinterface_config: - return {'ret': False, 'msg': "Property %s in hostinterface_config is invalid" % property} - if isinstance(value, dict): - if isinstance(current_hostinterface_config[property], dict): - payload[property] = value - elif isinstance(current_hostinterface_config[property], list): - payload[property] = list() - payload[property].append(value) - else: - return {'ret': False, 'msg': "Value of property %s in hostinterface_config is invalid" % property} - else: - payload[property] = value - - need_change = False - for property in payload.keys(): - set_value = payload[property] - cur_value = current_hostinterface_config[property] - if not isinstance(set_value, dict) and not isinstance(set_value, list): - if set_value != cur_value: - need_change = True - if isinstance(set_value, dict): - for subprop in payload[property].keys(): - if subprop not in current_hostinterface_config[property]: - need_change = True - break - sub_set_value = 
payload[property][subprop] - sub_cur_value = current_hostinterface_config[property][subprop] - if sub_set_value != sub_cur_value: - need_change = True - if isinstance(set_value, list): - if len(set_value) != len(cur_value): - need_change = True - continue - for i in range(len(set_value)): - for subprop in payload[property][i].keys(): - if subprop not in current_hostinterface_config[property][i]: - need_change = True - break - sub_set_value = payload[property][i][subprop] - sub_cur_value = current_hostinterface_config[property][i][subprop] - if sub_set_value != sub_cur_value: - need_change = True - if not need_change: - return {'ret': True, 'changed': False, 'msg': "Host Interface already configured"} - - response = self.patch_request(self.root_uri + hostinterface_uri, payload) - if response['ret'] is False: - return response - return {'ret': True, 'changed': True, 'msg': "Modified Host Interface"} + # Modify the HostInterface resource + resp = self.patch_request(self.root_uri + hostinterface_uri, hostinterface_config, check_pyld=True) + if resp['ret'] and resp['changed']: + resp['msg'] = 'Modified host interface' + return resp def get_hostinterfaces(self): result = {} @@ -2958,10 +3491,8 @@ class RedfishUtils(object): result['ret'] = True data = response['data'] - - if 'HostInterfaces' in data: - hostinterfaces_uri = data[u'HostInterfaces'][u'@odata.id'] - else: + hostinterfaces_uri = data.get("HostInterfaces", {}).get("@odata.id") + if hostinterfaces_uri is None: continue response = self.get_request(self.root_uri + hostinterfaces_uri) @@ -3020,3 +3551,439 @@ class RedfishUtils(object): if not result["entries"]: return {'ret': False, 'msg': "No HostInterface objects found"} return result + + def get_manager_inventory(self, manager_uri): + result = {} + inventory = {} + # Get these entries, but does not fail if not found + properties = ['Id', 'FirmwareVersion', 'ManagerType', 'Manufacturer', 'Model', + 'PartNumber', 'PowerState', 'SerialNumber', 
'ServiceIdentification', + 'Status', 'UUID'] + + response = self.get_request(self.root_uri + manager_uri) + if response['ret'] is False: + return response + result['ret'] = True + data = response['data'] + + for property in properties: + if property in data: + inventory[property] = data[property] + + result["entries"] = inventory + return result + + def get_multi_manager_inventory(self): + return self.aggregate_managers(self.get_manager_inventory) + + def get_service_identification(self, manager): + result = {} + if manager is None: + if len(self.manager_uris) == 1: + manager = self.manager_uris[0].rstrip('/').split('/')[-1] + elif len(self.manager_uris) > 1: + entries = self.get_multi_manager_inventory()['entries'] + managers = [m[0]['manager_uri'] for m in entries if m[1].get('ServiceIdentification')] + if len(managers) == 1: + manager = managers[0].rstrip('/').split('/')[-1] + else: + self.module.fail_json(msg=[ + f"Multiple managers with ServiceIdentification were found: {managers}", + "Please specify by using the 'manager' parameter in your playbook"]) + elif len(self.manager_uris) == 0: + self.module.fail_json(msg="No manager identities were found") + response = self.get_request(f"{self.root_uri}/redfish/v1/Managers/{manager}", override_headers=None) + try: + result['service_identification'] = response['data']['ServiceIdentification'] + except Exception as e: + self.module.fail_json(msg=f"Service ID not found for manager {manager}") + result['ret'] = True + return result + + def set_service_identification(self, service_id): + data = {"ServiceIdentification": service_id} + resp = self.patch_request(f"{self.root_uri}/redfish/v1/Managers/{self.resource_id}", data, check_pyld=True) + return resp + + def set_session_service(self, sessions_config): + if sessions_config is None: + return {'ret': False, 'msg': + 'Must provide sessions_config for SetSessionService command'} + + resp = self.patch_request(self.root_uri + self.session_service_uri, sessions_config, 
check_pyld=True) + if resp['ret'] and resp['changed']: + resp['msg'] = 'Modified session service' + return resp + + def verify_bios_attributes(self, bios_attributes): + # This method verifies BIOS attributes against the provided input + server_bios = self.get_bios_attributes(self.systems_uri) + if server_bios["ret"] is False: + return server_bios + + bios_dict = {} + wrong_param = {} + + # Verify bios_attributes with BIOS settings available in the server + for key, value in bios_attributes.items(): + if key in server_bios["entries"]: + if server_bios["entries"][key] != value: + bios_dict.update({key: value}) + else: + wrong_param.update({key: value}) + + if wrong_param: + return { + "ret": False, + "msg": f"Wrong parameters are provided: {wrong_param}" + } + + if bios_dict: + return { + "ret": False, + "msg": f"BIOS parameters are not matching: {bios_dict}" + } + + return { + "ret": True, + "changed": False, + "msg": "BIOS verification completed" + } + + def enable_secure_boot(self): + # This function enable Secure Boot on an OOB controller + + response = self.get_request(self.root_uri + self.systems_uri) + if response["ret"] is False: + return response + + server_details = response["data"] + secure_boot_url = server_details["SecureBoot"]["@odata.id"] + + response = self.get_request(self.root_uri + secure_boot_url) + if response["ret"] is False: + return response + + body = {} + body["SecureBootEnable"] = True + + return self.patch_request(self.root_uri + secure_boot_url, body, check_pyld=True) + + def set_secure_boot(self, secure_boot_enable): + # This function enable Secure Boot on an OOB controller + + response = self.get_request(self.root_uri + self.systems_uri) + if response["ret"] is False: + return response + + server_details = response["data"] + secure_boot_url = server_details["SecureBoot"]["@odata.id"] + + response = self.get_request(self.root_uri + secure_boot_url) + if response["ret"] is False: + return response + + body = {} + body["SecureBootEnable"] 
= secure_boot_enable + + return self.patch_request(self.root_uri + secure_boot_url, body, check_pyld=True) + + def get_hpe_thermal_config(self): + result = {} + key = "Thermal" + # Go through list + for chassis_uri in self.chassis_uris: + response = self.get_request(self.root_uri + chassis_uri) + if response['ret'] is False: + return response + result['ret'] = True + data = response['data'] + val = data.get('Oem', {}).get('Hpe', {}).get('ThermalConfiguration') + if val is not None: + return {"ret": True, "current_thermal_config": val} + return {"ret": False} + + def get_hpe_fan_percent_min(self): + result = {} + key = "Thermal" + # Go through list + for chassis_uri in self.chassis_uris: + response = self.get_request(self.root_uri + chassis_uri) + if response['ret'] is False: + return response + data = response['data'] + val = data.get('Oem', {}).get('Hpe', {}).get('FanPercentMinimum') + if val is not None: + return {"ret": True, "fan_percent_min": val} + return {"ret": False} + + def delete_volumes(self, storage_subsystem_id, volume_ids): + # Find the Storage resource from the requested ComputerSystem resource + response = self.get_request(self.root_uri + self.systems_uri) + if response['ret'] is False: + return response + data = response['data'] + storage_uri = data.get('Storage', {}).get('@odata.id') + if storage_uri is None: + return {'ret': False, 'msg': 'Storage resource not found'} + + # Get Storage Collection + response = self.get_request(self.root_uri + storage_uri) + if response['ret'] is False: + return response + data = response['data'] + + # Collect Storage Subsystems + self.storage_subsystems_uris = [i['@odata.id'] for i in response['data'].get('Members', [])] + if not self.storage_subsystems_uris: + return { + 'ret': False, + 'msg': "StorageCollection's Members array is either empty or missing"} + + # Matching Storage Subsystem ID with user input + self.storage_subsystem_uri = "" + for storage_subsystem_uri in self.storage_subsystems_uris: + if 
storage_subsystem_uri.rstrip('/').split('/')[-1] == storage_subsystem_id: + self.storage_subsystem_uri = storage_subsystem_uri + + if not self.storage_subsystem_uri: + return { + 'ret': False, + 'msg': f"Provided Storage Subsystem ID {storage_subsystem_id} does not exist on the server"} + + # Get Volume Collection + response = self.get_request(self.root_uri + self.storage_subsystem_uri) + if response['ret'] is False: + return response + data = response['data'] + + response = self.get_request(self.root_uri + data['Volumes']['@odata.id']) + if response['ret'] is False: + return response + data = response['data'] + + # Collect Volumes + self.volume_uris = [i['@odata.id'] for i in response['data'].get('Members', [])] + if not self.volume_uris: + return { + 'ret': True, 'changed': False, + 'msg': "VolumeCollection's Members array is either empty or missing"} + + # Delete each volume + for volume in self.volume_uris: + if volume.rstrip('/').split('/')[-1] in volume_ids: + response = self.delete_request(self.root_uri + volume) + if response['ret'] is False: + return response + + return {'ret': True, 'changed': True, + 'msg': f"The following volumes were deleted: {volume_ids}"} + + def create_volume(self, volume_details, storage_subsystem_id, storage_none_volume_deletion=False): + # Find the Storage resource from the requested ComputerSystem resource + response = self.get_request(self.root_uri + self.systems_uri) + if response['ret'] is False: + return response + data = response['data'] + storage_uri = data.get('Storage', {}).get('@odata.id') + if storage_uri is None: + return {'ret': False, 'msg': 'Storage resource not found'} + + # Get Storage Collection + response = self.get_request(self.root_uri + storage_uri) + if response['ret'] is False: + return response + data = response['data'] + + # Collect Storage Subsystems + self.storage_subsystems_uris = [i['@odata.id'] for i in response['data'].get('Members', [])] + if not self.storage_subsystems_uris: + return { + 'ret': 
False, + 'msg': "StorageCollection's Members array is either empty or missing"} + + # Matching Storage Subsystem ID with user input + self.storage_subsystem_uri = "" + for storage_subsystem_uri in self.storage_subsystems_uris: + if storage_subsystem_uri.rstrip('/').split('/')[-1] == storage_subsystem_id: + self.storage_subsystem_uri = storage_subsystem_uri + + if not self.storage_subsystem_uri: + return { + 'ret': False, + 'msg': f"Provided Storage Subsystem ID {storage_subsystem_id} does not exist on the server"} + + # Validate input parameters + required_parameters = ['RAIDType', 'Drives'] + allowed_parameters = ['CapacityBytes', 'DisplayName', 'InitializeMethod', 'MediaSpanCount', + 'Name', 'ReadCachePolicy', 'StripSizeBytes', 'VolumeUsage', 'WriteCachePolicy'] + + for parameter in required_parameters: + if not volume_details.get(parameter): + return { + 'ret': False, + 'msg': f"{required_parameters} are required parameter to create a volume"} + + # Navigate to the volume uri of the correct storage subsystem + response = self.get_request(self.root_uri + self.storage_subsystem_uri) + if response['ret'] is False: + return response + data = response['data'] + + # Deleting any volumes of RAIDType None present on the Storage Subsystem + if storage_none_volume_deletion: + response = self.get_request(self.root_uri + data['Volumes']['@odata.id']) + if response['ret'] is False: + return response + volume_data = response['data'] + + if "Members" in volume_data: + for member in volume_data["Members"]: + response = self.get_request(self.root_uri + member['@odata.id']) + if response['ret'] is False: + return response + member_data = response['data'] + + if member_data["RAIDType"] == "None": + response = self.delete_request(self.root_uri + member['@odata.id']) + if response['ret'] is False: + return response + + # Construct payload and issue POST command to create volume + volume_details["Links"] = {} + volume_details["Links"]["Drives"] = [] + for drive in 
volume_details["Drives"]: + volume_details["Links"]["Drives"].append({"@odata.id": drive}) + del volume_details["Drives"] + payload = volume_details + response = self.post_request(self.root_uri + data['Volumes']['@odata.id'], payload) + if response['ret'] is False: + return response + + return {'ret': True, 'changed': True, + 'msg': "Volume Created"} + + def get_bios_registries(self): + # Get /redfish/v1 + response = self.get_request(self.root_uri + self.systems_uri) + if not response["ret"]: + return response + + server_details = response["data"] + + # Get Registries URI + if "Bios" not in server_details: + msg = "Getting BIOS URI failed, Key 'Bios' not found in /redfish/v1/Systems/1/ response: %s" + return { + "ret": False, + "msg": msg % str(server_details) + } + + bios_uri = server_details["Bios"]["@odata.id"] + bios_resp = self.get_request(self.root_uri + bios_uri) + if not bios_resp["ret"]: + return bios_resp + + bios_data = bios_resp["data"] + attribute_registry = bios_data["AttributeRegistry"] + + reg_uri = f"{self.root_uri}{self.service_root}Registries/{attribute_registry}" + reg_resp = self.get_request(reg_uri) + if not reg_resp["ret"]: + return reg_resp + + reg_data = reg_resp["data"] + + # Get BIOS attribute registry URI + lst = [] + + # Get the location URI + response = self.check_location_uri(reg_data, reg_uri) + if not response["ret"]: + return response + + rsp_data, rsp_uri = response["rsp_data"], response["rsp_uri"] + + if "RegistryEntries" not in rsp_data: + return { + "msg": f"'RegistryEntries' not present in {rsp_uri} response, {rsp_data}", + "ret": False + } + + return { + "bios_registry": rsp_data, + "bios_registry_uri": rsp_uri, + "ret": True + } + + def check_location_uri(self, resp_data, resp_uri): + # Get the location URI response + # return {"msg": self.creds, "ret": False} + vendor = self._get_vendor()['Vendor'] + rsp_uri = "" + for loc in resp_data['Location']: + if loc['Language'].startswith("en"): + rsp_uri = loc['Uri'] + if vendor == 
'HPE': + # WORKAROUND + # HPE systems with iLO 4 will have BIOS Attribute Registries location URI as a dictionary with key 'extref' + # Hence adding condition to fetch the Uri + if isinstance(loc['Uri'], dict) and "extref" in loc['Uri'].keys(): + rsp_uri = loc['Uri']['extref'] + if not rsp_uri: + msg = "Language 'en' not found in BIOS Attribute Registries location, URI: %s, response: %s" + return { + "ret": False, + "msg": msg % (resp_uri, str(resp_data)) + } + + res = self.get_request(self.root_uri + rsp_uri) + if res['ret'] is False: + # WORKAROUND + # HPE systems with iLO 4 or iLO5 compresses (gzip) for some URIs + # Hence adding encoding to the header + if vendor == 'HPE': + override_headers = {"Accept-Encoding": "gzip"} + res = self.get_request(self.root_uri + rsp_uri, override_headers=override_headers) + if res['ret']: + return { + "ret": True, + "rsp_data": res["data"], + "rsp_uri": rsp_uri + } + return res + + def get_accountservice_properties(self): + # Find the AccountService resource + response = self.get_request(self.root_uri + self.service_root) + if response['ret'] is False: + return response + data = response['data'] + accountservice_uri = data.get("AccountService", {}).get("@odata.id") + if accountservice_uri is None: + return {'ret': False, 'msg': "AccountService resource not found"} + + response = self.get_request(self.root_uri + accountservice_uri) + if response['ret'] is False: + return response + return { + 'ret': True, + 'entries': response['data'] + } + + def get_power_restore_policy(self, systems_uri): + # Retrieve System resource + response = self.get_request(self.root_uri + systems_uri) + if response['ret'] is False: + return response + return { + 'ret': True, + 'entries': response['data']['PowerRestorePolicy'] + } + + def get_multi_power_restore_policy(self): + return self.aggregate_systems(self.get_power_restore_policy) + + def set_power_restore_policy(self, policy): + body = {'PowerRestorePolicy': policy} + return 
self.patch_request(self.root_uri + self.systems_uri, body, check_pyld=True) diff --git a/plugins/module_utils/redhat.py b/plugins/module_utils/redhat.py deleted file mode 100644 index 85f4a6aab2..0000000000 --- a/plugins/module_utils/redhat.py +++ /dev/null @@ -1,271 +0,0 @@ -# -*- coding: utf-8 -*- -# This code is part of Ansible, but is an independent component. -# This particular file snippet, and this file snippet only, is BSD licensed. -# Modules you write using this snippet, which is embedded dynamically by Ansible -# still belong to the author of the module, and may assign their own license -# to the complete work. -# -# Copyright (c), James Laska -# -# Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause) - -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - - -import os -import re -import shutil -import tempfile -import types - -from ansible.module_utils.six.moves import configparser - - -class RegistrationBase(object): - def __init__(self, module, username=None, password=None): - self.module = module - self.username = username - self.password = password - - def configure(self): - raise NotImplementedError("Must be implemented by a sub-class") - - def enable(self): - # Remove any existing redhat.repo - redhat_repo = '/etc/yum.repos.d/redhat.repo' - if os.path.isfile(redhat_repo): - os.unlink(redhat_repo) - - def register(self): - raise NotImplementedError("Must be implemented by a sub-class") - - def unregister(self): - raise NotImplementedError("Must be implemented by a sub-class") - - def unsubscribe(self): - raise NotImplementedError("Must be implemented by a sub-class") - - def update_plugin_conf(self, plugin, enabled=True): - plugin_conf = '/etc/yum/pluginconf.d/%s.conf' % plugin - - if os.path.isfile(plugin_conf): - tmpfd, tmpfile = tempfile.mkstemp() - shutil.copy2(plugin_conf, tmpfile) - cfg = configparser.ConfigParser() - cfg.read([tmpfile]) - - if enabled: 
- cfg.set('main', 'enabled', 1) - else: - cfg.set('main', 'enabled', 0) - - fd = open(tmpfile, 'w+') - cfg.write(fd) - fd.close() - self.module.atomic_move(tmpfile, plugin_conf) - - def subscribe(self, **kwargs): - raise NotImplementedError("Must be implemented by a sub-class") - - -class Rhsm(RegistrationBase): - def __init__(self, module, username=None, password=None): - RegistrationBase.__init__(self, module, username, password) - self.config = self._read_config() - self.module = module - - def _read_config(self, rhsm_conf='/etc/rhsm/rhsm.conf'): - ''' - Load RHSM configuration from /etc/rhsm/rhsm.conf. - Returns: - * ConfigParser object - ''' - - # Read RHSM defaults ... - cp = configparser.ConfigParser() - cp.read(rhsm_conf) - - # Add support for specifying a default value w/o having to standup some configuration - # Yeah, I know this should be subclassed ... but, oh well - def get_option_default(self, key, default=''): - sect, opt = key.split('.', 1) - if self.has_section(sect) and self.has_option(sect, opt): - return self.get(sect, opt) - else: - return default - - cp.get_option = types.MethodType(get_option_default, cp, configparser.ConfigParser) - - return cp - - def enable(self): - ''' - Enable the system to receive updates from subscription-manager. - This involves updating affected yum plugins and removing any - conflicting yum repositories. - ''' - RegistrationBase.enable(self) - self.update_plugin_conf('rhnplugin', False) - self.update_plugin_conf('subscription-manager', True) - - def configure(self, **kwargs): - ''' - Configure the system as directed for registration with RHN - Raises: - * Exception - if error occurs while running command - ''' - args = ['subscription-manager', 'config'] - - # Pass supplied **kwargs as parameters to subscription-manager. Ignore - # non-configuration parameters and replace '_' with '.'. For example, - # 'server_hostname' becomes '--system.hostname'. 
- for k, v in kwargs.items(): - if re.search(r'^(system|rhsm)_', k): - args.append('--%s=%s' % (k.replace('_', '.'), v)) - - self.module.run_command(args, check_rc=True) - - @property - def is_registered(self): - ''' - Determine whether the current system - Returns: - * Boolean - whether the current system is currently registered to - RHN. - ''' - args = ['subscription-manager', 'identity'] - rc, stdout, stderr = self.module.run_command(args, check_rc=False) - if rc == 0: - return True - else: - return False - - def register(self, username, password, autosubscribe, activationkey): - ''' - Register the current system to the provided RHN server - Raises: - * Exception - if error occurs while running command - ''' - args = ['subscription-manager', 'register'] - - # Generate command arguments - if activationkey: - args.append('--activationkey "%s"' % activationkey) - else: - if autosubscribe: - args.append('--autosubscribe') - if username: - args.extend(['--username', username]) - if password: - args.extend(['--password', password]) - - # Do the needful... 
- rc, stderr, stdout = self.module.run_command(args, check_rc=True) - - def unsubscribe(self): - ''' - Unsubscribe a system from all subscribed channels - Raises: - * Exception - if error occurs while running command - ''' - args = ['subscription-manager', 'unsubscribe', '--all'] - rc, stderr, stdout = self.module.run_command(args, check_rc=True) - - def unregister(self): - ''' - Unregister a currently registered system - Raises: - * Exception - if error occurs while running command - ''' - args = ['subscription-manager', 'unregister'] - rc, stderr, stdout = self.module.run_command(args, check_rc=True) - self.update_plugin_conf('rhnplugin', False) - self.update_plugin_conf('subscription-manager', False) - - def subscribe(self, regexp): - ''' - Subscribe current system to available pools matching the specified - regular expression - Raises: - * Exception - if error occurs while running command - ''' - - # Available pools ready for subscription - available_pools = RhsmPools(self.module) - - for pool in available_pools.filter(regexp): - pool.subscribe() - - -class RhsmPool(object): - ''' - Convenience class for housing subscription information - ''' - - def __init__(self, module, **kwargs): - self.module = module - for k, v in kwargs.items(): - setattr(self, k, v) - - def __str__(self): - return str(self.__getattribute__('_name')) - - def subscribe(self): - args = "subscription-manager subscribe --pool %s" % self.PoolId - rc, stdout, stderr = self.module.run_command(args, check_rc=True) - if rc == 0: - return True - else: - return False - - -class RhsmPools(object): - """ - This class is used for manipulating pools subscriptions with RHSM - """ - def __init__(self, module): - self.module = module - self.products = self._load_product_list() - - def __iter__(self): - return self.products.__iter__() - - def _load_product_list(self): - """ - Loads list of all available pools for system in data structure - """ - args = "subscription-manager list --available" - rc, stdout, 
stderr = self.module.run_command(args, check_rc=True) - - products = [] - for line in stdout.split('\n'): - # Remove leading+trailing whitespace - line = line.strip() - # An empty line implies the end of an output group - if len(line) == 0: - continue - # If a colon ':' is found, parse - elif ':' in line: - (key, value) = line.split(':', 1) - key = key.strip().replace(" ", "") # To unify - value = value.strip() - if key in ['ProductName', 'SubscriptionName']: - # Remember the name for later processing - products.append(RhsmPool(self.module, _name=value, key=value)) - elif products: - # Associate value with most recently recorded product - products[-1].__setattr__(key, value) - # FIXME - log some warning? - # else: - # warnings.warn("Unhandled subscription key/value: %s/%s" % (key,value)) - return products - - def filter(self, regexp='^$'): - ''' - Return a list of RhsmPools whose name matches the provided regular expression - ''' - r = re.compile(regexp) - for product in self.products: - if r.search(product._name): - yield product diff --git a/plugins/module_utils/redis.py b/plugins/module_utils/redis.py index 8f035614f0..d3de8e63e9 100644 --- a/plugins/module_utils/redis.py +++ b/plugins/module_utils/redis.py @@ -1,12 +1,11 @@ -# -*- coding: utf-8 -*- # -# Copyright: (c) 2021, Andreas Botzner -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# Copyright (c) 2021, Andreas Botzner +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import absolute_import, division, print_function +from __future__ import annotations from ansible.module_utils.basic import missing_required_lib -__metaclass__ = type import traceback @@ -15,6 +14,7 @@ try: from redis import Redis from redis import __version__ as redis_version HAS_REDIS_PACKAGE = True + REDIS_IMP_ERR = None except ImportError: REDIS_IMP_ERR = 
traceback.format_exc() HAS_REDIS_PACKAGE = False @@ -22,6 +22,7 @@ except ImportError: try: import certifi HAS_CERTIFI_PACKAGE = True + CERTIFI_IMPORT_ERROR = None except ImportError: CERTIFI_IMPORT_ERROR = traceback.format_exc() HAS_CERTIFI_PACKAGE = False @@ -54,7 +55,9 @@ def redis_auth_argument_spec(tls_default=True): validate_certs=dict(type='bool', default=True ), - ca_certs=dict(type='str') + ca_certs=dict(type='str'), + client_cert_file=dict(type='str'), + client_key_file=dict(type='str'), ) @@ -68,6 +71,8 @@ def redis_auth_params(module): ca_certs = module.params['ca_certs'] if tls and ca_certs is None: ca_certs = str(certifi.where()) + client_cert_file = module.params['client_cert_file'] + client_key_file = module.params['client_key_file'] if tuple(map(int, redis_version.split('.'))) < (3, 4, 0) and login_user is not None: module.fail_json( msg='The option `username` in only supported with redis >= 3.4.0.') @@ -75,6 +80,8 @@ def redis_auth_params(module): 'port': login_port, 'password': login_password, 'ssl_ca_certs': ca_certs, + 'ssl_certfile': client_cert_file, + 'ssl_keyfile': client_key_file, 'ssl_cert_reqs': validate_certs, 'ssl': tls} if login_user is not None: @@ -93,5 +100,5 @@ class RedisAnsible(object): try: return Redis(**redis_auth_params(self.module)) except Exception as e: - self.module.fail_json(msg='{0}'.format(str(e))) + self.module.fail_json(msg=f'{e}') return None diff --git a/plugins/module_utils/remote_management/lxca/common.py b/plugins/module_utils/remote_management/lxca/common.py index 07092b9642..1f06839d39 100644 --- a/plugins/module_utils/remote_management/lxca/common.py +++ b/plugins/module_utils/remote_management/lxca/common.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # This code is part of Ansible, but is an independent component. # This particular file snippet, and this file snippet only, is BSD licensed. 
# Modules you write using this snippet, which is embedded dynamically by @@ -6,13 +5,13 @@ # own license to the complete work. # # Copyright (C) 2017 Lenovo, Inc. -# Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause) +# Simplified BSD License (see LICENSES/BSD-2-Clause.txt or https://opensource.org/licenses/BSD-2-Clause) +# SPDX-License-Identifier: BSD-2-Clause # # Contains LXCA common class # Lenovo xClarity Administrator (LXCA) -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations import traceback try: diff --git a/plugins/module_utils/rundeck.py b/plugins/module_utils/rundeck.py index afbbb48108..7b9f56339a 100644 --- a/plugins/module_utils/rundeck.py +++ b/plugins/module_utils/rundeck.py @@ -1,10 +1,9 @@ -# -*- coding: utf-8 -*- -# Copyright: (c) 2021, Phillipe Smith -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# Copyright (c) 2021, Phillipe Smith +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations import json @@ -27,7 +26,7 @@ def api_argument_spec(): return api_argument_spec -def api_request(module, endpoint, data=None, method="GET"): +def api_request(module, endpoint, data=None, method="GET", content_type="application/json"): """Manages Rundeck API requests via HTTP(S) :arg module: The AnsibleModule (used to get url, api_version, api_token, etc). 
@@ -54,15 +53,11 @@ def api_request(module, endpoint, data=None, method="GET"): response, info = fetch_url( module=module, - url="%s/api/%s/%s" % ( - module.params["url"], - module.params["api_version"], - endpoint - ), + url=f"{module.params['url']}/api/{module.params['api_version']}/{endpoint}", data=json.dumps(data), method=method, headers={ - "Content-Type": "application/json", + "Content-Type": content_type, "Accept": "application/json", "X-Rundeck-Auth-Token": module.params["api_token"] } @@ -71,7 +66,9 @@ def api_request(module, endpoint, data=None, method="GET"): if info["status"] == 403: module.fail_json(msg="Token authorization failed", execution_info=json.loads(info["body"])) - if info["status"] == 409: + elif info["status"] == 404: + return None, info + elif info["status"] == 409: module.fail_json(msg="Job executions limit reached", execution_info=json.loads(info["body"])) elif info["status"] >= 500: @@ -80,12 +77,18 @@ def api_request(module, endpoint, data=None, method="GET"): try: content = response.read() - json_response = json.loads(content) - return json_response, info + + if not content: + return None, info + else: + json_response = json.loads(content) + return json_response, info except AttributeError as error: - module.fail_json(msg="Rundeck API request error", - exception=to_native(error), - execution_info=info) + module.fail_json( + msg="Rundeck API request error", + exception=to_native(error), + execution_info=info + ) except ValueError as error: module.fail_json( msg="No valid JSON response", diff --git a/plugins/module_utils/saslprep.py b/plugins/module_utils/saslprep.py index 3e16c7169e..b02cedd874 100644 --- a/plugins/module_utils/saslprep.py +++ b/plugins/module_utils/saslprep.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # This code is part of Ansible, but is an independent component. # This particular file snippet, and this file snippet only, is BSD licensed. 
@@ -6,12 +5,12 @@ # still belong to the author of the module, and may assign their own license # to the complete work. -# Copyright: (c) 2020, Andrew Klychkov (@Andersson007) +# Copyright (c) 2020, Andrew Klychkov (@Andersson007) # -# Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause) +# Simplified BSD License (see LICENSES/BSD-2-Clause.txt or https://opensource.org/licenses/BSD-2-Clause) +# SPDX-License-Identifier: BSD-2-Clause -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations from stringprep import ( in_table_a1, @@ -30,11 +29,9 @@ from stringprep import ( ) from unicodedata import normalize -from ansible.module_utils.six import text_type - def is_unicode_str(string): - return True if isinstance(string, text_type) else False + return True if isinstance(string, str) else False def mapping_profile(string): @@ -53,11 +50,11 @@ def mapping_profile(string): if in_table_c12(c): # map non-ASCII space characters # (that can be mapped) to Unicode space - tmp.append(u' ') + tmp.append(' ') else: tmp.append(c) - return u"".join(tmp) + return "".join(tmp) def is_ral_string(string): @@ -108,35 +105,31 @@ def prohibited_output_profile(string): for c in string: # RFC4013 2.3. Prohibited Output: if in_table_c12(c): - raise ValueError('%s: prohibited non-ASCII space characters ' - 'that cannot be replaced (C.1.2).' % RFC) + raise ValueError(f'{RFC}: prohibited non-ASCII space characters that cannot be replaced (C.1.2).') if in_table_c21_c22(c): - raise ValueError('%s: prohibited control characters (C.2.1).' % RFC) + raise ValueError(f'{RFC}: prohibited control characters (C.2.1).') if in_table_c3(c): - raise ValueError('%s: prohibited private Use characters (C.3).' % RFC) + raise ValueError(f'{RFC}: prohibited private Use characters (C.3).') if in_table_c4(c): - raise ValueError('%s: prohibited non-character code points (C.4).' 
% RFC) + raise ValueError(f'{RFC}: prohibited non-character code points (C.4).') if in_table_c5(c): - raise ValueError('%s: prohibited surrogate code points (C.5).' % RFC) + raise ValueError(f'{RFC}: prohibited surrogate code points (C.5).') if in_table_c6(c): - raise ValueError('%s: prohibited inappropriate for plain text ' - 'characters (C.6).' % RFC) + raise ValueError(f'{RFC}: prohibited inappropriate for plain text characters (C.6).') if in_table_c7(c): - raise ValueError('%s: prohibited inappropriate for canonical ' - 'representation characters (C.7).' % RFC) + raise ValueError(f'{RFC}: prohibited inappropriate for canonical representation characters (C.7).') if in_table_c8(c): - raise ValueError('%s: prohibited change display properties / ' - 'deprecated characters (C.8).' % RFC) + raise ValueError(f'{RFC}: prohibited change display properties / deprecated characters (C.8).') if in_table_c9(c): - raise ValueError('%s: prohibited tagging characters (C.9).' % RFC) + raise ValueError(f'{RFC}: prohibited tagging characters (C.9).') # RFC4013, 2.4. Bidirectional Characters: if is_prohibited_bidi_ch(c): - raise ValueError('%s: prohibited bidi characters (%s).' % (RFC, bidi_table)) + raise ValueError(f'{RFC}: prohibited bidi characters ({bidi_table}).') # RFC4013, 2.5. Unassigned Code Points: if in_table_a1(c): - raise ValueError('%s: prohibited unassigned code points (A.1).' % RFC) + raise ValueError(f'{RFC}: prohibited unassigned code points (A.1).') def saslprep(string): @@ -157,9 +150,8 @@ def saslprep(string): # RFC4013: "The algorithm assumes all strings are # comprised of characters from the Unicode [Unicode] character set." # Validate the string is a Unicode string - # (text_type is the string type if PY3 and unicode otherwise): if not is_unicode_str(string): - raise TypeError('input must be of type %s, not %s' % (text_type, type(string))) + raise TypeError(f'input must be of type str, not {type(string)}') # RFC4013: 2.1. Mapping. 
string = mapping_profile(string) @@ -168,7 +160,7 @@ def saslprep(string): # "This profile specifies using Unicode normalization form KC." string = normalize('NFKC', string) if not string: - return u'' + return '' # RFC4013: 2.3. Prohibited Output. # RFC4013: 2.4. Bidirectional Characters. diff --git a/plugins/module_utils/scaleway.py b/plugins/module_utils/scaleway.py index e6fb8109cc..0798e61317 100644 --- a/plugins/module_utils/scaleway.py +++ b/plugins/module_utils/scaleway.py @@ -1,14 +1,32 @@ -# -*- coding: utf-8 -*- -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +# Copyright (c) Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations import json import re import sys +import datetime +import time +import traceback +from urllib.parse import urlencode -from ansible.module_utils.basic import env_fallback +from ansible.module_utils.basic import env_fallback, missing_required_lib from ansible.module_utils.urls import fetch_url -from ansible.module_utils.six.moves.urllib.parse import urlencode + +from ansible_collections.community.general.plugins.module_utils.datetime import ( + now, +) + +SCALEWAY_SECRET_IMP_ERR = None +try: + from passlib.hash import argon2 + HAS_SCALEWAY_SECRET_PACKAGE = True +except Exception: + argon2 = None + SCALEWAY_SECRET_IMP_ERR = traceback.format_exc() + HAS_SCALEWAY_SECRET_PACKAGE = False def scaleway_argument_spec(): @@ -22,12 +40,20 @@ def scaleway_argument_spec(): ) -def payload_from_object(scw_object): +def scaleway_waitable_resource_argument_spec(): return dict( - (k, v) + wait=dict(type="bool", default=True), + wait_timeout=dict(type="int", default=300), + wait_sleep_time=dict(type="int", default=3), + ) + + +def payload_from_object(scw_object): + return { + k: v for k, v in scw_object.items() if k != 'id' and v is not None - ) + } 
class ScalewayException(Exception): @@ -59,6 +85,61 @@ def parse_pagination_link(header): return parsed_relations +def filter_sensitive_attributes(container, attributes): + ''' + WARNING: This function is effectively private, **do not use it**! + It will be removed or renamed once changing its name no longer triggers a pylint bug. + ''' + for attr in attributes: + container[attr] = "SENSITIVE_VALUE" + + return container + + +class SecretVariables(object): + @staticmethod + def ensure_scaleway_secret_package(module): + if not HAS_SCALEWAY_SECRET_PACKAGE: + module.fail_json( + msg=missing_required_lib("passlib[argon2]", url='https://passlib.readthedocs.io/en/stable/'), + exception=SCALEWAY_SECRET_IMP_ERR + ) + + @staticmethod + def dict_to_list(source_dict): + return [dict(key=k, value=v) for k, v in source_dict.items()] + + @staticmethod + def list_to_dict(source_list, hashed=False): + key_value = 'hashed_value' if hashed else 'value' + return {var['key']: var[key_value] for var in source_list} + + @classmethod + def decode(cls, secrets_list, values_list): + secrets_dict = cls.list_to_dict(secrets_list, hashed=True) + values_dict = cls.list_to_dict(values_list, hashed=False) + for key in values_dict: + if key in secrets_dict: + if argon2.verify(values_dict[key], secrets_dict[key]): + secrets_dict[key] = values_dict[key] + else: + secrets_dict[key] = secrets_dict[key] + + return cls.dict_to_list(secrets_dict) + + +def resource_attributes_should_be_changed(target, wished, verifiable_mutable_attributes, mutable_attributes): + diff = dict() + for attr in verifiable_mutable_attributes: + if wished[attr] is not None and target[attr] != wished[attr]: + diff[attr] = wished[attr] + + if diff: + return {attr: wished[attr] for attr in mutable_attributes} + else: + return diff + + class Response(object): def __init__(self, resp, info): @@ -165,6 +246,78 @@ class Scaleway(object): def warn(self, x): self.module.warn(str(x)) + def fetch_state(self, resource): + 
self.module.debug("fetch_state of resource: %s" % resource["id"]) + response = self.get(path=self.api_path + "/%s" % resource["id"]) + + if response.status_code == 404: + return "absent" + + if not response.ok: + msg = 'Error during state fetching: (%s) %s' % (response.status_code, response.json) + self.module.fail_json(msg=msg) + + try: + self.module.debug("Resource %s in state: %s" % (resource["id"], response.json["status"])) + return response.json["status"] + except KeyError: + self.module.fail_json(msg="Could not fetch state in %s" % response.json) + + def fetch_paginated_resources(self, resource_key, **pagination_kwargs): + response = self.get( + path=self.api_path, + params=pagination_kwargs) + + status_code = response.status_code + if not response.ok: + self.module.fail_json(msg='Error getting {0} [{1}: {2}]'.format( + resource_key, + response.status_code, response.json['message'])) + + return response.json[resource_key] + + def fetch_all_resources(self, resource_key, **pagination_kwargs): + resources = [] + + result = [None] + while len(result) != 0: + result = self.fetch_paginated_resources(resource_key, **pagination_kwargs) + resources += result + if 'page' in pagination_kwargs: + pagination_kwargs['page'] += 1 + else: + pagination_kwargs['page'] = 2 + + return resources + + def wait_to_complete_state_transition(self, resource, stable_states, force_wait=False): + wait = self.module.params["wait"] + + if not (wait or force_wait): + return + + wait_timeout = self.module.params["wait_timeout"] + wait_sleep_time = self.module.params["wait_sleep_time"] + + # Prevent requesting the resource status too soon + time.sleep(wait_sleep_time) + + start = now() + end = start + datetime.timedelta(seconds=wait_timeout) + + while now() < end: + self.module.debug("We are going to wait for the resource to finish its transition") + + state = self.fetch_state(resource) + if state in stable_states: + self.module.debug("It seems that the resource is not in transition anymore.") 
+ self.module.debug("load-balancer in state: %s" % self.fetch_state(resource)) + break + + time.sleep(wait_sleep_time) + else: + self.module.fail_json(msg="Server takes too long to finish its transition") + SCALEWAY_LOCATION = { 'par1': { @@ -195,11 +348,18 @@ SCALEWAY_LOCATION = { 'api_endpoint_vpc': 'https://api.scaleway.com/vpc/v1/zones/fr-par-2' }, + 'par3': { + 'name': 'Paris 3', + 'country': 'FR', + 'api_endpoint': 'https://api.scaleway.com/instance/v1/zones/fr-par-3', + 'api_endpoint_vpc': 'https://api.scaleway.com/vpc/v1/zones/fr-par-3' + }, + 'ams1': { 'name': 'Amsterdam 1', 'country': 'NL', 'api_endpoint': 'https://api.scaleway.com/instance/v1/zones/nl-ams-1', - 'api_endpoint_vpc': 'https://api.scaleway.com/vpc/v1/zones/nl-ams-10' + 'api_endpoint_vpc': 'https://api.scaleway.com/vpc/v1/zones/nl-ams-1' }, 'EMEA-NL-EVS': { @@ -209,6 +369,20 @@ SCALEWAY_LOCATION = { 'api_endpoint_vpc': 'https://api.scaleway.com/vpc/v1/zones/nl-ams-1' }, + 'ams2': { + 'name': 'Amsterdam 2', + 'country': 'NL', + 'api_endpoint': 'https://api.scaleway.com/instance/v1/zones/nl-ams-2', + 'api_endpoint_vpc': 'https://api.scaleway.com/vpc/v1/zones/nl-ams-2' + }, + + 'ams3': { + 'name': 'Amsterdam 3', + 'country': 'NL', + 'api_endpoint': 'https://api.scaleway.com/instance/v1/zones/nl-ams-3', + 'api_endpoint_vpc': 'https://api.scaleway.com/vpc/v1/zones/nl-ams-3' + }, + 'waw1': { 'name': 'Warsaw 1', 'country': 'PL', @@ -222,6 +396,20 @@ SCALEWAY_LOCATION = { 'api_endpoint': 'https://api.scaleway.com/instance/v1/zones/pl-waw-1', 'api_endpoint_vpc': 'https://api.scaleway.com/vpc/v1/zones/pl-waw-1' }, + + 'waw2': { + 'name': 'Warsaw 2', + 'country': 'PL', + 'api_endpoint': 'https://api.scaleway.com/instance/v1/zones/pl-waw-2', + 'api_endpoint_vpc': 'https://api.scaleway.com/vpc/v1/zones/pl-waw-2' + }, + + 'waw3': { + 'name': 'Warsaw 3', + 'country': 'PL', + 'api_endpoint': 'https://api.scaleway.com/instance/v1/zones/pl-waw-3', + 'api_endpoint_vpc': 
'https://api.scaleway.com/vpc/v1/zones/pl-waw-3' + }, } SCALEWAY_ENDPOINT = "https://api.scaleway.com" @@ -235,6 +423,11 @@ SCALEWAY_REGIONS = [ SCALEWAY_ZONES = [ "fr-par-1", "fr-par-2", + "fr-par-3", "nl-ams-1", + "nl-ams-2", + "nl-ams-3", "pl-waw-1", + "pl-waw-2", + "pl-waw-3", ] diff --git a/plugins/module_utils/snap.py b/plugins/module_utils/snap.py new file mode 100644 index 0000000000..d672a7b519 --- /dev/null +++ b/plugins/module_utils/snap.py @@ -0,0 +1,53 @@ +# Copyright (c) 2023, Alexei Znamensky +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + +from ansible_collections.community.general.plugins.module_utils.cmd_runner import CmdRunner, cmd_runner_fmt + + +_alias_state_map = dict( + present='alias', + absent='unalias', + info='aliases', +) + +_state_map = dict( + present='install', + absent='remove', + enabled='enable', + disabled='disable', + refresh='refresh', +) + + +def snap_runner(module, **kwargs): + runner = CmdRunner( + module, + "snap", + arg_formats=dict( + state_alias=cmd_runner_fmt.as_map(_alias_state_map), # snap_alias only + name=cmd_runner_fmt.as_list(), + alias=cmd_runner_fmt.as_list(), # snap_alias only + state=cmd_runner_fmt.as_map(_state_map), + _list=cmd_runner_fmt.as_fixed("list"), + _set=cmd_runner_fmt.as_fixed("set"), + get=cmd_runner_fmt.as_fixed(["get", "-d"]), + classic=cmd_runner_fmt.as_bool("--classic"), + channel=cmd_runner_fmt.as_func(lambda v: [] if v == 'stable' else ['--channel', f'{v}']), + options=cmd_runner_fmt.as_list(), + info=cmd_runner_fmt.as_fixed("info"), + dangerous=cmd_runner_fmt.as_bool("--dangerous"), + version=cmd_runner_fmt.as_fixed("version"), + ), + check_rc=False, + **kwargs + ) + return runner + + +def get_version(runner): + with runner("version") as ctx: + rc, out, err = ctx.run() + return dict(x.split() for x in out.splitlines() if len(x.split()) == 2) 
diff --git a/plugins/module_utils/source_control/bitbucket.py b/plugins/module_utils/source_control/bitbucket.py index 1d584391d9..a3d3fa5f2f 100644 --- a/plugins/module_utils/source_control/bitbucket.py +++ b/plugins/module_utils/source_control/bitbucket.py @@ -1,9 +1,8 @@ -# -*- coding: utf-8 -*- +# Copyright (c) Ansible project +# Simplified BSD License (see LICENSES/BSD-2-Clause.txt or https://opensource.org/licenses/BSD-2-Clause) +# SPDX-License-Identifier: BSD-2-Clause -# Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause) - -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations import json @@ -27,7 +26,7 @@ class BitbucketHelper: # TODO: # - Rename user to username once current usage of username is removed # - Alias user to username and deprecate it - user=dict(type='str', fallback=(env_fallback, ['BITBUCKET_USERNAME'])), + user=dict(type='str', aliases=['username'], fallback=(env_fallback, ['BITBUCKET_USERNAME'])), password=dict(type='str', no_log=True, fallback=(env_fallback, ['BITBUCKET_PASSWORD'])), ) @@ -55,14 +54,14 @@ class BitbucketHelper: if info['status'] == 200: self.access_token = content['access_token'] else: - self.module.fail_json(msg='Failed to retrieve access token: {0}'.format(info)) + self.module.fail_json(msg=f'Failed to retrieve access token: {info}') def request(self, api_url, method, data=None, headers=None): headers = headers or {} if self.access_token: headers.update({ - 'Authorization': 'Bearer {0}'.format(self.access_token), + 'Authorization': f'Bearer {self.access_token}', }) elif self.module.params['user'] and self.module.params['password']: headers.update({ diff --git a/plugins/module_utils/ssh.py b/plugins/module_utils/ssh.py new file mode 100644 index 0000000000..851efcbe86 --- /dev/null +++ b/plugins/module_utils/ssh.py @@ -0,0 +1,19 @@ +# Copyright (c) 2015, Björn Andersson +# Copyright (c) 2021, 
Ansible Project +# Copyright (c) 2021, Abhijeet Kasurde +# Copyright (c) 2022, Alexei Znamensky +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + + +import os + + +def determine_config_file(user, config_file): + if user: + config_file = os.path.join(os.path.expanduser(f'~{user}'), '.ssh', 'config') + elif config_file is None: + config_file = '/etc/ssh/ssh_config' + return config_file diff --git a/plugins/module_utils/storage/emc/emc_vnx.py b/plugins/module_utils/storage/emc/emc_vnx.py index 5922512676..b6a4d30463 100644 --- a/plugins/module_utils/storage/emc/emc_vnx.py +++ b/plugins/module_utils/storage/emc/emc_vnx.py @@ -1,16 +1,8 @@ -# -*- coding: utf-8 -*- -# This code is part of Ansible, but is an independent component. -# This particular file snippet, and this file snippet only, is BSD licensed. -# Modules you write using this snippet, which is embedded dynamically by Ansible -# still belong to the author of the module, and may assign their own license -# to the complete work. 
-# -# (c) 2018 Luca 'remix_tj' Lorenzetto -# -# Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause) +# Copyright (c) 2018 Luca 'remix_tj' Lorenzetto +# Simplified BSD License (see LICENSES/BSD-2-Clause.txt or https://opensource.org/licenses/BSD-2-Clause) +# SPDX-License-Identifier: BSD-2-Clause -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations emc_vnx_argument_spec = { diff --git a/plugins/module_utils/storage/hpe3par/hpe3par.py b/plugins/module_utils/storage/hpe3par/hpe3par.py index b7734444dd..da88db1ce6 100644 --- a/plugins/module_utils/storage/hpe3par/hpe3par.py +++ b/plugins/module_utils/storage/hpe3par/hpe3par.py @@ -1,9 +1,8 @@ -# -*- coding: utf-8 -*- -# Copyright: (c) 2018, Hewlett Packard Enterprise Development LP -# Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause) +# Copyright (c) 2018, Hewlett Packard Enterprise Development LP +# Simplified BSD License (see LICENSES/BSD-2-Clause.txt or https://opensource.org/licenses/BSD-2-Clause) +# SPDX-License-Identifier: BSD-2-Clause -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations from ansible.module_utils import basic @@ -21,7 +20,7 @@ def convert_to_binary_multiple(size_with_unit): if float(size) < 0: return -1 if not valid_unit: - raise ValueError("%s does not have a valid unit. The unit must be one of %s" % (size_with_unit, valid_units)) + raise ValueError(f"{size_with_unit} does not have a valid unit. 
The unit must be one of {valid_units}") size = size_with_unit.replace(" ", "").split('iB')[0] size_kib = basic.human_to_bytes(size) diff --git a/plugins/module_utils/systemd.py b/plugins/module_utils/systemd.py new file mode 100644 index 0000000000..00ce292feb --- /dev/null +++ b/plugins/module_utils/systemd.py @@ -0,0 +1,32 @@ +# Copyright (c) 2025, Marco Noce +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + + +from ansible_collections.community.general.plugins.module_utils.cmd_runner import CmdRunner, cmd_runner_fmt + + +def systemd_runner(module, command, **kwargs): + arg_formats = dict( + version=cmd_runner_fmt.as_fixed("--version"), + list_units=cmd_runner_fmt.as_fixed(["list-units", "--no-pager"]), + types=cmd_runner_fmt.as_func(lambda v: [] if not v else ["--type", ",".join(v)]), + all=cmd_runner_fmt.as_fixed("--all"), + plain=cmd_runner_fmt.as_fixed("--plain"), + no_legend=cmd_runner_fmt.as_fixed("--no-legend"), + show=cmd_runner_fmt.as_fixed("show"), + props=cmd_runner_fmt.as_func(lambda v: [] if not v else ["-p", ",".join(v)]), + dashdash=cmd_runner_fmt.as_fixed("--"), + unit=cmd_runner_fmt.as_list(), + ) + + runner = CmdRunner( + module, + command=command, + arg_formats=arg_formats, + check_rc=True, + **kwargs + ) + return runner diff --git a/plugins/module_utils/univention_umc.py b/plugins/module_utils/univention_umc.py index a44a0052a9..1475a91542 100644 --- a/plugins/module_utils/univention_umc.py +++ b/plugins/module_utils/univention_umc.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # This code is part of Ansible, but is an independent component. # This particular file snippet, and this file snippet only, is BSD licensed. 
@@ -9,10 +8,10 @@ # Copyright (c) 2016, Adfinis SyGroup AG # Tobias Rueetschi # -# Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause) +# Simplified BSD License (see LICENSES/BSD-2-Clause.txt or https://opensource.org/licenses/BSD-2-Clause) +# SPDX-License-Identifier: BSD-2-Clause -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations """Univention Corporate Server (UCS) access module. @@ -90,7 +89,7 @@ def uldap(): def construct(): try: secret_file = open('/etc/ldap.secret', 'r') - bind_dn = 'cn=admin,{0}'.format(base_dn()) + bind_dn = f'cn=admin,{base_dn()}' except IOError: # pragma: no cover secret_file = open('/etc/machine.secret', 'r') bind_dn = config_registry()["ldap/hostdn"] @@ -187,7 +186,7 @@ def module_by_name(module_name_): univention.admin.modules.init(uldap(), position_base_dn(), module) return module - return _singleton('module/%s' % module_name_, construct) + return _singleton(f'module/{module_name_}', construct) def get_umc_admin_objects(): diff --git a/plugins/module_utils/utm_utils.py b/plugins/module_utils/utm_utils.py index 7e6ff3093e..2e7432fb38 100644 --- a/plugins/module_utils/utm_utils.py +++ b/plugins/module_utils/utm_utils.py @@ -1,16 +1,15 @@ -# -*- coding: utf-8 -*- # This code is part of Ansible, but is an independent component. # This particular file snippet, and this file snippet only, is BSD licensed. # Modules you write using this snippet, which is embedded dynamically by Ansible # still belong to the author of the module, and may assign their own license # to the complete work. 
# -# Copyright: (c) 2018, Johannes Brunswicker +# Copyright (c) 2018, Johannes Brunswicker # -# Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause) +# Simplified BSD License (see LICENSES/BSD-2-Clause.txt or https://opensource.org/licenses/BSD-2-Clause) +# SPDX-License-Identifier: BSD-2-Clause -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations import json @@ -72,8 +71,9 @@ class UTM: """ self.info_only = info_only self.module = module - self.request_url = module.params.get('utm_protocol') + "://" + module.params.get('utm_host') + ":" + to_native( - module.params.get('utm_port')) + "/api/objects/" + endpoint + "/" + self.request_url = ( + f"{module.params.get('utm_protocol')}://{module.params.get('utm_host')}:{module.params.get('utm_port')}/api/objects/{endpoint}/" + ) """ The change_relevant_keys will be checked for changes to determine whether the object needs to be updated @@ -83,9 +83,8 @@ class UTM: self.module.params['url_password'] = module.params.get('utm_token') if all(elem in self.change_relevant_keys for elem in module.params.keys()): raise UTMModuleConfigurationError( - "The keys " + to_native( - self.change_relevant_keys) + " to check are not in the modules keys:\n" + to_native( - list(module.params.keys()))) + f"The keys {self.change_relevant_keys} to check are not in the modules keys:\n{list(module.params.keys())}" + ) def execute(self): try: @@ -184,7 +183,7 @@ class UTM: result = None if response is not None: results = json.loads(response.read()) - result = next(iter(filter(lambda d: d['name'] == module.params.get('name'), results)), None) + result = next((d for d in results if d['name'] == module.params.get('name')), None) return info, result def _clean_result(self, result): diff --git a/plugins/module_utils/vardict.py b/plugins/module_utils/vardict.py new file mode 100644 index 0000000000..ccea7d5bb6 --- /dev/null +++ 
b/plugins/module_utils/vardict.py @@ -0,0 +1,196 @@ +# (c) 2023, Alexei Znamensky +# Copyright (c) 2023, Ansible Project +# Simplified BSD License (see LICENSES/BSD-2-Clause.txt or https://opensource.org/licenses/BSD-2-Clause) +# SPDX-License-Identifier: BSD-2-Clause + +from __future__ import annotations + +import copy + + +class _Variable(object): + NOTHING = object() + + def __init__(self, diff=False, output=True, change=None, fact=False, verbosity=0): + self.init = False + self.initial_value = None + self.value = None + + self.diff = None + self._change = None + self.output = None + self.fact = None + self._verbosity = None + self.set_meta(output=output, diff=diff, change=change, fact=fact, verbosity=verbosity) + + def getchange(self): + return self.diff if self._change is None else self._change + + def setchange(self, value): + self._change = value + + def getverbosity(self): + return self._verbosity + + def setverbosity(self, v): + if not (0 <= v <= 4): + raise ValueError("verbosity must be an int in the range 0 to 4") + self._verbosity = v + + change = property(getchange, setchange) + verbosity = property(getverbosity, setverbosity) + + def set_meta(self, output=None, diff=None, change=None, fact=None, initial_value=NOTHING, verbosity=None): + """Set the metadata for the variable + + Args: + output (bool, optional): flag indicating whether the variable should be in the output of the module. Defaults to None. + diff (bool, optional): flag indicating whether to generate diff mode output for this variable. Defaults to None. + change (bool, optional): flag indicating whether to track if changes happened to this variable. Defaults to None. + fact (bool, optional): flag indicating whether the variable should be exposed as a fact of the module. Defaults to None. + initial_value (any, optional): initial value of the variable, to be used with `change`. Defaults to NOTHING. 
+ verbosity (int, optional): level of verbosity in which this variable is reported by the module as `output`, `fact` or `diff`. Defaults to None. + """ + if output is not None: + self.output = output + if change is not None: + self.change = change + if diff is not None: + self.diff = diff + if fact is not None: + self.fact = fact + if initial_value is not _Variable.NOTHING: + self.initial_value = copy.deepcopy(initial_value) + if verbosity is not None: + self.verbosity = verbosity + + def as_dict(self, meta_only=False): + d = { + "diff": self.diff, + "change": self.change, + "output": self.output, + "fact": self.fact, + "verbosity": self.verbosity, + } + if not meta_only: + d["initial_value"] = copy.deepcopy(self.initial_value) + d["value"] = self.value + return d + + def set_value(self, value): + if not self.init: + self.initial_value = copy.deepcopy(value) + self.init = True + self.value = value + return self + + def is_visible(self, verbosity): + return self.verbosity <= verbosity + + @property + def has_changed(self): + return self.change and (self.initial_value != self.value) + + @property + def diff_result(self): + if self.diff and self.has_changed: + return {'before': self.initial_value, 'after': self.value} + return + + def __str__(self): + return ( + f"" + ) + + +class VarDict(object): + reserved_names = ('__vars__', '_var', 'var', 'set_meta', 'get_meta', 'set', 'output', 'diff', 'facts', 'has_changed', 'as_dict') + + def __init__(self): + self.__vars__ = dict() + + def __getitem__(self, item): + return self.__vars__[item].value + + def __setitem__(self, key, value): + self.set(key, value) + + def __getattr__(self, item): + try: + return self.__vars__[item].value + except KeyError: + return getattr(super(VarDict, self), item) + + def __setattr__(self, key, value): + if key == '__vars__': + super(VarDict, self).__setattr__(key, value) + else: + self.set(key, value) + + def _var(self, name): + return self.__vars__[name] + + def var(self, name): + return 
self._var(name).as_dict() + + def set_meta(self, name, **kwargs): + """Set the metadata for the variable + + Args: + name (str): name of the variable having its metadata changed + output (bool, optional): flag indicating whether the variable should be in the output of the module. Defaults to None. + diff (bool, optional): flag indicating whether to generate diff mode output for this variable. Defaults to None. + change (bool, optional): flag indicating whether to track if changes happened to this variable. Defaults to None. + fact (bool, optional): flag indicating whether the variable should be exposed as a fact of the module. Defaults to None. + initial_value (any, optional): initial value of the variable, to be used with `change`. Defaults to NOTHING. + verbosity (int, optional): level of verbosity in which this variable is reported by the module as `output`, `fact` or `diff`. Defaults to None. + """ + self._var(name).set_meta(**kwargs) + + def get_meta(self, name): + return self._var(name).as_dict(meta_only=True) + + def set(self, name, value, **kwargs): + """Set the value and optionally metadata for a variable. The variable is not required to exist prior to calling `set`. + + For details on the accepted metadata see the documentation for method `set_meta`. + + Args: + name (str): name of the variable being changed + value (any): the value of the variable, it can be of any type + + Raises: + ValueError: Raised if trying to set a variable with a reserved name.
+ """ + if name in self.reserved_names: + raise ValueError(f"Name {name} is reserved") + if name in self.__vars__: + var = self._var(name) + var.set_meta(**kwargs) + else: + var = _Variable(**kwargs) + var.set_value(value) + self.__vars__[name] = var + + def output(self, verbosity=0): + return {n: v.value for n, v in self.__vars__.items() if v.output and v.is_visible(verbosity)} + + def diff(self, verbosity=0): + diff_results = [(n, v.diff_result) for n, v in self.__vars__.items() if v.diff_result and v.is_visible(verbosity)] + if diff_results: + before = {n: dr['before'] for n, dr in diff_results} + after = {n: dr['after'] for n, dr in diff_results} + return {'before': before, 'after': after} + return None + + def facts(self, verbosity=0): + facts_result = {n: v.value for n, v in self.__vars__.items() if v.fact and v.is_visible(verbosity)} + return facts_result if facts_result else None + + @property + def has_changed(self): + return any(var.has_changed for var in self.__vars__.values()) + + def as_dict(self): + return {name: var.value for name, var in self.__vars__.items()} diff --git a/plugins/module_utils/version.py b/plugins/module_utils/version.py index dc59c43712..18cd6d12fe 100644 --- a/plugins/module_utils/version.py +++ b/plugins/module_utils/version.py @@ -1,21 +1,11 @@ -# -*- coding: utf-8 -*- -# Copyright: (c) 2021, Felix Fontein -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# Copyright (c) 2021, Felix Fontein +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later """Provide version object to compare version numbers.""" -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations -from ansible.module_utils.six import raise_from - -try: - from ansible.module_utils.compat.version import LooseVersion -except ImportError: - try: - from 
distutils.version import LooseVersion - except ImportError as exc: - msg = 'To use this plugin or module with ansible-core 2.11, you need to use Python < 3.12 with distutils.version present' - raise_from(ImportError(msg), exc) +from ansible.module_utils.compat.version import LooseVersion # noqa: F401, pylint: disable=unused-import diff --git a/plugins/module_utils/vexata.py b/plugins/module_utils/vexata.py index 3d6fb7aaca..ed0b11480c 100644 --- a/plugins/module_utils/vexata.py +++ b/plugins/module_utils/vexata.py @@ -1,10 +1,9 @@ -# -*- coding: utf-8 -*- # -# Copyright: (c) 2019, Sandeep Kasargod -# Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause) +# Copyright (c) 2019, Sandeep Kasargod +# Simplified BSD License (see LICENSES/BSD-2-Clause.txt or https://opensource.org/licenses/BSD-2-Clause) +# SPDX-License-Identifier: BSD-2-Clause -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations HAS_VEXATAPI = True @@ -13,7 +12,6 @@ try: except ImportError: HAS_VEXATAPI = False -from ansible.module_utils.common.text.converters import to_native from ansible.module_utils.basic import env_fallback VXOS_VERSION = None @@ -22,10 +20,9 @@ VXOS_VERSION = None def get_version(iocs_json): if not iocs_json: raise Exception('Invalid IOC json') - active = filter(lambda x: x['mgmtRole'], iocs_json) - if not active: + active = next((x for x in iocs_json if x['mgmtRole']), None) + if active is None: raise Exception('Unable to detect active IOC') - active = active[0] ver = active['swVersion'] if ver[0] != 'v': raise Exception('Illegal version string') @@ -59,7 +56,7 @@ def get_array(module): else: module.fail_json(msg='Test connection to array failed.') except Exception as e: - module.fail_json(msg='Vexata API access failed: {0}'.format(to_native(e))) + module.fail_json(msg=f'Vexata API access failed: {e}') def argument_spec(): diff --git 
a/plugins/module_utils/wdc_redfish_utils.py b/plugins/module_utils/wdc_redfish_utils.py new file mode 100644 index 0000000000..564be3829e --- /dev/null +++ b/plugins/module_utils/wdc_redfish_utils.py @@ -0,0 +1,553 @@ + +# Copyright (c) 2022 Western Digital Corporation +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + +import datetime +import re +import time +import tarfile +import os +from urllib.parse import urlparse, urlunparse + +from ansible.module_utils.urls import fetch_file +from ansible_collections.community.general.plugins.module_utils.redfish_utils import RedfishUtils + + +class WdcRedfishUtils(RedfishUtils): + """Extension to RedfishUtils to support WDC enclosures.""" + # Status codes returned by WDC FW Update Status + UPDATE_STATUS_CODE_READY_FOR_FW_UPDATE = 0 + UPDATE_STATUS_CODE_FW_UPDATE_IN_PROGRESS = 1 + UPDATE_STATUS_CODE_FW_UPDATE_COMPLETED_WAITING_FOR_ACTIVATION = 2 + UPDATE_STATUS_CODE_FW_UPDATE_FAILED = 3 + + # Status messages returned by WDC FW Update Status + UPDATE_STATUS_MESSAGE_READY_FOR_FW_UDPATE = "Ready for FW update" + UDPATE_STATUS_MESSAGE_FW_UPDATE_IN_PROGRESS = "FW update in progress" + UPDATE_STATUS_MESSAGE_FW_UPDATE_COMPLETED_WAITING_FOR_ACTIVATION = "FW update completed. Waiting for activation." + UPDATE_STATUS_MESSAGE_FW_UPDATE_FAILED = "FW update failed." 
+ + # Dict keys for resource bodies + # Standard keys + ACTIONS = "Actions" + OEM = "Oem" + WDC = "WDC" + TARGET = "target" + + # Keys for specific operations + CHASSIS_LOCATE = "#Chassis.Locate" + CHASSIS_POWER_MODE = "#Chassis.PowerMode" + + def __init__(self, + creds, + root_uris, + timeout, + module, + resource_id, + data_modification): + super(WdcRedfishUtils, self).__init__(creds=creds, + root_uri=root_uris[0], + timeout=timeout, + module=module, + resource_id=resource_id, + data_modification=data_modification) + # Update the root URI if we cannot perform a Redfish GET to the first one + self._set_root_uri(root_uris) + + def _set_root_uri(self, root_uris): + """Set the root URI from a list of options. + + If the current root URI is good, just keep it. Else cycle through our options until we find a good one. + A URI is considered good if we can GET uri/redfish/v1. + """ + for root_uri in root_uris: + uri = f"{root_uri}/redfish/v1" + response = self.get_request(uri) + if response['ret']: + self.root_uri = root_uri + break + + def _find_updateservice_resource(self): + """Find the update service resource as well as additional WDC-specific resources.""" + response = super(WdcRedfishUtils, self)._find_updateservice_resource() + if not response['ret']: + return response + return self._find_updateservice_additional_uris() + + def _is_enclosure_multi_tenant_and_fetch_gen(self): + """Determine if the enclosure is multi-tenant. + + The serial number of a multi-tenant enclosure will end in "-A" or "-B". + Also fetches the enclosure generation. + + :return: True/False if the enclosure is multi-tenant or not and return enclosure generation; + None if unable to determine.
+ """ + response = self.get_request(f"{self.root_uri}{self.service_root}Chassis/Enclosure") + if response['ret'] is False: + return None + pattern = r".*-[A,B]" + data = response['data'] + if 'EnclVersion' not in data: + enc_version = 'G1' + else: + enc_version = data['EnclVersion'] + return re.match(pattern, data['SerialNumber']) is not None, enc_version + + def _find_updateservice_additional_uris(self): + """Find & set WDC-specific update service URIs""" + response = self.get_request(self.root_uri + self._update_uri()) + if response['ret'] is False: + return response + data = response['data'] + if 'Actions' not in data: + return {'ret': False, 'msg': 'Service does not support SimpleUpdate'} + if '#UpdateService.SimpleUpdate' not in data['Actions']: + return {'ret': False, 'msg': 'Service does not support SimpleUpdate'} + action = data['Actions']['#UpdateService.SimpleUpdate'] + if 'target' not in action: + return {'ret': False, 'msg': 'Service does not support SimpleUpdate'} + self.simple_update_uri = action['target'] + + # Simple update status URI is not provided via GET /redfish/v1/UpdateService + # So we have to hard code it. 
+ self.simple_update_status_uri = f"{self.simple_update_uri}/Status" + + # FWActivate URI + if 'Oem' not in data['Actions']: + return {'ret': False, 'msg': 'Service does not support OEM operations'} + if 'WDC' not in data['Actions']['Oem']: + return {'ret': False, 'msg': 'Service does not support WDC operations'} + if '#UpdateService.FWActivate' not in data['Actions']['Oem']['WDC']: + return {'ret': False, 'msg': 'Service does not support FWActivate'} + action = data['Actions']['Oem']['WDC']['#UpdateService.FWActivate'] + if 'target' not in action: + return {'ret': False, 'msg': 'Service does not support FWActivate'} + self.firmware_activate_uri = action['target'] + return {'ret': True} + + def _simple_update_status_uri(self): + return self.simple_update_status_uri + + def _firmware_activate_uri(self): + return self.firmware_activate_uri + + def _update_uri(self): + return self.update_uri + + def get_simple_update_status(self): + """Issue Redfish HTTP GET to return the simple update status""" + result = {} + response = self.get_request(self.root_uri + self._simple_update_status_uri()) + if response['ret'] is False: + return response + result['ret'] = True + data = response['data'] + result['entries'] = data + return result + + def firmware_activate(self, update_opts): + """Perform FWActivate using Redfish HTTP API.""" + creds = update_opts.get('update_creds') + payload = {} + if creds: + if creds.get('username'): + payload["Username"] = creds.get('username') + if creds.get('password'): + payload["Password"] = creds.get('password') + + # Make sure the service supports FWActivate + response = self.get_request(self.root_uri + self._update_uri()) + if response['ret'] is False: + return response + data = response['data'] + if 'Actions' not in data: + return {'ret': False, 'msg': 'Service does not support FWActivate'} + + response = self.post_request(self.root_uri + self._firmware_activate_uri(), payload) + if response['ret'] is False: + return response + return {'ret': 
True, 'changed': True, + 'msg': "FWActivate requested"} + + def _get_bundle_version(self, + bundle_uri): + """Get the firmware version from a bundle file, and whether or not it is multi-tenant. + + Only supports HTTP at this time. Assumes URI exists and is a tarfile. + Looks for a file oobm-[version].pkg, such as 'oobm-4.0.13.pkg'. Extracts the version number + from that filename (in the above example, the version number is "4.0.13"). + + To determine if the bundle is multi-tenant or not, it looks inside the .bin file within the tarfile, + and checks the appropriate byte in the file. + + If not tarfile, the bundle is checked for 2048th byte to determine whether it is Gen2 bundle. + Gen2 is always single tenant at this time. + + :param str bundle_uri: HTTP URI of the firmware bundle. + :return: Firmware version number contained in the bundle, whether or not the bundle is multi-tenant + and bundle generation. Any value will be None if unable to determine. + :rtype: str or None, bool or None, str or None + """ + bundle_temp_filename = fetch_file(module=self.module, + url=bundle_uri) + bundle_version = None + is_multi_tenant = None + gen = None + + # If not tarfile, then if the file has "MMG2" or "DPG2" at 2048th byte + # then the bundle is for MM or DP G2 + if not tarfile.is_tarfile(bundle_temp_filename): + cookie1 = None + with open(bundle_temp_filename, "rb") as bundle_file: + file_size = os.path.getsize(bundle_temp_filename) + if file_size >= 2052: + bundle_file.seek(2048) + cookie1 = bundle_file.read(4) + # It is anticipated that DP firmware bundle will have the value "DPG2" + # for cookie1 in the header. + # NOTE: keep the `cookie1 and` guard around the whole comparison: `and` binds + # tighter than `or`, so `a and b == x or b == y` would decode cookie1 even + # when it is None (file shorter than 2052 bytes) and raise AttributeError. + if cookie1 and cookie1.decode("utf8") in ("MMG2", "DPG2"): + file_name, ext = os.path.splitext(str(bundle_uri.rsplit('/', 1)[1])) + # G2 bundle file name: Ultrastar-Data102_3000_SEP_1010-032_2.1.12 + parsedFileName = file_name.split('_') + if len(parsedFileName) == 5: + bundle_version = parsedFileName[4] + # MM G2 is always single tenant +
is_multi_tenant = False + gen = "G2" + + return bundle_version, is_multi_tenant, gen + + # Bundle is for MM or DP G1 + tf = tarfile.open(bundle_temp_filename) + pattern_pkg = r"oobm-(.+)\.pkg" + pattern_bin = r"(.*\.bin)" + bundle_version = None + is_multi_tenant = None + for filename in tf.getnames(): + match_pkg = re.match(pattern_pkg, filename) + if match_pkg is not None: + bundle_version = match_pkg.group(1) + match_bin = re.match(pattern_bin, filename) + if match_bin is not None: + bin_filename = match_bin.group(1) + bin_file = tf.extractfile(bin_filename) + bin_file.seek(11) + byte_11 = bin_file.read(1) + is_multi_tenant = byte_11 == b'\x80' + gen = "G1" + + return bundle_version, is_multi_tenant, gen + + @staticmethod + def uri_is_http(uri): + """Return True if the specified URI is http or https. + + :param str uri: A URI. + :return: True if the URI is http or https, else False + :rtype: bool + """ + parsed_bundle_uri = urlparse(uri) + return parsed_bundle_uri.scheme.lower() in ['http', 'https'] + + def update_and_activate(self, update_opts): + """Update and activate the firmware in a single action. + + Orchestrates the firmware update so that everything can be done in a single command. + Compares the update version with the already-installed version -- skips update if they are the same. + Performs retries, handles timeouts as needed. 
+ + """ + # Convert credentials to standard HTTP format + if update_opts.get("update_creds") is not None and "username" in update_opts["update_creds"] and "password" in update_opts["update_creds"]: + update_creds = update_opts["update_creds"] + parsed_url = urlparse(update_opts["update_image_uri"]) + if update_creds: + original_netloc = parsed_url.netloc + parsed_url = parsed_url._replace(netloc=f"{update_creds.get('username')}:{update_creds.get('password')}@{original_netloc}") + update_opts["update_image_uri"] = urlunparse(parsed_url) + del update_opts["update_creds"] + + # Make sure bundle URI is HTTP(s) + bundle_uri = update_opts["update_image_uri"] + + if not self.uri_is_http(bundle_uri): + return { + 'ret': False, + 'msg': 'Bundle URI must be HTTP or HTTPS' + } + # Make sure IOM is ready for update + result = self.get_simple_update_status() + if result['ret'] is False: + return result + update_status = result['entries'] + status_code = update_status['StatusCode'] + status_description = update_status['Description'] + if status_code not in [ + self.UPDATE_STATUS_CODE_READY_FOR_FW_UPDATE, + self.UPDATE_STATUS_CODE_FW_UPDATE_FAILED + ]: + return { + 'ret': False, + 'msg': f'Target is not ready for FW update. 
Current status: {status_code} ({status_description})'} + + # Check the FW version in the bundle file, and compare it to what is already on the IOMs + + # Bundle version number + bundle_firmware_version, is_bundle_multi_tenant, bundle_gen = self._get_bundle_version(bundle_uri) + if bundle_firmware_version is None or is_bundle_multi_tenant is None or bundle_gen is None: + return { + 'ret': False, + 'msg': 'Unable to extract bundle version or multi-tenant status or generation from update image file' + } + + is_enclosure_multi_tenant, enclosure_gen = self._is_enclosure_multi_tenant_and_fetch_gen() + + # Verify that the bundle is correctly multi-tenant or not + if is_enclosure_multi_tenant != is_bundle_multi_tenant: + return { + 'ret': False, + 'msg': f'Enclosure multi-tenant is {is_enclosure_multi_tenant} but bundle multi-tenant is {is_bundle_multi_tenant}' + } + + # Verify that the bundle is compliant with the target enclosure + if enclosure_gen != bundle_gen: + return { + 'ret': False, + 'msg': f'Enclosure generation is {enclosure_gen} but bundle is of {bundle_gen}' + } + + # Version number installed on IOMs + firmware_inventory = self.get_firmware_inventory() + if not firmware_inventory["ret"]: + return firmware_inventory + firmware_inventory_dict = {} + for entry in firmware_inventory["entries"]: + firmware_inventory_dict[entry["Id"]] = entry + iom_a_firmware_version = firmware_inventory_dict.get("IOModuleA_OOBM", {}).get("Version") + iom_b_firmware_version = firmware_inventory_dict.get("IOModuleB_OOBM", {}).get("Version") + # If version is None, we will proceed with the update, because we cannot tell + # for sure that we have a full version match. + if is_enclosure_multi_tenant: + # For multi-tenant, only one of the IOMs will be affected by the firmware update, + # so see if that IOM already has the same firmware version as the bundle. 
+ firmware_already_installed = bundle_firmware_version == self._get_installed_firmware_version_of_multi_tenant_system( + iom_a_firmware_version, + iom_b_firmware_version) + else: + # For single-tenant, see if both IOMs already have the same firmware version as the bundle. + firmware_already_installed = bundle_firmware_version == iom_a_firmware_version == iom_b_firmware_version + # If this FW already installed, return changed: False, and do not update the firmware. + if firmware_already_installed: + return { + 'ret': True, + 'changed': False, + 'msg': f'Version {bundle_firmware_version} already installed' + } + + # Version numbers don't match the bundle -- proceed with update (unless we are in check mode) + if self.module.check_mode: + return { + 'ret': True, + 'changed': True, + 'msg': 'Update not performed in check mode.' + } + update_successful = False + retry_interval_seconds = 5 + max_number_of_retries = 5 + retry_number = 0 + while retry_number < max_number_of_retries and not update_successful: + if retry_number != 0: + time.sleep(retry_interval_seconds) + retry_number += 1 + + result = self.simple_update(update_opts) + if result['ret'] is not True: + # Sometimes a timeout error is returned even though the update actually was requested. + # Check the update status to see if the update is in progress. + status_result = self.get_simple_update_status() + if status_result['ret'] is False: + continue + update_status = status_result['entries'] + status_code = update_status['StatusCode'] + if status_code != self.UPDATE_STATUS_CODE_FW_UPDATE_IN_PROGRESS: + # Update is not in progress -- retry until max number of retries + continue + else: + update_successful = True + else: + update_successful = True + if not update_successful: + # Unable to get SimpleUpdate to work. 
Return the failure from the SimpleUpdate + return result + + # Wait for "ready to activate" + max_wait_minutes = 30 + polling_interval_seconds = 30 + status_code = self.UPDATE_STATUS_CODE_READY_FOR_FW_UPDATE + start_time = datetime.datetime.now() + # For a short time, target will still say "ready for firmware update" before it transitions + # to "update in progress" + status_codes_for_update_incomplete = [ + self.UPDATE_STATUS_CODE_FW_UPDATE_IN_PROGRESS, + self.UPDATE_STATUS_CODE_READY_FOR_FW_UPDATE + ] + iteration = 0 + while status_code in status_codes_for_update_incomplete \ + and datetime.datetime.now() - start_time < datetime.timedelta(minutes=max_wait_minutes): + if iteration != 0: + time.sleep(polling_interval_seconds) + iteration += 1 + result = self.get_simple_update_status() + if result['ret'] is False: + continue # We may get timeouts, just keep trying until we give up + update_status = result['entries'] + status_code = update_status['StatusCode'] + status_description = update_status['Description'] + if status_code == self.UPDATE_STATUS_CODE_FW_UPDATE_IN_PROGRESS: + # Once it says update in progress, "ready for update" is no longer a valid status code + status_codes_for_update_incomplete = [self.UPDATE_STATUS_CODE_FW_UPDATE_IN_PROGRESS] + + # Update no longer in progress -- verify that it finished + if status_code != self.UPDATE_STATUS_CODE_FW_UPDATE_COMPLETED_WAITING_FOR_ACTIVATION: + return { + 'ret': False, + 'msg': f'Target is not ready for FW activation after update. Current status: {status_code} ({status_description})'} + + self.firmware_activate(update_opts) + return {'ret': True, 'changed': True, + 'msg': "Firmware updated and activation initiated."} + + def _get_installed_firmware_version_of_multi_tenant_system(self, + iom_a_firmware_version, + iom_b_firmware_version): + """Return the version for the active IOM on a multi-tenant system. + + Only call this on a multi-tenant system. 
+ Given the installed firmware versions for IOM A, B, this method will determine which IOM is active + for this tenanat, and return that IOM's firmware version. + """ + # To determine which IOM we are on, try to GET each IOM resource + # The one we are on will return valid data. + # The other will return an error with message "IOM Module A/B cannot be read" + which_iom_is_this = None + for iom_letter in ['A', 'B']: + iom_uri = f"Chassis/IOModule{iom_letter}FRU" + response = self.get_request(self.root_uri + self.service_root + iom_uri) + if response['ret'] is False: + continue + data = response['data'] + if "Id" in data: # Assume if there is an "Id", it is valid + which_iom_is_this = iom_letter + break + if which_iom_is_this == 'A': + return iom_a_firmware_version + elif which_iom_is_this == 'B': + return iom_b_firmware_version + else: + return None + + @staticmethod + def _get_led_locate_uri(data): + """Get the LED locate URI given a resource body.""" + if WdcRedfishUtils.ACTIONS not in data: + return None + if WdcRedfishUtils.OEM not in data[WdcRedfishUtils.ACTIONS]: + return None + if WdcRedfishUtils.WDC not in data[WdcRedfishUtils.ACTIONS][WdcRedfishUtils.OEM]: + return None + if WdcRedfishUtils.CHASSIS_LOCATE not in data[WdcRedfishUtils.ACTIONS][WdcRedfishUtils.OEM][WdcRedfishUtils.WDC]: + return None + if WdcRedfishUtils.TARGET not in data[WdcRedfishUtils.ACTIONS][WdcRedfishUtils.OEM][WdcRedfishUtils.WDC][WdcRedfishUtils.CHASSIS_LOCATE]: + return None + return data[WdcRedfishUtils.ACTIONS][WdcRedfishUtils.OEM][WdcRedfishUtils.WDC][WdcRedfishUtils.CHASSIS_LOCATE][WdcRedfishUtils.TARGET] + + @staticmethod + def _get_power_mode_uri(data): + """Get the Power Mode URI given a resource body.""" + if WdcRedfishUtils.ACTIONS not in data: + return None + if WdcRedfishUtils.OEM not in data[WdcRedfishUtils.ACTIONS]: + return None + if WdcRedfishUtils.WDC not in data[WdcRedfishUtils.ACTIONS][WdcRedfishUtils.OEM]: + return None + if WdcRedfishUtils.CHASSIS_POWER_MODE not 
in data[WdcRedfishUtils.ACTIONS][WdcRedfishUtils.OEM][WdcRedfishUtils.WDC]: + return None + if WdcRedfishUtils.TARGET not in data[WdcRedfishUtils.ACTIONS][WdcRedfishUtils.OEM][WdcRedfishUtils.WDC][WdcRedfishUtils.CHASSIS_POWER_MODE]: + return None + return data[WdcRedfishUtils.ACTIONS][WdcRedfishUtils.OEM][WdcRedfishUtils.WDC][WdcRedfishUtils.CHASSIS_POWER_MODE][WdcRedfishUtils.TARGET] + + def manage_indicator_led(self, command, resource_uri): + key = 'IndicatorLED' + + payloads = {'IndicatorLedOn': 'On', 'IndicatorLedOff': 'Off'} + current_led_status_map = {'IndicatorLedOn': 'Blinking', 'IndicatorLedOff': 'Off'} + + result = {} + response = self.get_request(self.root_uri + resource_uri) + if response['ret'] is False: + return response + result['ret'] = True + data = response['data'] + if key not in data: + return {'ret': False, 'msg': f"Key {key} not found"} + current_led_status = data[key] + if current_led_status == current_led_status_map[command]: + return {'ret': True, 'changed': False} + + led_locate_uri = self._get_led_locate_uri(data) + if led_locate_uri is None: + return {'ret': False, 'msg': 'LED locate URI not found.'} + + if command in payloads.keys(): + payload = {'LocateState': payloads[command]} + response = self.post_request(self.root_uri + led_locate_uri, payload) + if response['ret'] is False: + return response + else: + return {'ret': False, 'msg': 'Invalid command'} + + return result + + def manage_chassis_power_mode(self, command): + return self.manage_power_mode(command, self.chassis_uri) + + def manage_power_mode(self, command, resource_uri=None): + if resource_uri is None: + resource_uri = self.chassis_uri + + payloads = {'PowerModeNormal': 'Normal', 'PowerModeLow': 'Low'} + requested_power_mode = payloads[command] + + result = {} + response = self.get_request(self.root_uri + resource_uri) + if response['ret'] is False: + return response + result['ret'] = True + data = response['data'] + + # Make sure the response includes Oem.WDC.PowerMode, 
and get current power mode + power_mode = 'PowerMode' + if WdcRedfishUtils.OEM not in data or WdcRedfishUtils.WDC not in data[WdcRedfishUtils.OEM] or\ + power_mode not in data[WdcRedfishUtils.OEM][WdcRedfishUtils.WDC]: + return {'ret': False, 'msg': 'Resource does not support Oem.WDC.PowerMode'} + current_power_mode = data[WdcRedfishUtils.OEM][WdcRedfishUtils.WDC][power_mode] + if current_power_mode == requested_power_mode: + return {'ret': True, 'changed': False} + + power_mode_uri = self._get_power_mode_uri(data) + if power_mode_uri is None: + return {'ret': False, 'msg': 'Power Mode URI not found.'} + + if command in payloads.keys(): + payload = {'PowerMode': payloads[command]} + response = self.post_request(self.root_uri + power_mode_uri, payload) + if response['ret'] is False: + return response + else: + return {'ret': False, 'msg': 'Invalid command'} + + return result diff --git a/plugins/module_utils/xdg_mime.py b/plugins/module_utils/xdg_mime.py new file mode 100644 index 0000000000..d02002737b --- /dev/null +++ b/plugins/module_utils/xdg_mime.py @@ -0,0 +1,34 @@ +# Copyright (c) 2025, Marcos Alano +# Based on gio_mime module. 
Copyright (c) 2022, Alexei Znamensky +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + +from ansible_collections.community.general.plugins.module_utils.cmd_runner import CmdRunner, cmd_runner_fmt + + +def xdg_mime_runner(module, **kwargs): + return CmdRunner( + module, + command=['xdg-mime'], + arg_formats=dict( + default=cmd_runner_fmt.as_fixed('default'), + query=cmd_runner_fmt.as_fixed('query'), + mime_types=cmd_runner_fmt.as_list(), + handler=cmd_runner_fmt.as_list(), + version=cmd_runner_fmt.as_fixed('--version'), + ), + **kwargs + ) + + +def xdg_mime_get(runner, mime_type): + def process(rc, out, err): + if not out.strip(): + return None + out = out.splitlines()[0] + return out.split()[-1] + + with runner("query default mime_types", output_process=process) as ctx: + return ctx.run(mime_types=mime_type) diff --git a/plugins/module_utils/xenserver.py b/plugins/module_utils/xenserver.py index 015b10215e..32576000cc 100644 --- a/plugins/module_utils/xenserver.py +++ b/plugins/module_utils/xenserver.py @@ -1,10 +1,9 @@ -# -*- coding: utf-8 -*- # -# Copyright: (c) 2018, Bojan Vitnik -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# Copyright (c) 2018, Bojan Vitnik +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations import atexit import time @@ -27,22 +26,18 @@ def xenserver_common_argument_spec(): return dict( hostname=dict(type='str', aliases=['host', 'pool'], - required=False, default='localhost', fallback=(env_fallback, ['XENSERVER_HOST']), ), username=dict(type='str', aliases=['user', 'admin'], - required=False, default='root', 
fallback=(env_fallback, ['XENSERVER_USER'])), password=dict(type='str', aliases=['pass', 'pwd'], - required=False, no_log=True, fallback=(env_fallback, ['XENSERVER_PASSWORD'])), validate_certs=dict(type='bool', - required=False, default=True, fallback=(env_fallback, ['XENSERVER_VALIDATE_CERTS'])), ) @@ -293,29 +288,29 @@ def get_object_ref(module, name, uuid=None, obj_type="VM", fail=True, msg_prefix try: # Find object by UUID. If no object is found using given UUID, # an exception will be generated. - obj_ref = xapi_session.xenapi_request("%s.get_by_uuid" % real_obj_type, (uuid,)) + obj_ref = xapi_session.xenapi_request(f"{real_obj_type}.get_by_uuid", (uuid,)) except XenAPI.Failure as f: if fail: - module.fail_json(msg="%s%s with UUID '%s' not found!" % (msg_prefix, obj_type, uuid)) + module.fail_json(msg=f"{msg_prefix}{obj_type} with UUID '{uuid}' not found!") elif name: try: # Find object by name (name_label). - obj_ref_list = xapi_session.xenapi_request("%s.get_by_name_label" % real_obj_type, (name,)) + obj_ref_list = xapi_session.xenapi_request(f"{real_obj_type}.get_by_name_label", (name,)) except XenAPI.Failure as f: - module.fail_json(msg="XAPI ERROR: %s" % f.details) + module.fail_json(msg=f"XAPI ERROR: {f.details}") # If obj_ref_list is empty. if not obj_ref_list: if fail: - module.fail_json(msg="%s%s with name '%s' not found!" % (msg_prefix, obj_type, name)) + module.fail_json(msg=f"{msg_prefix}{obj_type} with name '{name}' not found!") # If obj_ref_list contains multiple object references. elif len(obj_ref_list) > 1: - module.fail_json(msg="%smultiple %ss with name '%s' found! Please use UUID." % (msg_prefix, obj_type, name)) + module.fail_json(msg=f"{msg_prefix}multiple {obj_type}s with name '{name}' found! Please use UUID.") # The obj_ref_list contains only one object reference. else: obj_ref = obj_ref_list[0] else: - module.fail_json(msg="%sno valid name or UUID supplied for %s!" 
% (msg_prefix, obj_type)) + module.fail_json(msg=f"{msg_prefix}no valid name or UUID supplied for {obj_type}!") return obj_ref @@ -401,7 +396,7 @@ def gather_vm_params(module, vm_ref): vm_params['customization_agent'] = "custom" except XenAPI.Failure as f: - module.fail_json(msg="XAPI ERROR: %s" % f.details) + module.fail_json(msg=f"XAPI ERROR: {f.details}") return vm_params @@ -477,12 +472,13 @@ def gather_vm_facts(module, vm_params): "mac": vm_vif_params['MAC'], "vif_device": vm_vif_params['device'], "mtu": vm_vif_params['MTU'], - "ip": vm_guest_metrics_networks.get("%s/ip" % vm_vif_params['device'], ''), + "ip": vm_guest_metrics_networks.get(f"{vm_vif_params['device']}/ip", ''), "prefix": "", "netmask": "", "gateway": "", - "ip6": [vm_guest_metrics_networks[ipv6] for ipv6 in sorted(vm_guest_metrics_networks.keys()) if ipv6.startswith("%s/ipv6/" % - vm_vif_params['device'])], + "ip6": [vm_guest_metrics_networks[ipv6] + for ipv6 in sorted(vm_guest_metrics_networks.keys()) + if ipv6.startswith(f"{vm_vif_params['device']}/ipv6/")], "prefix6": "", "gateway6": "", } @@ -503,7 +499,7 @@ def gather_vm_facts(module, vm_params): vm_xenstore_data = vm_params['xenstore_data'] for f in ['prefix', 'netmask', 'gateway', 'prefix6', 'gateway6']: - vm_network_params[f] = vm_xenstore_data.get("vm-data/networks/%s/%s" % (vm_vif_params['device'], f), "") + vm_network_params[f] = vm_xenstore_data.get(f"vm-data/networks/{vm_vif_params['device']}/{f}", "") vm_facts['networks'].append(vm_network_params) @@ -570,14 +566,14 @@ def set_vm_power_state(module, vm_ref, power_state, timeout=300): if not module.check_mode: xapi_session.xenapi.VM.hard_reboot(vm_ref) else: - module.fail_json(msg="Cannot restart VM in state '%s'!" % vm_power_state_current) + module.fail_json(msg=f"Cannot restart VM in state '{vm_power_state_current}'!") elif power_state == "suspended": # running state is required for suspend. 
if vm_power_state_current == "poweredon": if not module.check_mode: xapi_session.xenapi.VM.suspend(vm_ref) else: - module.fail_json(msg="Cannot suspend VM in state '%s'!" % vm_power_state_current) + module.fail_json(msg=f"Cannot suspend VM in state '{vm_power_state_current}'!") elif power_state == "shutdownguest": # running state is required for guest shutdown. if vm_power_state_current == "poweredon": @@ -589,9 +585,9 @@ def set_vm_power_state(module, vm_ref, power_state, timeout=300): task_result = wait_for_task(module, task_ref, timeout) if task_result: - module.fail_json(msg="Guest shutdown task failed: '%s'!" % task_result) + module.fail_json(msg=f"Guest shutdown task failed: '{task_result}'!") else: - module.fail_json(msg="Cannot shutdown guest when VM is in state '%s'!" % vm_power_state_current) + module.fail_json(msg=f"Cannot shutdown guest when VM is in state '{vm_power_state_current}'!") elif power_state == "rebootguest": # running state is required for guest reboot. if vm_power_state_current == "poweredon": @@ -603,15 +599,15 @@ def set_vm_power_state(module, vm_ref, power_state, timeout=300): task_result = wait_for_task(module, task_ref, timeout) if task_result: - module.fail_json(msg="Guest reboot task failed: '%s'!" % task_result) + module.fail_json(msg=f"Guest reboot task failed: '{task_result}'!") else: - module.fail_json(msg="Cannot reboot guest when VM is in state '%s'!" % vm_power_state_current) + module.fail_json(msg=f"Cannot reboot guest when VM is in state '{vm_power_state_current}'!") else: - module.fail_json(msg="Requested VM power state '%s' is unsupported!" 
% power_state) + module.fail_json(msg=f"Requested VM power state '{power_state}' is unsupported!") state_changed = True except XenAPI.Failure as f: - module.fail_json(msg="XAPI ERROR: %s" % f.details) + module.fail_json(msg=f"XAPI ERROR: {f.details}") return (state_changed, vm_power_state_resulting) @@ -670,7 +666,7 @@ def wait_for_task(module, task_ref, timeout=300): xapi_session.xenapi.task.destroy(task_ref) except XenAPI.Failure as f: - module.fail_json(msg="XAPI ERROR: %s" % f.details) + module.fail_json(msg=f"XAPI ERROR: {f.details}") return result @@ -702,7 +698,7 @@ def wait_for_vm_ip_address(module, vm_ref, timeout=300): vm_power_state = xapi_to_module_vm_power_state(xapi_session.xenapi.VM.get_power_state(vm_ref).lower()) if vm_power_state != 'poweredon': - module.fail_json(msg="Cannot wait for VM IP address when VM is in state '%s'!" % vm_power_state) + module.fail_json(msg=f"Cannot wait for VM IP address when VM is in state '{vm_power_state}'!") interval = 2 @@ -733,7 +729,7 @@ def wait_for_vm_ip_address(module, vm_ref, timeout=300): module.fail_json(msg="Timed out waiting for VM IP address!") except XenAPI.Failure as f: - module.fail_json(msg="XAPI ERROR: %s" % f.details) + module.fail_json(msg=f"XAPI ERROR: {f.details}") return vm_guest_metrics @@ -796,7 +792,7 @@ class XAPI(object): # If scheme is not specified we default to http:// because https:// # is problematic in most setups. 
if not hostname.startswith("http://") and not hostname.startswith("https://"): - hostname = "http://%s" % hostname + hostname = f"http://{hostname}" try: # ignore_ssl is supported in XenAPI library from XenServer 7.2 @@ -815,7 +811,7 @@ class XAPI(object): try: cls._xapi_session.login_with_password(username, password, ANSIBLE_VERSION, 'Ansible') except XenAPI.Failure as f: - module.fail_json(msg="Unable to log on to XenServer at %s as %s: %s" % (hostname, username, f.details)) + module.fail_json(msg=f"Unable to log on to XenServer at {hostname} as {username}: {f.details}") # Disabling atexit should be used in special cases only. if disconnect_atexit: @@ -858,4 +854,4 @@ class XenServerObject(object): self.default_sr_ref = self.xapi_session.xenapi.pool.get_default_SR(self.pool_ref) self.xenserver_version = get_xenserver_version(module) except XenAPI.Failure as f: - self.module.fail_json(msg="XAPI ERROR: %s" % f.details) + self.module.fail_json(msg=f"XAPI ERROR: {f.details}") diff --git a/plugins/module_utils/xfconf.py b/plugins/module_utils/xfconf.py new file mode 100644 index 0000000000..8febbf450d --- /dev/null +++ b/plugins/module_utils/xfconf.py @@ -0,0 +1,43 @@ +# Copyright (c) 2022, Alexei Znamensky +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + +from ansible.module_utils.parsing.convert_bool import boolean +from ansible_collections.community.general.plugins.module_utils.cmd_runner import CmdRunner, cmd_runner_fmt + + +@cmd_runner_fmt.unpack_args +def _values_fmt(values, value_types): + result = [] + for value, value_type in zip(values, value_types): + if value_type == 'bool': + value = 'true' if boolean(value) else 'false' + result.extend(['--type', f'{value_type}', '--set', f'{value}']) + return result + + +def xfconf_runner(module, **kwargs): + runner = CmdRunner( + module, + command='xfconf-query', + 
arg_formats=dict( + channel=cmd_runner_fmt.as_opt_val("--channel"), + property=cmd_runner_fmt.as_opt_val("--property"), + force_array=cmd_runner_fmt.as_bool("--force-array"), + reset=cmd_runner_fmt.as_bool("--reset"), + create=cmd_runner_fmt.as_bool("--create"), + list_arg=cmd_runner_fmt.as_bool("--list"), + values_and_types=_values_fmt, + version=cmd_runner_fmt.as_fixed("--version"), + ), + **kwargs + ) + return runner + + +def get_xfconf_version(runner): + with runner("version") as ctx: + rc, out, err = ctx.run() + return out.splitlines()[0].split()[1] diff --git a/plugins/modules/database/aerospike/aerospike_migrations.py b/plugins/modules/aerospike_migrations.py similarity index 70% rename from plugins/modules/database/aerospike/aerospike_migrations.py rename to plugins/modules/aerospike_migrations.py index 27b979ad1f..5258f89c78 100644 --- a/plugins/modules/database/aerospike/aerospike_migrations.py +++ b/plugins/modules/aerospike_migrations.py @@ -1,110 +1,106 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- """short_description: Check or wait for migrations between nodes""" -# Copyright: (c) 2018, Albert Autin -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import (absolute_import, division, print_function) +# Copyright (c) 2018, Albert Autin +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later +from __future__ import annotations -__metaclass__ = type -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: aerospike_migrations short_description: Check or wait for migrations between nodes description: - - This can be used to check for migrations in a cluster. - This makes it easy to do a rolling upgrade/update on Aerospike nodes. 
- - If waiting for migrations is not desired, simply just poll until - port 3000 if available or asinfo -v status returns ok + - This can be used to check for migrations in a cluster. This makes it easy to do a rolling upgrade/update on Aerospike + nodes. + - If waiting for migrations is not desired, simply just poll until port 3000 if available or C(asinfo -v status) returns + ok. author: "Albert Autin (@Alb0t)" +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: none options: - host: - description: - - Which host do we use as seed for info connection - required: False - type: str - default: localhost - port: - description: - - Which port to connect to Aerospike on (service port) - required: False - type: int - default: 3000 - connect_timeout: - description: - - How long to try to connect before giving up (milliseconds) - required: False - type: int - default: 1000 - consecutive_good_checks: - description: - - How many times should the cluster report "no migrations" - consecutively before returning OK back to ansible? - required: False - type: int - default: 3 - sleep_between_checks: - description: - - How long to sleep between each check (seconds). - required: False - type: int - default: 60 - tries_limit: - description: - - How many times do we poll before giving up and failing? - default: 300 - required: False - type: int - local_only: - description: - - Do you wish to only check for migrations on the local node - before returning, or do you want all nodes in the cluster - to finish before returning? 
- required: True - type: bool - min_cluster_size: - description: - - Check will return bad until cluster size is met - or until tries is exhausted - required: False - type: int - default: 1 - fail_on_cluster_change: - description: - - Fail if the cluster key changes - if something else is changing the cluster, we may want to fail - required: False - type: bool - default: True - migrate_tx_key: - description: - - The metric key used to determine if we have tx migrations - remaining. Changeable due to backwards compatibility. - required: False - type: str - default: migrate_tx_partitions_remaining - migrate_rx_key: - description: - - The metric key used to determine if we have rx migrations - remaining. Changeable due to backwards compatibility. - required: False - type: str - default: migrate_rx_partitions_remaining - target_cluster_size: - description: - - When all aerospike builds in the cluster are greater than - version 4.3, then the C(cluster-stable) info command will be used. - Inside this command, you can optionally specify what the target - cluster size is - but it is not necessary. You can still rely on - min_cluster_size if you don't want to use this option. - - If this option is specified on a cluster that has at least 1 - host <4.3 then it will be ignored until the min version reaches - 4.3. - required: False - type: int -''' -EXAMPLES = ''' + host: + description: + - Which host do we use as seed for info connection. + type: str + default: localhost + port: + description: + - Which port to connect to Aerospike on (service port). + required: false + type: int + default: 3000 + connect_timeout: + description: + - How long to try to connect before giving up (milliseconds). + required: false + type: int + default: 1000 + consecutive_good_checks: + description: + - How many times should the cluster report "no migrations" consecutively before returning OK back to ansible? 
+ required: false + type: int + default: 3 + sleep_between_checks: + description: + - How long to sleep between each check (seconds). + required: false + type: int + default: 60 + tries_limit: + description: + - How many times do we poll before giving up and failing? + default: 300 + required: false + type: int + local_only: + description: + - Do you wish to only check for migrations on the local node before returning, or do you want all nodes in the cluster + to finish before returning? + required: true + type: bool + min_cluster_size: + description: + - Check fails until cluster size is met or until tries is exhausted. + required: false + type: int + default: 1 + fail_on_cluster_change: + description: + - Fail if the cluster key changes if something else is changing the cluster, we may want to fail. + required: false + type: bool + default: true + migrate_tx_key: + description: + - The metric key used to determine if we have tx migrations remaining. Changeable due to backwards compatibility. + required: false + type: str + default: migrate_tx_partitions_remaining + migrate_rx_key: + description: + - The metric key used to determine if we have rx migrations remaining. Changeable due to backwards compatibility. + required: false + type: str + default: migrate_rx_partitions_remaining + target_cluster_size: + description: + - When all aerospike builds in the cluster are greater than version 4.3, then the C(cluster-stable) info command is + used. Inside this command, you can optionally specify what the target cluster size is - but it is not necessary. + You can still rely on O(min_cluster_size) if you do not want to use this option. + - If this option is specified on a cluster that has at least one host <4.3 then it is ignored until the min version + reaches 4.3. 
+ required: false + type: int +""" + +EXAMPLES = r""" # check for migrations on local node - name: Wait for migrations on local node before proceeding community.general.aerospike_migrations: @@ -113,7 +109,7 @@ EXAMPLES = ''' consecutive_good_checks: 5 sleep_between_checks: 15 tries_limit: 600 - local_only: False + local_only: false # example playbook: - name: Upgrade aerospike @@ -124,13 +120,13 @@ EXAMPLES = ''' - name: Install dependencies ansible.builtin.apt: name: - - python - - python-pip - - python-setuptools + - python + - python-pip + - python-setuptools state: latest - name: Setup aerospike ansible.builtin.pip: - name: aerospike + name: aerospike # check for migrations every (sleep_between_checks) # If at least (consecutive_good_checks) checks come back OK in a row, then return OK. # Will exit if any exception, which can be caused by bad nodes, @@ -139,13 +135,13 @@ EXAMPLES = ''' # Tries Limit * Sleep Between Checks * delay * retries - name: Wait for aerospike migrations community.general.aerospike_migrations: - local_only: True - sleep_between_checks: 1 - tries_limit: 5 - consecutive_good_checks: 3 - fail_on_cluster_change: true - min_cluster_size: 3 - target_cluster_size: 4 + local_only: true + sleep_between_checks: 1 + tries_limit: 5 + consecutive_good_checks: 3 + fail_on_cluster_change: true + min_cluster_size: 3 + target_cluster_size: 4 register: migrations_check until: migrations_check is succeeded changed_when: false @@ -153,14 +149,14 @@ EXAMPLES = ''' retries: 120 - name: Another thing ansible.builtin.shell: | - echo foo + echo foo - name: Reboot ansible.builtin.reboot: -''' +""" -RETURN = ''' +RETURN = r""" # Returns only a success/failure result. Changed is always false. 
-''' +""" import traceback @@ -181,19 +177,19 @@ else: def run_module(): """run ansible module""" module_args = dict( - host=dict(type='str', required=False, default='localhost'), - port=dict(type='int', required=False, default=3000), - connect_timeout=dict(type='int', required=False, default=1000), - consecutive_good_checks=dict(type='int', required=False, default=3), - sleep_between_checks=dict(type='int', required=False, default=60), - tries_limit=dict(type='int', required=False, default=300), + host=dict(type='str', default='localhost'), + port=dict(type='int', default=3000), + connect_timeout=dict(type='int', default=1000), + consecutive_good_checks=dict(type='int', default=3), + sleep_between_checks=dict(type='int', default=60), + tries_limit=dict(type='int', default=300), local_only=dict(type='bool', required=True), - min_cluster_size=dict(type='int', required=False, default=1), - target_cluster_size=dict(type='int', required=False, default=None), - fail_on_cluster_change=dict(type='bool', required=False, default=True), - migrate_tx_key=dict(type='str', required=False, no_log=False, + min_cluster_size=dict(type='int', default=1), + target_cluster_size=dict(type='int'), + fail_on_cluster_change=dict(type='bool', default=True), + migrate_tx_key=dict(type='str', no_log=False, default="migrate_tx_partitions_remaining"), - migrate_rx_key=dict(type='str', required=False, no_log=False, + migrate_rx_key=dict(type='str', no_log=False, default="migrate_rx_partitions_remaining") ) @@ -221,7 +217,7 @@ def run_module(): if has_migrations: module.fail_json(msg="Failed.", skip_reason=skip_reason) except Exception as e: - module.fail_json(msg="Error: {0}".format(e)) + module.fail_json(msg=f"Error: {e}") module.exit_json(**result) @@ -267,8 +263,7 @@ class Migrations: data = data.split("\t") if len(data) != 1 and len(data) != 2: self.module.fail_json( - msg="Unexpected number of values returned in info command: " + - str(len(data)) + msg=f"Unexpected number of values 
returned in info command: {len(data)}" ) # data will be in format 'command\touput' data = data[-1] @@ -333,7 +328,7 @@ class Migrations: """returns a True or False. Does the namespace have migrations for the node passed? If no node passed, uses the local node or the first one in the list""" - namespace_stats = self._info_cmd_helper("namespace/" + namespace, node) + namespace_stats = self._info_cmd_helper(f"namespace/{namespace}", node) try: namespace_tx = \ int(namespace_stats[self.module.params['migrate_tx_key']]) @@ -341,13 +336,10 @@ class Migrations: int(namespace_stats[self.module.params['migrate_rx_key']]) except KeyError: self.module.fail_json( - msg="Did not find partition remaining key:" + - self.module.params['migrate_tx_key'] + - " or key:" + - self.module.params['migrate_rx_key'] + - " in 'namespace/" + - namespace + - "' output." + msg=( + f"Did not find partition remaining key:{self.module.params['migrate_tx_key']} " + f"or key:{self.module.params['migrate_rx_key']} in 'namespace/{namespace}' output." 
+ ) ) except TypeError: self.module.fail_json( @@ -437,7 +429,7 @@ class Migrations: cmd = "cluster-stable:" target_cluster_size = self.module.params['target_cluster_size'] if target_cluster_size is not None: - cmd = cmd + "size=" + str(target_cluster_size) + ";" + cmd = f"{cmd}size={target_cluster_size};" for node in self._nodes: try: cluster_key.add(self._info_cmd_helper(cmd, node)) @@ -479,8 +471,7 @@ class Migrations: stable, reason = self._cluster_good_state() if stable is not True: skip_reason.append( - "Skipping on try#" + str(try_num) + - " for reason:" + reason + f"Skipping on try#{try_num} for reason:{reason}" ) else: if self._can_use_cluster_stable(): @@ -489,14 +480,12 @@ class Migrations: else: consecutive_good = 0 skip_reason.append( - "Skipping on try#" + str(try_num) + - " for reason:" + " cluster_stable" + f"Skipping on try#{try_num} for reason: cluster_stable" ) elif self._has_migs(local): # print("_has_migs") skip_reason.append( - "Skipping on try#" + str(try_num) + - " for reason:" + " migrations" + f"Skipping on try#{try_num} for reason: migrations" ) consecutive_good = 0 else: diff --git a/plugins/modules/monitoring/airbrake_deployment.py b/plugins/modules/airbrake_deployment.py similarity index 68% rename from plugins/modules/monitoring/airbrake_deployment.py rename to plugins/modules/airbrake_deployment.py index a7d7710a0a..745d3fce5d 100644 --- a/plugins/modules/monitoring/airbrake_deployment.py +++ b/plugins/modules/airbrake_deployment.py @@ -1,26 +1,31 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- # Copyright 2013 Bruce Pennypacker -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = ''' ---- 
+DOCUMENTATION = r""" module: airbrake_deployment author: -- "Bruce Pennypacker (@bpennypacker)" -- "Patrick Humpal (@phumpal)" + - "Bruce Pennypacker (@bpennypacker)" + - "Patrick Humpal (@phumpal)" short_description: Notify airbrake about app deployments description: - - Notify airbrake about app deployments (see U(https://airbrake.io/docs/api/#deploys-v4)). + - Notify airbrake about app deployments (see U(https://airbrake.io/docs/api/#deploys-v4)). +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: none options: project_id: description: - - Airbrake PROJECT_ID + - Airbrake PROJECT_ID. required: true type: str version_added: '0.2.0' @@ -32,27 +37,27 @@ options: version_added: '0.2.0' environment: description: - - The airbrake environment name, typically 'production', 'staging', etc. + - The airbrake environment name, typically v(production), V(staging), and so on. required: true type: str user: description: - - The username of the person doing the deployment + - The username of the person doing the deployment. required: false type: str repo: description: - - URL of the project repository + - URL of the project repository. required: false type: str revision: description: - - A hash, number, tag, or other identifier showing what revision from version control was deployed + - A hash, number, tag, or other identifier showing what revision from version control was deployed. required: false type: str version: description: - - A string identifying what version was deployed + - A string identifying what version was deployed. required: false type: str version_added: '1.0.0' @@ -64,16 +69,16 @@ options: type: str validate_certs: description: - - If C(no), SSL certificates for the target url will not be validated. This should only be used - on personally controlled sites using self-signed certificates. + - If V(false), SSL certificates for the target URL is not validated. 
This should only be used on personally controlled + sites using self-signed certificates. required: false - default: 'yes' + default: true type: bool requirements: [] -''' +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: Notify airbrake about an app deployment community.general.airbrake_deployment: project_id: '12345' @@ -90,11 +95,10 @@ EXAMPLES = ''' user: ansible revision: 'e54dd3a01f2c421b558ef33b5f79db936e2dcf15' version: '0.2.0' -''' +""" from ansible.module_utils.basic import AnsibleModule from ansible.module_utils.urls import fetch_url -from ansible.module_utils.six.moves.urllib.parse import urlencode # =========================================== @@ -108,11 +112,11 @@ def main(): project_id=dict(required=True, no_log=True, type='str'), project_key=dict(required=True, no_log=True, type='str'), environment=dict(required=True, type='str'), - user=dict(required=False, type='str'), - repo=dict(required=False, type='str'), - revision=dict(required=False, type='str'), - version=dict(required=False, type='str'), - url=dict(required=False, default='https://api.airbrake.io/api/v4/projects/', type='str'), + user=dict(type='str'), + repo=dict(type='str'), + revision=dict(type='str'), + version=dict(type='str'), + url=dict(default='https://api.airbrake.io/api/v4/projects/', type='str'), validate_certs=dict(default=True, type='bool'), ), supports_check_mode=True, @@ -142,7 +146,7 @@ def main(): params["version"] = module.params["version"] # Build deploy url - url = module.params.get('url') + module.params["project_id"] + '/deploys?key=' + module.params["project_key"] + url = f"{module.params.get('url')}{module.params['project_id']}/deploys?key={module.params['project_key']}" json_body = module.jsonify(params) # Build header @@ -155,7 +159,7 @@ def main(): if info['status'] == 200 or info['status'] == 201: module.exit_json(changed=True) else: - module.fail_json(msg="HTTP result code: %d connecting to %s" % (info['status'], url)) + module.fail_json(msg=f"HTTP result code: 
{info['status']} connecting to {url}") if __name__ == '__main__': diff --git a/plugins/modules/system/aix_devices.py b/plugins/modules/aix_devices.py similarity index 74% rename from plugins/modules/system/aix_devices.py rename to plugins/modules/aix_devices.py index 89468059f3..8176b740bb 100644 --- a/plugins/modules/system/aix_devices.py +++ b/plugins/modules/aix_devices.py @@ -1,52 +1,57 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- -# Copyright: (c) 2017, 2018 Kairo Araujo -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# Copyright (c) 2017, 2018 Kairo Araujo +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = r''' ---- +DOCUMENTATION = r""" author: -- Kairo Araujo (@kairoaraujo) + - Kairo Araujo (@kairoaraujo) module: aix_devices short_description: Manages AIX devices description: -- This module discovers, defines, removes and modifies attributes of AIX devices. + - This module discovers, defines, removes and modifies attributes of AIX devices. +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: none options: attributes: description: - - A list of device attributes. + - A list of device attributes. type: dict device: description: - - The name of the device. - - C(all) is valid to rescan C(available) all devices (AIX cfgmgr command). + - The name of the device. + - V(all) is valid to rescan C(available) all devices (AIX C(cfgmgr) command). type: str force: description: - - Forces action. + - Forces action. type: bool - default: no + default: false recursive: description: - - Removes or defines a device and children devices. + - Removes or defines a device and children devices. 
type: bool - default: no + default: false state: description: - - Controls the device state. - - C(available) (alias C(present)) rescan a specific device or all devices (when C(device) is not specified). - - C(removed) (alias C(absent) removes a device. - - C(defined) changes device to Defined state. + - Controls the device state. + - V(available) (alias V(present)) rescan a specific device or all devices (when O(device) is not specified). + - V(removed) (alias V(absent) removes a device. + - V(defined) changes device to Defined state. type: str - choices: [ available, defined, removed ] + choices: [available, defined, removed] default: available -''' +""" -EXAMPLES = r''' +EXAMPLES = r""" - name: Scan new devices community.general.aix_devices: device: all @@ -86,13 +91,13 @@ EXAMPLES = r''' - name: Put vscsi1 and children devices in Defined state. community.general.aix_devices: device: vscsi1 - recursive: yes + recursive: true state: defined - name: Removes vscsi1 and children devices. community.general.aix_devices: device: vscsi1 - recursive: yes + recursive: true state: removed - name: Changes en1 mtu to 9000 and disables arp. @@ -100,7 +105,7 @@ EXAMPLES = r''' device: en1 attributes: mtu: 900 - arp: off + arp: 'off' state: available - name: Configure IP, netmask and set en1 up. 
@@ -118,9 +123,9 @@ EXAMPLES = r''' attributes: alias4: 10.0.0.100,255.255.255.0 state: available -''' +""" -RETURN = r''' # ''' +RETURN = r""" # """ from ansible.module_utils.basic import AnsibleModule @@ -136,7 +141,7 @@ def _check_device(module, device): """ lsdev_cmd = module.get_bin_path('lsdev', True) - rc, lsdev_out, err = module.run_command(["%s" % lsdev_cmd, '-C', '-l', "%s" % device]) + rc, lsdev_out, err = module.run_command([lsdev_cmd, '-C', '-l', device]) if rc != 0: module.fail_json(msg="Failed to run lsdev", rc=rc, err=err) @@ -161,7 +166,7 @@ def _check_device_attr(module, device, attr): """ lsattr_cmd = module.get_bin_path('lsattr', True) - rc, lsattr_out, err = module.run_command(["%s" % lsattr_cmd, '-El', "%s" % device, '-a', "%s" % attr]) + rc, lsattr_out, err = module.run_command([lsattr_cmd, '-El', device, '-a', f"{attr}"]) hidden_attrs = ['delalias4', 'delalias6'] @@ -175,7 +180,7 @@ def _check_device_attr(module, device, attr): return current_param elif rc != 0: - module.fail_json(msg="Failed to run lsattr: %s" % err, rc=rc, err=err) + module.fail_json(msg=f"Failed to run lsattr: {err}", rc=rc, err=err) current_param = lsattr_out.split()[1] return current_param @@ -186,7 +191,7 @@ def discover_device(module, device): cfgmgr_cmd = module.get_bin_path('cfgmgr', True) if device is not None: - device = "-l %s" % device + device = f"-l {device}" else: device = '' @@ -194,7 +199,7 @@ def discover_device(module, device): changed = True msg = '' if not module.check_mode: - rc, cfgmgr_out, err = module.run_command(["%s" % cfgmgr_cmd, "%s" % device]) + rc, cfgmgr_out, err = module.run_command([cfgmgr_cmd, device]) changed = True msg = cfgmgr_out @@ -218,9 +223,9 @@ def change_device_attr(module, attributes, device, force): elif current_param != new_param: if force: - cmd = ["%s" % chdev_cmd, '-l', "%s" % device, '-a', "%s=%s" % (attr, attributes[attr]), "%s" % force] + cmd = [chdev_cmd, '-l', device, '-a', f"{attr}={attributes[attr]}", f"{force}"] 
else: - cmd = ["%s" % chdev_cmd, '-l', "%s" % device, '-a', "%s=%s" % (attr, attributes[attr])] + cmd = [chdev_cmd, '-l', device, '-a', f"{attr}={attributes[attr]}"] if not module.check_mode: rc, chdev_out, err = module.run_command(cmd) @@ -233,22 +238,22 @@ def change_device_attr(module, attributes, device, force): if len(attr_changed) > 0: changed = True - attr_changed_msg = "Attributes changed: %s. " % ','.join(attr_changed) + attr_changed_msg = f"Attributes changed: {','.join(attr_changed)}. " else: changed = False attr_changed_msg = '' if len(attr_not_changed) > 0: - attr_not_changed_msg = "Attributes already set: %s. " % ','.join(attr_not_changed) + attr_not_changed_msg = f"Attributes already set: {','.join(attr_not_changed)}. " else: attr_not_changed_msg = '' if len(attr_invalid) > 0: - attr_invalid_msg = "Invalid attributes: %s " % ', '.join(attr_invalid) + attr_invalid_msg = f"Invalid attributes: {', '.join(attr_invalid)} " else: attr_invalid_msg = '' - msg = "%s%s%s" % (attr_changed_msg, attr_not_changed_msg, attr_invalid_msg) + msg = f"{attr_changed_msg}{attr_not_changed_msg}{attr_invalid_msg}" return changed, msg @@ -276,9 +281,9 @@ def remove_device(module, device, force, recursive, state): if not module.check_mode: if state: - rc, rmdev_out, err = module.run_command(["%s" % rmdev_cmd, "-l", "%s" % device, "%s" % recursive, "%s" % force]) + rc, rmdev_out, err = module.run_command([rmdev_cmd, "-l", device, f"{recursive}", f"{force}"]) else: - rc, rmdev_out, err = module.run_command(["%s" % rmdev_cmd, "-l", "%s" % device, "%s" % recursive]) + rc, rmdev_out, err = module.run_command([rmdev_cmd, "-l", device, f"{recursive}"]) if rc != 0: module.fail_json(msg="Failed to run rmdev", rc=rc, err=err) @@ -324,7 +329,7 @@ def main(): if device_status: result['changed'], result['msg'] = change_device_attr(module, attributes, device, force) else: - result['msg'] = "Device %s does not exist." % device + result['msg'] = f"Device {device} does not exist." 
else: # discovery devices (cfgmgr) @@ -335,7 +340,7 @@ def main(): result['changed'], result['msg'] = discover_device(module, device) else: - result['msg'] = "Device %s does not exist." % device + result['msg'] = f"Device {device} does not exist." else: result['changed'], result['msg'] = discover_device(module, device) @@ -350,16 +355,16 @@ def main(): if check_device: if state == 'defined' and device_state == 'Defined': result['changed'] = False - result['msg'] = 'Device %s already in Defined' % device + result['msg'] = f'Device {device} already in Defined' else: result['changed'], result['msg'] = remove_device(module, device, force, recursive, state) else: - result['msg'] = "Device %s does not exist." % device + result['msg'] = f"Device {device} does not exist." else: - result['msg'] = "Unexpected state %s." % state + result['msg'] = f"Unexpected state {state}." module.fail_json(**result) module.exit_json(**result) diff --git a/plugins/modules/system/aix_filesystem.py b/plugins/modules/aix_filesystem.py similarity index 71% rename from plugins/modules/system/aix_filesystem.py rename to plugins/modules/aix_filesystem.py index a47c29f04b..3b90848fe6 100644 --- a/plugins/modules/system/aix_filesystem.py +++ b/plugins/modules/aix_filesystem.py @@ -1,45 +1,51 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- -# Copyright: (c) 2017, Kairo Araujo -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# Copyright (c) 2017, Kairo Araujo +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import absolute_import, division, print_function +from __future__ import annotations -__metaclass__ = type -DOCUMENTATION = r''' ---- +DOCUMENTATION = r""" author: - Kairo Araujo (@kairoaraujo) module: aix_filesystem short_description: Configure LVM and NFS file systems for AIX description: - - This module creates, removes, mount 
and unmount LVM and NFS file system for - AIX using C(/etc/filesystems). + - This module creates, removes, mount and unmount LVM and NFS file system for AIX using C(/etc/filesystems). - For LVM file systems is possible to resize a file system. +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: none options: account_subsystem: description: - Specifies whether the file system is to be processed by the accounting subsystem. type: bool - default: no + default: false attributes: description: - Specifies attributes for files system separated by comma. type: list elements: str - default: agblksize='4096',isnapshot='no' + default: + - agblksize=4096 + - isnapshot=no auto_mount: description: - File system is automatically mounted at system restart. type: bool - default: yes + default: true device: description: - Logical volume (LV) device name or remote export device to create a NFS file system. - It is used to create a file system on an already existing logical volume or the exported NFS file system. - - If not mentioned a new logical volume name will be created following AIX standards (LVM). + - If not mentioned a new logical volume name is created following AIX standards (LVM). type: str fs_type: description: @@ -48,9 +54,9 @@ options: default: jfs2 permissions: description: - - Set file system permissions. C(rw) (read-write) or C(ro) (read-only). + - Set file system permissions. V(rw) (read-write) or V(ro) (read-only). type: str - choices: [ ro, rw ] + choices: [ro, rw] default: rw mount_group: description: @@ -67,107 +73,96 @@ options: type: str rm_mount_point: description: - - Removes the mount point directory when used with state C(absent). + - Removes the mount point directory when used with state V(absent). type: bool - default: no + default: false size: description: - Specifies the file system size. - - For already C(present) it will be resized. 
- - 512-byte blocks, Megabytes or Gigabytes. If the value has M specified - it will be in Megabytes. If the value has G specified it will be in - Gigabytes. - - If no M or G the value will be 512-byte blocks. - - If "+" is specified in begin of value, the value will be added. - - If "-" is specified in begin of value, the value will be removed. - - If "+" or "-" is not specified, the total value will be the specified. - - Size will respects the LVM AIX standards. + - For already present it resizes the filesystem. + - 512-byte blocks, megabytes or gigabytes. If the value has M specified it is in megabytes. If the value has G specified + it is in gigabytes. + - If no M or G the value is 512-byte blocks. + - If V(+) is specified in begin of value, the value is added. + - If V(-) is specified in begin of value, the value is removed. + - If neither V(+) nor V(-) is specified, then the total value is the specified. + - Size respects the LVM AIX standards. type: str state: description: - Controls the file system state. - - C(present) check if file system exists, creates or resize. - - C(absent) removes existing file system if already C(unmounted). - - C(mounted) checks if the file system is mounted or mount the file system. - - C(unmounted) check if the file system is unmounted or unmount the file system. + - V(present) check if file system exists, creates or resize. + - V(absent) removes existing file system if already V(unmounted). + - V(mounted) checks if the file system is mounted or mount the file system. + - V(unmounted) check if the file system is unmounted or unmount the file system. type: str - choices: [ absent, mounted, present, unmounted ] + choices: [absent, mounted, present, unmounted] default: present vg: description: - Specifies an existing volume group (VG). type: str notes: - - For more C(attributes), please check "crfs" AIX manual. -''' + - For more O(attributes), please check "crfs" AIX manual. 
+""" -EXAMPLES = r''' +EXAMPLES = r""" - name: Create filesystem in a previously defined logical volume. community.general.aix_filesystem: device: testlv - community.general.filesystem: /testfs + filesystem: /testfs state: present - name: Creating NFS filesystem from nfshost. community.general.aix_filesystem: device: /home/ftp nfs_server: nfshost - community.general.filesystem: /home/ftp + filesystem: /home/ftp state: present - name: Creating a new file system without a previously logical volume. community.general.aix_filesystem: - community.general.filesystem: /newfs + filesystem: /newfs size: 1G state: present vg: datavg - name: Unmounting /testfs. community.general.aix_filesystem: - community.general.filesystem: /testfs + filesystem: /testfs state: unmounted - name: Resizing /mksysb to +512M. community.general.aix_filesystem: - community.general.filesystem: /mksysb + filesystem: /mksysb size: +512M state: present - name: Resizing /mksysb to 11G. community.general.aix_filesystem: - community.general.filesystem: /mksysb + filesystem: /mksysb size: 11G state: present - name: Resizing /mksysb to -2G. community.general.aix_filesystem: - community.general.filesystem: /mksysb + filesystem: /mksysb size: -2G state: present - name: Remove NFS filesystem /home/ftp. community.general.aix_filesystem: - community.general.filesystem: /home/ftp - rm_mount_point: yes + filesystem: /home/ftp + rm_mount_point: true state: absent - name: Remove /newfs. community.general.aix_filesystem: - community.general.filesystem: /newfs - rm_mount_point: yes + filesystem: /newfs + rm_mount_point: true state: absent -''' +""" -RETURN = r''' -changed: - description: Return changed for aix_filesystems actions as true or false. - returned: always - type: bool -msg: - description: Return message regarding the action. 
- returned: always - type: str -''' from ansible.module_utils.basic import AnsibleModule from ansible_collections.community.general.plugins.module_utils._mount import ismount @@ -189,7 +184,7 @@ def _fs_exists(module, filesystem): return False else: - module.fail_json(msg="Failed to run lsfs. Error message: %s" % err) + module.fail_json(msg=f"Failed to run lsfs. Error message: {err}") else: @@ -208,7 +203,7 @@ def _check_nfs_device(module, nfs_host, device): showmount_cmd = module.get_bin_path('showmount', True) rc, showmount_out, err = module.run_command([showmount_cmd, "-a", nfs_host]) if rc != 0: - module.fail_json(msg="Failed to run showmount. Error message: %s" % err) + module.fail_json(msg=f"Failed to run showmount. Error message: {err}") else: showmount_data = showmount_out.splitlines() for line in showmount_data: @@ -230,20 +225,20 @@ def _validate_vg(module, vg): lsvg_cmd = module.get_bin_path('lsvg', True) rc, current_active_vgs, err = module.run_command([lsvg_cmd, "-o"]) if rc != 0: - module.fail_json(msg="Failed executing %s command." % lsvg_cmd) + module.fail_json(msg=f"Failed executing {lsvg_cmd} command.") - rc, current_all_vgs, err = module.run_command([lsvg_cmd, "%s"]) + rc, current_all_vgs, err = module.run_command([lsvg_cmd]) if rc != 0: - module.fail_json(msg="Failed executing %s command." % lsvg_cmd) + module.fail_json(msg=f"Failed executing {lsvg_cmd} command.") if vg in current_all_vgs and vg not in current_active_vgs: - msg = "Volume group %s is in varyoff state." % vg + msg = f"Volume group {vg} is in varyoff state." return False, msg elif vg in current_active_vgs: - msg = "Volume group %s is in varyon state." % vg + msg = f"Volume group {vg} is in varyon state." return True, msg else: - msg = "Volume group %s does not exist." % vg + msg = f"Volume group {vg} does not exist." 
return None, msg @@ -252,7 +247,7 @@ def resize_fs(module, filesystem, size): chfs_cmd = module.get_bin_path('chfs', True) if not module.check_mode: - rc, chfs_out, err = module.run_command([chfs_cmd, "-a", "size=%s" % size, filesystem]) + rc, chfs_out, err = module.run_command([chfs_cmd, "-a", f"size={size}", filesystem]) if rc == 28: changed = False @@ -262,7 +257,7 @@ def resize_fs(module, filesystem, size): changed = False return changed, err else: - module.fail_json(msg="Failed to run chfs. Error message: %s" % err) + module.fail_json(msg=f"Failed to run chfs. Error message: {err}") else: if re.findall('The filesystem size is already', chfs_out): @@ -306,19 +301,19 @@ def create_fs( if size is None: size = '' else: - size = "-a size=%s" % size + size = f"-a size={size}" if device is None: device = '' else: - device = "-d %s" % device + device = f"-d {device}" if vg is None: vg = '' else: vg_state, msg = _validate_vg(module, vg) if vg_state: - vg = "-g %s" % vg + vg = f"-g {vg}" else: changed = False @@ -328,7 +323,7 @@ def create_fs( mount_group = '' else: - mount_group = "-u %s" % mount_group + mount_group = f"-u {mount_group}" auto_mount = auto_mount_opt[auto_mount] account_subsystem = account_subsys_opt[account_subsystem] @@ -339,10 +334,10 @@ def create_fs( if not module.check_mode: rc, mknfsmnt_out, err = module.run_command([mknfsmnt_cmd, "-f", filesystem, device, "-h", nfs_server, "-t", permissions, auto_mount, "-w", "bg"]) if rc != 0: - module.fail_json(msg="Failed to run mknfsmnt. Error message: %s" % err) + module.fail_json(msg=f"Failed to run mknfsmnt. Error message: {err}") else: changed = True - msg = "NFS file system %s created." % filesystem + msg = f"NFS file system {filesystem} created." return changed, msg else: @@ -355,16 +350,61 @@ def create_fs( # Creates a LVM file system. 
crfs_cmd = module.get_bin_path('crfs', True) if not module.check_mode: - cmd = [crfs_cmd, "-v", fs_type, "-m", filesystem, vg, device, mount_group, auto_mount, account_subsystem, "-p", permissions, size, "-a", attributes] + cmd = [crfs_cmd] + + cmd.append("-v") + cmd.append(fs_type) + + if vg: + (flag, value) = vg.split() + cmd.append(flag) + cmd.append(value) + + if device: + (flag, value) = device.split() + cmd.append(flag) + cmd.append(value) + + cmd.append("-m") + cmd.append(filesystem) + + if mount_group: + (flag, value) = mount_group.split() + cmd.append(flag) + cmd.append(value) + + if auto_mount: + (flag, value) = auto_mount.split() + cmd.append(flag) + cmd.append(value) + + if account_subsystem: + (flag, value) = account_subsystem.split() + cmd.append(flag) + cmd.append(value) + + cmd.append("-p") + cmd.append(permissions) + + if size: + (flag, value) = size.split() + cmd.append(flag) + cmd.append(value) + + if attributes: + splitted_attributes = attributes.split() + cmd.append("-a") + for value in splitted_attributes: + cmd.append(value) + rc, crfs_out, err = module.run_command(cmd) if rc == 10: module.exit_json( - msg="Using a existent previously defined logical volume, " - "volume group needs to be empty. %s" % err) + msg=f"Using a existent previously defined logical volume, volume group needs to be empty. {err}") elif rc != 0: - module.fail_json(msg="Failed to run %s. Error message: %s" % (cmd, err)) + module.fail_json(msg=f"Failed to run {cmd}. Error message: {err}") else: changed = True @@ -392,12 +432,12 @@ def remove_fs(module, filesystem, rm_mount_point): cmd = [rmfs_cmd, "-r", rm_mount_point, filesystem] rc, rmfs_out, err = module.run_command(cmd) if rc != 0: - module.fail_json(msg="Failed to run %s. Error message: %s" % (cmd, err)) + module.fail_json(msg=f"Failed to run {cmd}. Error message: {err}") else: changed = True msg = rmfs_out if not rmfs_out: - msg = "File system %s removed." % filesystem + msg = f"File system {filesystem} removed." 
return changed, msg else: @@ -414,10 +454,10 @@ def mount_fs(module, filesystem): if not module.check_mode: rc, mount_out, err = module.run_command([mount_cmd, filesystem]) if rc != 0: - module.fail_json(msg="Failed to run mount. Error message: %s" % err) + module.fail_json(msg=f"Failed to run mount. Error message: {err}") else: changed = True - msg = "File system %s mounted." % filesystem + msg = f"File system {filesystem} mounted." return changed, msg else: @@ -434,10 +474,10 @@ def unmount_fs(module, filesystem): if not module.check_mode: rc, unmount_out, err = module.run_command([unmount_cmd, filesystem]) if rc != 0: - module.fail_json(msg="Failed to run unmount. Error message: %s" % err) + module.fail_json(msg=f"Failed to run unmount. Error message: {err}") else: changed = True - msg = "File system %s unmounted." % filesystem + msg = f"File system {filesystem} unmounted." return changed, msg else: @@ -451,7 +491,7 @@ def main(): module = AnsibleModule( argument_spec=dict( account_subsystem=dict(type='bool', default=False), - attributes=dict(type='list', elements='str', default=["agblksize='4096'", "isnapshot='no'"]), + attributes=dict(type='list', elements='str', default=["agblksize=4096", "isnapshot=no"]), auto_mount=dict(type='bool', default=True), device=dict(type='str'), filesystem=dict(type='str', required=True), @@ -492,7 +532,7 @@ def main(): # Check if fs is mounted or exists. if fs_mounted or fs_exists: - result['msg'] = "File system %s already exists." % filesystem + result['msg'] = f"File system {filesystem} already exists." result['changed'] = False # If parameter size was passed, resize fs. @@ -528,32 +568,32 @@ def main(): elif state == 'absent': if ismount(filesystem): - result['msg'] = "File system %s mounted." % filesystem + result['msg'] = f"File system {filesystem} mounted." else: fs_status = _fs_exists(module, filesystem) if not fs_status: - result['msg'] = "File system %s does not exist." 
% filesystem + result['msg'] = f"File system {filesystem} does not exist." else: result['changed'], result['msg'] = remove_fs(module, filesystem, rm_mount_point) elif state == 'mounted': if ismount(filesystem): result['changed'] = False - result['msg'] = "File system %s already mounted." % filesystem + result['msg'] = f"File system {filesystem} already mounted." else: result['changed'], result['msg'] = mount_fs(module, filesystem) elif state == 'unmounted': if not ismount(filesystem): result['changed'] = False - result['msg'] = "File system %s already unmounted." % filesystem + result['msg'] = f"File system {filesystem} already unmounted." else: result['changed'], result['msg'] = unmount_fs(module, filesystem) else: # Unreachable codeblock - result['msg'] = "Unexpected state %s." % state + result['msg'] = f"Unexpected state {state}." module.fail_json(**result) module.exit_json(**result) diff --git a/plugins/modules/system/aix_inittab.py b/plugins/modules/aix_inittab.py similarity index 72% rename from plugins/modules/system/aix_inittab.py rename to plugins/modules/aix_inittab.py index c2daface36..f20f2e903d 100644 --- a/plugins/modules/system/aix_inittab.py +++ b/plugins/modules/aix_inittab.py @@ -1,73 +1,78 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- -# Copyright: (c) 2017, Joris Weijters -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# Copyright (c) 2017, Joris Weijters +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = r''' ---- +DOCUMENTATION = r""" author: -- Joris Weijters (@molekuul) + - Joris Weijters (@molekuul) module: aix_inittab -short_description: Manages the inittab on AIX +short_description: Manages the C(inittab) on AIX description: - - Manages the 
inittab on AIX. + - Manages the C(inittab) on AIX. +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: none options: name: description: - - Name of the inittab entry. + - Name of the C(inittab) entry. type: str - required: yes - aliases: [ service ] + required: true + aliases: [service] runlevel: description: - - Runlevel of the entry. + - Runlevel of the entry. type: str - required: yes + required: true action: description: - - Action what the init has to do with this entry. + - Action what the init has to do with this entry. type: str choices: - - boot - - bootwait - - hold - - initdefault - - 'off' - - once - - ondemand - - powerfail - - powerwait - - respawn - - sysinit - - wait + - boot + - bootwait + - hold + - initdefault + - 'off' + - once + - ondemand + - powerfail + - powerwait + - respawn + - sysinit + - wait command: description: - - What command has to run. + - What command has to run. type: str - required: yes + required: true insertafter: description: - - After which inittabline should the new entry inserted. + - After which inittabline should the new entry inserted. type: str state: description: - - Whether the entry should be present or absent in the inittab file. + - Whether the entry should be present or absent in the inittab file. type: str - choices: [ absent, present ] + choices: [absent, present] default: present notes: - The changes are persistent across reboots. - You need root rights to read or adjust the inittab with the C(lsitab), C(chitab), C(mkitab) or C(rmitab) commands. - Tested on AIX 7.1. requirements: -- itertools -''' + - itertools +""" -EXAMPLES = ''' +EXAMPLES = r""" # Add service startmyservice to the inittab, directly after service existingservice. 
- name: Add startmyservice to inittab community.general.aix_inittab: @@ -77,7 +82,7 @@ EXAMPLES = ''' command: echo hello insertafter: existingservice state: present - become: yes + become: true # Change inittab entry startmyservice to runlevel "2" and processaction "wait". - name: Change startmyservice to inittab @@ -87,7 +92,7 @@ EXAMPLES = ''' action: wait command: echo hello state: present - become: yes + become: true - name: Remove startmyservice from inittab community.general.aix_inittab: @@ -96,33 +101,17 @@ EXAMPLES = ''' action: wait command: echo hello state: absent - become: yes -''' + become: true +""" -RETURN = ''' +RETURN = r""" name: - description: Name of the adjusted inittab entry - returned: always - type: str - sample: startmyservice -msg: - description: Action done with the inittab entry - returned: changed - type: str - sample: changed inittab entry startmyservice -changed: - description: Whether the inittab changed or not - returned: always - type: bool - sample: true -''' + description: Name of the adjusted C(inittab) entry. 
+ returned: always + type: str + sample: startmyservice +""" -# Import necessary libraries -try: - # python 2 - from itertools import izip -except ImportError: - izip = zip from ansible.module_utils.basic import AnsibleModule @@ -141,7 +130,7 @@ def check_current_entry(module): values = out.split(":") # strip non readable characters as \n values = map(lambda s: s.strip(), values) - existsdict = dict(izip(keys, values)) + existsdict = dict(zip(keys, values)) existsdict.update({'exist': True}) return existsdict @@ -184,6 +173,7 @@ def main(): rmitab = module.get_bin_path('rmitab') chitab = module.get_bin_path('chitab') rc = 0 + err = None # check if the new entry exists current_entry = check_current_entry(module) @@ -192,11 +182,10 @@ def main(): if module.params['state'] == 'present': # create new entry string - new_entry = module.params['name'] + ":" + module.params['runlevel'] + \ - ":" + module.params['action'] + ":" + module.params['command'] + new_entry = f"{module.params['name']}:{module.params['runlevel']}:{module.params['action']}:{module.params['command']}" # If current entry exists or fields are different(if the entry does not - # exists, then the entry wil be created + # exists, then the entry will be created if (not current_entry['exist']) or ( module.params['runlevel'] != current_entry['runlevel'] or module.params['action'] != current_entry['action'] or @@ -209,7 +198,7 @@ def main(): if rc != 0: module.fail_json( msg="could not change inittab", rc=rc, err=err) - result['msg'] = "changed inittab entry" + " " + current_entry['name'] + result['msg'] = f"changed inittab entry {current_entry['name']}" result['changed'] = True # If the entry does not exist create the entry @@ -225,7 +214,7 @@ def main(): if rc != 0: module.fail_json(msg="could not adjust inittab", rc=rc, err=err) - result['msg'] = "add inittab entry" + " " + module.params['name'] + result['msg'] = f"add inittab entry {module.params['name']}" result['changed'] = True elif 
module.params['state'] == 'absent': @@ -237,7 +226,7 @@ def main(): if rc != 0: module.fail_json( msg="could not remove entry from inittab)", rc=rc, err=err) - result['msg'] = "removed inittab entry" + " " + current_entry['name'] + result['msg'] = f"removed inittab entry {current_entry['name']}" result['changed'] = True module.exit_json(**result) diff --git a/plugins/modules/system/aix_lvg.py b/plugins/modules/aix_lvg.py similarity index 74% rename from plugins/modules/system/aix_lvg.py rename to plugins/modules/aix_lvg.py index 37bf71a4bc..aa1a947e2f 100644 --- a/plugins/modules/system/aix_lvg.py +++ b/plugins/modules/aix_lvg.py @@ -1,60 +1,65 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- -# Copyright: (c) 2017, Kairo Araujo -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# Copyright (c) 2017, Kairo Araujo +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = r''' ---- +DOCUMENTATION = r""" author: -- Kairo Araujo (@kairoaraujo) + - Kairo Araujo (@kairoaraujo) module: aix_lvg short_description: Manage LVM volume groups on AIX description: -- This module creates, removes or resize volume groups on AIX LVM. + - This module creates, removes or resize volume groups on AIX LVM. +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: none options: force: description: - - Force volume group creation. + - Force volume group creation. type: bool - default: no + default: false pp_size: description: - - The size of the physical partition in megabytes. + - The size of the physical partition in megabytes. 
type: int pvs: description: - - List of comma-separated devices to use as physical devices in this volume group. - - Required when creating or extending (C(present) state) the volume group. - - If not informed reducing (C(absent) state) the volume group will be removed. + - List of comma-separated devices to use as physical devices in this volume group. + - Required when creating or extending (V(present) state) the volume group. + - If not informed reducing (V(absent) state) the volume group is removed. type: list elements: str state: description: - - Control if the volume group exists and volume group AIX state varyonvg C(varyon) or varyoffvg C(varyoff). + - Control if the volume group exists and volume group AIX state varyonvg V(varyon) or varyoffvg V(varyoff). type: str - choices: [ absent, present, varyoff, varyon ] + choices: [absent, present, varyoff, varyon] default: present vg: description: - - The name of the volume group. + - The name of the volume group. type: str required: true vg_type: description: - - The type of the volume group. + - The type of the volume group. type: str - choices: [ big, normal, scalable ] + choices: [big, normal, scalable] default: normal notes: -- AIX will permit remove VG only if all LV/Filesystems are not busy. -- Module does not modify PP size for already present volume group. -''' + - AIX allows removing VG only if all LV/Filesystems are not busy. + - Module does not modify PP size for already present volume group. +""" -EXAMPLES = r''' +EXAMPLES = r""" - name: Create a volume group datavg community.general.aix_lvg: vg: datavg @@ -78,9 +83,9 @@ EXAMPLES = r''' vg: rootvg pvs: hdisk1 state: absent -''' +""" -RETURN = r''' # ''' +RETURN = r""" # """ from ansible.module_utils.basic import AnsibleModule @@ -110,27 +115,27 @@ def _validate_pv(module, vg, pvs): # Check if pv exists and is free. if pv not in lspv_list.keys(): - module.fail_json(msg="Physical volume '%s' doesn't exist." 
% pv) + module.fail_json(msg=f"Physical volume '{pv}' doesn't exist.") if lspv_list[pv] == 'None': # Disk None, looks free. # Check if PV is not already in use by Oracle ASM. lquerypv_cmd = module.get_bin_path('lquerypv', True) - rc, current_lquerypv, stderr = module.run_command([lquerypv_cmd, "-h", "/dev/%s" % pv, "20", "10"]) + rc, current_lquerypv, stderr = module.run_command([lquerypv_cmd, "-h", f"/dev/{pv}", "20", "10"]) if rc != 0: module.fail_json(msg="Failed executing lquerypv command.", rc=rc, stdout=current_lquerypv, stderr=stderr) if 'ORCLDISK' in current_lquerypv: - module.fail_json("Physical volume '%s' is already used by Oracle ASM." % pv) + module.fail_json(f"Physical volume '{pv}' is already used by Oracle ASM.") - msg = "Physical volume '%s' is ok to be used." % pv + msg = f"Physical volume '{pv}' is ok to be used." return True, msg # Check if PV is already in use for the same vg. elif vg != lspv_list[pv]: - module.fail_json(msg="Physical volume '%s' is in use by another volume group '%s'." % (pv, lspv_list[pv])) + module.fail_json(msg=f"Physical volume '{pv}' is in use by another volume group '{lspv_list[pv]}'.") - msg = "Physical volume '%s' is already used by volume group '%s'." % (pv, lspv_list[pv]) + msg = f"Physical volume '{pv}' is already used by volume group '{lspv_list[pv]}'." return False, msg @@ -146,21 +151,21 @@ def _validate_vg(module, vg): lsvg_cmd = module.get_bin_path('lsvg', True) rc, current_active_vgs, err = module.run_command([lsvg_cmd, "-o"]) if rc != 0: - module.fail_json(msg="Failed executing '%s' command." % lsvg_cmd) + module.fail_json(msg=f"Failed executing '{lsvg_cmd}' command.") rc, current_all_vgs, err = module.run_command([lsvg_cmd]) if rc != 0: - module.fail_json(msg="Failed executing '%s' command." % lsvg_cmd) + module.fail_json(msg=f"Failed executing '{lsvg_cmd}' command.") if vg in current_all_vgs and vg not in current_active_vgs: - msg = "Volume group '%s' is in varyoff state." 
% vg + msg = f"Volume group '{vg}' is in varyoff state." return False, msg if vg in current_active_vgs: - msg = "Volume group '%s' is in varyon state." % vg + msg = f"Volume group '{vg}' is in varyon state." return True, msg - msg = "Volume group '%s' does not exist." % vg + msg = f"Volume group '{vg}' does not exist." return None, msg @@ -200,10 +205,10 @@ def create_extend_vg(module, vg, pvs, pp_size, vg_type, force, vg_validation): rc, output, err = module.run_command([extendvg_cmd, vg] + pvs) if rc != 0: changed = False - msg = "Extending volume group '%s' has failed." % vg + msg = f"Extending volume group '{vg}' has failed." return changed, msg - msg = "Volume group '%s' extended." % vg + msg = f"Volume group '{vg}' extended." return changed, msg elif vg_state is None: @@ -216,10 +221,10 @@ def create_extend_vg(module, vg, pvs, pp_size, vg_type, force, vg_validation): rc, output, err = module.run_command([mkvg_cmd, vg_opt[vg_type], pp_size, force_opt[force], "-y", vg] + pvs) if rc != 0: changed = False - msg = "Creating volume group '%s' failed." % vg + msg = f"Creating volume group '{vg}' failed." return changed, msg - msg = "Volume group '%s' created." % vg + msg = f"Volume group '{vg}' created." return changed, msg @@ -241,16 +246,16 @@ def reduce_vg(module, vg, pvs, vg_validation): lsvg_cmd = module.get_bin_path('lsvg', True) rc, current_pvs, err = module.run_command([lsvg_cmd, "-p", vg]) if rc != 0: - module.fail_json(msg="Failing to execute '%s' command." % lsvg_cmd) + module.fail_json(msg=f"Failing to execute '{lsvg_cmd}' command.") pvs_to_remove = [] for line in current_pvs.splitlines()[2:]: pvs_to_remove.append(line.split()[0]) - reduce_msg = "Volume group '%s' removed." % vg + reduce_msg = f"Volume group '{vg}' removed." else: pvs_to_remove = pvs - reduce_msg = ("Physical volume(s) '%s' removed from Volume group '%s'." % (' '.join(pvs_to_remove), vg)) + reduce_msg = f"Physical volume(s) '{' '.join(pvs_to_remove)}' removed from Volume group '{vg}'." 
# Reduce volume group. if len(pvs_to_remove) <= 0: @@ -265,7 +270,7 @@ def reduce_vg(module, vg, pvs, vg_validation): reducevg_cmd = module.get_bin_path('reducevg', True) rc, stdout, stderr = module.run_command([reducevg_cmd, "-df", vg] + pvs_to_remove) if rc != 0: - module.fail_json(msg="Unable to remove '%s'." % vg, rc=rc, stdout=stdout, stderr=stderr) + module.fail_json(msg=f"Unable to remove '{vg}'.", rc=rc, stdout=stdout, stderr=stderr) msg = reduce_msg return changed, msg @@ -290,7 +295,7 @@ def state_vg(module, vg, state, vg_validation): if rc != 0: module.fail_json(msg="Command 'varyonvg' failed.", rc=rc, err=err) - msg = "Varyon volume group %s completed." % vg + msg = f"Varyon volume group {vg} completed." return changed, msg elif state == 'varyoff': @@ -307,7 +312,7 @@ def state_vg(module, vg, state, vg_validation): if rc != 0: module.fail_json(msg="Command 'varyoffvg' failed.", rc=rc, stdout=varyonvg_out, stderr=stderr) - msg = "Varyoff volume group %s completed." % vg + msg = f"Varyoff volume group {vg} completed." 
return changed, msg @@ -334,7 +339,7 @@ def main(): if pp_size is None: pp_size = '' else: - pp_size = "-s %s" % pp_size + pp_size = f"-s {pp_size}" vg_validation = _validate_vg(module, vg) diff --git a/plugins/modules/system/aix_lvol.py b/plugins/modules/aix_lvol.py similarity index 65% rename from plugins/modules/system/aix_lvol.py rename to plugins/modules/aix_lvol.py index 02b4f06c5b..5f02a91f63 100644 --- a/plugins/modules/system/aix_lvol.py +++ b/plugins/modules/aix_lvol.py @@ -1,74 +1,81 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- -# Copyright: (c) 2016, Alain Dejoux -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# Copyright (c) 2016, Alain Dejoux +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = r''' ---- +DOCUMENTATION = r""" author: - - Alain Dejoux (@adejoux) + - Alain Dejoux (@adejoux) module: aix_lvol short_description: Configure AIX LVM logical volumes description: - This module creates, removes or resizes AIX logical volumes. Inspired by lvol module. +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: none options: vg: description: - - The volume group this logical volume is part of. + - The volume group this logical volume is part of. type: str required: true lv: description: - - The name of the logical volume. + - The name of the logical volume. type: str required: true lv_type: description: - - The type of the logical volume. + - The type of the logical volume. type: str default: jfs2 size: description: - - The size of the logical volume with one of the [MGT] units. + - The size of the logical volume with one of the [MGT] units. 
type: str copies: description: - - The number of copies of the logical volume. - - Maximum copies are 3. + - The number of copies of the logical volume. + - Maximum copies are 3. type: int default: 1 policy: description: - - Sets the interphysical volume allocation policy. - - C(maximum) allocates logical partitions across the maximum number of physical volumes. - - C(minimum) allocates logical partitions across the minimum number of physical volumes. + - Sets the interphysical volume allocation policy. + - V(maximum) allocates logical partitions across the maximum number of physical volumes. + - V(minimum) allocates logical partitions across the minimum number of physical volumes. type: str - choices: [ maximum, minimum ] + choices: [maximum, minimum] default: maximum state: description: - - Control if the logical volume exists. If C(present) and the - volume does not already exist then the C(size) option is required. + - Control if the logical volume exists. If V(present) and the volume does not already exist then the O(size) option + is required. type: str - choices: [ absent, present ] + choices: [absent, present] default: present opts: description: - - Free-form options to be passed to the mklv command. + - Free-form options to be passed to the mklv command. type: str + default: '' pvs: description: - - A list of physical volumes e.g. C(hdisk1,hdisk2). + - A list of physical volumes, for example V(hdisk1,hdisk2). type: list elements: str -''' + default: [] +""" -EXAMPLES = r''' +EXAMPLES = r""" - name: Create a logical volume of 512M community.general.aix_lvol: vg: testvg @@ -80,7 +87,7 @@ EXAMPLES = r''' vg: testvg lv: test2lv size: 512M - pvs: [ hdisk1, hdisk2 ] + pvs: [hdisk1, hdisk2] - name: Create a logical volume of 512M mirrored community.general.aix_lvol: @@ -114,15 +121,15 @@ EXAMPLES = r''' vg: testvg lv: testlv state: absent -''' +""" -RETURN = r''' +RETURN = r""" msg: type: str description: A friendly message describing the task result. 
returned: always sample: Logical volume testlv created. -''' +""" import re @@ -230,8 +237,6 @@ def main(): state = module.params['state'] pvs = module.params['pvs'] - pv_list = ' '.join(pvs) - if policy == 'maximum': lv_policy = 'x' else: @@ -239,22 +244,22 @@ def main(): # Add echo command when running in check-mode if module.check_mode: - test_opt = 'echo ' + test_opt = [module.get_bin_path("echo", required=True)] else: - test_opt = '' + test_opt = [] # check if system commands are available lsvg_cmd = module.get_bin_path("lsvg", required=True) lslv_cmd = module.get_bin_path("lslv", required=True) # Get information on volume group requested - rc, vg_info, err = module.run_command("%s %s" % (lsvg_cmd, vg)) + rc, vg_info, err = module.run_command([lsvg_cmd, vg]) if rc != 0: if state == 'absent': - module.exit_json(changed=False, msg="Volume group %s does not exist." % vg) + module.exit_json(changed=False, msg=f"Volume group {vg} does not exist.") else: - module.fail_json(msg="Volume group %s does not exist." % vg, rc=rc, out=vg_info, err=err) + module.fail_json(msg=f"Volume group {vg} does not exist.", rc=rc, out=vg_info, err=err) this_vg = parse_vg(vg_info) @@ -263,12 +268,11 @@ def main(): lv_size = round_ppsize(convert_size(module, size), base=this_vg['pp_size']) # Get information on logical volume requested - rc, lv_info, err = module.run_command( - "%s %s" % (lslv_cmd, lv)) + rc, lv_info, err = module.run_command([lslv_cmd, lv]) if rc != 0: if state == 'absent': - module.exit_json(changed=False, msg="Logical Volume %s does not exist." % lv) + module.exit_json(changed=False, msg=f"Logical Volume {lv} does not exist.") changed = False @@ -281,56 +285,56 @@ def main(): if this_lv is None: if state == 'present': if lv_size > this_vg['free']: - module.fail_json(msg="Not enough free space in volume group %s: %s MB free." 
% (this_vg['name'], this_vg['free'])) + module.fail_json(msg=f"Not enough free space in volume group {this_vg['name']}: {this_vg['free']} MB free.") # create LV mklv_cmd = module.get_bin_path("mklv", required=True) - cmd = "%s %s -t %s -y %s -c %s -e %s %s %s %sM %s" % (test_opt, mklv_cmd, lv_type, lv, copies, lv_policy, opts, vg, lv_size, pv_list) + cmd = test_opt + [mklv_cmd, "-t", lv_type, "-y", lv, "-c", copies, "-e", lv_policy, opts, vg, f"{lv_size}M"] + pvs rc, out, err = module.run_command(cmd) if rc == 0: - module.exit_json(changed=True, msg="Logical volume %s created." % lv) + module.exit_json(changed=True, msg=f"Logical volume {lv} created.") else: - module.fail_json(msg="Creating logical volume %s failed." % lv, rc=rc, out=out, err=err) + module.fail_json(msg=f"Creating logical volume {lv} failed.", rc=rc, out=out, err=err) else: if state == 'absent': # remove LV rmlv_cmd = module.get_bin_path("rmlv", required=True) - rc, out, err = module.run_command("%s %s -f %s" % (test_opt, rmlv_cmd, this_lv['name'])) + rc, out, err = module.run_command(test_opt + [rmlv_cmd, "-f", this_lv['name']]) if rc == 0: - module.exit_json(changed=True, msg="Logical volume %s deleted." % lv) + module.exit_json(changed=True, msg=f"Logical volume {lv} deleted.") else: - module.fail_json(msg="Failed to remove logical volume %s." % lv, rc=rc, out=out, err=err) + module.fail_json(msg=f"Failed to remove logical volume {lv}.", rc=rc, out=out, err=err) else: if this_lv['policy'] != policy: # change lv allocation policy chlv_cmd = module.get_bin_path("chlv", required=True) - rc, out, err = module.run_command("%s %s -e %s %s" % (test_opt, chlv_cmd, lv_policy, this_lv['name'])) + rc, out, err = module.run_command(test_opt + [chlv_cmd, "-e", lv_policy, this_lv['name']]) if rc == 0: - module.exit_json(changed=True, msg="Logical volume %s policy changed: %s." 
% (lv, policy)) + module.exit_json(changed=True, msg=f"Logical volume {lv} policy changed: {policy}.") else: - module.fail_json(msg="Failed to change logical volume %s policy." % lv, rc=rc, out=out, err=err) + module.fail_json(msg=f"Failed to change logical volume {lv} policy.", rc=rc, out=out, err=err) if vg != this_lv['vg']: - module.fail_json(msg="Logical volume %s already exist in volume group %s" % (lv, this_lv['vg'])) + module.fail_json(msg=f"Logical volume {lv} already exist in volume group {this_lv['vg']}") # from here the last remaining action is to resize it, if no size parameter is passed we do nothing. if not size: - module.exit_json(changed=False, msg="Logical volume %s already exist." % (lv)) + module.exit_json(changed=False, msg=f"Logical volume {lv} already exist.") # resize LV based on absolute values if int(lv_size) > this_lv['size']: extendlv_cmd = module.get_bin_path("extendlv", required=True) - cmd = "%s %s %s %sM" % (test_opt, extendlv_cmd, lv, lv_size - this_lv['size']) + cmd = test_opt + [extendlv_cmd, lv, f"{lv_size - this_lv['size']}M"] rc, out, err = module.run_command(cmd) if rc == 0: - module.exit_json(changed=True, msg="Logical volume %s size extended to %sMB." % (lv, lv_size)) + module.exit_json(changed=True, msg=f"Logical volume {lv} size extended to {lv_size}MB.") else: - module.fail_json(msg="Unable to resize %s to %sMB." % (lv, lv_size), rc=rc, out=out, err=err) + module.fail_json(msg=f"Unable to resize {lv} to {lv_size}MB.", rc=rc, out=out, err=err) elif lv_size < this_lv['size']: - module.fail_json(msg="No shrinking of Logical Volume %s permitted. Current size: %s MB" % (lv, this_lv['size'])) + module.fail_json(msg=f"No shrinking of Logical Volume {lv} permitted. Current size: {this_lv['size']} MB") else: - module.exit_json(changed=False, msg="Logical volume %s size is already %sMB." 
% (lv, lv_size)) + module.exit_json(changed=False, msg=f"Logical volume {lv} size is already {lv_size}MB.") if __name__ == '__main__': diff --git a/plugins/modules/monitoring/alerta_customer.py b/plugins/modules/alerta_customer.py similarity index 79% rename from plugins/modules/monitoring/alerta_customer.py rename to plugins/modules/alerta_customer.py index 27b0abe1a9..0fd997a1ae 100644 --- a/plugins/modules/monitoring/alerta_customer.py +++ b/plugins/modules/alerta_customer.py @@ -1,14 +1,12 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- -# Copyright: (c) 2022, Christian Wollinger <@cwollinger> -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# Copyright (c) 2022, Christian Wollinger <@cwollinger> +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: alerta_customer short_description: Manage customers in Alerta version_added: 4.8.0 @@ -17,8 +15,15 @@ description: author: Christian Wollinger (@cwollinger) seealso: - name: API documentation - description: Documentation for Alerta API + description: Documentation for Alerta API. link: https://docs.alerta.io/api/reference.html#customers +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: none options: customer: description: @@ -50,13 +55,13 @@ options: state: description: - Whether the customer should exist or not. - - Both I(customer) and I(match) identify a customer that should be added or removed. + - Both O(customer) and O(match) identify a customer that should be added or removed. 
type: str - choices: [ absent, present ] + choices: [absent, present] default: present -''' +""" -EXAMPLES = """ +EXAMPLES = r""" - name: Create customer community.general.alerta_customer: alerta_url: https://alerta.example.com @@ -75,7 +80,7 @@ EXAMPLES = """ state: absent """ -RETURN = """ +RETURN = r""" msg: description: - Success or failure message. @@ -104,7 +109,7 @@ class AlertaInterface(object): self.headers = {"Content-Type": "application/json"} if module.params.get('api_key', None): - self.headers["Authorization"] = "Key %s" % module.params['api_key'] + self.headers["Authorization"] = f"Key {module.params['api_key']}" else: self.headers["Authorization"] = basic_auth_header(module.params['api_username'], module.params['api_password']) @@ -113,28 +118,28 @@ class AlertaInterface(object): status_code = info["status"] if status_code == 401: - self.module.fail_json(failed=True, response=info, msg="Unauthorized to request '%s' on '%s'" % (method, url)) + self.module.fail_json(failed=True, response=info, msg=f"Unauthorized to request '{method}' on '{url}'") elif status_code == 403: - self.module.fail_json(failed=True, response=info, msg="Permission Denied for '%s' on '%s'" % (method, url)) + self.module.fail_json(failed=True, response=info, msg=f"Permission Denied for '{method}' on '{url}'") elif status_code == 404: - self.module.fail_json(failed=True, response=info, msg="Not found for request '%s' on '%s'" % (method, url)) + self.module.fail_json(failed=True, response=info, msg=f"Not found for request '{method}' on '{url}'") elif status_code in (200, 201): return self.module.from_json(response.read()) - self.module.fail_json(failed=True, response=info, msg="Alerta API error with HTTP %d for %s" % (status_code, url)) + self.module.fail_json(failed=True, response=info, msg=f"Alerta API error with HTTP {status_code} for {url}") def get_customers(self): - url = "%s/api/customers" % self.alerta_url + url = f"{self.alerta_url}/api/customers" response = 
self.send_request(url) pages = response["pages"] if pages > 1: for page in range(2, pages + 1): - page_url = url + '?page=' + str(page) + page_url = f"{url}?page={page}" new_results = self.send_request(page_url) response.update(new_results) return response def create_customer(self): - url = "%s/api/customer" % self.alerta_url + url = f"{self.alerta_url}/api/customer" payload = { 'customer': self.customer, @@ -146,7 +151,7 @@ class AlertaInterface(object): return response def delete_customer(self, id): - url = "%s/api/customer/%s" % (self.alerta_url, id) + url = f"{self.alerta_url}/api/customer/{id}" response = self.send_request(url, None, 'DELETE') return response @@ -179,20 +184,20 @@ def main(): if alerta_iface.state == 'present': response = alerta_iface.get_customers() if alerta_iface.find_customer_id(response): - module.exit_json(changed=False, response=response, msg="Customer %s already exists" % alerta_iface.customer) + module.exit_json(changed=False, response=response, msg=f"Customer {alerta_iface.customer} already exists") else: if not module.check_mode: response = alerta_iface.create_customer() - module.exit_json(changed=True, response=response, msg="Customer %s created" % alerta_iface.customer) + module.exit_json(changed=True, response=response, msg=f"Customer {alerta_iface.customer} created") else: response = alerta_iface.get_customers() id = alerta_iface.find_customer_id(response) if id: if not module.check_mode: alerta_iface.delete_customer(id) - module.exit_json(changed=True, response=response, msg="Customer %s with id %s deleted" % (alerta_iface.customer, id)) + module.exit_json(changed=True, response=response, msg=f"Customer {alerta_iface.customer} with id {id} deleted") else: - module.exit_json(changed=False, response=response, msg="Customer %s does not exists" % alerta_iface.customer) + module.exit_json(changed=False, response=response, msg=f"Customer {alerta_iface.customer} does not exists") if __name__ == "__main__": diff --git 
a/plugins/modules/cloud/alicloud/ali_instance.py b/plugins/modules/ali_instance.py similarity index 53% rename from plugins/modules/cloud/alicloud/ali_instance.py rename to plugins/modules/ali_instance.py index 09754ccdba..37b2f067e0 100644 --- a/plugins/modules/cloud/alicloud/ali_instance.py +++ b/plugins/modules/ali_instance.py @@ -1,8 +1,8 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- # Copyright (c) 2017-present Alibaba Group Holding Limited. He Guimin -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later # # This file is part of Ansible # @@ -19,241 +19,241 @@ # You should have received a copy of the GNU General Public License # along with Ansible. If not, see http://www.gnu.org/licenses/. -from __future__ import (absolute_import, division, print_function) +from __future__ import annotations -__metaclass__ = type -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: ali_instance -short_description: Create, Start, Stop, Restart or Terminate an Instance in ECS. Add or Remove Instance to/from a Security Group. +short_description: Create, Start, Stop, Restart or Terminate an Instance in ECS; Add or Remove Instance to/from a Security + Group description: - - Create, start, stop, restart, modify or terminate ecs instances. - - Add or remove ecs instances to/from security group. + - Create, start, stop, restart, modify or terminate ECS instances. + - Add or remove ecs instances to/from security group. +attributes: + check_mode: + support: none + diff_mode: + support: none options: - state: - description: - - The state of the instance after operating. - default: 'present' - choices: ['present', 'running', 'stopped', 'restarted', 'absent'] - type: str - availability_zone: - description: - - Aliyun availability zone ID in which to launch the instance. 
- If it is not specified, it will be allocated by system automatically. - aliases: ['alicloud_zone', 'zone_id'] - type: str - image_id: - description: - - Image ID used to launch instances. Required when C(state=present) and creating new ECS instances. - aliases: ['image'] - type: str - instance_type: - description: - - Instance type used to launch instances. Required when C(state=present) and creating new ECS instances. - aliases: ['type'] - type: str - security_groups: - description: - - A list of security group IDs. - aliases: ['group_ids'] - type: list - elements: str - vswitch_id: - description: - - The subnet ID in which to launch the instances (VPC). - aliases: ['subnet_id'] - type: str - instance_name: - description: - - The name of ECS instance, which is a string of 2 to 128 Chinese or English characters. It must begin with an - uppercase/lowercase letter or a Chinese character and can contain numerals, ".", "_" or "-". - It cannot begin with http:// or https://. - aliases: ['name'] - type: str + state: description: - description: - - The description of ECS instance, which is a string of 2 to 256 characters. It cannot begin with http:// or https://. - type: str - internet_charge_type: - description: - - Internet charge type of ECS instance. - default: 'PayByBandwidth' - choices: ['PayByBandwidth', 'PayByTraffic'] - type: str - max_bandwidth_in: - description: - - Maximum incoming bandwidth from the public network, measured in Mbps (Megabits per second). - default: 200 - type: int - max_bandwidth_out: - description: - - Maximum outgoing bandwidth to the public network, measured in Mbps (Megabits per second). - Required when C(allocate_public_ip=True). Ignored when C(allocate_public_ip=False). - default: 0 - type: int - host_name: - description: - - Instance host name. Ordered hostname is not supported. - type: str - unique_suffix: - description: - - Specifies whether to add sequential suffixes to the host_name. 
- The sequential suffix ranges from 001 to 999. - default: False - type: bool - version_added: '0.2.0' - password: - description: - - The password to login instance. After rebooting instances, modified password will take effect. - type: str - system_disk_category: - description: - - Category of the system disk. - default: 'cloud_efficiency' - choices: ['cloud_efficiency', 'cloud_ssd'] - type: str - system_disk_size: - description: - - Size of the system disk, in GB. The valid values are 40~500. - default: 40 - type: int - system_disk_name: - description: - - Name of the system disk. - type: str - system_disk_description: - description: - - Description of the system disk. - type: str - count: - description: - - The number of the new instance. An integer value which indicates how many instances that match I(count_tag) - should be running. Instances are either created or terminated based on this value. - default: 1 - type: int - count_tag: - description: - - I(count) determines how many instances based on a specific tag criteria should be present. - This can be expressed in multiple ways and is shown in the EXAMPLES section. - The specified count_tag must already exist or be passed in as the I(tags) option. - If it is not specified, it will be replaced by I(instance_name). - type: str - allocate_public_ip: - description: - - Whether allocate a public ip for the new instance. - default: False - aliases: [ 'assign_public_ip' ] - type: bool - instance_charge_type: - description: - - The charge type of the instance. - choices: ['PrePaid', 'PostPaid'] - default: 'PostPaid' - type: str - period: - description: - - The charge duration of the instance, in month. Required when C(instance_charge_type=PrePaid). - - The valid value are [1-9, 12, 24, 36]. - default: 1 - type: int - auto_renew: - description: - - Whether automate renew the charge of the instance. 
- type: bool - default: False - auto_renew_period: - description: - - The duration of the automatic renew the charge of the instance. Required when C(auto_renew=True). - choices: [1, 2, 3, 6, 12] - type: int - instance_ids: - description: - - A list of instance ids. It is required when need to operate existing instances. - If it is specified, I(count) will lose efficacy. - type: list - elements: str - force: - description: - - Whether the current operation needs to be execute forcibly. - default: False - type: bool - tags: - description: - - A hash/dictionaries of instance tags, to add to the new instance or for starting/stopping instance by tag. C({"key":"value"}) - aliases: ["instance_tags"] - type: dict - version_added: '0.2.0' - purge_tags: - description: - - Delete any tags not specified in the task that are on the instance. - If True, it means you have to specify all the desired tags on each task affecting an instance. - default: False - type: bool - version_added: '0.2.0' - key_name: - description: - - The name of key pair which is used to access ECS instance in SSH. - required: false - type: str - aliases: ['keypair'] - user_data: - description: - - User-defined data to customize the startup behaviors of an ECS instance and to pass data into an ECS instance. - It only will take effect when launching the new ECS instances. - required: false - type: str - ram_role_name: - description: - - The name of the instance RAM role. - type: str - version_added: '0.2.0' - spot_price_limit: - description: - - The maximum hourly price for the preemptible instance. This parameter supports a maximum of three decimal - places and takes effect when the SpotStrategy parameter is set to SpotWithPriceLimit. - type: float - version_added: '0.2.0' - spot_strategy: - description: - - The bidding mode of the pay-as-you-go instance. This parameter is valid when InstanceChargeType is set to PostPaid. 
- choices: ['NoSpot', 'SpotWithPriceLimit', 'SpotAsPriceGo'] - default: 'NoSpot' - type: str - version_added: '0.2.0' - period_unit: - description: - - The duration unit that you will buy the resource. It is valid when C(instance_charge_type=PrePaid) - choices: ['Month', 'Week'] - default: 'Month' - type: str - version_added: '0.2.0' - dry_run: - description: - - Specifies whether to send a dry-run request. - - If I(dry_run=True), Only a dry-run request is sent and no instance is created. The system checks whether the - required parameters are set, and validates the request format, service permissions, and available ECS instances. - If the validation fails, the corresponding error code is returned. If the validation succeeds, the DryRunOperation error code is returned. - - If I(dry_run=False), A request is sent. If the validation succeeds, the instance is created. - default: False - type: bool - version_added: '0.2.0' - include_data_disks: - description: - - Whether to change instance disks charge type when changing instance charge type. - default: True - type: bool - version_added: '0.2.0' + - The state of the instance after operating. + default: 'present' + choices: ['present', 'running', 'stopped', 'restarted', 'absent'] + type: str + availability_zone: + description: + - Aliyun availability zone ID in which to launch the instance. If it is not specified, it is allocated by system automatically. + aliases: ['alicloud_zone', 'zone_id'] + type: str + image_id: + description: + - Image ID used to launch instances. Required when O(state=present) and creating new ECS instances. + aliases: ['image'] + type: str + instance_type: + description: + - Instance type used to launch instances. Required when O(state=present) and creating new ECS instances. + aliases: ['type'] + type: str + security_groups: + description: + - A list of security group IDs. 
+ aliases: ['group_ids'] + type: list + elements: str + vswitch_id: + description: + - The subnet ID in which to launch the instances (VPC). + aliases: ['subnet_id'] + type: str + instance_name: + description: + - The name of ECS instance, which is a string of 2 to 128 Chinese or English characters. It must begin with an uppercase/lowercase + letter or a Chinese character and can contain numerals, V(.), V(_) or V(-). It cannot begin with V(http://) or V(https://). + aliases: ['name'] + type: str + description: + description: + - The description of ECS instance, which is a string of 2 to 256 characters. It cannot begin with V(http://) or V(https://). + type: str + internet_charge_type: + description: + - Internet charge type of ECS instance. + default: 'PayByBandwidth' + choices: ['PayByBandwidth', 'PayByTraffic'] + type: str + max_bandwidth_in: + description: + - Maximum incoming bandwidth from the public network, measured in Mbps (Megabits per second). + default: 200 + type: int + max_bandwidth_out: + description: + - Maximum outgoing bandwidth to the public network, measured in Mbps (Megabits per second). Required when O(allocate_public_ip=true). + Ignored when O(allocate_public_ip=false). + default: 0 + type: int + host_name: + description: + - Instance host name. Ordered hostname is not supported. + type: str + unique_suffix: + description: + - Specifies whether to add sequential suffixes to the host_name. The sequential suffix ranges from 001 to 999. + default: false + type: bool + version_added: '0.2.0' + password: + description: + - The password to login instance. After rebooting instances, modified password is effective. + type: str + system_disk_category: + description: + - Category of the system disk. + default: 'cloud_efficiency' + choices: ['cloud_efficiency', 'cloud_ssd'] + type: str + system_disk_size: + description: + - Size of the system disk, in GB. The valid values are V(40)~V(500). 
+ default: 40 + type: int + system_disk_name: + description: + - Name of the system disk. + type: str + system_disk_description: + description: + - Description of the system disk. + type: str + count: + description: + - The number of the new instance. An integer value which indicates how many instances that match O(count_tag) should + be running. Instances are either created or terminated based on this value. + default: 1 + type: int + count_tag: + description: + - O(count) determines how many instances based on a specific tag criteria should be present. This can be expressed in + multiple ways and is shown in the EXAMPLES section. The specified count_tag must already exist or be passed in as + the O(tags) option. If it is not specified, it is replaced by O(instance_name). + type: str + allocate_public_ip: + description: + - Whether allocate a public IP for the new instance. + default: false + aliases: ['assign_public_ip'] + type: bool + instance_charge_type: + description: + - The charge type of the instance. + choices: ['PrePaid', 'PostPaid'] + default: 'PostPaid' + type: str + period: + description: + - The charge duration of the instance, in months. Required when O(instance_charge_type=PrePaid). + - The valid value are [V(1-9), V(12), V(24), V(36)]. + default: 1 + type: int + auto_renew: + description: + - Whether automate renew the charge of the instance. + type: bool + default: false + auto_renew_period: + description: + - The duration of the automatic renew the charge of the instance. Required when O(auto_renew=true). + choices: [1, 2, 3, 6, 12] + type: int + instance_ids: + description: + - A list of instance IDs. It is required when need to operate existing instances. If it is specified, O(count) is ignored. + type: list + elements: str + force: + description: + - Whether the current operation needs to be execute forcibly. 
+ default: false + type: bool + tags: + description: + - A hash/dictionaries of instance tags, to add to the new instance or for starting/stopping instance by tag. V({"key":"value"}). + aliases: ["instance_tags"] + type: dict + version_added: '0.2.0' + purge_tags: + description: + - Delete any tags not specified in the task that are on the instance. If V(true), it means you have to specify all the + desired tags on each task affecting an instance. + default: false + type: bool + version_added: '0.2.0' + key_name: + description: + - The name of key pair which is used to access ECS instance in SSH. + required: false + type: str + aliases: ['keypair'] + user_data: + description: + - User-defined data to customize the startup behaviors of an ECS instance and to pass data into an ECS instance. It + only takes effect when launching the new ECS instances. + required: false + type: str + ram_role_name: + description: + - The name of the instance RAM role. + type: str + version_added: '0.2.0' + spot_price_limit: + description: + - The maximum hourly price for the preemptible instance. This parameter supports a maximum of three decimal places and + takes effect when the SpotStrategy parameter is set to SpotWithPriceLimit. + type: float + version_added: '0.2.0' + spot_strategy: + description: + - The bidding mode of the pay-as-you-go instance. This parameter is valid when O(instance_charge_type=PostPaid). + choices: ['NoSpot', 'SpotWithPriceLimit', 'SpotAsPriceGo'] + default: 'NoSpot' + type: str + version_added: '0.2.0' + period_unit: + description: + - The duration unit that you are buying the resource. It is valid when O(instance_charge_type=PrePaid). + choices: ['Month', 'Week'] + default: 'Month' + type: str + version_added: '0.2.0' + dry_run: + description: + - Specifies whether to send a dry-run request. + - If O(dry_run=true), Only a dry-run request is sent and no instance is created. 
The system checks whether the required + parameters are set, and validates the request format, service permissions, and available ECS instances. If the validation + fails, the corresponding error code is returned. If the validation succeeds, the DryRunOperation error code is returned. + - If O(dry_run=false), a request is sent. If the validation succeeds, the instance is created. + default: false + type: bool + version_added: '0.2.0' + include_data_disks: + description: + - Whether to change instance disks charge type when changing instance charge type. + default: true + type: bool + version_added: '0.2.0' author: - - "He Guimin (@xiaozhu36)" + - "He Guimin (@xiaozhu36)" requirements: - - "python >= 3.6" - - "footmark >= 1.19.0" + - "Python >= 3.6" + - "footmark >= 1.19.0" extends_documentation_fragment: - - community.general.alicloud -''' + - community.general.alicloud + - community.general.attributes +""" -EXAMPLES = ''' +EXAMPLES = r""" # basic provisioning example vpc network - name: Basic provisioning example hosts: localhost @@ -264,7 +264,7 @@ EXAMPLES = ''' image: ubuntu1404_64_40G_cloudinit_20160727.raw instance_type: ecs.n4.small vswitch_id: vsw-abcd1234 - assign_public_ip: True + assign_public_ip: true max_bandwidth_out: 10 host_name: myhost password: mypassword @@ -274,7 +274,7 @@ EXAMPLES = ''' security_groups: ["sg-f2rwnfh23r"] instance_ids: ["i-abcd12346", "i-abcd12345"] - force: True + force: true tasks: - name: Launch ECS instance in VPC network @@ -291,7 +291,7 @@ EXAMPLES = ''' internet_charge_type: '{{ internet_charge_type }}' max_bandwidth_out: '{{ max_bandwidth_out }}' tags: - Name: created_one + Name: created_one host_name: '{{ host_name }}' password: '{{ password }}' @@ -309,11 +309,11 @@ EXAMPLES = ''' internet_charge_type: '{{ internet_charge_type }}' max_bandwidth_out: '{{ max_bandwidth_out }}' tags: - Name: created_one - Version: 0.1 + Name: created_one + Version: 0.1 count: 2 count_tag: - Name: created_one + Name: created_one host_name: 
'{{ host_name }}' password: '{{ password }}' @@ -341,293 +341,285 @@ EXAMPLES = ''' alicloud_region: '{{ alicloud_region }}' instance_ids: '{{ instance_ids }}' security_groups: '{{ security_groups }}' -''' +""" -RETURN = ''' +RETURN = r""" instances: - description: List of ECS instances - returned: always - type: complex - contains: - availability_zone: - description: The availability zone of the instance is in. - returned: always - type: str - sample: cn-beijing-a - block_device_mappings: - description: Any block device mapping entries for the instance. - returned: always - type: complex - contains: - device_name: - description: The device name exposed to the instance (for example, /dev/xvda). - returned: always - type: str - sample: /dev/xvda - attach_time: - description: The time stamp when the attachment initiated. - returned: always - type: str - sample: "2018-06-25T04:08:26Z" - delete_on_termination: - description: Indicates whether the volume is deleted on instance termination. - returned: always - type: bool - sample: true - status: - description: The attachment state. - returned: always - type: str - sample: in_use - volume_id: - description: The ID of the cloud disk. - returned: always - type: str - sample: d-2zei53pjsi117y6gf9t6 - cpu: - description: The CPU core count of the instance. - returned: always - type: int - sample: 4 - creation_time: - description: The time the instance was created. - returned: always - type: str - sample: "2018-06-25T04:08Z" - description: - description: The instance description. - returned: always - type: str - sample: "my ansible instance" - eip: - description: The attribution of EIP associated with the instance. - returned: always - type: complex - contains: - allocation_id: - description: The ID of the EIP. - returned: always - type: str - sample: eip-12345 - internet_charge_type: - description: The internet charge type of the EIP. 
- returned: always - type: str - sample: "paybybandwidth" - ip_address: - description: EIP address. - returned: always - type: str - sample: 42.10.2.2 - expired_time: - description: The time the instance will expire. - returned: always - type: str - sample: "2099-12-31T15:59Z" - gpu: - description: The attribution of instance GPU. - returned: always - type: complex - contains: - amount: - description: The count of the GPU. - returned: always - type: int - sample: 0 - spec: - description: The specification of the GPU. - returned: always - type: str - sample: "" - host_name: - description: The host name of the instance. - returned: always - type: str - sample: iZ2zewaoZ - id: - description: Alias of instance_id. - returned: always - type: str - sample: i-abc12345 - instance_id: - description: ECS instance resource ID. - returned: always - type: str - sample: i-abc12345 - image_id: - description: The ID of the image used to launch the instance. - returned: always - type: str - sample: m-0011223344 - inner_ip_address: - description: The inner IPv4 address of the classic instance. - returned: always - type: str - sample: 10.0.0.2 - instance_charge_type: - description: The instance charge type. - returned: always - type: str - sample: PostPaid - instance_name: - description: The name of the instance. - returned: always - type: str - sample: my-ecs - instance_type: - description: The instance type of the running instance. - returned: always - type: str - sample: ecs.sn1ne.xlarge - instance_type_family: - description: The instance type family of the instance belongs. - returned: always - type: str - sample: ecs.sn1ne - internet_charge_type: - description: The billing method of the network bandwidth. - returned: always - type: str - sample: PayByBandwidth - internet_max_bandwidth_in: - description: Maximum incoming bandwidth from the internet network. 
- returned: always - type: int - sample: 200 - internet_max_bandwidth_out: - description: Maximum incoming bandwidth from the internet network. - returned: always - type: int - sample: 20 - io_optimized: - description: Indicates whether the instance is optimized for EBS I/O. - returned: always - type: bool - sample: false - memory: - description: Memory size of the instance. - returned: always - type: int - sample: 8192 - network_interfaces: - description: One or more network interfaces for the instance. - returned: always - type: complex - contains: - mac_address: - description: The MAC address. - returned: always - type: str - sample: "00:11:22:33:44:55" - network_interface_id: - description: The ID of the network interface. - returned: always - type: str - sample: eni-01234567 - primary_ip_address: - description: The primary IPv4 address of the network interface within the vswitch. - returned: always - type: str - sample: 10.0.0.1 - osname: - description: The operation system name of the instance owned. - returned: always - type: str - sample: CentOS - ostype: - description: The operation system type of the instance owned. - returned: always - type: str - sample: linux - private_ip_address: - description: The IPv4 address of the network interface within the subnet. - returned: always - type: str - sample: 10.0.0.1 - public_ip_address: - description: The public IPv4 address assigned to the instance or eip address - returned: always - type: str - sample: 43.0.0.1 - resource_group_id: - description: The id of the resource group to which the instance belongs. - returned: always - type: str - sample: my-ecs-group - security_groups: - description: One or more security groups for the instance. - returned: always - type: list - elements: dict - contains: - group_id: - description: The ID of the security group. - returned: always - type: str - sample: sg-0123456 - group_name: - description: The name of the security group. 
- returned: always - type: str - sample: my-security-group - status: - description: The current status of the instance. - returned: always - type: str - sample: running - tags: - description: Any tags assigned to the instance. - returned: always - type: dict - sample: - user_data: - description: User-defined data. - returned: always - type: dict - sample: - vswitch_id: - description: The ID of the vswitch in which the instance is running. - returned: always - type: str - sample: vsw-dew00abcdef - vpc_id: - description: The ID of the VPC the instance is in. - returned: always - type: str - sample: vpc-0011223344 - spot_price_limit: - description: - - The maximum hourly price for the preemptible instance. - returned: always - type: float - sample: 0.97 - spot_strategy: - description: - - The bidding mode of the pay-as-you-go instance. + description: List of ECS instances. + returned: always + type: complex + contains: + availability_zone: + description: The availability zone of the instance is in. + returned: always + type: str + sample: cn-beijing-a + block_device_mappings: + description: Any block device mapping entries for the instance. + returned: always + type: complex + contains: + device_name: + description: The device name exposed to the instance. returned: always type: str - sample: NoSpot + sample: /dev/xvda + attach_time: + description: The time stamp when the attachment initiated. + returned: always + type: str + sample: "2018-06-25T04:08:26Z" + delete_on_termination: + description: Indicates whether the volume is deleted on instance termination. + returned: always + type: bool + sample: true + status: + description: The attachment state. + returned: always + type: str + sample: in_use + volume_id: + description: The ID of the cloud disk. + returned: always + type: str + sample: d-2zei53pjsi117y6gf9t6 + cpu: + description: The CPU core count of the instance. 
+ returned: always + type: int + sample: 4 + creation_time: + description: The time the instance was created. + returned: always + type: str + sample: "2018-06-25T04:08Z" + description: + description: The instance description. + returned: always + type: str + sample: "my ansible instance" + eip: + description: The attribution of EIP associated with the instance. + returned: always + type: complex + contains: + allocation_id: + description: The ID of the EIP. + returned: always + type: str + sample: eip-12345 + internet_charge_type: + description: The internet charge type of the EIP. + returned: always + type: str + sample: "paybybandwidth" + ip_address: + description: EIP address. + returned: always + type: str + sample: 42.10.2.2 + expired_time: + description: The time the instance expires. + returned: always + type: str + sample: "2099-12-31T15:59Z" + gpu: + description: The attribution of instance GPU. + returned: always + type: complex + contains: + amount: + description: The count of the GPU. + returned: always + type: int + sample: 0 + spec: + description: The specification of the GPU. + returned: always + type: str + sample: "" + host_name: + description: The host name of the instance. + returned: always + type: str + sample: iZ2zewaoZ + id: + description: Alias of instance_id. + returned: always + type: str + sample: i-abc12345 + instance_id: + description: ECS instance resource ID. + returned: always + type: str + sample: i-abc12345 + image_id: + description: The ID of the image used to launch the instance. + returned: always + type: str + sample: m-0011223344 + inner_ip_address: + description: The inner IPv4 address of the classic instance. + returned: always + type: str + sample: 10.0.0.2 + instance_charge_type: + description: The instance charge type. + returned: always + type: str + sample: PostPaid + instance_name: + description: The name of the instance. 
+ returned: always + type: str + sample: my-ecs + instance_type: + description: The instance type of the running instance. + returned: always + type: str + sample: ecs.sn1ne.xlarge + instance_type_family: + description: The instance type family of the instance belongs. + returned: always + type: str + sample: ecs.sn1ne + internet_charge_type: + description: The billing method of the network bandwidth. + returned: always + type: str + sample: PayByBandwidth + internet_max_bandwidth_in: + description: Maximum incoming bandwidth from the internet network. + returned: always + type: int + sample: 200 + internet_max_bandwidth_out: + description: Maximum incoming bandwidth from the internet network. + returned: always + type: int + sample: 20 + io_optimized: + description: Indicates whether the instance is optimized for EBS I/O. + returned: always + type: bool + sample: false + memory: + description: Memory size of the instance. + returned: always + type: int + sample: 8192 + network_interfaces: + description: One or more network interfaces for the instance. + returned: always + type: complex + contains: + mac_address: + description: The MAC address. + returned: always + type: str + sample: "00:11:22:33:44:55" + network_interface_id: + description: The ID of the network interface. + returned: always + type: str + sample: eni-01234567 + primary_ip_address: + description: The primary IPv4 address of the network interface within the vswitch. + returned: always + type: str + sample: 10.0.0.1 + osname: + description: The operation system name of the instance owned. + returned: always + type: str + sample: CentOS + ostype: + description: The operation system type of the instance owned. + returned: always + type: str + sample: linux + private_ip_address: + description: The IPv4 address of the network interface within the subnet. + returned: always + type: str + sample: 10.0.0.1 + public_ip_address: + description: The public IPv4 address assigned to the instance or eip address. 
+ returned: always + type: str + sample: 43.0.0.1 + resource_group_id: + description: The ID of the resource group to which the instance belongs. + returned: always + type: str + sample: my-ecs-group + security_groups: + description: One or more security groups for the instance. + returned: always + type: list + elements: dict + contains: + group_id: + description: The ID of the security group. + returned: always + type: str + sample: sg-0123456 + group_name: + description: The name of the security group. + returned: always + type: str + sample: my-security-group + status: + description: The current status of the instance. + returned: always + type: str + sample: running + tags: + description: Any tags assigned to the instance. + returned: always + type: dict + sample: + user_data: + description: User-defined data. + returned: always + type: dict + sample: + vswitch_id: + description: The ID of the vswitch in which the instance is running. + returned: always + type: str + sample: vsw-dew00abcdef + vpc_id: + description: The ID of the VPC the instance is in. + returned: always + type: str + sample: vpc-0011223344 + spot_price_limit: + description: + - The maximum hourly price for the preemptible instance. + returned: always + type: float + sample: 0.97 + spot_strategy: + description: + - The bidding mode of the pay-as-you-go instance. + returned: always + type: str + sample: NoSpot ids: - description: List of ECS instance IDs - returned: always - type: list - sample: [i-12345er, i-3245fs] -''' + description: List of ECS instance IDs. 
+ returned: always + type: list + sample: ["i-12345er", "i-3245fs"] +""" import re import time -import traceback from ansible.module_utils.basic import AnsibleModule, missing_required_lib -from ansible_collections.community.general.plugins.module_utils.alicloud_ecs import ecs_argument_spec, ecs_connect - -HAS_FOOTMARK = False -FOOTMARK_IMP_ERR = None -try: - from footmark.exception import ECSResponseError - HAS_FOOTMARK = True -except ImportError: - FOOTMARK_IMP_ERR = traceback.format_exc() - HAS_FOOTMARK = False +from ansible_collections.community.general.plugins.module_utils.alicloud_ecs import ( + ecs_argument_spec, ecs_connect, FOOTMARK_IMP_ERR, HAS_FOOTMARK +) def get_instances_info(connection, ids): @@ -682,7 +674,7 @@ def run_instance(module, ecs, exact_count): if len(security_groups) <= 0: module.fail_json(msg='Expected the parameter security_groups is non-empty when create new ECS instances, aborting') - client_token = "Ansible-Alicloud-{0}-{1}".format(hash(str(module.params)), str(time.time())) + client_token = f"Ansible-Alicloud-{hash(str(module.params))}-{time.time()}" try: # call to create_instance method from footmark @@ -699,7 +691,7 @@ def run_instance(module, ecs, exact_count): spot_price_limit=spot_price_limit, spot_strategy=spot_strategy, unique_suffix=unique_suffix) except Exception as e: - module.fail_json(msg='Unable to create instance, error: {0}'.format(e)) + module.fail_json(msg=f'Unable to create instance, error: {e}') return instances @@ -738,7 +730,7 @@ def modify_instance(module, instance): try: return instance.modify(name=name, description=description, host_name=host_name, password=password, user_data=user_data) except Exception as e: - module.fail_json(msg="Modify instance {0} attribute got an error: {1}".format(instance.id, e)) + module.fail_json(msg=f"Modify instance {instance.id} attribute got an error: {e}") def wait_for_instance_modify_charge(ecs, instance_ids, charge_type, delay=10, timeout=300): @@ -757,7 +749,7 @@ def 
wait_for_instance_modify_charge(ecs, instance_ids, charge_type, delay=10, ti timeout -= delay time.sleep(delay) if timeout <= 0: - raise Exception("Timeout Error: Waiting for instance to {0}. ".format(charge_type)) + raise Exception(f"Timeout Error: Waiting for instance to {charge_type}. ") except Exception as e: raise e @@ -832,8 +824,7 @@ def main(): module.fail_json(msg='The parameter instance_ids should be a list, aborting') instances = ecs.describe_instances(zone_id=zone_id, instance_ids=instance_ids) if not instances: - module.fail_json(msg="There are no instances in our record based on instance_ids {0}. " - "Please check it and try again.".format(instance_ids)) + module.fail_json(msg=f"There are no instances in our record based on instance_ids {instance_ids}. Please check it and try again.") elif count_tag: instances = ecs.describe_instances(zone_id=zone_id, tags=eval(count_tag)) elif instance_name: @@ -856,7 +847,7 @@ def main(): module.exit_json(changed=changed, ids=ids, instances=[]) except Exception as e: - module.fail_json(msg='Delete instance got an error: {0}'.format(e)) + module.fail_json(msg=f'Delete instance got an error: {e}') if module.params['allocate_public_ip'] and max_bandwidth_out < 0: module.fail_json(msg="'max_bandwidth_out' should be greater than 0 when 'allocate_public_ip' is True.") @@ -869,13 +860,13 @@ def main(): for i in range(0, len(instances) - count): inst = instances[len(instances) - 1] if inst.status != 'stopped' and not force: - module.fail_json(msg="That to delete instance {0} is failed results from it is running, " - "and please stop it or set 'force' as True.".format(inst.id)) + module.fail_json(msg=f"That to delete instance {inst.id} is failed results from it is running, " + "and please stop it or set 'force' as True.") try: if inst.terminate(force=force): changed = True except Exception as e: - module.fail_json(msg="Delete instance {0} got an error: {1}".format(inst.id, e)) + module.fail_json(msg=f"Delete instance 
{inst.id} got an error: {e}") instances.pop(len(instances) - 1) else: try: @@ -887,7 +878,7 @@ def main(): changed = True instances.extend(new_instances) except Exception as e: - module.fail_json(msg="Create new instances got an error: {0}".format(e)) + module.fail_json(msg=f"Create new instances got an error: {e}") # Security Group join/leave begin security_groups = module.params['security_groups'] @@ -959,7 +950,7 @@ def main(): changed = True ids.extend(targets) except Exception as e: - module.fail_json(msg='Start instances got an error: {0}'.format(e)) + module.fail_json(msg=f'Start instances got an error: {e}') elif state == 'stopped': try: targets = [] @@ -973,7 +964,7 @@ def main(): if modify_instance(module, inst): changed = True except Exception as e: - module.fail_json(msg='Stop instances got an error: {0}'.format(e)) + module.fail_json(msg=f'Stop instances got an error: {e}') elif state == 'restarted': try: targets = [] @@ -985,7 +976,7 @@ def main(): changed = True ids.extend(targets) except Exception as e: - module.fail_json(msg='Reboot instances got an error: {0}'.format(e)) + module.fail_json(msg=f'Reboot instances got an error: {e}') tags = module.params['tags'] if module.params['purge_tags']: @@ -996,7 +987,7 @@ def main(): if inst.remove_tags(tags): changed = True except Exception as e: - module.fail_json(msg="{0}".format(e)) + module.fail_json(msg=f"{e}") module.exit_json(changed=changed, instances=get_instances_info(ecs, ids)) if tags: @@ -1005,7 +996,7 @@ def main(): if inst.add_tags(tags): changed = True except Exception as e: - module.fail_json(msg="{0}".format(e)) + module.fail_json(msg=f"{e}") module.exit_json(changed=changed, instances=get_instances_info(ecs, ids)) diff --git a/plugins/modules/ali_instance_info.py b/plugins/modules/ali_instance_info.py new file mode 100644 index 0000000000..31550c4d0a --- /dev/null +++ b/plugins/modules/ali_instance_info.py @@ -0,0 +1,401 @@ +#!/usr/bin/python + +# Copyright (c) 2017-present Alibaba Group 
Holding Limited. He Guimin +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see http://www.gnu.org/licenses/. + +from __future__ import annotations + + +DOCUMENTATION = r""" +module: ali_instance_info +short_description: Gather information on instances of Alibaba Cloud ECS +description: + - This module fetches data from the Open API in Alicloud. The module must be called from within the ECS instance itself. +attributes: + check_mode: + version_added: 3.3.0 + # This was backported to 2.5.4 and 1.3.11 as well, since this was a bugfix + +options: + name_prefix: + description: + - Use a instance name prefix to filter ECS instances. + type: str + version_added: '0.2.0' + tags: + description: + - A hash/dictionaries of instance tags. C({"key":"value"}). + aliases: ["instance_tags"] + type: dict + filters: + description: + - A dict of filters to apply. Each dict item consists of a filter key and a filter value. The filter keys can be all + of request parameters. See U(https://www.alibabacloud.com/help/doc-detail/25506.htm) for parameter details. Filter + keys can be same as request parameter name or be lower case and use underscore (V("_")) or dash (V("-")) to connect + different words in one parameter. C(InstanceIds) should be a list. 
C(Tag.n.Key) and C(Tag.n.Value) should be a dict + and using O(tags) instead. + type: dict + version_added: '0.2.0' +author: + - "He Guimin (@xiaozhu36)" +requirements: + - "Python >= 3.6" + - "footmark >= 1.13.0" +extends_documentation_fragment: + - community.general.alicloud + - community.general.attributes + - community.general.attributes.info_module +""" + +EXAMPLES = r""" +# Fetch instances details according to setting different filters + +- name: Find all instances in the specified region + community.general.ali_instance_info: + register: all_instances + +- name: Find all instances based on the specified ids + community.general.ali_instance_info: + instance_ids: + - "i-35b333d9" + - "i-ddav43kd" + register: instances_by_ids + +- name: Find all instances based on the specified name_prefix + community.general.ali_instance_info: + name_prefix: "ecs_instance_" + register: instances_by_name_prefix + +- name: Find instances based on tags + community.general.ali_instance_info: + tags: + Test: "add" +""" + +RETURN = r""" +instances: + description: List of ECS instances. + returned: always + type: complex + contains: + availability_zone: + description: The availability zone of the instance is in. + returned: always + type: str + sample: cn-beijing-a + block_device_mappings: + description: Any block device mapping entries for the instance. + returned: always + type: complex + contains: + device_name: + description: The device name exposed to the instance (for example, /dev/xvda). + returned: always + type: str + sample: /dev/xvda + attach_time: + description: The time stamp when the attachment initiated. + returned: always + type: str + sample: "2018-06-25T04:08:26Z" + delete_on_termination: + description: Indicates whether the volume is deleted on instance termination. + returned: always + type: bool + sample: true + status: + description: The attachment state. + returned: always + type: str + sample: in_use + volume_id: + description: The ID of the cloud disk. 
+ returned: always + type: str + sample: d-2zei53pjsi117y6gf9t6 + cpu: + description: The CPU core count of the instance. + returned: always + type: int + sample: 4 + creation_time: + description: The time the instance was created. + returned: always + type: str + sample: "2018-06-25T04:08Z" + description: + description: The instance description. + returned: always + type: str + sample: "my ansible instance" + eip: + description: The attribution of EIP associated with the instance. + returned: always + type: complex + contains: + allocation_id: + description: The ID of the EIP. + returned: always + type: str + sample: eip-12345 + internet_charge_type: + description: The internet charge type of the EIP. + returned: always + type: str + sample: "paybybandwidth" + ip_address: + description: EIP address. + returned: always + type: str + sample: 42.10.2.2 + expired_time: + description: The time the instance expires. + returned: always + type: str + sample: "2099-12-31T15:59Z" + gpu: + description: The attribution of instance GPU. + returned: always + type: complex + contains: + amount: + description: The count of the GPU. + returned: always + type: int + sample: 0 + spec: + description: The specification of the GPU. + returned: always + type: str + sample: "" + host_name: + description: The host name of the instance. + returned: always + type: str + sample: iZ2zewaoZ + id: + description: Alias of instance_id. + returned: always + type: str + sample: i-abc12345 + instance_id: + description: ECS instance resource ID. + returned: always + type: str + sample: i-abc12345 + image_id: + description: The ID of the image used to launch the instance. + returned: always + type: str + sample: m-0011223344 + inner_ip_address: + description: The inner IPv4 address of the classic instance. + returned: always + type: str + sample: 10.0.0.2 + instance_charge_type: + description: The instance charge type. 
+ returned: always + type: str + sample: PostPaid + instance_name: + description: The name of the instance. + returned: always + type: str + sample: my-ecs + instance_type_family: + description: The instance type family of the instance belongs. + returned: always + type: str + sample: ecs.sn1ne + instance_type: + description: The instance type of the running instance. + returned: always + type: str + sample: ecs.sn1ne.xlarge + internet_charge_type: + description: The billing method of the network bandwidth. + returned: always + type: str + sample: PayByBandwidth + internet_max_bandwidth_in: + description: Maximum incoming bandwidth from the internet network. + returned: always + type: int + sample: 200 + internet_max_bandwidth_out: + description: Maximum incoming bandwidth from the internet network. + returned: always + type: int + sample: 20 + io_optimized: + description: Indicates whether the instance is optimized for EBS I/O. + returned: always + type: bool + sample: false + memory: + description: Memory size of the instance. + returned: always + type: int + sample: 8192 + network_interfaces: + description: One or more network interfaces for the instance. + returned: always + type: complex + contains: + mac_address: + description: The MAC address. + returned: always + type: str + sample: "00:11:22:33:44:55" + network_interface_id: + description: The ID of the network interface. + returned: always + type: str + sample: eni-01234567 + primary_ip_address: + description: The primary IPv4 address of the network interface within the vswitch. + returned: always + type: str + sample: 10.0.0.1 + osname: + description: The operation system name of the instance owned. + returned: always + type: str + sample: CentOS + ostype: + description: The operation system type of the instance owned. + returned: always + type: str + sample: linux + private_ip_address: + description: The IPv4 address of the network interface within the subnet. 
+ returned: always + type: str + sample: 10.0.0.1 + public_ip_address: + description: The public IPv4 address assigned to the instance or EIP address. + returned: always + type: str + sample: 43.0.0.1 + resource_group_id: + description: The ID of the resource group to which the instance belongs. + returned: always + type: str + sample: my-ecs-group + security_groups: + description: One or more security groups for the instance. + returned: always + type: list + elements: dict + contains: + group_id: + description: The ID of the security group. + returned: always + type: str + sample: sg-0123456 + group_name: + description: The name of the security group. + returned: always + type: str + sample: my-security-group + status: + description: The current status of the instance. + returned: always + type: str + sample: running + tags: + description: Any tags assigned to the instance. + returned: always + type: dict + sample: + vswitch_id: + description: The ID of the vswitch in which the instance is running. + returned: always + type: str + sample: vsw-dew00abcdef + vpc_id: + description: The ID of the VPC the instance is in. + returned: always + type: str + sample: vpc-0011223344 +ids: + description: List of ECS instance IDs. 
+ returned: always + type: list + sample: ["i-12345er", "i-3245fs"] +""" + +from ansible.module_utils.basic import AnsibleModule, missing_required_lib +from ansible_collections.community.general.plugins.module_utils.alicloud_ecs import ( + ecs_argument_spec, ecs_connect, FOOTMARK_IMP_ERR, HAS_FOOTMARK +) + + +def main(): + argument_spec = ecs_argument_spec() + argument_spec.update(dict( + name_prefix=dict(type='str'), + tags=dict(type='dict', aliases=['instance_tags']), + filters=dict(type='dict') + ) + ) + module = AnsibleModule( + argument_spec=argument_spec, + supports_check_mode=True, + ) + + if HAS_FOOTMARK is False: + module.fail_json(msg=missing_required_lib('footmark'), exception=FOOTMARK_IMP_ERR) + + ecs = ecs_connect(module) + + instances = [] + instance_ids = [] + ids = [] + name_prefix = module.params['name_prefix'] + + filters = module.params['filters'] + if not filters: + filters = {} + for key, value in list(filters.items()): + if key in ["InstanceIds", "instance_ids", "instance-ids"] and isinstance(ids, list): + for id in value: + if id not in ids: + ids.append(value) + if ids: + filters['instance_ids'] = ids + if module.params['tags']: + filters['tags'] = module.params['tags'] + + for inst in ecs.describe_instances(**filters): + if name_prefix: + if not str(inst.instance_name).startswith(name_prefix): + continue + volumes = ecs.describe_disks(instance_id=inst.id) + setattr(inst, 'block_device_mappings', volumes) + setattr(inst, 'user_data', inst.describe_user_data()) + instances.append(inst.read()) + instance_ids.append(inst.id) + + module.exit_json(changed=False, ids=instance_ids, instances=instances) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/alternatives.py b/plugins/modules/alternatives.py new file mode 100644 index 0000000000..feebdd2b4a --- /dev/null +++ b/plugins/modules/alternatives.py @@ -0,0 +1,437 @@ +#!/usr/bin/python + +# Copyright (c) 2014, Gabe Mulley +# Copyright (c) 2015, David Wittman +# Copyright (c) 
2022, Marius Rieder +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + + +DOCUMENTATION = r""" +module: alternatives +short_description: Manages alternative programs for common commands +description: + - Manages symbolic links using the C(update-alternatives) tool. + - Useful when multiple programs are installed but provide similar functionality (for example, different editors). +author: + - Marius Rieder (@jiuka) + - David Wittman (@DavidWittman) + - Gabe Mulley (@mulby) +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: full +options: + name: + description: + - The generic name of the link. + type: str + required: true + path: + description: + - The path to the real executable that the link should point to. + type: path + family: + description: + - The family groups similar alternatives. This option is available only on RHEL-based distributions. + type: str + version_added: 10.1.0 + link: + description: + - The path to the symbolic link that should point to the real executable. + - This option is always required on RHEL-based distributions. On Debian-based distributions this option is required + when the alternative O(name) is unknown to the system. + type: path + priority: + description: + - The priority of the alternative. If no priority is given for creation V(50) is used as a fallback. + type: int + state: + description: + - V(present) - install the alternative (if not already installed), but do not set it as the currently selected alternative + for the group. + - V(selected) - install the alternative (if not already installed), and set it as the currently selected alternative + for the group. + - V(auto) - install the alternative (if not already installed), and set the group to auto mode. Added in community.general + 5.1.0. 
+ - V(absent) - removes the alternative. Added in community.general 5.1.0. + choices: [present, selected, auto, absent] + default: selected + type: str + version_added: 4.8.0 + subcommands: + description: + - A list of subcommands. + - Each subcommand needs a name, a link and a path parameter. + - Subcommands are also named C(slaves) or C(followers), depending on the version of C(alternatives). + type: list + elements: dict + aliases: ['slaves'] + suboptions: + name: + description: + - The generic name of the subcommand. + type: str + required: true + path: + description: + - The path to the real executable that the subcommand should point to. + type: path + required: true + link: + description: + - The path to the symbolic link that should point to the real subcommand executable. + type: path + required: true + version_added: 5.1.0 +requirements: [update-alternatives] +""" + +EXAMPLES = r""" +- name: Correct java version selected + community.general.alternatives: + name: java + path: /usr/lib/jvm/java-7-openjdk-amd64/jre/bin/java + +- name: Select java-11-openjdk.x86_64 family + community.general.alternatives: + name: java + family: java-11-openjdk.x86_64 + when: ansible_os_family == 'RedHat' + +- name: Alternatives link created + community.general.alternatives: + name: hadoop-conf + link: /etc/hadoop/conf + path: /etc/hadoop/conf.ansible + +- name: Make java 32 bit an alternative with low priority + community.general.alternatives: + name: java + path: /usr/lib/jvm/java-7-openjdk-i386/jre/bin/java + priority: -10 + +- name: Install Python 3.5 but do not select it + community.general.alternatives: + name: python + path: /usr/bin/python3.5 + link: /usr/bin/python + state: present + +- name: Install Python 3.5 and reset selection to auto + community.general.alternatives: + name: python + path: /usr/bin/python3.5 + link: /usr/bin/python + state: auto + +- name: keytool is a subcommand of java + community.general.alternatives: + name: java + link: /usr/bin/java + path: 
import os
import re

from ansible.module_utils.basic import AnsibleModule


class AlternativeState:
    """String constants for the module's ``state`` option."""

    PRESENT = "present"
    SELECTED = "selected"
    ABSENT = "absent"
    AUTO = "auto"

    @classmethod
    def to_list(cls):
        """Return all valid states, in the order used for argument-spec ``choices``."""
        return [cls.PRESENT, cls.SELECTED, cls.ABSENT, cls.AUTO]


class AlternativesModule(object):
    """Reconcile the requested alternative with the system via `update-alternatives`.

    The constructor runs the whole workflow (parse current state, then
    install/set/auto/remove as needed) and exits the Ansible module, so
    instantiating this class never returns control to the caller on success.
    """

    # Lazily-resolved absolute path of the update-alternatives binary (see the
    # UPDATE_ALTERNATIVES property below).
    _UPDATE_ALTERNATIVES = None

    def __init__(self, module):
        self.module = module
        self.result = dict(changed=False, diff=dict(before=dict(), after=dict()))
        # Force the C locale so `update-alternatives --display` output is parseable.
        self.module.run_command_environ_update = {'LC_ALL': 'C'}
        self.messages = []
        self.run()

    @property
    def mode_present(self):
        # True for every state that requires the alternative to be installed.
        return self.module.params.get('state') in [AlternativeState.PRESENT, AlternativeState.SELECTED, AlternativeState.AUTO]

    @property
    def mode_selected(self):
        return self.module.params.get('state') == AlternativeState.SELECTED

    @property
    def mode_auto(self):
        return self.module.params.get('state') == AlternativeState.AUTO

    def run(self):
        """Main reconciliation loop; ends by calling ``module.exit_json``."""
        self.parse()

        if self.mode_present:
            # Check if we need to (re)install: missing path, changed priority,
            # or any difference in the subcommand (slave/follower) set.
            subcommands_parameter = self.module.params['subcommands']
            priority_parameter = self.module.params['priority']
            if (
                self.path is not None and (
                    self.path not in self.current_alternatives or
                    (priority_parameter is not None and self.current_alternatives[self.path].get('priority') != priority_parameter) or
                    (subcommands_parameter is not None and (
                        not all(s in subcommands_parameter for s in self.current_alternatives[self.path].get('subcommands')) or
                        not all(s in self.current_alternatives[self.path].get('subcommands') for s in subcommands_parameter)
                    ))
                )
            ):
                self.install()

            # Check if we need to set the preference. Selection is skipped when
            # the current choice already matches by path or by family.
            is_same_path = self.path is not None and self.current_path == self.path
            is_same_family = False
            if self.current_path is not None and self.current_path in self.current_alternatives:
                current_alternative = self.current_alternatives[self.current_path]
                is_same_family = current_alternative.get('family') == self.family

            if self.mode_selected and not (is_same_path or is_same_family):
                self.set()

            # Check if we need to reset the group to auto mode.
            if self.mode_auto and self.current_mode == 'manual':
                self.auto()
        else:
            # state=absent: uninstall only if the path is currently registered.
            if self.path in self.current_alternatives:
                self.remove()

        self.result['msg'] = ' '.join(self.messages)
        self.module.exit_json(**self.result)

    def install(self):
        """Register the alternative (and its subcommands) with `update-alternatives --install`."""
        if not os.path.exists(self.path):
            self.module.fail_json(msg=f"Specified path {self.path} does not exist")
        if not self.link:
            self.module.fail_json(msg='Needed to install the alternative, but unable to do so as we are missing the link')

        cmd = [self.UPDATE_ALTERNATIVES, '--install', self.link, self.name, self.path, str(self.priority)]
        # --family is only supported on RHEL-based update-alternatives.
        if self.family is not None:
            cmd.extend(["--family", self.family])

        if self.module.params['subcommands'] is not None:
            # Flatten one ['--slave', link, name, path] group per subcommand.
            subcommands = [['--slave', subcmd['link'], subcmd['name'], subcmd['path']] for subcmd in self.subcommands]
            cmd += [item for sublist in subcommands for item in sublist]

        self.result['changed'] = True
        self.messages.append(f"Install alternative '{self.path}' for '{self.name}'.")

        if not self.module.check_mode:
            self.module.run_command(cmd, check_rc=True)

        if self.module._diff:
            self.result['diff']['after'] = dict(
                state=AlternativeState.PRESENT,
                path=self.path,
                family=self.family,
                priority=self.priority,
                link=self.link,
            )
            if self.subcommands:
                self.result['diff']['after'].update(dict(
                    subcommands=self.subcommands
                ))

    def remove(self):
        """Unregister this path from the group with `update-alternatives --remove`."""
        cmd = [self.UPDATE_ALTERNATIVES, '--remove', self.name, self.path]
        self.result['changed'] = True
        self.messages.append(f"Remove alternative '{self.path}' from '{self.name}'.")

        if not self.module.check_mode:
            self.module.run_command(cmd, check_rc=True)

        if self.module._diff:
            self.result['diff']['after'] = dict(state=AlternativeState.ABSENT)

    def set(self):
        """Manually select this alternative with `update-alternatives --set`."""
        # Path takes precedence over family as it is more specific
        if self.path is None:
            arg = self.family
        else:
            arg = self.path

        cmd = [self.UPDATE_ALTERNATIVES, '--set', self.name, arg]
        self.result['changed'] = True
        self.messages.append(f"Set alternative '{arg}' for '{self.name}'.")

        if not self.module.check_mode:
            self.module.run_command(cmd, check_rc=True)

        if self.module._diff:
            self.result['diff']['after']['state'] = AlternativeState.SELECTED

    def auto(self):
        """Return the group to automatic mode with `update-alternatives --auto`."""
        cmd = [self.UPDATE_ALTERNATIVES, '--auto', self.name]
        self.messages.append(f"Set alternative to auto for '{self.name}'.")
        self.result['changed'] = True

        if not self.module.check_mode:
            self.module.run_command(cmd, check_rc=True)

        if self.module._diff:
            self.result['diff']['after']['state'] = AlternativeState.PRESENT

    @property
    def name(self):
        return self.module.params.get('name')

    @property
    def path(self):
        return self.module.params.get('path')

    @property
    def family(self):
        return self.module.params.get('family')

    @property
    def link(self):
        # Fall back to the link discovered by parse() when none was supplied.
        return self.module.params.get('link') or self.current_link

    @property
    def priority(self):
        # Parameter wins; otherwise keep the currently registered priority,
        # defaulting to 50 for a fresh install.
        if self.module.params.get('priority') is not None:
            return self.module.params.get('priority')
        return self.current_alternatives.get(self.path, {}).get('priority', 50)

    @property
    def subcommands(self):
        # Parameter wins; otherwise reuse the currently registered subcommands.
        if self.module.params.get('subcommands') is not None:
            return self.module.params.get('subcommands')
        elif self.path in self.current_alternatives and self.current_alternatives[self.path].get('subcommands'):
            return self.current_alternatives[self.path].get('subcommands')
        return None

    @property
    def UPDATE_ALTERNATIVES(self):
        # Resolve and cache the binary path; fails the module if not found.
        if self._UPDATE_ALTERNATIVES is None:
            self._UPDATE_ALTERNATIVES = self.module.get_bin_path('update-alternatives', True)
        return self._UPDATE_ALTERNATIVES

    def parse(self):
        """Populate current_* attributes from `update-alternatives --display <name>`.

        Leaves everything at its empty default when the group does not exist
        or the output cannot be parsed.
        """
        self.current_mode = None
        self.current_path = None
        self.current_link = None
        self.current_alternatives = {}

        # Run `update-alternatives --display <name>` to find existing alternatives
        (rc, display_output, dummy) = self.module.run_command(
            [self.UPDATE_ALTERNATIVES, '--display', self.name]
        )

        if rc != 0:
            self.module.debug(f"No current alternative found. '{self.UPDATE_ALTERNATIVES}' exited with {rc}")
            return

        # Regexes cover both Debian and RHEL output wording
        # ('slave' vs 'follower', optional 'status is' / 'family' tokens).
        current_mode_regex = re.compile(r'\s-\s(?:status\sis\s)?(\w*)(?:\smode|.)$', re.MULTILINE)
        current_path_regex = re.compile(r'^\s*link currently points to (.*)$', re.MULTILINE)
        current_link_regex = re.compile(r'^\s*link \w+ is (.*)$', re.MULTILINE)
        subcmd_path_link_regex = re.compile(r'^\s*(?:slave|follower) (\S+) is (.*)$', re.MULTILINE)

        alternative_regex = re.compile(r'^(\/.*)\s-\s(?:family\s(\S+)\s)?priority\s(\d+)((?:\s+(?:slave|follower).*)*)', re.MULTILINE)
        subcmd_regex = re.compile(r'^\s+(?:slave|follower) (.*): (.*)$', re.MULTILINE)

        match = current_mode_regex.search(display_output)
        if not match:
            self.module.debug("No current mode found in output")
            return
        self.current_mode = match.group(1)

        match = current_path_regex.search(display_output)
        if not match:
            self.module.debug("No current path found in output")
        else:
            self.current_path = match.group(1)

        match = current_link_regex.search(display_output)
        if not match:
            self.module.debug("No current link found in output")
        else:
            self.current_link = match.group(1)

        # Map subcommand name -> link; fall back to the requested subcommands
        # when the display output does not list any links.
        subcmd_path_map = dict(subcmd_path_link_regex.findall(display_output))
        if not subcmd_path_map and self.subcommands:
            subcmd_path_map = {s['name']: s['link'] for s in self.subcommands}

        for path, family, prio, subcmd in alternative_regex.findall(display_output):
            self.current_alternatives[path] = dict(
                priority=int(prio),
                family=family,
                subcommands=[dict(
                    name=name,
                    path=spath,
                    link=subcmd_path_map.get(name)
                ) for name, spath in subcmd_regex.findall(subcmd) if spath != '(null)']
            )

        if self.module._diff:
            if self.path in self.current_alternatives:
                self.result['diff']['before'].update(dict(
                    state=AlternativeState.PRESENT,
                    path=self.path,
                    priority=self.current_alternatives[self.path].get('priority'),
                    link=self.current_link,
                ))
                if self.current_alternatives[self.path].get('subcommands'):
                    self.result['diff']['before'].update(dict(
                        subcommands=self.current_alternatives[self.path].get('subcommands')
                    ))
                if self.current_mode == 'manual' and self.current_path != self.path:
                    self.result['diff']['before'].update(dict(
                        state=AlternativeState.SELECTED
                    ))
            else:
                self.result['diff']['before'].update(dict(
                    state=AlternativeState.ABSENT
                ))


def main():
    """Module entry point: build the argument spec and hand off to AlternativesModule."""

    module = AnsibleModule(
        argument_spec=dict(
            name=dict(type='str', required=True),
            path=dict(type='path'),
            family=dict(type='str'),
            link=dict(type='path'),
            priority=dict(type='int'),
            state=dict(
                type='str',
                choices=AlternativeState.to_list(),
                default=AlternativeState.SELECTED,
            ),
            subcommands=dict(type='list', elements='dict', aliases=['slaves'], options=dict(
                name=dict(type='str', required=True),
                path=dict(type='path', required=True),
                link=dict(type='path', required=True),
            )),
        ),
        supports_check_mode=True,
        required_one_of=[('path', 'family')]
    )

    AlternativesModule(module)


if __name__ == '__main__':
    main()
+ - Manages Android SDK packages. + - Allows installation from different channels (stable, beta, dev, canary). + - Allows installation of packages to a non-default SDK root directory. +author: Stanislav Shamilov (@shamilovstas) +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: none +version_added: 10.2.0 +options: + accept_licenses: + description: + - If this is set to V(true), the module attempts to accept license prompts generated by C(sdkmanager) during package + installation. Otherwise, every license prompt is rejected. + type: bool + default: false + name: + description: + - A name of an Android SDK package (for instance, V(build-tools;34.0.0)). + aliases: ['package', 'pkg'] + type: list + elements: str + state: + description: + - Indicates the desired package(s) state. + - V(present) ensures that package(s) is/are present. + - V(absent) ensures that package(s) is/are absent. + - V(latest) ensures that package(s) is/are installed and updated to the latest version(s). + choices: ['present', 'absent', 'latest'] + default: present + type: str + sdk_root: + description: + - Provides path for an alternative directory to install Android SDK packages to. By default, all packages are installed + to the directory where C(sdkmanager) is installed. + type: path + channel: + description: + - Indicates what channel must C(sdkmanager) use for installation of packages. + choices: ['stable', 'beta', 'dev', 'canary'] + default: stable + type: str +requirements: + - C(java) >= 17 + - C(sdkmanager) Command line tool for installing Android SDK packages. +notes: + - For some of the packages installed by C(sdkmanager) is it necessary to accept licenses. Usually it is done through command + line prompt in a form of a Y/N question when a licensed package is requested to be installed. 
If there are several packages + requested for installation and at least two of them belong to different licenses, the C(sdkmanager) tool prompts for these + licenses in a loop. In order to install packages, the module must be able to answer these license prompts. Currently, + it is only possible to answer one license prompt at a time, meaning that instead of installing multiple packages as a + single invocation of the C(sdkmanager --install) command, it is done by executing the command independently for each package. + This makes sure that at most only one license prompt needs to be answered. At the time of writing this module, a C(sdkmanager)'s + package may belong to at most one license type that needs to be accepted. However, if this changes in the future, the + module may hang as there might be more prompts generated by the C(sdkmanager) tool which the module is unable to answer. + If this becomes the case, file an issue and in the meantime, consider accepting all the licenses in advance, as it is + described in the C(sdkmanager) L(documentation,https://developer.android.com/tools/sdkmanager#accept-licenses), for instance, + using the M(ansible.builtin.command) module. +seealso: + - name: sdkmanager tool documentation + description: Detailed information of how to install and use sdkmanager command line tool. 
+ link: https://developer.android.com/tools/sdkmanager +""" + +EXAMPLES = r""" +- name: Install build-tools;34.0.0 + community.general.android_sdk: + name: build-tools;34.0.0 + accept_licenses: true + state: present + +- name: Install build-tools;34.0.0 and platform-tools + community.general.android_sdk: + name: + - build-tools;34.0.0 + - platform-tools + accept_licenses: true + state: present + +- name: Delete build-tools;34.0.0 + community.general.android_sdk: + name: build-tools;34.0.0 + state: absent + +- name: Install platform-tools or update if installed + community.general.android_sdk: + name: platform-tools + accept_licenses: true + state: latest + +- name: Install build-tools;34.0.0 to a different SDK root + community.general.android_sdk: + name: build-tools;34.0.0 + accept_licenses: true + state: present + sdk_root: "/path/to/new/root" + +- name: Install a package from another channel + community.general.android_sdk: + name: some-package-present-in-canary-channel + accept_licenses: true + state: present + channel: canary +""" + +RETURN = r""" +installed: + description: A list of packages that have been installed. + returned: when packages have changed + type: list + sample: ["build-tools;34.0.0", "platform-tools"] + +removed: + description: A list of packages that have been removed. 
from ansible_collections.community.general.plugins.module_utils.mh.module_helper import StateModuleHelper
from ansible_collections.community.general.plugins.module_utils.android_sdkmanager import Package, AndroidSdkManager


class AndroidSdk(StateModuleHelper):
    """State-based wrapper around the Android `sdkmanager` CLI.

    StateModuleHelper dispatches to ``state_present``/``state_absent``/
    ``state_latest`` based on the ``state`` parameter; ``self.vars`` entries
    registered with ``change=True`` drive the module's changed-status.
    """

    module = dict(
        argument_spec=dict(
            state=dict(type='str', default='present', choices=['present', 'absent', 'latest']),
            package=dict(type='list', elements='str', aliases=['pkg', 'name']),
            sdk_root=dict(type='path'),
            channel=dict(type='str', default='stable', choices=['stable', 'beta', 'dev', 'canary']),
            accept_licenses=dict(type='bool', default=False)
        ),
        supports_check_mode=True
    )

    def __init_module__(self):
        # 'installed'/'removed' are tracked for change detection and returned
        # to the user as RETURN values.
        self.sdkmanager = AndroidSdkManager(self.module)
        self.vars.set('installed', [], change=True)
        self.vars.set('removed', [], change=True)

    def _parse_packages(self):
        """Validate the requested package list (no duplicates) and wrap each name in Package."""
        arg_pkgs = set(self.vars.package)
        if len(arg_pkgs) < len(self.vars.package):
            self.do_raise("Packages may not repeat")
        return set(Package(p) for p in arg_pkgs)

    def state_present(self):
        """Install requested packages that are not installed yet."""
        packages = self._parse_packages()
        installed = self.sdkmanager.get_installed_packages()
        pending_installation = packages.difference(installed)

        self.vars.installed = AndroidSdk._map_packages_to_names(pending_installation)
        if not self.check_mode:
            rc, stdout, stderr = self.sdkmanager.apply_packages_changes(pending_installation, self.vars.accept_licenses)
            if rc != 0:
                self.do_raise(f"Could not install packages: {stderr}")

    def state_absent(self):
        """Uninstall requested packages that are currently installed."""
        packages = self._parse_packages()
        installed = self.sdkmanager.get_installed_packages()
        to_be_deleted = packages.intersection(installed)
        self.vars.removed = AndroidSdk._map_packages_to_names(to_be_deleted)
        if not self.check_mode:
            # No accept_licenses argument here: removal never prompts for licenses.
            rc, stdout, stderr = self.sdkmanager.apply_packages_changes(to_be_deleted)
            if rc != 0:
                self.do_raise(f"Could not uninstall packages: {stderr}")

    def state_latest(self):
        """Install missing packages and upgrade any installed package with an update available."""
        packages = self._parse_packages()
        installed = self.sdkmanager.get_installed_packages()
        updatable = self.sdkmanager.get_updatable_packages()
        not_installed = packages.difference(installed)
        to_be_installed = not_installed.union(updatable)
        self.vars.installed = AndroidSdk._map_packages_to_names(to_be_installed)

        if not self.check_mode:
            rc, stdout, stderr = self.sdkmanager.apply_packages_changes(to_be_installed, self.vars.accept_licenses)
            if rc != 0:
                self.do_raise(f"Could not install packages: {stderr}")

    @staticmethod
    def _map_packages_to_names(packages):
        """Project Package objects back to their plain name strings for module output."""
        return [x.name for x in packages]


def main():
    AndroidSdk.execute()


if __name__ == '__main__':
    main()
+ link: https://docs.ansible.com/ansible/latest/cli/ansible-galaxy.html + +requirements: + - ansible-core 2.11 or newer +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: none + diff_mode: + support: none +options: + state: + description: + - If O(state=present) then the collection or role is installed. Note that the collections and roles are not updated + with this option. + - Currently the O(state=latest) is ignored unless O(type=collection), and it ensures the collection is installed and + updated to the latest available version. + - Please note that O(force=true) can be used to perform upgrade regardless of O(type). + type: str + choices: [present, latest] + default: present + version_added: 9.1.0 + type: + description: + - The type of installation performed by C(ansible-galaxy). + - If O(type=both), then O(requirements_file) must be passed and it may contain both roles and collections. + - 'Note however that the opposite is not true: if using a O(requirements_file), then O(type) can be any of the three + choices.' + type: str + choices: [collection, role, both] + required: true + name: + description: + - Name of the collection or role being installed. + - Versions can be specified with C(ansible-galaxy) usual formats. For example, the collection V(community.docker:1.6.1) + or the role V(ansistrano.deploy,3.8.0). + - O(name) and O(requirements_file) are mutually exclusive. + type: str + requirements_file: + description: + - Path to a file containing a list of requirements to be installed. + - It works for O(type) equals to V(collection) and V(role). + - O(name) and O(requirements_file) are mutually exclusive. + type: path + dest: + description: + - The path to the directory containing your collections or roles, according to the value of O(type). 
+ - Please notice that C(ansible-galaxy) does not install collections with O(type=both), when O(requirements_file) contains + both roles and collections and O(dest) is specified. + type: path + no_deps: + description: + - Refrain from installing dependencies. + version_added: 4.5.0 + type: bool + default: false + force: + description: + - Force overwriting existing roles and/or collections. + - It can be used for upgrading, but the module output always reports C(changed=true). + - Using O(force=true) is mandatory when downgrading. + type: bool + default: false +""" + +EXAMPLES = r""" +- name: Install collection community.network + community.general.ansible_galaxy_install: + type: collection + name: community.network + +- name: Install role at specific path + community.general.ansible_galaxy_install: + type: role + name: ansistrano.deploy + dest: /ansible/roles + +- name: Install collections and roles together + community.general.ansible_galaxy_install: + type: both + requirements_file: requirements.yml + +- name: Force-install collection community.network at specific version + community.general.ansible_galaxy_install: + type: collection + name: community.network:3.0.2 + force: true +""" + +RETURN = r""" +type: + description: The value of the O(type) parameter. + type: str + returned: always +name: + description: The value of the O(name) parameter. + type: str + returned: always +dest: + description: The value of the O(dest) parameter. + type: str + returned: always +requirements_file: + description: The value of the O(requirements_file) parameter. + type: str + returned: always +force: + description: The value of the O(force) parameter. + type: bool + returned: always +installed_roles: + description: + - If O(requirements_file) is specified instead, returns dictionary with all the roles installed per path. + - If O(name) is specified, returns that role name and the version installed per path. 
import re

from ansible_collections.community.general.plugins.module_utils.cmd_runner import CmdRunner, cmd_runner_fmt
from ansible_collections.community.general.plugins.module_utils.module_helper import ModuleHelper, ModuleHelperException


class AnsibleGalaxyInstall(ModuleHelper):
    """ModuleHelper implementation driving the `ansible-galaxy` CLI.

    Lists what is already installed, runs `ansible-galaxy ... install`,
    and reports newly installed collections/roles parsed from its output.
    """

    # FIX: the named groups below had lost their names (e.g. `(?P\d+...)`),
    # which is not valid Python regex syntax. Names restored to match the
    # `match.group(...)` calls used throughout this class.
    _RE_GALAXY_VERSION = re.compile(r'^ansible-galaxy(?: \[core)? (?P<version>\d+\.\d+\.\d+)(?:\.\w+)?(?:\])?')
    _RE_LIST_PATH = re.compile(r'^# (?P<path>.*)$')
    _RE_LIST_COLL = re.compile(r'^(?P<elem>\w+\.\w+)\s+(?P<version>[\d\.]+)\s*$')
    _RE_LIST_ROLE = re.compile(r'^- (?P<elem>\w+\.\w+),\s+(?P<version>[\d\.]+)\s*$')
    _RE_INSTALL_OUTPUT = re.compile(
        r'^(?:(?P<collection>\w+\.\w+):(?P<cversion>[\d\.]+)|- (?P<role>\w+\.\w+) \((?P<rversion>[\d\.]+)\)) was installed successfully$'
    )
    # (major, minor, patch) of ansible-core, filled in by __init_module__.
    ansible_version = None

    output_params = ('type', 'name', 'dest', 'requirements_file', 'force', 'no_deps')
    module = dict(
        argument_spec=dict(
            state=dict(type='str', choices=['present', 'latest'], default='present'),
            type=dict(type='str', choices=('collection', 'role', 'both'), required=True),
            name=dict(type='str'),
            requirements_file=dict(type='path'),
            dest=dict(type='path'),
            force=dict(type='bool', default=False),
            no_deps=dict(type='bool', default=False),
        ),
        mutually_exclusive=[('name', 'requirements_file')],
        required_one_of=[('name', 'requirements_file')],
        required_if=[('type', 'both', ['requirements_file'])],
        supports_check_mode=False,
    )

    command = 'ansible-galaxy'
    # How each logical argument is rendered on the ansible-galaxy command line.
    command_args_formats = dict(
        type=cmd_runner_fmt.as_func(lambda v: [] if v == 'both' else [v]),
        galaxy_cmd=cmd_runner_fmt.as_list(),
        upgrade=cmd_runner_fmt.as_bool("--upgrade"),
        requirements_file=cmd_runner_fmt.as_opt_val('-r'),
        dest=cmd_runner_fmt.as_opt_val('-p'),
        force=cmd_runner_fmt.as_bool("--force"),
        no_deps=cmd_runner_fmt.as_bool("--no-deps"),
        version=cmd_runner_fmt.as_fixed("--version"),
        name=cmd_runner_fmt.as_list(),
    )

    def _make_runner(self, lang):
        """Build a CmdRunner pinned to the given locale (ansible-galaxy needs UTF-8)."""
        return CmdRunner(self.module, command=self.command, arg_formats=self.command_args_formats, force_lang=lang, check_rc=True)

    def _get_ansible_galaxy_version(self):
        """Return (runner, version string), retrying with en_US.UTF-8 if C.UTF-8 is unavailable."""
        class UnsupportedLocale(ModuleHelperException):
            pass

        def process(rc, out, err):
            # Locale problems surface either as a hard failure or a warning.
            if (rc != 0 and "unsupported locale setting" in err) or (rc == 0 and "cannot change locale" in err):
                raise UnsupportedLocale(msg=err)
            line = out.splitlines()[0]
            match = self._RE_GALAXY_VERSION.match(line)
            if not match:
                self.do_raise(f"Unable to determine ansible-galaxy version from: {line}")
            version = match.group("version")
            return version

        try:
            runner = self._make_runner("C.UTF-8")
            with runner("version", check_rc=False, output_process=process) as ctx:
                return runner, ctx.run()
        except UnsupportedLocale:
            runner = self._make_runner("en_US.UTF-8")
            with runner("version", check_rc=True, output_process=process) as ctx:
                return runner, ctx.run()

    def __init_module__(self):
        self.runner, self.vars.version = self._get_ansible_galaxy_version()
        self.ansible_version = tuple(int(x) for x in self.vars.version.split('.')[:3])
        if self.ansible_version < (2, 11):
            self.module.fail_json(msg="Support for Ansible 2.9 and ansible-base 2.10 has been removed.")
        self.vars.set("new_collections", {}, change=True)
        self.vars.set("new_roles", {}, change=True)
        if self.vars.type != "collection":
            self.vars.installed_roles = self._list_roles()
        # FIX: was `!= "roles"`, which is never a valid value of `type`
        # (choices are collection/role/both), so collections were listed even
        # for role-only runs.
        if self.vars.type != "role":
            self.vars.installed_collections = self._list_collections()

    def _list_element(self, _type, path_re, elem_re):
        """Parse `ansible-galaxy <type> list` output into {path: {name: version}}."""
        def process(rc, out, err):
            return [] if "None of the provided paths were usable" in out else out.splitlines()

        with self.runner('type galaxy_cmd dest', output_process=process, check_rc=False) as ctx:
            elems = ctx.run(type=_type, galaxy_cmd='list')

        elems_dict = {}
        current_path = None
        for line in elems:
            if line.startswith("#"):
                # Lines starting with '#' announce the search path for the
                # entries that follow.
                match = path_re.match(line)
                if not match:
                    continue
                if self.vars.dest is not None and match.group('path') != self.vars.dest:
                    current_path = None
                    continue
                current_path = match.group('path') if match else None
                elems_dict[current_path] = {}

            elif current_path is not None:
                match = elem_re.match(line)
                # When a specific name was requested, record only that element.
                if not match or (self.vars.name is not None and match.group('elem') != self.vars.name):
                    continue
                elems_dict[current_path][match.group('elem')] = match.group('version')
        return elems_dict

    def _list_collections(self):
        return self._list_element('collection', self._RE_LIST_PATH, self._RE_LIST_COLL)

    def _list_roles(self):
        return self._list_element('role', self._RE_LIST_PATH, self._RE_LIST_ROLE)

    def __run__(self):

        def process(rc, out, err):
            # Collect "X was installed successfully" lines into the
            # new_collections / new_roles result dicts.
            for line in out.splitlines():
                match = self._RE_INSTALL_OUTPUT.match(line)
                if not match:
                    continue
                if match.group("collection"):
                    self.vars.new_collections[match.group("collection")] = match.group("cversion")
                elif match.group("role"):
                    self.vars.new_roles[match.group("role")] = match.group("rversion")

        # --upgrade is only meaningful for collections with state=latest.
        upgrade = (self.vars.type == "collection" and self.vars.state == "latest")
        with self.runner("type galaxy_cmd upgrade force no_deps dest requirements_file name", output_process=process) as ctx:
            ctx.run(galaxy_cmd="install", upgrade=upgrade)
            if self.verbosity > 2:
                self.vars.set("run_info", ctx.run_info)


def main():
    AnsibleGalaxyInstall.execute()


if __name__ == '__main__':
    main()
+extends_documentation_fragment: + - community.general.attributes +requirements: + - Python package C(beautifulsoup4) +attributes: + check_mode: + support: full + diff_mode: + support: none +options: + balancer_url_suffix: + type: str + description: + - Suffix of the balancer pool URL required to access the balancer pool status page (for example V(balancer_vhost[:port]/balancer_url_suffix)). + default: /balancer-manager/ + balancer_vhost: + type: str + description: + - (IPv4|IPv6|FQDN):port of the Apache httpd 2.4 mod_proxy balancer pool. + required: true + member_host: + type: str + description: + - (IPv4|IPv6|FQDN) of the balancer member to get or to set attributes to. Port number is autodetected and should not + be specified here. + - If undefined, the M(community.general.apache2_mod_proxy) module returns a members list of dictionaries of all the + current balancer pool members' attributes. + state: + type: list + elements: str + choices: [present, absent, enabled, disabled, drained, hot_standby, ignore_errors] + description: + - Desired state of the member host. + - States can be simultaneously invoked by separating them with a comma (for example V(state=drained,ignore_errors)), + but it is recommended to specify them as a proper YAML list. + - States V(present) and V(absent) must be used without any other state. + tls: + description: + - Use https to access balancer management page. + type: bool + default: false + validate_certs: + description: + - Validate ssl/tls certificates. 
+ type: bool + default: true +""" + +EXAMPLES = r""" +- name: Get all current balancer pool members attributes + community.general.apache2_mod_proxy: + balancer_vhost: 10.0.0.2 + +- name: Get a specific member attributes + community.general.apache2_mod_proxy: + balancer_vhost: myws.mydomain.org + balancer_url_suffix: /lb/ + member_host: node1.myws.mydomain.org + +# Enable all balancer pool members: +- name: Get attributes + community.general.apache2_mod_proxy: + balancer_vhost: '{{ myloadbalancer_host }}' + register: result + +- name: Enable all balancer pool members + community.general.apache2_mod_proxy: + balancer_vhost: '{{ myloadbalancer_host }}' + member_host: '{{ item.host }}' + state: present + with_items: '{{ result.members }}' + +# Gracefully disable a member from a loadbalancer node: +- name: Step 1 + community.general.apache2_mod_proxy: + balancer_vhost: '{{ vhost_host }}' + member_host: '{{ member.host }}' + state: drained + delegate_to: myloadbalancernode + +- name: Step 2 + ansible.builtin.wait_for: + host: '{{ member.host }}' + port: '{{ member.port }}' + state: drained + delegate_to: myloadbalancernode + +- name: Step 3 + community.general.apache2_mod_proxy: + balancer_vhost: '{{ vhost_host }}' + member_host: '{{ member.host }}' + state: absent + delegate_to: myloadbalancernode +""" + +RETURN = r""" +member: + description: Specific balancer member information dictionary, returned when the module is invoked with O(member_host) parameter. 
+ type: dict + returned: success + sample: + { + "attributes": { + "Busy": "0", + "Elected": "42", + "Factor": "1", + "From": "136K", + "Load": "0", + "Route": null, + "RouteRedir": null, + "Set": "0", + "Status": "Init Ok ", + "To": " 47K", + "Worker URL": null + }, + "balancer_url": "http://10.10.0.2/balancer-manager/", + "host": "10.10.0.20", + "management_url": "http://10.10.0.2/lb/?b=mywsbalancer&w=http://10.10.0.20:8080/ws&nonce=8925436c-79c6-4841-8936-e7d13b79239b", + "path": "/ws", + "port": 8080, + "protocol": "http", + "status": { + "disabled": false, + "drained": false, + "hot_standby": false, + "ignore_errors": false + } + } +members: + description: List of member (defined above) dictionaries, returned when the module is invoked with no O(member_host) and + O(state) args. + returned: success + type: list + sample: + [ + { + "attributes": { + "Busy": "0", + "Elected": "42", + "Factor": "1", + "From": "136K", + "Load": "0", + "Route": null, + "RouteRedir": null, + "Set": "0", + "Status": "Init Ok ", + "To": " 47K", + "Worker URL": null + }, + "balancer_url": "http://10.10.0.2/balancer-manager/", + "host": "10.10.0.20", + "management_url": "http://10.10.0.2/lb/?b=mywsbalancer&w=http://10.10.0.20:8080/ws&nonce=8925436c-79c6-4841-8936-e7d13b79239b", + "path": "/ws", + "port": 8080, + "protocol": "http", + "status": { + "disabled": false, + "drained": false, + "hot_standby": false, + "ignore_errors": false + } + }, + { + "attributes": { + "Busy": "0", + "Elected": "42", + "Factor": "1", + "From": "136K", + "Load": "0", + "Route": null, + "RouteRedir": null, + "Set": "0", + "Status": "Init Ok ", + "To": " 47K", + "Worker URL": null + }, + "balancer_url": "http://10.10.0.2/balancer-manager/", + "host": "10.10.0.21", + "management_url": "http://10.10.0.2/lb/?b=mywsbalancer&w=http://10.10.0.21:8080/ws&nonce=8925436c-79c6-4841-8936-e7d13b79239b", + "path": "/ws", + "port": 8080, + "protocol": "http", + "status": { + "disabled": false, + "drained": false, + 
"hot_standby": false, + "ignore_errors": false + } + } + ] +""" + +import re + +from ansible_collections.community.general.plugins.module_utils import deps +from ansible_collections.community.general.plugins.module_utils.module_helper import ModuleHelper, ModuleHelperException + +from ansible.module_utils.common.text.converters import to_text +from ansible.module_utils.urls import fetch_url + +with deps.declare("beautifulsoup4"): + from bs4 import BeautifulSoup + +# balancer member attributes extraction regexp: +EXPRESSION = re.compile(to_text(r"(b=([\w\.\-]+)&w=(https?|ajp|wss?|ftp|[sf]cgi)://([\w\.\-]+):?(\d*)([/\w\.\-]*)&?[\w\-\=]*)")) +# Apache2 server version extraction regexp: +APACHE_VERSION_EXPRESSION = re.compile(to_text(r"SERVER VERSION: APACHE/([\d.]+)")) + + +def find_all(where, what): + return where.find_all(what) + + +def regexp_extraction(string, _regexp, groups=1): + """ Returns the capture group (default=1) specified in the regexp, applied to the string """ + regexp_search = _regexp.search(string) + if regexp_search: + if regexp_search.group(groups) != '': + return regexp_search.group(groups) + return None + + +class BalancerMember(object): + """ Apache 2.4 mod_proxy LB balancer member. + attributes: + read-only: + host -> member host (string), + management_url -> member management url (string), + protocol -> member protocol (string) + port -> member port (string), + path -> member location (string), + balancer_url -> url of this member's parent balancer (string), + attributes -> whole member attributes (dictionary) + module -> ansible module instance (AnsibleModule object). 
+ writable: + status -> status of the member (dictionary) + """ + + def __init__(self, management_url, balancer_url, module): + self.host = regexp_extraction(management_url, EXPRESSION, 4) + self.management_url = management_url + self.protocol = regexp_extraction(management_url, EXPRESSION, 3) + self.port = regexp_extraction(management_url, EXPRESSION, 5) + self.path = regexp_extraction(management_url, EXPRESSION, 6) + self.balancer_url = balancer_url + self.module = module + + def get_member_attributes(self): + """ Returns a dictionary of a balancer member's attributes.""" + + resp, info = fetch_url(self.module, self.management_url, headers={'Referer': self.management_url}) + + if info['status'] != 200: + raise ModuleHelperException(f"Could not get balancer_member_page, check for connectivity! {info}") + + try: + soup = BeautifulSoup(resp) + except TypeError as exc: + raise ModuleHelperException(f"Cannot parse balancer_member_page HTML! {exc}") from exc + + subsoup = find_all(find_all(soup, 'table')[1], 'tr') + keys = find_all(subsoup[0], 'th') + for valuesset in subsoup[1::1]: + if re.search(pattern=self.host, string=str(valuesset)): + values = find_all(valuesset, 'td') + return {keys[x].string: values[x].string for x in range(0, len(keys))} + + def get_member_status(self): + """ Returns a dictionary of a balancer member's status attributes.""" + status_mapping = {'disabled': 'Dis', + 'drained': 'Drn', + 'hot_standby': 'Stby', + 'ignore_errors': 'Ign'} + actual_status = self.attributes['Status'] + status = {mode: patt in actual_status for mode, patt in status_mapping.items()} + return status + + def set_member_status(self, values): + """ Sets a balancer member's status attributes amongst pre-mapped values.""" + values_mapping = {'disabled': '&w_status_D', + 'drained': '&w_status_N', + 'hot_standby': '&w_status_H', + 'ignore_errors': '&w_status_I'} + + request_body = regexp_extraction(self.management_url, EXPRESSION, 1) + values_url = "".join(f"{url_param}={1 if 
values[mode] else 0}" for mode, url_param in values_mapping.items()) + request_body = f"{request_body}{values_url}" + + response, info = fetch_url(self.module, self.management_url, data=request_body, headers={'Referer': self.management_url}) + if info['status'] != 200: + raise ModuleHelperException(f"Could not set the member status! {self.host} {info['status']}") + + attributes = property(get_member_attributes) + status = property(get_member_status, set_member_status) + + def as_dict(self): + return { + "host": self.host, + "status": self.status, + "protocol": self.protocol, + "port": self.port, + "path": self.path, + "attributes": self.attributes, + "management_url": self.management_url, + "balancer_url": self.balancer_url + } + + +class Balancer(object): + """ Apache httpd 2.4 mod_proxy balancer object""" + + def __init__(self, module, host, suffix, tls=False): + proto = "https" if tls else "http" + self.base_url = f'{proto}://{host}' + self.url = f'{proto}://{host}{suffix}' + self.module = module + self.page = self.fetch_balancer_page() + + def fetch_balancer_page(self): + """ Returns the balancer management html page as a string for later parsing.""" + resp, info = fetch_url(self.module, self.url) + if info['status'] != 200: + raise ModuleHelperException(f"Could not get balancer page! 
HTTP status response: {info['status']}") + + content = to_text(resp.read()) + apache_version = regexp_extraction(content.upper(), APACHE_VERSION_EXPRESSION, 1) + if not apache_version: + raise ModuleHelperException("Could not get the Apache server version from the balancer-manager") + + if not re.search(pattern=r"2\.4\.[\d]*", string=apache_version): + raise ModuleHelperException(f"This module only acts on an Apache2 2.4+ instance, current Apache2 version: {apache_version}") + return content + + def get_balancer_members(self): + """ Returns members of the balancer as a generator object for later iteration.""" + try: + soup = BeautifulSoup(self.page) + except TypeError as e: + raise ModuleHelperException(f"Cannot parse balancer page HTML! {self.page}") from e + + elements = find_all(soup, 'a') + for element in elements[1::1]: + balancer_member_suffix = element.get('href') + if not balancer_member_suffix: + raise ModuleHelperException("Argument 'balancer_member_suffix' is empty!") + + yield BalancerMember(self.base_url + balancer_member_suffix, self.url, self.module) + + members = property(get_balancer_members) + + +class ApacheModProxy(ModuleHelper): + """ Initiates module.""" + module = dict( + argument_spec=dict( + balancer_vhost=dict(required=True, type='str'), + balancer_url_suffix=dict(default="/balancer-manager/", type='str'), + member_host=dict(type='str'), + state=dict(type='list', elements='str', choices=['present', 'absent', 'enabled', 'disabled', 'drained', 'hot_standby', 'ignore_errors']), + tls=dict(default=False, type='bool'), + validate_certs=dict(default=True, type='bool') + ), + supports_check_mode=True + ) + + def __init_module__(self): + deps.validate(self.module) + + if len(self.vars.state or []) > 1 and ("present" in self.vars.state or "enabled" in self.vars.state): + self.do_raise(msg="states present/enabled are mutually exclusive with other states!") + + self.mybalancer = Balancer(self.module, self.vars.balancer_vhost, 
self.vars.balancer_url_suffix, tls=self.vars.tls) + + def __run__(self): + if self.vars.member_host is None: + self.vars.members = [member.as_dict() for member in self.mybalancer.members] + else: + member_exists = False + member_status = {'disabled': False, 'drained': False, 'hot_standby': False, 'ignore_errors': False} + for mode in member_status: + for state in self.vars.state or []: + if mode == state: + member_status[mode] = True + elif mode == 'disabled' and state == 'absent': + member_status[mode] = True + + for member in self.mybalancer.members: + if str(member.host) == self.vars.member_host: + member_exists = True + if self.vars.state is not None: + member_status_before = member.status + if not self.check_mode: + member_status_after = member.status = member_status + else: + member_status_after = member_status + self.changed |= (member_status_before != member_status_after) + self.vars.member = member.as_dict() + + if not member_exists: + self.do_raise(msg=f'{self.vars.member_host} is not a member of the balancer {self.vars.balancer_vhost}!') + + +def main(): + ApacheModProxy.execute() + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/web_infrastructure/apache2_module.py b/plugins/modules/apache2_module.py similarity index 54% rename from plugins/modules/web_infrastructure/apache2_module.py rename to plugins/modules/apache2_module.py index 44327fe13c..6e3f10203c 100644 --- a/plugins/modules/web_infrastructure/apache2_module.py +++ b/plugins/modules/apache2_module.py @@ -1,60 +1,70 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- -# (c) 2013-2014, Christian Berendt -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# Copyright (c) 2013-2014, Christian Berendt +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import absolute_import, division, print_function -__metaclass__ = 
type +from __future__ import annotations -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: apache2_module author: - - Christian Berendt (@berendt) - - Ralf Hertel (@n0trax) - - Robin Roth (@robinro) -short_description: Enables/disables a module of the Apache2 webserver. + - Christian Berendt (@berendt) + - Ralf Hertel (@n0trax) + - Robin Roth (@robinro) +short_description: Enables/disables a module of the Apache2 webserver description: - - Enables or disables a specified module of the Apache2 webserver. + - Enables or disables a specified module of the Apache2 webserver. +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: none options: - name: - type: str - description: - - Name of the module to enable/disable as given to C(a2enmod/a2dismod). - required: true - identifier: - type: str - description: - - Identifier of the module as listed by C(apache2ctl -M). - This is optional and usually determined automatically by the common convention of - appending C(_module) to I(name) as well as custom exception for popular modules. - required: False - force: - description: - - Force disabling of default modules and override Debian warnings. - required: false - type: bool - default: False - state: - type: str - description: - - Desired state of the module. - choices: ['present', 'absent'] - default: present - ignore_configcheck: - description: - - Ignore configuration checks about inconsistent module configuration. Especially for mpm_* modules. - type: bool - default: False -requirements: ["a2enmod","a2dismod"] + name: + type: str + description: + - Name of the module to enable/disable as given to C(a2enmod)/C(a2dismod). + required: true + identifier: + type: str + description: + - Identifier of the module as listed by C(apache2ctl -M). 
This is optional and usually determined automatically by the + common convention of appending V(_module) to O(name) as well as custom exception for popular modules. + required: false + force: + description: + - Force disabling of default modules and override Debian warnings. + required: false + type: bool + default: false + state: + type: str + description: + - Desired state of the module. + choices: ['present', 'absent'] + default: present + ignore_configcheck: + description: + - Ignore configuration checks about inconsistent module configuration. Especially for mpm_* modules. + type: bool + default: false + warn_mpm_absent: + description: + - Control the behavior of the warning process for MPM modules. + type: bool + default: true + version_added: 6.3.0 +requirements: ["a2enmod", "a2dismod"] notes: - - This does not work on RedHat-based distributions. It does work on Debian- and SuSE-based distributions. - Whether it works on others depend on whether the C(a2enmod) and C(a2dismod) tools are available or not. -''' + - This does not work on RedHat-based distributions. It does work on Debian- and SuSE-based distributions. Whether it works + on others depend on whether the C(a2enmod) and C(a2dismod) tools are available or not. 
+""" -EXAMPLES = ''' +EXAMPLES = r""" - name: Enable the Apache2 module wsgi community.general.apache2_module: state: present @@ -69,43 +79,39 @@ EXAMPLES = ''' community.general.apache2_module: state: absent name: autoindex - force: True + force: true - name: Disable mpm_worker and ignore warnings about missing mpm module community.general.apache2_module: state: absent name: mpm_worker - ignore_configcheck: True + ignore_configcheck: true + +- name: Disable mpm_event, enable mpm_prefork and ignore warnings about missing mpm module + community.general.apache2_module: + name: "{{ item.module }}" + state: "{{ item.state }}" + warn_mpm_absent: false + ignore_configcheck: true + loop: + - module: mpm_event + state: absent + - module: mpm_prefork + state: present - name: Enable dump_io module, which is identified as dumpio_module inside apache2 community.general.apache2_module: state: present name: dump_io identifier: dumpio_module -''' +""" -RETURN = ''' +RETURN = r""" result: - description: message about action taken - returned: always - type: str -warnings: - description: list of warning messages - returned: when needed - type: list -rc: - description: return code of underlying command - returned: failed - type: int -stdout: - description: stdout of underlying command - returned: failed - type: str -stderr: - description: stderr of underlying command - returned: failed - type: str -''' + description: Message about action taken. + returned: always + type: str +""" import re @@ -128,7 +134,7 @@ def _get_ctl_binary(module): if ctl_binary is not None: return ctl_binary - module.fail_json(msg="Neither of apache2ctl nor apachctl found. At least one apache control binary is necessary.") + module.fail_json(msg="Neither of apache2ctl nor apachectl found. 
At least one apache control binary is necessary.") def _module_is_enabled(module): @@ -136,20 +142,21 @@ def _module_is_enabled(module): result, stdout, stderr = module.run_command([control_binary, "-M"]) if result != 0: - error_msg = "Error executing %s: %s" % (control_binary, stderr) + error_msg = f"Error executing {control_binary}: {stderr}" if module.params['ignore_configcheck']: if 'AH00534' in stderr and 'mpm_' in module.params['name']: - module.warnings.append( - "No MPM module loaded! apache2 reload AND other module actions" - " will fail if no MPM module is loaded immediately." - ) + if module.params['warn_mpm_absent']: + module.warn( + "No MPM module loaded! apache2 reload AND other module actions" + " will fail if no MPM module is loaded immediately." + ) else: - module.warnings.append(error_msg) + module.warn(error_msg) return False else: module.fail_json(msg=error_msg) - searchstring = ' ' + module.params['identifier'] + searchstring = f" {module.params['identifier']}" return searchstring in stdout @@ -169,6 +176,7 @@ def create_apache_identifier(name): # re expressions to extract subparts of names re_workarounds = [ + ('php8', re.compile(r'^(php)[\d\.]+')), ('php', re.compile(r'^(php\d)\.')), ] @@ -180,11 +188,11 @@ def create_apache_identifier(name): if search in name: try: rematch = reexpr.search(name) - return rematch.group(1) + '_module' + return f"{rematch.group(1)}_module" except AttributeError: pass - return name + '_module' + return f"{name}_module" def _set_state(module, state): @@ -194,17 +202,15 @@ def _set_state(module, state): want_enabled = state == 'present' state_string = {'present': 'enabled', 'absent': 'disabled'}[state] a2mod_binary = {'present': 'a2enmod', 'absent': 'a2dismod'}[state] - success_msg = "Module %s %s" % (name, state_string) + success_msg = f"Module {name} {state_string}" if _module_is_enabled(module) != want_enabled: if module.check_mode: - module.exit_json(changed=True, - result=success_msg, - 
warnings=module.warnings) + module.exit_json(changed=True, result=success_msg) a2mod_binary_path = module.get_bin_path(a2mod_binary) if a2mod_binary_path is None: - module.fail_json(msg="%s not found. Perhaps this system does not use %s to manage apache" % (a2mod_binary, a2mod_binary)) + module.fail_json(msg=f"{a2mod_binary} not found. Perhaps this system does not use {a2mod_binary} to manage apache") a2mod_binary_cmd = [a2mod_binary_path] @@ -215,29 +221,20 @@ def _set_state(module, state): result, stdout, stderr = module.run_command(a2mod_binary_cmd + [name]) if _module_is_enabled(module) == want_enabled: - module.exit_json(changed=True, - result=success_msg, - warnings=module.warnings) + module.exit_json(changed=True, result=success_msg) else: msg = ( - 'Failed to set module {name} to {state}:\n' - '{stdout}\n' - 'Maybe the module identifier ({identifier}) was guessed incorrectly.' + f'Failed to set module {name} to {state_string}:\n' + f'{stdout}\n' + f'Maybe the module identifier ({module.params["identifier"]}) was guessed incorrectly.' 'Consider setting the "identifier" option.' - ).format( - name=name, - state=state_string, - stdout=stdout, - identifier=module.params['identifier'] ) module.fail_json(msg=msg, rc=result, stdout=stdout, stderr=stderr) else: - module.exit_json(changed=False, - result=success_msg, - warnings=module.warnings) + module.exit_json(changed=False, result=success_msg) def main(): @@ -248,15 +245,14 @@ def main(): force=dict(type='bool', default=False), state=dict(default='present', choices=['absent', 'present']), ignore_configcheck=dict(type='bool', default=False), + warn_mpm_absent=dict(type='bool', default=True), ), supports_check_mode=True, ) - module.warnings = [] - name = module.params['name'] - if name == 'cgi' and _run_threaded(module): - module.fail_json(msg="Your MPM seems to be threaded. 
No automatic actions on module cgi possible.") + if name == 'cgi' and module.params['state'] == 'present' and _run_threaded(module): + module.fail_json(msg="Your MPM seems to be threaded, therefore enabling cgi module is not allowed.") if not module.params['identifier']: module.params['identifier'] = create_apache_identifier(module.params['name']) diff --git a/plugins/modules/packaging/os/apk.py b/plugins/modules/apk.py similarity index 63% rename from plugins/modules/packaging/os/apk.py rename to plugins/modules/apk.py index 74b738de27..b65326094b 100644 --- a/plugins/modules/packaging/os/apk.py +++ b/plugins/modules/apk.py @@ -1,81 +1,96 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- -# (c) 2015, Kevin Brebanov +# Copyright (c) 2015, Kevin Brebanov # Based on pacman (Afterburn , Aaron Bull Schaefer ) # and apt (Matthew Williams ) modules. # -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: apk short_description: Manages apk packages description: - - Manages I(apk) packages for Alpine Linux. + - Manages C(apk) packages for Alpine Linux. author: "Kevin Brebanov (@kbrebanov)" +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: none options: available: description: - - During upgrade, reset versioned world dependencies and change logic to prefer replacing or downgrading packages (instead of holding them) - if the currently installed package is no longer available from any repository. 
+ - During upgrade, reset versioned world dependencies and change logic to prefer replacing or downgrading packages (instead + of holding them) if the currently installed package is no longer available from any repository. type: bool - default: no + default: false name: description: - - A package name, like C(foo), or multiple packages, like C(foo, bar). + - A package name, like V(foo), or multiple packages, like V(foo,bar). + - Do not include additional whitespace when specifying multiple packages as a string. Prefer YAML lists over comma-separating + multiple package names. type: list elements: str no_cache: description: - Do not use any local cache path. type: bool - default: no + default: false version_added: 1.0.0 repository: description: - - A package repository or multiple repositories. - Unlike with the underlying apk command, this list will override the system repositories rather than supplement them. + - A package repository or multiple repositories. Unlike with the underlying apk command, this list overrides the system + repositories rather than supplement them. type: list elements: str state: description: - Indicates the desired package(s) state. - - C(present) ensures the package(s) is/are present. C(installed) can be used as an alias. - - C(absent) ensures the package(s) is/are absent. C(removed) can be used as an alias. - - C(latest) ensures the package(s) is/are present and the latest version(s). + - V(present) ensures the package(s) is/are present. V(installed) can be used as an alias. + - V(absent) ensures the package(s) is/are absent. V(removed) can be used as an alias. + - V(latest) ensures the package(s) is/are present and the latest version(s). default: present - choices: [ "present", "absent", "latest", "installed", "removed" ] + choices: ["present", "absent", "latest", "installed", "removed"] type: str update_cache: description: - - Update repository indexes. Can be run with other steps or on it's own. + - Update repository indexes. 
Can be run with other steps or on its own. type: bool - default: no + default: false upgrade: description: - Upgrade all installed packages to their latest version. type: bool - default: no + default: false + world: + description: + - Use a custom world file when checking for explicitly installed packages. The file is used only when a value is provided + for O(name), and O(state) is set to V(present) or V(latest). + type: str + default: /etc/apk/world + version_added: 5.4.0 notes: - - '"name" and "upgrade" are mutually exclusive.' - - When used with a `loop:` each package will be processed individually, it is much more efficient to pass the list directly to the `name` option. -''' + - O(name) and O(upgrade) are mutually exclusive. + - When used with a C(loop:) each package is processed individually, it is much more efficient to pass the list directly + to the O(name) option. +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: Update repositories and install foo package community.general.apk: name: foo - update_cache: yes + update_cache: true - name: Update repositories and install foo and bar packages community.general.apk: name: foo,bar - update_cache: yes + update_cache: true - name: Remove foo package community.general.apk: @@ -101,48 +116,54 @@ EXAMPLES = ''' community.general.apk: name: foo state: latest - update_cache: yes + update_cache: true - name: Update repositories and update packages foo and bar to latest versions community.general.apk: name: foo,bar state: latest - update_cache: yes + update_cache: true - name: Update all installed packages to the latest versions community.general.apk: - upgrade: yes + upgrade: true - name: Upgrade / replace / downgrade / uninstall all installed packages to the latest versions available community.general.apk: - available: yes - upgrade: yes + available: true + upgrade: true - name: Update repositories as a separate step community.general.apk: - update_cache: yes + update_cache: true - name: Install package from a specific 
repository community.general.apk: name: foo state: latest - update_cache: yes + update_cache: true repository: http://dl-3.alpinelinux.org/alpine/edge/main - name: Install package without using cache community.general.apk: name: foo state: latest - no_cache: yes -''' + no_cache: true -RETURN = ''' +- name: Install package checking a custom world + community.general.apk: + name: foo + state: latest + world: /etc/apk/world.custom +""" + +RETURN = r""" packages: - description: a list of packages that have been changed - returned: when packages have changed - type: list - sample: ['package', 'other-package'] -''' + description: A list of packages that have been changed. + returned: when packages have changed + type: list + sample: ["package", "other-package"] +""" import re # Import module snippets. @@ -161,7 +182,7 @@ def parse_for_packages(stdout): def update_package_db(module, exit): - cmd = "%s update" % (APK_PATH) + cmd = APK_PATH + ["update"] rc, stdout, stderr = module.run_command(cmd, check_rc=False) if rc != 0: module.fail_json(msg="could not update package db", stdout=stdout, stderr=stderr) @@ -171,11 +192,11 @@ def update_package_db(module, exit): return True -def query_toplevel(module, name): - # /etc/apk/world contains a list of top-level packages separated by ' ' or \n +def query_toplevel(module, name, world): + # world contains a list of top-level packages separated by ' ' or \n # packages may contain repository (@) or version (=<>~) separator characters or start with negation ! 
- regex = re.compile(r'^' + re.escape(name) + r'([@=<>~].+)?$') - with open('/etc/apk/world') as f: + regex = re.compile(rf"^{re.escape(name)}([@=<>~].+)?$") + with open(world) as f: content = f.read().split() for p in content: if regex.search(p): @@ -184,7 +205,7 @@ def query_toplevel(module, name): def query_package(module, name): - cmd = "%s -v info --installed %s" % (APK_PATH, name) + cmd = APK_PATH + ["-v", "info", "--installed", name] rc, stdout, stderr = module.run_command(cmd, check_rc=False) if rc == 0: return True @@ -193,9 +214,9 @@ def query_package(module, name): def query_latest(module, name): - cmd = "%s version %s" % (APK_PATH, name) + cmd = APK_PATH + ["version", name] rc, stdout, stderr = module.run_command(cmd, check_rc=False) - search_pattern = r"(%s)-[\d\.\w]+-[\d\w]+\s+(.)\s+[\d\.\w]+-[\d\w]+\s+" % (re.escape(name)) + search_pattern = rf"({re.escape(name)})-[\d\.\w]+-[\d\w]+\s+(.)\s+[\d\.\w]+-[\d\w]+\s+" match = re.search(search_pattern, stdout) if match and match.group(2) == "<": return False @@ -203,16 +224,16 @@ def query_latest(module, name): def query_virtual(module, name): - cmd = "%s -v info --description %s" % (APK_PATH, name) + cmd = APK_PATH + ["-v", "info", "--description", name] rc, stdout, stderr = module.run_command(cmd, check_rc=False) - search_pattern = r"^%s: virtual meta package" % (re.escape(name)) + search_pattern = rf"^{re.escape(name)}: virtual meta package" if re.search(search_pattern, stdout): return True return False def get_dependencies(module, name): - cmd = "%s -v info --depends %s" % (APK_PATH, name) + cmd = APK_PATH + ["-v", "info", "--depends", name] rc, stdout, stderr = module.run_command(cmd, check_rc=False) dependencies = stdout.split() if len(dependencies) > 1: @@ -223,11 +244,11 @@ def get_dependencies(module, name): def upgrade_packages(module, available): if module.check_mode: - cmd = "%s upgrade --simulate" % (APK_PATH) + cmd = APK_PATH + ["upgrade", "--simulate"] else: - cmd = "%s upgrade" % (APK_PATH) + 
cmd = APK_PATH + ["upgrade"] if available: - cmd = "%s --available" % cmd + cmd.append("--available") rc, stdout, stderr = module.run_command(cmd, check_rc=False) packagelist = parse_for_packages(stdout) if rc != 0: @@ -237,7 +258,7 @@ def upgrade_packages(module, available): module.exit_json(changed=True, msg="upgraded packages", stdout=stdout, stderr=stderr, packages=packagelist) -def install_packages(module, names, state): +def install_packages(module, names, state, world): upgrade = False to_install = [] to_upgrade = [] @@ -250,7 +271,7 @@ def install_packages(module, names, state): if state == 'latest' and not query_latest(module, dependency): to_upgrade.append(dependency) else: - if not query_toplevel(module, name): + if not query_toplevel(module, name, world): to_install.append(name) elif state == 'latest' and not query_latest(module, name): to_upgrade.append(name) @@ -258,22 +279,22 @@ def install_packages(module, names, state): upgrade = True if not to_install and not upgrade: module.exit_json(changed=False, msg="package(s) already installed") - packages = " ".join(to_install + to_upgrade) + packages = to_install + to_upgrade if upgrade: if module.check_mode: - cmd = "%s add --upgrade --simulate %s" % (APK_PATH, packages) + cmd = APK_PATH + ["add", "--upgrade", "--simulate"] + packages else: - cmd = "%s add --upgrade %s" % (APK_PATH, packages) + cmd = APK_PATH + ["add", "--upgrade"] + packages else: if module.check_mode: - cmd = "%s add --simulate %s" % (APK_PATH, packages) + cmd = APK_PATH + ["add", "--simulate"] + packages else: - cmd = "%s add %s" % (APK_PATH, packages) + cmd = APK_PATH + ["add"] + packages rc, stdout, stderr = module.run_command(cmd, check_rc=False) packagelist = parse_for_packages(stdout) if rc != 0: - module.fail_json(msg="failed to install %s" % (packages), stdout=stdout, stderr=stderr, packages=packagelist) - module.exit_json(changed=True, msg="installed %s package(s)" % (packages), stdout=stdout, stderr=stderr, 
packages=packagelist) + module.fail_json(msg=f"failed to install {packages}", stdout=stdout, stderr=stderr, packages=packagelist) + module.exit_json(changed=True, msg=f"installed {packages} package(s)", stdout=stdout, stderr=stderr, packages=packagelist) def remove_packages(module, names): @@ -283,11 +304,11 @@ def remove_packages(module, names): installed.append(name) if not installed: module.exit_json(changed=False, msg="package(s) already removed") - names = " ".join(installed) + names = installed if module.check_mode: - cmd = "%s del --purge --simulate %s" % (APK_PATH, names) + cmd = APK_PATH + ["del", "--purge", "--simulate"] + names else: - cmd = "%s del --purge %s" % (APK_PATH, names) + cmd = APK_PATH + ["del", "--purge"] + names rc, stdout, stderr = module.run_command(cmd, check_rc=False) packagelist = parse_for_packages(stdout) # Check to see if packages are still present because of dependencies @@ -296,8 +317,8 @@ def remove_packages(module, names): rc = 1 break if rc != 0: - module.fail_json(msg="failed to remove %s package(s)" % (names), stdout=stdout, stderr=stderr, packages=packagelist) - module.exit_json(changed=True, msg="removed %s package(s)" % (names), stdout=stdout, stderr=stderr, packages=packagelist) + module.fail_json(msg=f"failed to remove {names} package(s)", stdout=stdout, stderr=stderr, packages=packagelist) + module.exit_json(changed=True, msg=f"removed {names} package(s)", stdout=stdout, stderr=stderr, packages=packagelist) # ========================================== # Main control flow. 
@@ -313,6 +334,7 @@ def main(): update_cache=dict(default=False, type='bool'), upgrade=dict(default=False, type='bool'), available=dict(default=False, type='bool'), + world=dict(default='/etc/apk/world', type='str'), ), required_one_of=[['name', 'update_cache', 'upgrade']], mutually_exclusive=[['name', 'upgrade']], @@ -323,17 +345,20 @@ def main(): module.run_command_environ_update = dict(LANG='C', LC_ALL='C', LC_MESSAGES='C', LC_CTYPE='C') global APK_PATH - APK_PATH = module.get_bin_path('apk', required=True) + APK_PATH = [module.get_bin_path('apk', required=True)] p = module.params + if p['name'] and any(not name.strip() for name in p['name']): + module.fail_json(msg="Package name(s) cannot be empty or whitespace-only") + if p['no_cache']: - APK_PATH = "%s --no-cache" % (APK_PATH, ) + APK_PATH.append("--no-cache") # add repositories to the APK_PATH if p['repository']: for r in p['repository']: - APK_PATH = "%s --repository %s --repositories-file /dev/null" % (APK_PATH, r) + APK_PATH.extend(["--repository", r, "--repositories-file", "/dev/null"]) # normalize the state parameter if p['state'] in ['present', 'installed']: @@ -348,7 +373,7 @@ def main(): upgrade_packages(module, p['available']) if p['state'] in ['present', 'latest']: - install_packages(module, p['name'], p['state']) + install_packages(module, p['name'], p['state'], p['world']) elif p['state'] == 'absent': remove_packages(module, p['name']) diff --git a/plugins/modules/packaging/os/apt_repo.py b/plugins/modules/apt_repo.py similarity index 72% rename from plugins/modules/packaging/os/apt_repo.py rename to plugins/modules/apt_repo.py index d196e03be1..f90631195d 100644 --- a/plugins/modules/packaging/os/apt_repo.py +++ b/plugins/modules/apt_repo.py @@ -1,23 +1,28 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- -# Copyright: (c) 2018, Mikhail Gordeev +# Copyright (c) 2018, Mikhail Gordeev -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# GNU General Public 
License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: apt_repo -short_description: Manage APT repositories via apt-repo +short_description: Manage APT repositories using C(apt-repo) description: - - Manages APT repositories using apt-repo tool. - - See U(https://www.altlinux.org/Apt-repo) for details about apt-repo + - Manages APT repositories using C(apt-repo) tool. + - See U(https://www.altlinux.org/Apt-repo) for details about C(apt-repo). notes: - - This module works on ALT based distros. - - Does NOT support checkmode, due to a limitation in apt-repo tool. + - This module works on ALT based distros. + - Does NOT support checkmode, due to a limitation in C(apt-repo) tool. +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: none + diff_mode: + support: none options: repo: description: @@ -27,25 +32,25 @@ options: state: description: - Indicates the desired repository state. - choices: [ absent, present ] + choices: [absent, present] default: present type: str remove_others: description: - - Remove other then added repositories - - Used if I(state=present) + - Remove other then added repositories. + - Used if O(state=present). type: bool - default: no + default: false update: description: - Update the package database after changing repositories. 
type: bool - default: no + default: false author: -- Mikhail Gordeev (@obirvalger) -''' + - Mikhail Gordeev (@obirvalger) +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: Remove all repositories community.general.apt_repo: repo: all @@ -55,16 +60,16 @@ EXAMPLES = ''' community.general.apt_repo: repo: Sisysphus state: present - remove_others: yes + remove_others: true - name: Add local repository `/space/ALT/Sisyphus` and update package cache community.general.apt_repo: repo: copy:///space/ALT/Sisyphus state: present - update: yes -''' + update: true +""" -RETURN = ''' # ''' +RETURN = """ # """ import os @@ -80,7 +85,7 @@ def apt_repo(module, *args): rc, out, err = module.run_command([APT_REPO_PATH] + args) if rc != 0: - module.fail_json(msg="'%s' failed: %s" % (' '.join(['apt-repo'] + args), err)) + module.fail_json(msg=f"'{' '.join(['apt-repo'] + args)}' failed: {err}") return out diff --git a/plugins/modules/apt_rpm.py b/plugins/modules/apt_rpm.py new file mode 100644 index 0000000000..4786ee6e84 --- /dev/null +++ b/plugins/modules/apt_rpm.py @@ -0,0 +1,345 @@ +#!/usr/bin/python + +# Copyright (c) 2013, Evgenii Terechkov +# Written by Evgenii Terechkov +# Based on urpmi module written by Philippe Makowski + +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + +DOCUMENTATION = r""" +module: apt_rpm +short_description: APT-RPM package manager +description: + - Manages packages with C(apt-rpm). Both low-level (C(rpm)) and high-level (C(apt-get)) package manager binaries required. +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: none + diff_mode: + support: none +options: + package: + description: + - List of packages to install, upgrade, or remove. 
+ - Since community.general 8.0.0, may include paths to local C(.rpm) files if O(state=installed) or O(state=present), + requires C(rpm) Python module. + aliases: [name, pkg] + type: list + elements: str + state: + description: + - Indicates the desired package state. + - The states V(latest) and V(present_not_latest) have been added in community.general 8.6.0. + - Please note before community.general 11.0.0, V(present) and V(installed) were equivalent to V(latest). This changed + in community.general 11.0.0. Now they are equivalent to V(present_not_latest). + choices: + - absent + - present + - present_not_latest + - installed + - removed + - latest + default: present + type: str + update_cache: + description: + - Run the equivalent of C(apt-get update) before the operation. Can be run as part of the package installation or as + a separate step. + - Default is not to update the cache. + type: bool + default: false + clean: + description: + - Run the equivalent of C(apt-get clean) to clear out the local repository of retrieved package files. It removes everything + but the lock file from C(/var/cache/apt/archives/) and C(/var/cache/apt/archives/partial/). + - Can be run as part of the package installation (clean runs before install) or as a separate step. + type: bool + default: false + version_added: 6.5.0 + dist_upgrade: + description: + - If true performs an C(apt-get dist-upgrade) to upgrade system. + type: bool + default: false + version_added: 6.5.0 + update_kernel: + description: + - If true performs an C(update-kernel) to upgrade kernel packages. + type: bool + default: false + version_added: 6.5.0 +requirements: + - C(rpm) Python package (rpm bindings), optional. Required if O(package) option includes local files. 
+author: + - Evgenii Terechkov (@evgkrsk) +""" + +EXAMPLES = r""" +- name: Install package foo + community.general.apt_rpm: + pkg: foo + state: present + +- name: Install packages foo and bar + community.general.apt_rpm: + pkg: + - foo + - bar + state: present + +- name: Remove package foo + community.general.apt_rpm: + pkg: foo + state: absent + +- name: Remove packages foo and bar + community.general.apt_rpm: + pkg: foo,bar + state: absent + +# bar will be the updated if a newer version exists +- name: Update the package database and install bar + community.general.apt_rpm: + name: bar + state: present + update_cache: true + +- name: Run the equivalent of "apt-get clean" as a separate step + community.general.apt_rpm: + clean: true + +- name: Perform cache update and complete system upgrade (includes kernel) + community.general.apt_rpm: + update_cache: true + dist_upgrade: true + update_kernel: true +""" + +import os +import re +import traceback + +from ansible.module_utils.basic import ( + AnsibleModule, + missing_required_lib, +) +from ansible.module_utils.common.text.converters import to_native + +try: + import rpm +except ImportError: + HAS_RPM_PYTHON = False + RPM_PYTHON_IMPORT_ERROR = traceback.format_exc() +else: + HAS_RPM_PYTHON = True + RPM_PYTHON_IMPORT_ERROR = None + +APT_CACHE = "/usr/bin/apt-cache" +APT_PATH = "/usr/bin/apt-get" +RPM_PATH = "/usr/bin/rpm" +APT_GET_ZERO = "\n0 upgraded, 0 newly installed" +UPDATE_KERNEL_ZERO = "\nTry to install new kernel " + + +def local_rpm_package_name(path): + """return package name of a local rpm passed in. 
+ Inspired by ansible.builtin.yum""" + + ts = rpm.TransactionSet() + ts.setVSFlags(rpm._RPMVSF_NOSIGNATURES) + fd = os.open(path, os.O_RDONLY) + try: + header = ts.hdrFromFdno(fd) + except rpm.error as e: + return None + finally: + os.close(fd) + + return to_native(header[rpm.RPMTAG_NAME]) + + +def query_package(module, name): + # rpm -q returns 0 if the package is installed, + # 1 if it is not installed + rc, out, err = module.run_command([RPM_PATH, "-q", name]) + if rc == 0: + return True + else: + return False + + +def check_package_version(module, name): + # compare installed and candidate version + # if newest version already installed return True + # otherwise return False + + rc, out, err = module.run_command([APT_CACHE, "policy", name], environ_update={"LANG": "C"}) + installed = re.split("\n |: ", out)[2] + candidate = re.split("\n |: ", out)[4] + if installed >= candidate: + return True + return False + + +def query_package_provides(module, name, allow_upgrade=False): + # rpm -q returns 0 if the package is installed, + # 1 if it is not installed + if name.endswith('.rpm'): + # Likely a local RPM file + if not HAS_RPM_PYTHON: + module.fail_json( + msg=missing_required_lib('rpm'), + exception=RPM_PYTHON_IMPORT_ERROR, + ) + + name = local_rpm_package_name(name) + + rc, out, err = module.run_command([RPM_PATH, "-q", "--provides", name]) + if rc == 0: + if not allow_upgrade: + return True + if check_package_version(module, name): + return True + return False + + +def update_package_db(module): + rc, update_out, err = module.run_command([APT_PATH, "update"], check_rc=True, environ_update={"LANG": "C"}) + return (False, update_out) + + +def dir_size(module, path): + total_size = 0 + for path, dirs, files in os.walk(path): + for f in files: + total_size += os.path.getsize(os.path.join(path, f)) + return total_size + + +def clean(module): + t = dir_size(module, "/var/cache/apt/archives") + rc, out, err = module.run_command([APT_PATH, "clean"], check_rc=True) + 
return (t != dir_size(module, "/var/cache/apt/archives"), out) + + +def dist_upgrade(module): + rc, out, err = module.run_command([APT_PATH, "-y", "dist-upgrade"], check_rc=True, environ_update={"LANG": "C"}) + return (APT_GET_ZERO not in out, out) + + +def update_kernel(module): + rc, out, err = module.run_command(["/usr/sbin/update-kernel", "-y"], check_rc=True, environ_update={"LANG": "C"}) + return (UPDATE_KERNEL_ZERO not in out, out) + + +def remove_packages(module, packages): + + if packages is None: + return (False, "Empty package list") + + remove_c = 0 + # Using a for loop in case of error, we can report the package that failed + for package in packages: + # Query the package first, to see if we even need to remove + if not query_package(module, package): + continue + + rc, out, err = module.run_command([APT_PATH, "-y", "remove", package], environ_update={"LANG": "C"}) + + if rc != 0: + module.fail_json(msg=f"failed to remove {package}: {err}") + + remove_c += 1 + + if remove_c > 0: + return (True, f"removed {remove_c} package(s)") + + return (False, "package(s) already absent") + + +def install_packages(module, pkgspec, allow_upgrade=False): + + if pkgspec is None: + return (False, "Empty package list") + + packages = [] + for package in pkgspec: + if not query_package_provides(module, package, allow_upgrade=allow_upgrade): + packages.append(package) + + if packages: + command = [APT_PATH, "-y", "install"] + packages + rc, out, err = module.run_command(command, environ_update={"LANG": "C"}) + + installed = True + for package in pkgspec: + if not query_package_provides(module, package, allow_upgrade=False): + installed = False + + # apt-rpm always have 0 for exit code if --force is used + if rc or not installed: + module.fail_json(msg=f"'{' '.join(command)}' failed: {err}") + else: + return (True, f"{packages} present(s)") + else: + return (False, "Nothing to install") + + +def main(): + module = AnsibleModule( + argument_spec=dict( + 
state=dict(type='str', default='present', choices=['absent', 'installed', 'present', 'removed', 'present_not_latest', 'latest']), + update_cache=dict(type='bool', default=False), + clean=dict(type='bool', default=False), + dist_upgrade=dict(type='bool', default=False), + update_kernel=dict(type='bool', default=False), + package=dict(type='list', elements='str', aliases=['name', 'pkg']), + ), + ) + + if not os.path.exists(APT_PATH) or not os.path.exists(RPM_PATH): + module.fail_json(msg="cannot find /usr/bin/apt-get and/or /usr/bin/rpm") + + p = module.params + + modified = False + output = "" + + if p['update_cache']: + update_package_db(module) + + if p['clean']: + (m, out) = clean(module) + modified = modified or m + + if p['dist_upgrade']: + (m, out) = dist_upgrade(module) + modified = modified or m + output += out + + if p['update_kernel']: + (m, out) = update_kernel(module) + modified = modified or m + output += out + + packages = p['package'] + if p['state'] in ['installed', 'present', 'present_not_latest', 'latest']: + (m, out) = install_packages(module, packages, allow_upgrade=p['state'] == 'latest') + modified = modified or m + output += out + + if p['state'] in ['absent', 'removed']: + (m, out) = remove_packages(module, packages) + modified = modified or m + output += out + + # Return total modification status and output of all commands + module.exit_json(changed=modified, msg=output) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/files/archive.py b/plugins/modules/archive.py similarity index 77% rename from plugins/modules/files/archive.py rename to plugins/modules/archive.py index 7b13eb6c4d..710a1c8fa6 100644 --- a/plugins/modules/files/archive.py +++ b/plugins/modules/archive.py @@ -1,23 +1,28 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- -# Copyright: (c) 2016, Ben Doherty +# Copyright (c) 2016, Ben Doherty # Sponsored by Oomph, Inc. 
http://www.oomphinc.com -# Copyright: (c) 2017, Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# Copyright (c) 2017, Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = r''' ---- +DOCUMENTATION = r""" module: archive short_description: Creates a compressed archive of one or more files or trees -extends_documentation_fragment: files +extends_documentation_fragment: + - files + - community.general.attributes description: - - Creates or extends an archive. - - The source and archive are on the remote host, and the archive I(is not) copied to the local host. - - Source files can be deleted after archival by specifying I(remove=True). + - Creates or extends an archive. + - The source and archive are on the target host, and the archive I(is not) copied to the controller host. + - Source files can be deleted after archival by specifying O(remove=True). +attributes: + check_mode: + support: full + diff_mode: + support: none options: path: description: @@ -28,27 +33,28 @@ options: format: description: - The type of compression to use. - - Support for xz was added in Ansible 2.5. type: str - choices: [ bz2, gz, tar, xz, zip ] + choices: [bz2, gz, tar, xz, zip] default: gz dest: description: - The file name of the destination archive. The parent directory must exists on the remote host. - - This is required when C(path) refers to multiple files by either specifying a glob, a directory or multiple paths in a list. - - If the destination archive already exists, it will be truncated and overwritten. + - This is required when O(path) refers to multiple files by either specifying a glob, a directory or multiple paths + in a list. 
+ - If the destination archive already exists, it is truncated and overwritten. type: path exclude_path: description: - - Remote absolute path, glob, or list of paths or globs for the file or files to exclude from I(path) list and glob expansion. - - Use I(exclusion_patterns) to instead exclude files or subdirectories below any of the paths from the I(path) list. + - Remote absolute path, glob, or list of paths or globs for the file or files to exclude from O(path) list and glob + expansion. + - Use O(exclusion_patterns) to instead exclude files or subdirectories below any of the paths from the O(path) list. type: list elements: path default: [] exclusion_patterns: description: - Glob style patterns to exclude files or directories from the resulting archive. - - This differs from I(exclude_path) which applies only to the source paths from I(path). + - This differs from O(exclude_path) which applies only to the source paths from O(path). type: list elements: path version_added: 3.2.0 @@ -63,18 +69,18 @@ options: description: - Remove any added source files and trees after adding to archive. type: bool - default: no + default: false notes: - - Requires tarfile, zipfile, gzip and bzip2 packages on target host. - - Requires lzma or backports.lzma if using xz format. - - Can produce I(gzip), I(bzip2), I(lzma) and I(zip) compressed files or archives. + - Can produce C(gzip), C(bzip2), C(lzma), and C(zip) compressed files or archives. + - This module uses C(tarfile), C(zipfile), C(gzip), C(bz2), and C(lzma) packages on the target host to create archives. These are + part of the Python standard library. 
seealso: -- module: ansible.builtin.unarchive + - module: ansible.builtin.unarchive author: -- Ben Doherty (@bendoh) -''' + - Ben Doherty (@bendoh) +""" -EXAMPLES = r''' +EXAMPLES = r""" - name: Compress directory /path/to/foo/ into /path/to/foo.tgz community.general.archive: path: /path/to/foo @@ -83,7 +89,7 @@ EXAMPLES = r''' - name: Compress regular file /path/to/foo into /path/to/foo.gz and remove it community.general.archive: path: /path/to/foo - remove: yes + remove: true - name: Create a zip archive of /path/to/foo community.general.archive: @@ -93,28 +99,28 @@ EXAMPLES = r''' - name: Create a bz2 archive of multiple files, rooted at /path community.general.archive: path: - - /path/to/foo - - /path/wong/foo + - /path/to/foo + - /path/wong/foo dest: /path/file.tar.bz2 format: bz2 - name: Create a bz2 archive of a globbed path, while excluding specific dirnames community.general.archive: path: - - /path/to/foo/* + - /path/to/foo/* dest: /path/file.tar.bz2 exclude_path: - - /path/to/foo/bar - - /path/to/foo/baz + - /path/to/foo/bar + - /path/to/foo/baz format: bz2 - name: Create a bz2 archive of a globbed path, while excluding a glob of dirnames community.general.archive: path: - - /path/to/foo/* + - /path/to/foo/* dest: /path/file.tar.bz2 exclude_path: - - /path/to/foo/ba* + - /path/to/foo/ba* format: bz2 - name: Use gzip to compress a single archive (i.e don't archive it first with tar) @@ -129,84 +135,65 @@ EXAMPLES = r''' dest: /path/file.tar.gz format: gz force_archive: true -''' +""" -RETURN = r''' +RETURN = r""" state: - description: - The state of the input C(path). - type: str - returned: always + description: The state of the input O(path). + type: str + returned: always dest_state: - description: - - The state of the I(dest) file. - - C(absent) when the file does not exist. - - C(archive) when the file is an archive. - - C(compress) when the file is compressed, but not an archive. 
- - C(incomplete) when the file is an archive, but some files under I(path) were not found. - type: str - returned: success - version_added: 3.4.0 + description: + - The state of the O(dest) file. + - V(absent) when the file does not exist. + - V(archive) when the file is an archive. + - V(compress) when the file is compressed, but not an archive. + - V(incomplete) when the file is an archive, but some files under O(path) were not found. + type: str + returned: success + version_added: 3.4.0 missing: - description: Any files that were missing from the source. - type: list - returned: success + description: Any files that were missing from the source. + type: list + returned: success archived: - description: Any files that were compressed or added to the archive. - type: list - returned: success + description: Any files that were compressed or added to the archive. + type: list + returned: success arcroot: - description: The archive root. - type: str - returned: always + description: The archive root. + type: str + returned: always expanded_paths: - description: The list of matching paths from paths argument. - type: list - returned: always + description: The list of matching paths from paths argument. + type: list + returned: always expanded_exclude_paths: - description: The list of matching exclude paths from the exclude_path argument. - type: list - returned: always -''' + description: The list of matching exclude paths from the exclude_path argument. 
+ type: list + returned: always +""" import abc import bz2 import glob import gzip import io +import lzma import os import re import shutil import tarfile import zipfile from fnmatch import fnmatch -from sys import version_info from traceback import format_exc +from zipfile import BadZipFile from zlib import crc32 -from ansible.module_utils.basic import AnsibleModule, missing_required_lib +from ansible.module_utils.basic import AnsibleModule from ansible.module_utils.common.text.converters import to_bytes, to_native -from ansible.module_utils import six -LZMA_IMP_ERR = None -if six.PY3: - try: - import lzma - HAS_LZMA = True - except ImportError: - LZMA_IMP_ERR = format_exc() - HAS_LZMA = False -else: - try: - from backports import lzma - HAS_LZMA = True - except ImportError: - LZMA_IMP_ERR = format_exc() - HAS_LZMA = False - -PY27 = version_info[0:2] >= (2, 7) - STATE_ABSENT = 'absent' STATE_ARCHIVED = 'archive' STATE_COMPRESSED = 'compress' @@ -214,7 +201,7 @@ STATE_INCOMPLETE = 'incomplete' def common_path(paths): - empty = b'' if paths and isinstance(paths[0], six.binary_type) else '' + empty = b'' if paths and isinstance(paths[0], bytes) else '' return os.path.join( os.path.dirname(os.path.commonprefix([os.path.join(os.path.dirname(p), empty) for p in paths])), empty @@ -259,8 +246,7 @@ def _to_native_ascii(s): return to_native(s, errors='surrogate_or_strict', encoding='ascii') -@six.add_metaclass(abc.ABCMeta) -class Archive(object): +class Archive(object, metaclass=abc.ABCMeta): def __init__(self, module): self.module = module @@ -319,7 +305,7 @@ class Archive(object): if self.contains(_to_native(archive_name)): self.successes.append(path) except Exception as e: - self.errors.append('%s: %s' % (_to_native_ascii(path), _to_native(e))) + self.errors.append(f'{_to_native_ascii(path)}: {e}') def add_single_target(self, path): if self.format in ('zip', 'tar'): @@ -339,7 +325,7 @@ class Archive(object): self.module.fail_json( path=_to_native(path), 
dest=_to_native(self.destination), - msg='Unable to write to compressed file: %s' % _to_native(e), exception=format_exc() + msg=f'Unable to write to compressed file: {e}', exception=format_exc() ) def add_targets(self): @@ -361,18 +347,16 @@ class Archive(object): if self.format in ('zip', 'tar'): archive_format = self.format else: - archive_format = 'tar.' + self.format + archive_format = f"tar.{self.format}" self.module.fail_json( - msg='Error when writing %s archive at %s: %s' % ( - archive_format, _to_native(self.destination), _to_native(e) - ), + msg=f'Error when writing {archive_format} archive at {_to_native(self.destination)}: {e}', exception=format_exc() ) self.close() if self.errors: self.module.fail_json( - msg='Errors when writing archive at %s: %s' % (_to_native(self.destination), '; '.join(self.errors)) + msg=f"Errors when writing archive at {_to_native(self.destination)}: {'; '.join(self.errors)}" ) def is_different_from_original(self): @@ -414,7 +398,7 @@ class Archive(object): except OSError as e: self.module.fail_json( path=_to_native(path), - msg='Unable to remove source file: %s' % _to_native(e), exception=format_exc() + msg=f'Unable to remove source file: {e}', exception=format_exc() ) def remove_targets(self): @@ -473,7 +457,7 @@ class Archive(object): elif self.format == 'xz': f = lzma.LZMAFile(path, mode) else: - self.module.fail_json(msg="%s is not a valid format" % self.format) + self.module.fail_json(msg=f"{self.format} is not a valid format") return f @@ -524,7 +508,7 @@ class ZipArchive(Archive): archive = zipfile.ZipFile(_to_native_ascii(path), 'r') checksums = set((info.filename, info.CRC) for info in archive.infolist()) archive.close() - except zipfile.BadZipfile: + except BadZipFile: checksums = set() return checksums @@ -550,7 +534,7 @@ class TarArchive(Archive): def open(self): if self.format in ('gz', 'bz2'): - self.file = tarfile.open(_to_native_ascii(self.destination), 'w|' + self.format) + self.file = 
tarfile.open(_to_native_ascii(self.destination), f"w|{self.format}") # python3 tarfile module allows xz format but for python2 we have to create the tarfile # in memory and then compress it with lzma. elif self.format == 'xz': @@ -559,21 +543,17 @@ class TarArchive(Archive): elif self.format == 'tar': self.file = tarfile.open(_to_native_ascii(self.destination), 'w') else: - self.module.fail_json(msg="%s is not a valid archive format" % self.format) + self.module.fail_json(msg=f"{self.format} is not a valid archive format") def _add(self, path, archive_name): - def py27_filter(tarinfo): + def filter(tarinfo): return None if matches_exclusion_patterns(tarinfo.name, self.exclusion_patterns) else tarinfo - def py26_filter(path): - return matches_exclusion_patterns(path, self.exclusion_patterns) - - if PY27: - self.file.add(path, archive_name, recursive=False, filter=py27_filter) - else: - self.file.add(path, archive_name, recursive=False, exclude=py26_filter) + self.file.add(path, archive_name, recursive=False, filter=filter) def _get_checksums(self, path): + LZMAError = lzma.LZMAError + try: if self.format == 'xz': with lzma.open(_to_native_ascii(path), 'r') as f: @@ -581,15 +561,21 @@ class TarArchive(Archive): checksums = set((info.name, info.chksum) for info in archive.getmembers()) archive.close() else: - archive = tarfile.open(_to_native_ascii(path), 'r|' + self.format) + archive = tarfile.open(_to_native_ascii(path), f"r|{self.format}") checksums = set((info.name, info.chksum) for info in archive.getmembers()) archive.close() - except (lzma.LZMAError, tarfile.ReadError, tarfile.CompressionError): + except (LZMAError, tarfile.ReadError, tarfile.CompressionError): try: # The python implementations of gzip, bz2, and lzma do not support restoring compressed files # to their original names so only file checksum is returned f = self._open_compressed_file(_to_native_ascii(path), 'r') - checksums = set([(b'', crc32(f.read()))]) + checksum = 0 + while True: + chunk = 
f.read(16 * 1024 * 1024) + if not chunk: + break + checksum = crc32(chunk, checksum) + checksums = set([(b'', checksum)]) f.close() except Exception: checksums = set() @@ -618,11 +604,6 @@ def main(): supports_check_mode=True, ) - if not HAS_LZMA and module.params['format'] == 'xz': - module.fail_json( - msg=missing_required_lib("lzma or backports.lzma", reason="when using xz format"), exception=LZMA_IMP_ERR - ) - check_mode = module.check_mode archive = get_archive(module) diff --git a/plugins/modules/cloud/atomic/atomic_container.py b/plugins/modules/atomic_container.py similarity index 71% rename from plugins/modules/cloud/atomic/atomic_container.py rename to plugins/modules/atomic_container.py index a6be44317b..9051705f12 100644 --- a/plugins/modules/cloud/atomic/atomic_container.py +++ b/plugins/modules/atomic_container.py @@ -1,69 +1,77 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- -# Copyright: Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# Copyright Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = r''' ---- +DOCUMENTATION = r""" module: atomic_container short_description: Manage the containers on the atomic host platform description: - - Manage the containers on the atomic host platform. - - Allows to manage the lifecycle of a container on the atomic host platform. + - Manage the containers on the atomic host platform. + - Allows to manage the lifecycle of a container on the atomic host platform. +deprecated: + removed_in: 13.0.0 + why: Project Atomic was sunset by the end of 2019. + alternative: There is none. 
author: "Giuseppe Scrivano (@giuseppe)" -notes: - - Host should support C(atomic) command requirements: - - atomic - - "python >= 2.6" + - atomic +notes: + - According to U(https://projectatomic.io/) the project has been sunset around 2019/2020, in favor of C(podman) and Fedora CoreOS. +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: none + diff_mode: + support: none options: - backend: - description: - - Define the backend to use for the container. - required: True - choices: ["docker", "ostree"] - type: str - name: - description: - - Name of the container. - required: True - type: str - image: - description: - - The image to use to install the container. - required: True - type: str - rootfs: - description: - - Define the rootfs of the image. - type: str - state: - description: - - State of the container. - choices: ["absent", "latest", "present", "rollback"] - default: "latest" - type: str - mode: - description: - - Define if it is an user or a system container. - choices: ["user", "system"] - type: str - values: - description: - - Values for the installation of the container. - - This option is permitted only with mode 'user' or 'system'. - - The values specified here will be used at installation time as --set arguments for atomic install. - type: list - elements: str -''' - -EXAMPLES = r''' + backend: + description: + - Define the backend to use for the container. + required: true + choices: ["docker", "ostree"] + type: str + name: + description: + - Name of the container. + required: true + type: str + image: + description: + - The image to use to install the container. + required: true + type: str + rootfs: + description: + - Define the rootfs of the image. + type: str + state: + description: + - State of the container. + choices: ["absent", "latest", "present", "rollback"] + default: "latest" + type: str + mode: + description: + - Define if it is an user or a system container. 
+ choices: ["user", "system"] + type: str + values: + description: + - Values for the installation of the container. + - This option is permitted only with mode 'user' or 'system'. + - The values specified here will be used at installation time as --set arguments for atomic install. + type: list + elements: str + default: [] +""" +EXAMPLES = r""" - name: Install the etcd system container community.general.atomic_container: name: etcd @@ -72,7 +80,7 @@ EXAMPLES = r''' state: latest mode: system values: - - ETCD_NAME=etcd.server + - ETCD_NAME=etcd.server - name: Uninstall the etcd system container community.general.atomic_container: @@ -81,15 +89,15 @@ EXAMPLES = r''' backend: ostree state: absent mode: system -''' +""" -RETURN = r''' +RETURN = r""" msg: - description: The command standard output - returned: always - type: str - sample: [u'Using default tag: latest ...'] -''' + description: The command standard output. + returned: always + type: str + sample: 'Using default tag: latest ...' 
+""" # import module snippets import traceback diff --git a/plugins/modules/cloud/atomic/atomic_host.py b/plugins/modules/atomic_host.py similarity index 59% rename from plugins/modules/cloud/atomic/atomic_host.py rename to plugins/modules/atomic_host.py index 85b00f917a..470e65c919 100644 --- a/plugins/modules/cloud/atomic/atomic_host.py +++ b/plugins/modules/atomic_host.py @@ -1,37 +1,46 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- -# Copyright: Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# Copyright Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = r''' ---- +DOCUMENTATION = r""" module: atomic_host short_description: Manage the atomic host platform description: - - Manage the atomic host platform. - - Rebooting of Atomic host platform should be done outside this module. + - Manage the atomic host platform. + - Rebooting of Atomic host platform should be done outside this module. +deprecated: + removed_in: 13.0.0 + why: Project Atomic was sunset by the end of 2019. + alternative: There is none. author: -- Saravanan KR (@krsacme) + - Saravanan KR (@krsacme) notes: - - Host should be an atomic platform (verified by existence of '/run/ostree-booted' file). + - Host should be an atomic platform (verified by existence of '/run/ostree-booted' file). + - According to U(https://projectatomic.io/) the project has been sunset around 2019/2020, in favor of C(podman) and Fedora CoreOS. 
requirements: - atomic - - python >= 2.6 +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: none + diff_mode: + support: none options: - revision: - description: - - The version number of the atomic host to be deployed. - - Providing C(latest) will upgrade to the latest available version. - default: 'latest' - aliases: [ version ] - type: str -''' + revision: + description: + - The version number of the atomic host to be deployed. + - Providing V(latest) will upgrade to the latest available version. + default: 'latest' + aliases: [version] + type: str +""" -EXAMPLES = r''' +EXAMPLES = r""" - name: Upgrade the atomic host platform to the latest version (atomic host upgrade) community.general.atomic_host: revision: latest @@ -39,15 +48,15 @@ EXAMPLES = r''' - name: Deploy a specific revision as the atomic host (atomic host deploy 23.130) community.general.atomic_host: revision: 23.130 -''' +""" -RETURN = r''' +RETURN = r""" msg: - description: The command standard output - returned: always - type: str - sample: 'Already on latest' -''' + description: The command standard output. 
+ returned: always + type: str + sample: 'Already on latest' +""" import os import traceback diff --git a/plugins/modules/cloud/atomic/atomic_image.py b/plugins/modules/atomic_image.py similarity index 71% rename from plugins/modules/cloud/atomic/atomic_image.py rename to plugins/modules/atomic_image.py index 350ad4c2ae..0c3025b75f 100644 --- a/plugins/modules/cloud/atomic/atomic_image.py +++ b/plugins/modules/atomic_image.py @@ -1,52 +1,61 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- -# Copyright: Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# Copyright Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = r''' ---- +DOCUMENTATION = r""" module: atomic_image short_description: Manage the container images on the atomic host platform description: - - Manage the container images on the atomic host platform. - - Allows to execute the commands specified by the RUN label in the container image when present. + - Manage the container images on the atomic host platform. + - Allows to execute the commands specified by the RUN label in the container image when present. +deprecated: + removed_in: 13.0.0 + why: Project Atomic was sunset by the end of 2019. + alternative: There is none. author: -- Saravanan KR (@krsacme) + - Saravanan KR (@krsacme) notes: - - Host should support C(atomic) command. + - According to U(https://projectatomic.io/) the project has been sunset around 2019/2020, in favor of C(podman) and Fedora CoreOS. 
requirements: - atomic - - python >= 2.6 +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: none + diff_mode: + support: none options: - backend: - description: - - Define the backend where the image is pulled. - choices: [ 'docker', 'ostree' ] - type: str - name: - description: - - Name of the container image. - required: True - type: str - state: - description: - - The state of the container image. - - The state C(latest) will ensure container image is upgraded to the latest version and forcefully restart container, if running. - choices: [ 'absent', 'latest', 'present' ] - default: 'latest' - type: str - started: - description: - - Start or Stop the container. - type: bool - default: 'yes' -''' + backend: + description: + - Define the backend where the image is pulled. + choices: ['docker', 'ostree'] + type: str + name: + description: + - Name of the container image. + required: true + type: str + state: + description: + - The state of the container image. + - The state V(latest) will ensure container image is upgraded to the latest version and forcefully restart container, + if running. + choices: ['absent', 'latest', 'present'] + default: 'latest' + type: str + started: + description: + - Start or stop the container. + type: bool + default: true +""" -EXAMPLES = r''' +EXAMPLES = r""" - name: Execute the run command on rsyslog container image (atomic run rhel7/rsyslog) community.general.atomic_image: name: rhel7/rsyslog @@ -57,15 +66,15 @@ EXAMPLES = r''' name: busybox state: latest backend: ostree -''' +""" -RETURN = r''' +RETURN = r""" msg: - description: The command standard output - returned: always - type: str - sample: [u'Using default tag: latest ...'] -''' + description: The command standard output. + returned: always + type: str + sample: 'Using default tag: latest ...' 
+""" import traceback from ansible.module_utils.basic import AnsibleModule diff --git a/plugins/modules/system/awall.py b/plugins/modules/awall.py similarity index 65% rename from plugins/modules/system/awall.py rename to plugins/modules/awall.py index 260c7ae4d0..6e5b5b1775 100644 --- a/plugins/modules/system/awall.py +++ b/plugins/modules/awall.py @@ -1,22 +1,27 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- -# Copyright: (c) 2017, Ted Trask -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# Copyright (c) 2017, Ted Trask +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = r''' ---- +DOCUMENTATION = r""" module: awall short_description: Manage awall policies author: Ted Trask (@tdtrask) description: - - This modules allows for enable/disable/activate of I(awall) policies. - - Alpine Wall (I(awall)) generates a firewall configuration from the enabled policy files - and activates the configuration on the system. + - This modules allows for enable/disable/activate of C(awall) policies. + - Alpine Wall (C(awall)) generates a firewall configuration from the enabled policy files and activates the configuration + on the system. +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: none options: name: description: @@ -27,43 +32,46 @@ options: description: - Whether the policies should be enabled or disabled. type: str - choices: [ disabled, enabled ] + choices: [disabled, enabled] default: enabled activate: description: - Activate the new firewall rules. - Can be run with other steps or on its own. + - Idempotency is affected if O(activate=true), as the module always reports a changed state. 
type: bool - default: no -''' + default: false +notes: + - At least one of O(name) and O(activate) is required. +""" -EXAMPLES = r''' +EXAMPLES = r""" - name: Enable "foo" and "bar" policy community.general.awall: - name: [ foo bar ] + name: [foo bar] state: enabled - name: Disable "foo" and "bar" policy and activate new rules community.general.awall: name: - - foo - - bar + - foo + - bar state: disabled - activate: no + activate: false - name: Activate currently enabled firewall rules community.general.awall: - activate: yes -''' + activate: true +""" -RETURN = ''' # ''' +RETURN = """ # """ import re from ansible.module_utils.basic import AnsibleModule def activate(module): - cmd = "%s activate --force" % (AWALL_PATH) + cmd = f"{AWALL_PATH} activate --force" rc, stdout, stderr = module.run_command(cmd) if rc == 0: return True @@ -72,9 +80,9 @@ def activate(module): def is_policy_enabled(module, name): - cmd = "%s list" % (AWALL_PATH) + cmd = f"{AWALL_PATH} list" rc, stdout, stderr = module.run_command(cmd) - if re.search(r"^%s\s+enabled" % name, stdout, re.MULTILINE): + if re.search(rf"^{name}\s+enabled", stdout, re.MULTILINE): return True return False @@ -88,15 +96,15 @@ def enable_policy(module, names, act): module.exit_json(changed=False, msg="policy(ies) already enabled") names = " ".join(policies) if module.check_mode: - cmd = "%s list" % (AWALL_PATH) + cmd = f"{AWALL_PATH} list" else: - cmd = "%s enable %s" % (AWALL_PATH, names) + cmd = f"{AWALL_PATH} enable {names}" rc, stdout, stderr = module.run_command(cmd) if rc != 0: - module.fail_json(msg="failed to enable %s" % names, stdout=stdout, stderr=stderr) + module.fail_json(msg=f"failed to enable {names}", stdout=stdout, stderr=stderr) if act and not module.check_mode: activate(module) - module.exit_json(changed=True, msg="enabled awall policy(ies): %s" % names) + module.exit_json(changed=True, msg=f"enabled awall policy(ies): {names}") def disable_policy(module, names, act): @@ -108,15 +116,15 @@ def 
disable_policy(module, names, act): module.exit_json(changed=False, msg="policy(ies) already disabled") names = " ".join(policies) if module.check_mode: - cmd = "%s list" % (AWALL_PATH) + cmd = f"{AWALL_PATH} list" else: - cmd = "%s disable %s" % (AWALL_PATH, names) + cmd = f"{AWALL_PATH} disable {names}" rc, stdout, stderr = module.run_command(cmd) if rc != 0: - module.fail_json(msg="failed to disable %s" % names, stdout=stdout, stderr=stderr) + module.fail_json(msg=f"failed to disable {names}", stdout=stdout, stderr=stderr) if act and not module.check_mode: activate(module) - module.exit_json(changed=True, msg="disabled awall policy(ies): %s" % names) + module.exit_json(changed=True, msg=f"disabled awall policy(ies): {names}") def main(): diff --git a/plugins/modules/system/beadm.py b/plugins/modules/beadm.py similarity index 80% rename from plugins/modules/system/beadm.py rename to plugins/modules/beadm.py index d89ca79af1..f285616ca7 100644 --- a/plugins/modules/system/beadm.py +++ b/plugins/modules/beadm.py @@ -1,62 +1,65 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- -# Copyright: (c) 2016, Adam Števko -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# Copyright (c) 2016, Adam Števko +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = r''' ---- +DOCUMENTATION = r""" module: beadm -short_description: Manage ZFS boot environments on FreeBSD/Solaris/illumos systems. +short_description: Manage ZFS boot environments on FreeBSD/Solaris/illumos systems description: - - Create, delete or activate ZFS boot environments. - - Mount and unmount ZFS boot environments. + - Create, delete or activate ZFS boot environments. + - Mount and unmount ZFS boot environments. 
author: Adam Števko (@xen0l) +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: none options: - name: - description: - - ZFS boot environment name. - type: str - required: True - aliases: [ "be" ] - snapshot: - description: - - If specified, the new boot environment will be cloned from the given - snapshot or inactive boot environment. - type: str + name: description: - description: - - Associate a description with a new boot environment. This option is - available only on Solarish platforms. - type: str - options: - description: - - Create the datasets for new BE with specific ZFS properties. - - Multiple options can be specified. - - This option is available only on Solarish platforms. - type: str - mountpoint: - description: - - Path where to mount the ZFS boot environment. - type: path - state: - description: - - Create or delete ZFS boot environment. - type: str - choices: [ absent, activated, mounted, present, unmounted ] - default: present - force: - description: - - Specifies if the unmount should be forced. - type: bool - default: false -''' + - ZFS boot environment name. + type: str + required: true + aliases: ["be"] + snapshot: + description: + - If specified, the new boot environment is cloned from the given snapshot or inactive boot environment. + type: str + description: + description: + - Associate a description with a new boot environment. This option is available only on Solarish platforms. + type: str + options: + description: + - Create the datasets for new BE with specific ZFS properties. + - Multiple options can be specified. + - This option is available only on Solarish platforms. + type: str + mountpoint: + description: + - Path where to mount the ZFS boot environment. + type: path + state: + description: + - Create or delete ZFS boot environment. 
+ type: str + choices: [absent, activated, mounted, present, unmounted] + default: present + force: + description: + - Specifies if the unmount should be forced. + type: bool + default: false +""" -EXAMPLES = r''' +EXAMPLES = r""" - name: Create ZFS boot environment community.general.beadm: name: upgrade-be @@ -95,48 +98,47 @@ EXAMPLES = r''' community.general.beadm: name: upgrade-be state: activated -''' +""" -RETURN = r''' +RETURN = r""" name: - description: BE name - returned: always - type: str - sample: pre-upgrade + description: BE name. + returned: always + type: str + sample: pre-upgrade snapshot: - description: ZFS snapshot to create BE from - returned: always - type: str - sample: rpool/ROOT/oi-hipster@fresh + description: ZFS snapshot to create BE from. + returned: always + type: str + sample: rpool/ROOT/oi-hipster@fresh description: - description: BE description - returned: always - type: str - sample: Upgrade from 9.0 to 10.0 + description: BE description. + returned: always + type: str + sample: Upgrade from 9.0 to 10.0 options: - description: BE additional options - returned: always - type: str - sample: compression=on + description: BE additional options. + returned: always + type: str + sample: compression=on mountpoint: - description: BE mountpoint - returned: always - type: str - sample: /mnt/be + description: BE mountpoint. + returned: always + type: str + sample: /mnt/be state: - description: state of the target - returned: always - type: str - sample: present + description: State of the target. + returned: always + type: str + sample: present force: - description: If forced action is wanted - returned: always - type: bool - sample: False -''' + description: If forced action is wanted. 
+ returned: always + type: bool + sample: false +""" import os -import re from ansible.module_utils.basic import AnsibleModule @@ -164,10 +166,10 @@ class BE(object): for line in out.splitlines(): if self.is_freebsd: check = line.split() - if(check == []): + if check == []: continue full_name = check[0].split('/') - if(full_name == []): + if full_name == []: continue check[0] = full_name[len(full_name) - 1] if check[0] == self.name: diff --git a/plugins/modules/monitoring/bigpanda.py b/plugins/modules/bigpanda.py similarity index 78% rename from plugins/modules/monitoring/bigpanda.py rename to plugins/modules/bigpanda.py index c5fe61cbf6..1bdd79d548 100644 --- a/plugins/modules/monitoring/bigpanda.py +++ b/plugins/modules/bigpanda.py @@ -1,24 +1,30 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- -# Copyright: Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# Copyright Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: bigpanda author: "Hagai Kariti (@hkariti)" short_description: Notify BigPanda about deployments description: - - Notify BigPanda when deployments start and end (successfully or not). Returns a deployment object containing all the parameters for future module calls. + - Notify BigPanda when deployments start and end (successfully or not). Returns a deployment object containing all the parameters + for future module calls. +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: none options: component: type: str description: - - "The name of the component being deployed. 
Ex: billing" + - 'The name of the component being deployed. Ex: V(billing).' required: true aliases: ['name'] version: @@ -47,7 +53,7 @@ options: env: type: str description: - - The environment name, typically 'production', 'staging', etc. + - The environment name, typically V(production), V(staging), and so on. required: false owner: type: str @@ -63,31 +69,31 @@ options: type: str description: - Base URL of the API server. - required: False - default: https://api.bigpanda.io + required: false + default: "https://api.bigpanda.io" validate_certs: description: - - If C(no), SSL certificates for the target url will not be validated. This should only be used - on personally controlled sites using self-signed certificates. + - If V(false), SSL certificates for the target URL are not validated. This should only be used on personally controlled + sites using self-signed certificates. required: false - default: 'yes' + default: true type: bool deployment_message: type: str description: - - Message about the deployment. + - Message about the deployment. version_added: '0.2.0' source_system: type: str description: - - Source system used in the requests to the API + - Source system used in the requests to the API. default: ansible # informational: requirements for nodes -requirements: [ ] -''' +requirements: [] +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: Notify BigPanda about a deployment community.general.bigpanda: component: myapp @@ -120,7 +126,7 @@ EXAMPLES = ''' token: '{{ deployment.token }}' state: finished delegate_to: localhost -''' +""" # =========================================== # Module execution. 
@@ -142,14 +148,14 @@ def main(): version=dict(required=True), token=dict(required=True, no_log=True), state=dict(required=True, choices=['started', 'finished', 'failed']), - hosts=dict(required=False, aliases=['host']), - env=dict(required=False), - owner=dict(required=False), - description=dict(required=False), - deployment_message=dict(required=False), - source_system=dict(required=False, default='ansible'), + hosts=dict(aliases=['host']), + env=dict(), + owner=dict(), + description=dict(), + deployment_message=dict(), + source_system=dict(default='ansible'), validate_certs=dict(default=True, type='bool'), - url=dict(required=False, default='https://api.bigpanda.io'), + url=dict(default='https://api.bigpanda.io'), ), supports_check_mode=True, ) diff --git a/plugins/modules/source_control/bitbucket/bitbucket_access_key.py b/plugins/modules/bitbucket_access_key.py similarity index 91% rename from plugins/modules/source_control/bitbucket/bitbucket_access_key.py rename to plugins/modules/bitbucket_access_key.py index 6451d72909..2b2bf9b8c5 100644 --- a/plugins/modules/source_control/bitbucket/bitbucket_access_key.py +++ b/plugins/modules/bitbucket_access_key.py @@ -1,14 +1,12 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- -# Copyright: (c) 2019, Evgeniy Krysanov -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# Copyright (c) 2019, Evgeniy Krysanov +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = r''' ---- +DOCUMENTATION = r""" module: bitbucket_access_key short_description: Manages Bitbucket repository access keys description: @@ -17,6 +15,12 @@ author: - Evgeniy Krysanov (@catcombo) extends_documentation_fragment: - community.general.bitbucket + - 
community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: none options: repository: description: @@ -26,10 +30,9 @@ options: workspace: description: - The repository owner. - - Alias I(username) has been deprecated and will become an alias of I(user) in community.general 6.0.0. + - B(Note:) O(ignore:username) used to be an alias of this option. Since community.general 6.0.0 it is an alias of O(user). type: str required: true - aliases: [ username ] key: description: - The SSH public key. @@ -44,13 +47,13 @@ options: - Indicates desired state of the access key. type: str required: true - choices: [ absent, present ] + choices: [absent, present] notes: - Bitbucket OAuth consumer or App password should have permissions to read and administrate account repositories. - Check mode is supported. -''' +""" -EXAMPLES = r''' +EXAMPLES = r""" - name: Create access key community.general.bitbucket_access_key: repository: 'bitbucket-repo' @@ -65,9 +68,9 @@ EXAMPLES = r''' workspace: bitbucket_workspace label: Bitbucket state: absent -''' +""" -RETURN = r''' # ''' +RETURN = r""" # """ from ansible.module_utils.basic import AnsibleModule from ansible_collections.community.general.plugins.module_utils.source_control.bitbucket import BitbucketHelper @@ -152,7 +155,7 @@ def get_existing_deploy_key(module, bitbucket): if info['status'] != 200: module.fail_json(msg='Failed to retrieve the list of deploy keys: {0}'.format(info)) - res = next(iter(filter(lambda v: v['label'] == module.params['label'], content['values'])), None) + res = next((v for v in content['values'] if v['label'] == module.params['label']), None) if res is not None: return res @@ -217,8 +220,7 @@ def main(): argument_spec.update( repository=dict(type='str', required=True), workspace=dict( - type='str', aliases=['username'], required=True, - deprecated_aliases=[dict(name='username', version='6.0.0', collection_name='community.general')], + type='str', required=True, ), 
key=dict(type='str', no_log=False), label=dict(type='str', required=True), diff --git a/plugins/modules/source_control/bitbucket/bitbucket_pipeline_key_pair.py b/plugins/modules/bitbucket_pipeline_key_pair.py similarity index 88% rename from plugins/modules/source_control/bitbucket/bitbucket_pipeline_key_pair.py rename to plugins/modules/bitbucket_pipeline_key_pair.py index 5d42419dfa..28d837c914 100644 --- a/plugins/modules/source_control/bitbucket/bitbucket_pipeline_key_pair.py +++ b/plugins/modules/bitbucket_pipeline_key_pair.py @@ -1,14 +1,12 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- -# Copyright: (c) 2019, Evgeniy Krysanov -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# Copyright (c) 2019, Evgeniy Krysanov +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = r''' ---- +DOCUMENTATION = r""" module: bitbucket_pipeline_key_pair short_description: Manages Bitbucket pipeline SSH key pair description: @@ -17,6 +15,12 @@ author: - Evgeniy Krysanov (@catcombo) extends_documentation_fragment: - community.general.bitbucket + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: none options: repository: description: @@ -26,10 +30,9 @@ options: workspace: description: - The repository owner. - - Alias I(username) has been deprecated and will become an alias of I(user) in community.general 6.0.0. + - B(Note:) O(ignore:username) used to be an alias of this option. Since community.general 6.0.0 it is an alias of O(user). type: str required: true - aliases: [ username ] public_key: description: - The public key. @@ -43,12 +46,12 @@ options: - Indicates desired state of the key pair. 
type: str required: true - choices: [ absent, present ] + choices: [absent, present] notes: - Check mode is supported. -''' +""" -EXAMPLES = r''' +EXAMPLES = r""" - name: Create or update SSH key pair community.general.bitbucket_pipeline_key_pair: repository: 'bitbucket-repo' @@ -62,9 +65,9 @@ EXAMPLES = r''' repository: bitbucket-repo workspace: bitbucket_workspace state: absent -''' +""" -RETURN = r''' # ''' +RETURN = r""" # """ from ansible.module_utils.basic import AnsibleModule from ansible_collections.community.general.plugins.module_utils.source_control.bitbucket import BitbucketHelper @@ -153,10 +156,7 @@ def main(): argument_spec = BitbucketHelper.bitbucket_argument_spec() argument_spec.update( repository=dict(type='str', required=True), - workspace=dict( - type='str', aliases=['username'], required=True, - deprecated_aliases=[dict(name='username', version='6.0.0', collection_name='community.general')], - ), + workspace=dict(type='str', required=True), public_key=dict(type='str'), private_key=dict(type='str', no_log=True), state=dict(type='str', choices=['present', 'absent'], required=True), diff --git a/plugins/modules/source_control/bitbucket/bitbucket_pipeline_known_host.py b/plugins/modules/bitbucket_pipeline_known_host.py similarity index 89% rename from plugins/modules/source_control/bitbucket/bitbucket_pipeline_known_host.py rename to plugins/modules/bitbucket_pipeline_known_host.py index 9f4f2b9498..fb382c8afb 100644 --- a/plugins/modules/source_control/bitbucket/bitbucket_pipeline_known_host.py +++ b/plugins/modules/bitbucket_pipeline_known_host.py @@ -1,25 +1,29 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- -# Copyright: (c) 2019, Evgeniy Krysanov -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# Copyright (c) 2019, Evgeniy Krysanov +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -from 
__future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = r''' ---- +DOCUMENTATION = r""" module: bitbucket_pipeline_known_host short_description: Manages Bitbucket pipeline known hosts description: - Manages Bitbucket pipeline known hosts under the "SSH Keys" menu. - - The host fingerprint will be retrieved automatically, but in case of an error, one can use I(key) field to specify it manually. + - The host fingerprint is retrieved automatically, but in case of an error, one can use O(key) field to specify it manually. author: - Evgeniy Krysanov (@catcombo) extends_documentation_fragment: - community.general.bitbucket + - community.general.attributes requirements: - - paramiko + - paramiko +attributes: + check_mode: + support: full + diff_mode: + support: none options: repository: description: @@ -29,10 +33,9 @@ options: workspace: description: - The repository owner. - - Alias I(username) has been deprecated and will become an alias of I(user) in community.general 6.0.0. + - B(Note:) O(ignore:username) used to be an alias of this option. Since community.general 6.0.0 it is an alias of O(user). type: str required: true - aliases: [ username ] name: description: - The FQDN of the known host. @@ -47,12 +50,12 @@ options: - Indicates desired state of the record. type: str required: true - choices: [ absent, present ] + choices: [absent, present] notes: - Check mode is supported. 
-''' +""" -EXAMPLES = r''' +EXAMPLES = r""" - name: Create known hosts from the list community.general.bitbucket_pipeline_known_host: repository: 'bitbucket-repo' @@ -77,9 +80,9 @@ EXAMPLES = r''' name: bitbucket.org key: '{{lookup("file", "bitbucket.pub") }}' state: absent -''' +""" -RETURN = r''' # ''' +RETURN = r""" # """ import socket @@ -148,7 +151,7 @@ def get_existing_known_host(module, bitbucket): if info['status'] != 200: module.fail_json(msg='Failed to retrieve list of known hosts: {0}'.format(info)) - host = next(filter(lambda v: v['hostname'] == module.params['name'], content['values']), None) + host = next((v for v in content['values'] if v['hostname'] == module.params['name']), None) if host is not None: return host @@ -254,10 +257,7 @@ def main(): argument_spec = BitbucketHelper.bitbucket_argument_spec() argument_spec.update( repository=dict(type='str', required=True), - workspace=dict( - type='str', aliases=['username'], required=True, - deprecated_aliases=[dict(name='username', version='6.0.0', collection_name='community.general')], - ), + workspace=dict(type='str', required=True), name=dict(type='str', required=True), key=dict(type='str', no_log=False), state=dict(type='str', choices=['present', 'absent'], required=True), diff --git a/plugins/modules/source_control/bitbucket/bitbucket_pipeline_variable.py b/plugins/modules/bitbucket_pipeline_variable.py similarity index 88% rename from plugins/modules/source_control/bitbucket/bitbucket_pipeline_variable.py rename to plugins/modules/bitbucket_pipeline_variable.py index e5701184c3..ea43beba55 100644 --- a/plugins/modules/source_control/bitbucket/bitbucket_pipeline_variable.py +++ b/plugins/modules/bitbucket_pipeline_variable.py @@ -1,14 +1,12 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- -# Copyright: (c) 2019, Evgeniy Krysanov -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# Copyright (c) 2019, Evgeniy Krysanov +# GNU General Public License v3.0+ (see 
LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = r''' ---- +DOCUMENTATION = r""" module: bitbucket_pipeline_variable short_description: Manages Bitbucket pipeline variables description: @@ -17,6 +15,12 @@ author: - Evgeniy Krysanov (@catcombo) extends_documentation_fragment: - community.general.bitbucket + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: none options: repository: description: @@ -26,10 +30,9 @@ options: workspace: description: - The repository owner. - - Alias I(username) has been deprecated and will become an alias of I(user) in community.general 6.0.0. + - B(Note:) O(ignore:username) used to be an alias of this option. Since community.general 6.0.0 it is an alias of O(user). type: str required: true - aliases: [ username ] name: description: - The pipeline variable name. @@ -43,19 +46,19 @@ options: description: - Whether to encrypt the variable value. type: bool - default: no + default: false state: description: - Indicates desired state of the variable. type: str required: true - choices: [ absent, present ] + choices: [absent, present] notes: - Check mode is supported. - - For secured values return parameter C(changed) is always C(True). -''' + - For secured values return parameter C(changed) is always V(true). 
+""" -EXAMPLES = r''' +EXAMPLES = r""" - name: Create or update pipeline variables from the list community.general.bitbucket_pipeline_variable: repository: 'bitbucket-repo' @@ -65,8 +68,8 @@ EXAMPLES = r''' secured: '{{ item.secured }}' state: present with_items: - - { name: AWS_ACCESS_KEY, value: ABCD1234, secured: False } - - { name: AWS_SECRET, value: qwe789poi123vbn0, secured: True } + - {name: AWS_ACCESS_KEY, value: ABCD1234, secured: false} + - {name: AWS_SECRET, value: qwe789poi123vbn0, secured: true} - name: Remove pipeline variable community.general.bitbucket_pipeline_variable: @@ -74,9 +77,9 @@ EXAMPLES = r''' workspace: bitbucket_workspace name: AWS_ACCESS_KEY state: absent -''' +""" -RETURN = r''' # ''' +RETURN = r""" # """ from ansible.module_utils.basic import AnsibleModule, _load_params from ansible_collections.community.general.plugins.module_utils.source_control.bitbucket import BitbucketHelper @@ -136,7 +139,7 @@ def get_existing_pipeline_variable(module, bitbucket): return None page += 1 - var = next(filter(lambda v: v['key'] == module.params['name'], content['values']), None) + var = next((v for v in content['values'] if v['key'] == module.params['name']), None) if var is not None: var['name'] = var.pop('key') @@ -214,10 +217,7 @@ def main(): argument_spec = BitbucketHelper.bitbucket_argument_spec() argument_spec.update( repository=dict(type='str', required=True), - workspace=dict( - type='str', aliases=['username'], required=True, - deprecated_aliases=[dict(name='username', version='6.0.0', collection_name='community.general')], - ), + workspace=dict(type='str', required=True), name=dict(type='str', required=True), value=dict(type='str'), secured=dict(type='bool', default=False), diff --git a/plugins/modules/bootc_manage.py b/plugins/modules/bootc_manage.py new file mode 100644 index 0000000000..d854f866bf --- /dev/null +++ b/plugins/modules/bootc_manage.py @@ -0,0 +1,92 @@ +#!/usr/bin/python + +# Copyright (c) 2024, Ryan Cook +# GNU General 
Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt +# or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + +DOCUMENTATION = r""" +module: bootc_manage +version_added: 9.3.0 +author: + - Ryan Cook (@cooktheryan) +short_description: Bootc Switch and Upgrade +description: + - This module manages the switching and upgrading of C(bootc). +options: + state: + description: + - Control whether to apply the latest image or switch the image. + - B(Note:) This does not reboot the system. + - Please use M(ansible.builtin.reboot) to reboot the system. + required: true + type: str + choices: ['switch', 'latest'] + image: + description: + - The image to switch to. + - This is required when O(state=switch). + required: false + type: str +""" + +EXAMPLES = r""" +# Switch to a different image +- name: Provide image to switch to a different image and retain the current running image + community.general.bootc_manage: + state: switch + image: "example.com/image:latest" + +# Apply updates of the current running image +- name: Apply updates of the current running image + community.general.bootc_manage: + state: latest +""" + +RETURN = r""" +""" + + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.common.locale import get_best_parsable_locale + + +def main(): + argument_spec = dict( + state=dict(type='str', required=True, choices=['switch', 'latest']), + image=dict(type='str'), + ) + module = AnsibleModule( + argument_spec=argument_spec, + required_if=[ + ('state', 'switch', ['image']), + ], + ) + + state = module.params['state'] + image = module.params['image'] + + if state == 'switch': + command = ['bootc', 'switch', image, '--retain'] + elif state == 'latest': + command = ['bootc', 'upgrade'] + + locale = get_best_parsable_locale(module) + module.run_command_environ_update = dict(LANG=locale, LC_ALL=locale, LC_MESSAGES=locale, LC_CTYPE=locale, LANGUAGE=locale) + rc, stdout, 
err = module.run_command(command, check_rc=True) + + if 'Queued for next boot: ' in stdout: + result = {'changed': True, 'stdout': stdout} + module.exit_json(**result) + elif 'No changes in ' in stdout or 'Image specification is unchanged.' in stdout: + result = {'changed': False, 'stdout': stdout} + module.exit_json(**result) + else: + result = {'changed': False, 'stderr': err} + module.fail_json(msg='ERROR: Command execution failed.', **result) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/packaging/language/bower.py b/plugins/modules/bower.py similarity index 84% rename from plugins/modules/packaging/language/bower.py rename to plugins/modules/bower.py index 911d99b7d9..fd4e2c4920 100644 --- a/plugins/modules/packaging/language/bower.py +++ b/plugins/modules/bower.py @@ -1,57 +1,62 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- -# (c) 2014, Michael Warkentin -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# Copyright (c) 2014, Michael Warkentin +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: bower -short_description: Manage bower packages with bower +short_description: Manage bower packages with C(bower) description: - - Manage bower packages with bower + - Manage bower packages with C(bower). author: "Michael Warkentin (@mwarkentin)" +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: none + diff_mode: + support: none options: name: type: str description: - - The name of a bower package to install + - The name of a bower package to install. 
offline: description: - - Install packages from local cache, if the packages were installed before + - Install packages from local cache, if the packages were installed before. type: bool - default: 'no' + default: false production: description: - - Install with --production flag + - Install with C(--production) flag. type: bool - default: 'no' + default: false path: type: path description: - - The base path where to install the bower packages + - The base path where to install the bower packages. required: true relative_execpath: type: path description: - - Relative path to bower executable from install path + - Relative path to bower executable from install path. state: type: str description: - - The state of the bower package + - The state of the bower package. default: present - choices: [ "present", "absent", "latest" ] + choices: ["present", "absent", "latest"] version: type: str description: - - The version to be installed -''' + - The version to be installed. +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: Install "bootstrap" bower package. 
community.general.bower: name: bootstrap @@ -79,11 +84,12 @@ EXAMPLES = ''' - npm: path: /app/location name: bower - global: no + global: false - community.general.bower: path: /app/location relative_execpath: node_modules/.bin -''' +""" + import json import os @@ -179,13 +185,13 @@ class Bower(object): def main(): arg_spec = dict( - name=dict(default=None), + name=dict(), offline=dict(default=False, type='bool'), production=dict(default=False, type='bool'), path=dict(required=True, type='path'), - relative_execpath=dict(default=None, required=False, type='path'), + relative_execpath=dict(type='path'), state=dict(default='present', choices=['present', 'absent', 'latest', ]), - version=dict(default=None), + version=dict(), ) module = AnsibleModule( argument_spec=arg_spec diff --git a/plugins/modules/btrfs_info.py b/plugins/modules/btrfs_info.py new file mode 100644 index 0000000000..e05b6e6c6d --- /dev/null +++ b/plugins/modules/btrfs_info.py @@ -0,0 +1,103 @@ +#!/usr/bin/python + +# Copyright (c) 2022, Gregory Furlong +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + +DOCUMENTATION = r""" +module: btrfs_info +short_description: Query btrfs filesystem info +version_added: "6.6.0" +description: Query status of available btrfs filesystems, including UUID, label, subvolumes and mountpoints. + +author: + - Gregory Furlong (@gnfzdz) + +extends_documentation_fragment: + - community.general.attributes + - community.general.attributes.info_module +""" + +EXAMPLES = r""" +- name: Query information about mounted btrfs filesystems + community.general.btrfs_info: + register: my_btrfs_info +""" + +RETURN = r""" +filesystems: + description: Summaries of the current state for all btrfs filesystems found on the target host. + type: list + elements: dict + returned: success + contains: + uuid: + description: A unique identifier assigned to the filesystem. 
+ type: str + sample: 96c9c605-1454-49b8-a63a-15e2584c208e + label: + description: An optional label assigned to the filesystem. + type: str + sample: Tank + devices: + description: A list of devices assigned to the filesystem. + type: list + sample: + - /dev/sda1 + - /dev/sdb1 + default_subvolume: + description: The ID of the filesystem's default subvolume. + type: int + sample: 5 + subvolumes: + description: A list of dicts containing metadata for all of the filesystem's subvolumes. + type: list + elements: dict + contains: + id: + description: An identifier assigned to the subvolume, unique within the containing filesystem. + type: int + sample: 256 + mountpoints: + description: Paths where the subvolume is mounted on the targeted host. + type: list + sample: ["/home"] + parent: + description: The identifier of this subvolume's parent. + type: int + sample: 5 + path: + description: The full path of the subvolume relative to the btrfs fileystem's root. + type: str + sample: /@home +""" + + +from ansible_collections.community.general.plugins.module_utils.btrfs import BtrfsFilesystemsProvider +from ansible.module_utils.basic import AnsibleModule + + +def run_module(): + module_args = dict() + + module = AnsibleModule( + argument_spec=module_args, + supports_check_mode=True + ) + + provider = BtrfsFilesystemsProvider(module) + filesystems = [x.get_summary() for x in provider.get_filesystems()] + result = { + "filesystems": filesystems, + } + module.exit_json(**result) + + +def main(): + run_module() + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/btrfs_subvolume.py b/plugins/modules/btrfs_subvolume.py new file mode 100644 index 0000000000..92c3c99c02 --- /dev/null +++ b/plugins/modules/btrfs_subvolume.py @@ -0,0 +1,676 @@ +#!/usr/bin/python + +# Copyright (c) 2022, Gregory Furlong +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import 
annotations + +DOCUMENTATION = r""" +module: btrfs_subvolume +short_description: Manage btrfs subvolumes +version_added: "6.6.0" + +description: Creates, updates and deletes btrfs subvolumes and snapshots. + +options: + automount: + description: + - Allow the module to temporarily mount the targeted btrfs filesystem in order to validate the current state and make + any required changes. + type: bool + default: false + default: + description: + - Make the subvolume specified by O(name) the filesystem's default subvolume. + type: bool + default: false + filesystem_device: + description: + - A block device contained within the btrfs filesystem to be targeted. + - Useful when multiple btrfs filesystems are present to specify which filesystem should be targeted. + type: path + filesystem_label: + description: + - A descriptive label assigned to the btrfs filesystem to be targeted. + - Useful when multiple btrfs filesystems are present to specify which filesystem should be targeted. + type: str + filesystem_uuid: + description: + - A unique identifier assigned to the btrfs filesystem to be targeted. + - Useful when multiple btrfs filesystems are present to specify which filesystem should be targeted. + type: str + name: + description: + - Name of the subvolume/snapshot to be targeted. + required: true + type: str + recursive: + description: + - When true, indicates that parent/child subvolumes should be created/removedas necessary to complete the operation + (for O(state=present) and O(state=absent) respectively). + type: bool + default: false + snapshot_source: + description: + - Identifies the source subvolume for the created snapshot. + - Infers that the created subvolume is a snapshot. + type: str + snapshot_conflict: + description: + - Policy defining behavior when a subvolume already exists at the path of the requested snapshot. 
+ - V(skip) - Create a snapshot only if a subvolume does not yet exist at the target location, otherwise indicate that + no change is required. Warning, this option does not yet verify that the target subvolume was generated from a snapshot + of the requested source. + - V(clobber) - If a subvolume already exists at the requested location, delete it first. This option is not idempotent + and results in a new snapshot being generated on every execution. + - V(error) - If a subvolume already exists at the requested location, return an error. This option is not idempotent + and results in an error on replay of the module. + type: str + choices: [skip, clobber, error] + default: skip + state: + description: + - Indicates the current state of the targeted subvolume. + type: str + choices: [absent, present] + default: present + +notes: + - If any or all of the options O(filesystem_device), O(filesystem_label) or O(filesystem_uuid) parameters are provided, + there is expected to be a matching btrfs filesystem. If none are provided and only a single btrfs filesystem exists or + only a single btrfs filesystem is mounted, that filesystem is used; otherwise, the module takes no action and returns an + error. +extends_documentation_fragment: + - community.general.attributes + +attributes: + check_mode: + support: partial + details: + - In some scenarios it may erroneously report intermediate subvolumes being created. After mounting, if a directory + like file is found where the subvolume would have been created, the operation is skipped. 
+ diff_mode: + support: none + +author: + - Gregory Furlong (@gnfzdz) +""" + +EXAMPLES = r""" +- name: Create a @home subvolume under the root subvolume + community.general.btrfs_subvolume: + name: /@home + filesystem_device: /dev/vda2 + +- name: Remove the @home subvolume if it exists + community.general.btrfs_subvolume: + name: /@home + state: absent + filesystem_device: /dev/vda2 + +- name: Create a snapshot of the root subvolume named @ + community.general.btrfs_subvolume: + name: /@ + snapshot_source: / + filesystem_device: /dev/vda2 + +- name: Create a snapshot of the root subvolume and make it the new default subvolume + community.general.btrfs_subvolume: + name: /@ + snapshot_source: / + default: true + filesystem_device: /dev/vda2 + +- name: Create a snapshot of the /@ subvolume and recursively creating intermediate subvolumes as required + community.general.btrfs_subvolume: + name: /@snapshots/@2022_06_09 + snapshot_source: /@ + recursive: true + filesystem_device: /dev/vda2 + +- name: Remove the /@ subvolume and recursively delete child subvolumes as required + community.general.btrfs_subvolume: + name: /@snapshots/@2022_06_09 + snapshot_source: /@ + recursive: true + filesystem_device: /dev/vda2 +""" + +RETURN = r""" +filesystem: + description: + - A summary of the final state of the targeted btrfs filesystem. + type: dict + returned: success + contains: + uuid: + description: A unique identifier assigned to the filesystem. + returned: success + type: str + sample: 96c9c605-1454-49b8-a63a-15e2584c208e + label: + description: An optional label assigned to the filesystem. + returned: success + type: str + sample: Tank + devices: + description: A list of devices assigned to the filesystem. + returned: success + type: list + sample: + - /dev/sda1 + - /dev/sdb1 + default_subvolume: + description: The ID of the filesystem's default subvolume. 
+ returned: success and if filesystem is mounted + type: int + sample: 5 + subvolumes: + description: A list of dicts containing metadata for all of the filesystem's subvolumes. + returned: success and if filesystem is mounted + type: list + elements: dict + contains: + id: + description: An identifier assigned to the subvolume, unique within the containing filesystem. + type: int + sample: 256 + mountpoints: + description: Paths where the subvolume is mounted on the targeted host. + type: list + sample: ["/home"] + parent: + description: The identifier of this subvolume's parent. + type: int + sample: 5 + path: + description: The full path of the subvolume relative to the btrfs fileystem's root. + type: str + sample: /@home + +modifications: + description: + - A list where each element describes a change made to the target btrfs filesystem. + type: list + returned: Success + elements: str + +target_subvolume_id: + description: + - The ID of the subvolume specified with the O(name) parameter, either pre-existing or created as part of module execution. + type: int + sample: 257 + returned: Success and subvolume exists after module execution +""" + +from ansible_collections.community.general.plugins.module_utils.btrfs import BtrfsFilesystemsProvider, BtrfsCommands, BtrfsModuleException +from ansible_collections.community.general.plugins.module_utils.btrfs import normalize_subvolume_path +from ansible.module_utils.basic import AnsibleModule +import os +import tempfile + + +class BtrfsSubvolumeModule(object): + + __BTRFS_ROOT_SUBVOLUME = '/' + __BTRFS_ROOT_SUBVOLUME_ID = 5 + __BTRFS_SUBVOLUME_INODE_NUMBER = 256 + + __CREATE_SUBVOLUME_OPERATION = 'create' + __CREATE_SNAPSHOT_OPERATION = 'snapshot' + __DELETE_SUBVOLUME_OPERATION = 'delete' + __SET_DEFAULT_SUBVOLUME_OPERATION = 'set-default' + + __UNKNOWN_SUBVOLUME_ID = '?' 
+ + def __init__(self, module): + self.module = module + self.__btrfs_api = BtrfsCommands(module) + self.__provider = BtrfsFilesystemsProvider(module) + + # module parameters + name = self.module.params['name'] + self.__name = normalize_subvolume_path(name) if name is not None else None + self.__state = self.module.params['state'] + + self.__automount = self.module.params['automount'] + self.__default = self.module.params['default'] + self.__filesystem_device = self.module.params['filesystem_device'] + self.__filesystem_label = self.module.params['filesystem_label'] + self.__filesystem_uuid = self.module.params['filesystem_uuid'] + self.__recursive = self.module.params['recursive'] + self.__snapshot_conflict = self.module.params['snapshot_conflict'] + snapshot_source = self.module.params['snapshot_source'] + self.__snapshot_source = normalize_subvolume_path(snapshot_source) if snapshot_source is not None else None + + # execution state + self.__filesystem = None + self.__required_mounts = [] + self.__unit_of_work = [] + self.__completed_work = [] + self.__temporary_mounts = dict() + + def run(self): + error = None + try: + self.__load_filesystem() + self.__prepare_unit_of_work() + + if not self.module.check_mode: + # check required mounts & mount + if len(self.__unit_of_work) > 0: + self.__execute_unit_of_work() + self.__filesystem.refresh() + else: + # check required mounts + self.__completed_work.extend(self.__unit_of_work) + except Exception as e: + error = e + finally: + self.__cleanup_mounts() + if self.__filesystem is not None: + self.__filesystem.refresh_mountpoints() + + return (error, self.get_results()) + + # Identify the targeted filesystem and obtain the current state + def __load_filesystem(self): + if self.__has_filesystem_criteria(): + filesystem = self.__find_matching_filesytem() + else: + filesystem = self.__find_default_filesystem() + + # The filesystem must be mounted to obtain the current state (subvolumes, default, etc) + if not 
filesystem.is_mounted(): + if not self.__automount: + raise BtrfsModuleException( + "Target filesystem uuid=%s is not currently mounted and automount=False." + "Mount explicitly before module execution or pass automount=True" % filesystem.uuid) + elif self.module.check_mode: + # TODO is failing the module an appropriate outcome in this scenario? + raise BtrfsModuleException( + "Target filesystem uuid=%s is not currently mounted. Unable to validate the current" + "state while running with check_mode=True" % filesystem.uuid) + else: + self.__mount_subvolume_id_to_tempdir(filesystem, self.__BTRFS_ROOT_SUBVOLUME_ID) + filesystem.refresh() + self.__filesystem = filesystem + + def __has_filesystem_criteria(self): + return self.__filesystem_uuid is not None or self.__filesystem_label is not None or self.__filesystem_device is not None + + def __find_matching_filesytem(self): + criteria = { + 'uuid': self.__filesystem_uuid, + 'label': self.__filesystem_label, + 'device': self.__filesystem_device, + } + return self.__provider.get_matching_filesystem(criteria) + + def __find_default_filesystem(self): + filesystems = self.__provider.get_filesystems() + filesystem = None + + if len(filesystems) == 1: + filesystem = filesystems[0] + else: + mounted_filesystems = [x for x in filesystems if x.is_mounted()] + if len(mounted_filesystems) == 1: + filesystem = mounted_filesystems[0] + + if filesystem is not None: + return filesystem + else: + raise BtrfsModuleException( + "Failed to automatically identify targeted filesystem. " + "No explicit device indicated and found %d available filesystems." 
% len(filesystems) + ) + + # Prepare unit of work + def __prepare_unit_of_work(self): + if self.__state == "present": + if self.__snapshot_source is None: + self.__prepare_subvolume_present() + else: + self.__prepare_snapshot_present() + + if self.__default: + self.__prepare_set_default() + elif self.__state == "absent": + self.__prepare_subvolume_absent() + + def __prepare_subvolume_present(self): + subvolume = self.__filesystem.get_subvolume_by_name(self.__name) + if subvolume is None: + self.__prepare_before_create_subvolume(self.__name) + self.__stage_create_subvolume(self.__name) + + def __prepare_before_create_subvolume(self, subvolume_name): + closest_parent = self.__filesystem.get_nearest_subvolume(subvolume_name) + self.__stage_required_mount(closest_parent) + if self.__recursive: + self.__prepare_create_intermediates(closest_parent, subvolume_name) + + def __prepare_create_intermediates(self, closest_subvolume, subvolume_name): + relative_path = closest_subvolume.get_child_relative_path(self.__name) + missing_subvolumes = [x for x in relative_path.split(os.path.sep) if len(x) > 0] + if len(missing_subvolumes) > 1: + current = closest_subvolume.path + for s in missing_subvolumes[:-1]: + separator = os.path.sep if current[-1] != os.path.sep else "" + current = current + separator + s + self.__stage_create_subvolume(current, True) + + def __prepare_snapshot_present(self): + source_subvolume = self.__filesystem.get_subvolume_by_name(self.__snapshot_source) + subvolume = self.__filesystem.get_subvolume_by_name(self.__name) + subvolume_exists = subvolume is not None + + if subvolume_exists: + if self.__snapshot_conflict == "skip": + # No change required + return + elif self.__snapshot_conflict == "error": + raise BtrfsModuleException("Target subvolume=%s already exists and snapshot_conflict='error'" % self.__name) + + if source_subvolume is None: + raise BtrfsModuleException("Source subvolume %s does not exist" % self.__snapshot_source) + elif subvolume is not 
None and source_subvolume.id == subvolume.id: + raise BtrfsModuleException("Snapshot source and target are the same.") + else: + self.__stage_required_mount(source_subvolume) + + if subvolume_exists and self.__snapshot_conflict == "clobber": + self.__prepare_delete_subvolume_tree(subvolume) + elif not subvolume_exists: + self.__prepare_before_create_subvolume(self.__name) + + self.__stage_create_snapshot(source_subvolume, self.__name) + + def __prepare_subvolume_absent(self): + subvolume = self.__filesystem.get_subvolume_by_name(self.__name) + if subvolume is not None: + self.__prepare_delete_subvolume_tree(subvolume) + + def __prepare_delete_subvolume_tree(self, subvolume): + if subvolume.is_filesystem_root(): + raise BtrfsModuleException("Can not delete the filesystem's root subvolume") + if not self.__recursive and len(subvolume.get_child_subvolumes()) > 0: + raise BtrfsModuleException("Subvolume targeted for deletion %s has children and recursive=False." + "Either explicitly delete the child subvolumes first or pass " + "parameter recursive=True." % subvolume.path) + + self.__stage_required_mount(subvolume.get_parent_subvolume()) + queue = self.__prepare_recursive_delete_order(subvolume) if self.__recursive else [subvolume] + # prepare unit of work + for s in queue: + if s.is_mounted(): + # TODO potentially unmount the subvolume if automount=True ? 
+ raise BtrfsModuleException("Can not delete mounted subvolume=%s" % s.path) + if s.is_filesystem_default(): + self.__stage_set_default_subvolume(self.__BTRFS_ROOT_SUBVOLUME, self.__BTRFS_ROOT_SUBVOLUME_ID) + self.__stage_delete_subvolume(s) + + def __prepare_recursive_delete_order(self, subvolume): + """Return the subvolume and all descendents as a list, ordered so that descendents always occur before their ancestors""" + pending = [subvolume] + ordered = [] + while len(pending) > 0: + next = pending.pop() + ordered.append(next) + pending.extend(next.get_child_subvolumes()) + ordered.reverse() # reverse to ensure children are deleted before their parent + return ordered + + def __prepare_set_default(self): + subvolume = self.__filesystem.get_subvolume_by_name(self.__name) + subvolume_id = subvolume.id if subvolume is not None else None + + if self.__filesystem.default_subvolid != subvolume_id: + self.__stage_set_default_subvolume(self.__name, subvolume_id) + + # Stage operations to the unit of work + def __stage_required_mount(self, subvolume): + if subvolume.get_mounted_path() is None: + if self.__automount: + self.__required_mounts.append(subvolume) + else: + raise BtrfsModuleException("The requested changes will require the subvolume '%s' to be mounted, but automount=False" % subvolume.path) + + def __stage_create_subvolume(self, subvolume_path, intermediate=False): + """ + Add required creation of an intermediate subvolume to the unit of work + If intermediate is true, the action will be skipped if a directory like file is found at target + after mounting a parent subvolume + """ + self.__unit_of_work.append({ + 'action': self.__CREATE_SUBVOLUME_OPERATION, + 'target': subvolume_path, + 'intermediate': intermediate, + }) + + def __stage_create_snapshot(self, source_subvolume, target_subvolume_path): + """Add creation of a snapshot from source to target to the unit of work""" + self.__unit_of_work.append({ + 'action': self.__CREATE_SNAPSHOT_OPERATION, + 
'source': source_subvolume.path, + 'source_id': source_subvolume.id, + 'target': target_subvolume_path, + }) + + def __stage_delete_subvolume(self, subvolume): + """Add deletion of the target subvolume to the unit of work""" + self.__unit_of_work.append({ + 'action': self.__DELETE_SUBVOLUME_OPERATION, + 'target': subvolume.path, + 'target_id': subvolume.id, + }) + + def __stage_set_default_subvolume(self, subvolume_path, subvolume_id=None): + """Add update of the filesystem's default subvolume to the unit of work""" + self.__unit_of_work.append({ + 'action': self.__SET_DEFAULT_SUBVOLUME_OPERATION, + 'target': subvolume_path, + 'target_id': subvolume_id, + }) + + # Execute the unit of work + def __execute_unit_of_work(self): + self.__check_required_mounts() + for op in self.__unit_of_work: + if op['action'] == self.__CREATE_SUBVOLUME_OPERATION: + self.__execute_create_subvolume(op) + elif op['action'] == self.__CREATE_SNAPSHOT_OPERATION: + self.__execute_create_snapshot(op) + elif op['action'] == self.__DELETE_SUBVOLUME_OPERATION: + self.__execute_delete_subvolume(op) + elif op['action'] == self.__SET_DEFAULT_SUBVOLUME_OPERATION: + self.__execute_set_default_subvolume(op) + else: + raise ValueError("Unknown operation type '%s'" % op['action']) + + def __execute_create_subvolume(self, operation): + target_mounted_path = self.__filesystem.get_mountpath_as_child(operation['target']) + if not self.__is_existing_directory_like(target_mounted_path): + self.__btrfs_api.subvolume_create(target_mounted_path) + self.__completed_work.append(operation) + + def __execute_create_snapshot(self, operation): + source_subvolume = self.__filesystem.get_subvolume_by_name(operation['source']) + source_mounted_path = source_subvolume.get_mounted_path() + target_mounted_path = self.__filesystem.get_mountpath_as_child(operation['target']) + + self.__btrfs_api.subvolume_snapshot(source_mounted_path, target_mounted_path) + self.__completed_work.append(operation) + + def 
__execute_delete_subvolume(self, operation): + target_mounted_path = self.__filesystem.get_mountpath_as_child(operation['target']) + self.__btrfs_api.subvolume_delete(target_mounted_path) + self.__completed_work.append(operation) + + def __execute_set_default_subvolume(self, operation): + target = operation['target'] + target_id = operation['target_id'] + + if target_id is None: + target_subvolume = self.__filesystem.get_subvolume_by_name(target) + + if target_subvolume is None: + self.__filesystem.refresh() # the target may have been created earlier in module execution + target_subvolume = self.__filesystem.get_subvolume_by_name(target) + + if target_subvolume is None: + raise BtrfsModuleException("Failed to find existing subvolume '%s'" % target) + else: + target_id = target_subvolume.id + + self.__btrfs_api.subvolume_set_default(self.__filesystem.get_any_mountpoint(), target_id) + self.__completed_work.append(operation) + + def __is_existing_directory_like(self, path): + return os.path.exists(path) and ( + os.path.isdir(path) or + os.stat(path).st_ino == self.__BTRFS_SUBVOLUME_INODE_NUMBER + ) + + def __check_required_mounts(self): + filtered = self.__filter_child_subvolumes(self.__required_mounts) + if len(filtered) > 0: + for subvolume in filtered: + self.__mount_subvolume_id_to_tempdir(self.__filesystem, subvolume.id) + self.__filesystem.refresh_mountpoints() + + def __filter_child_subvolumes(self, subvolumes): + """Filter the provided list of subvolumes to remove any that are a child of another item in the list""" + filtered = [] + last = None + ordered = sorted(subvolumes, key=lambda x: x.path) + for next in ordered: + if last is None or not next.path[0:len(last)] == last: + filtered.append(next) + last = next.path + return filtered + + # Create/cleanup temporary mountpoints + def __mount_subvolume_id_to_tempdir(self, filesystem, subvolid): + # this check should be redundant + if self.module.check_mode or not self.__automount: + raise 
BtrfsModuleException("Unable to temporarily mount required subvolumes" + "with automount=%s and check_mode=%s" % (self.__automount, self.module.check_mode)) + + cache_key = "%s:%d" % (filesystem.uuid, subvolid) + # The subvolume was already mounted, so return the current path + if cache_key in self.__temporary_mounts: + return self.__temporary_mounts[cache_key] + + device = filesystem.devices[0] + mountpoint = tempfile.mkdtemp(dir="/tmp") + self.__temporary_mounts[cache_key] = mountpoint + + mount = self.module.get_bin_path("mount", required=True) + command = [mount, "-o", "noatime,subvolid=%d" % subvolid, device, mountpoint] + result = self.module.run_command(command, check_rc=True) + + return mountpoint + + def __cleanup_mounts(self): + for key in self.__temporary_mounts.keys(): + self.__cleanup_mount(self.__temporary_mounts[key]) + + def __cleanup_mount(self, mountpoint): + umount = self.module.get_bin_path("umount", required=True) + result = self.module.run_command([umount, mountpoint]) + if result[0] == 0: + rmdir = self.module.get_bin_path("rmdir", required=True) + self.module.run_command([rmdir, mountpoint]) + + # Format and return results + def get_results(self): + target = self.__filesystem.get_subvolume_by_name(self.__name) + return dict( + changed=len(self.__completed_work) > 0, + filesystem=self.__filesystem.get_summary(), + modifications=self.__get_formatted_modifications(), + target_subvolume_id=(target.id if target is not None else None) + ) + + def __get_formatted_modifications(self): + return [self.__format_operation_result(op) for op in self.__completed_work] + + def __format_operation_result(self, operation): + action_type = operation['action'] + if action_type == self.__CREATE_SUBVOLUME_OPERATION: + return self.__format_create_subvolume_result(operation) + elif action_type == self.__CREATE_SNAPSHOT_OPERATION: + return self.__format_create_snapshot_result(operation) + elif action_type == self.__DELETE_SUBVOLUME_OPERATION: + return 
self.__format_delete_subvolume_result(operation) + elif action_type == self.__SET_DEFAULT_SUBVOLUME_OPERATION: + return self.__format_set_default_subvolume_result(operation) + else: + raise ValueError("Unknown operation type '%s'" % operation['action']) + + def __format_create_subvolume_result(self, operation): + target = operation['target'] + target_subvolume = self.__filesystem.get_subvolume_by_name(target) + target_id = target_subvolume.id if target_subvolume is not None else self.__UNKNOWN_SUBVOLUME_ID + return "Created subvolume '%s' (%s)" % (target, target_id) + + def __format_create_snapshot_result(self, operation): + source = operation['source'] + source_id = operation['source_id'] + + target = operation['target'] + target_subvolume = self.__filesystem.get_subvolume_by_name(target) + target_id = target_subvolume.id if target_subvolume is not None else self.__UNKNOWN_SUBVOLUME_ID + return "Created snapshot '%s' (%s) from '%s' (%s)" % (target, target_id, source, source_id) + + def __format_delete_subvolume_result(self, operation): + target = operation['target'] + target_id = operation['target_id'] + return "Deleted subvolume '%s' (%s)" % (target, target_id) + + def __format_set_default_subvolume_result(self, operation): + target = operation['target'] + if 'target_id' in operation: + target_id = operation['target_id'] + else: + target_subvolume = self.__filesystem.get_subvolume_by_name(target) + target_id = target_subvolume.id if target_subvolume is not None else self.__UNKNOWN_SUBVOLUME_ID + return "Updated default subvolume to '%s' (%s)" % (target, target_id) + + +def run_module(): + module_args = dict( + automount=dict(type='bool', default=False), + default=dict(type='bool', default=False), + filesystem_device=dict(type='path'), + filesystem_label=dict(type='str'), + filesystem_uuid=dict(type='str'), + name=dict(type='str', required=True), + recursive=dict(type='bool', default=False), + state=dict(type='str', default='present', choices=['present', 
'absent']), + snapshot_source=dict(type='str'), + snapshot_conflict=dict(type='str', default='skip', choices=['skip', 'clobber', 'error']) + ) + + module = AnsibleModule( + argument_spec=module_args, + supports_check_mode=True + ) + + subvolume = BtrfsSubvolumeModule(module) + error, result = subvolume.run() + if error is not None: + module.fail_json(str(error), **result) + else: + module.exit_json(**result) + + +def main(): + run_module() + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/packaging/language/bundler.py b/plugins/modules/bundler.py similarity index 55% rename from plugins/modules/packaging/language/bundler.py rename to plugins/modules/bundler.py index 43f8cfa2ee..2395cda332 100644 --- a/plugins/modules/packaging/language/bundler.py +++ b/plugins/modules/bundler.py @@ -1,97 +1,94 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- -# (c) 2015, Tim Hoiberg -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# Copyright (c) 2015, Tim Hoiberg +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: bundler short_description: Manage Ruby Gem dependencies with Bundler description: - - Manage installation and Gem version dependencies for Ruby using the Bundler gem + - Manage installation and Gem version dependencies for Ruby using the Bundler gem. +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: none options: executable: type: str description: - - The path to the bundler executable + - The path to the bundler executable. state: type: str description: - - The desired state of the Gem bundle. 
C(latest) updates gems to the most recent, acceptable version + - The desired state of the Gem bundle. V(latest) updates gems to the most recent, acceptable version. choices: [present, latest] default: present chdir: type: path description: - - The directory to execute the bundler commands from. This directory - needs to contain a valid Gemfile or .bundle/ directory - - If not specified, it will default to the temporary working directory + - The directory to execute the bundler commands from. This directory needs to contain a valid Gemfile or .bundle/ directory. + - If not specified, it defaults to the temporary working directory. exclude_groups: type: list elements: str description: - - A list of Gemfile groups to exclude during operations. This only - applies when state is C(present). Bundler considers this - a 'remembered' property for the Gemfile and will automatically exclude - groups in future operations even if C(exclude_groups) is not set + - A list of Gemfile groups to exclude during operations. This only applies when O(state=present). Bundler considers + this a 'remembered' property for the Gemfile and automatically excludes groups in future operations even if O(exclude_groups) + is not set. clean: description: - - Only applies if state is C(present). If set removes any gems on the - target host that are not in the gemfile + - Only applies if O(state=present). If set removes any gems on the target host that are not in the gemfile. type: bool - default: 'no' + default: false gemfile: type: path description: - - Only applies if state is C(present). The path to the gemfile to use to install gems. - - If not specified it will default to the Gemfile in current directory + - Only applies if O(state=present). The path to the gemfile to use to install gems. + - If not specified it defaults to the Gemfile in current directory. 
local: description: - - If set only installs gems from the cache on the target host + - If set only installs gems from the cache on the target host. type: bool - default: 'no' + default: false deployment_mode: description: - - Only applies if state is C(present). If set it will install gems in - ./vendor/bundle instead of the default location. Requires a Gemfile.lock - file to have been created prior + - Only applies if O(state=present). If set it installs gems in C(./vendor/bundle) instead of the default location. Requires + a C(Gemfile.lock) file to have been created prior. type: bool - default: 'no' + default: false user_install: description: - - Only applies if state is C(present). Installs gems in the local user's cache or for all users + - Only applies if O(state=present). Installs gems in the local user's cache or for all users. type: bool - default: 'yes' + default: true gem_path: type: path description: - - Only applies if state is C(present). Specifies the directory to - install the gems into. If C(chdir) is set then this path is relative to - C(chdir) - - If not specified the default RubyGems gem paths will be used. + - Only applies if O(state=present). Specifies the directory to install the gems into. If O(chdir) is set then this path + is relative to O(chdir). + - If not specified the default RubyGems gem paths are used. binstub_directory: type: path description: - - Only applies if state is C(present). Specifies the directory to - install any gem bins files to. When executed the bin files will run - within the context of the Gemfile and fail if any required gem - dependencies are not installed. If C(chdir) is set then this path is - relative to C(chdir) + - Only applies if O(state=present). Specifies the directory to install any gem bins files to. When executed the bin + files run within the context of the Gemfile and fail if any required gem dependencies are not installed. If O(chdir) + is set then this path is relative to O(chdir). 
extra_args: type: str description: - - A space separated string of additional commands that can be applied to - the Bundler command. Refer to the Bundler documentation for more - information + - A space separated string of additional commands that can be applied to the Bundler command. Refer to the Bundler documentation + for more information. author: "Tim Hoiberg (@thoiberg)" -''' +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: Install gems from a Gemfile in the current directory community.general.bundler: state: present @@ -105,7 +102,7 @@ EXAMPLES = ''' - name: Install gems into ./vendor/bundle community.general.bundler: state: present - deployment_mode: yes + deployment_mode: true - name: Install gems using a Gemfile in another directory community.general.bundler: @@ -116,7 +113,7 @@ EXAMPLES = ''' community.general.bundler: state: latest chdir: ~/rails_project -''' +""" from ansible.module_utils.basic import AnsibleModule @@ -132,18 +129,18 @@ def get_bundler_executable(module): def main(): module = AnsibleModule( argument_spec=dict( - executable=dict(default=None, required=False), - state=dict(default='present', required=False, choices=['present', 'latest']), - chdir=dict(default=None, required=False, type='path'), - exclude_groups=dict(default=None, required=False, type='list', elements='str'), - clean=dict(default=False, required=False, type='bool'), - gemfile=dict(default=None, required=False, type='path'), - local=dict(default=False, required=False, type='bool'), - deployment_mode=dict(default=False, required=False, type='bool'), - user_install=dict(default=True, required=False, type='bool'), - gem_path=dict(default=None, required=False, type='path'), - binstub_directory=dict(default=None, required=False, type='path'), - extra_args=dict(default=None, required=False), + executable=dict(), + state=dict(default='present', choices=['present', 'latest']), + chdir=dict(type='path'), + exclude_groups=dict(type='list', elements='str'), + clean=dict(default=False, 
type='bool'), + gemfile=dict(type='path'), + local=dict(default=False, type='bool'), + deployment_mode=dict(default=False, type='bool'), + user_install=dict(default=True, type='bool'), + gem_path=dict(type='path'), + binstub_directory=dict(type='path'), + extra_args=dict(), ), supports_check_mode=True ) diff --git a/plugins/modules/source_control/bzr.py b/plugins/modules/bzr.py similarity index 75% rename from plugins/modules/source_control/bzr.py rename to plugins/modules/bzr.py index a4ce4bc075..3493b9476d 100644 --- a/plugins/modules/source_control/bzr.py +++ b/plugins/modules/bzr.py @@ -1,60 +1,61 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- -# Copyright: (c) 2013, André Paramés +# Copyright (c) 2013, André Paramés # Based on the Git module by Michael DeHaan -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: bzr author: -- André Paramés (@andreparames) + - André Paramés (@andreparames) short_description: Deploy software (or files) from bzr branches description: - - Manage I(bzr) branches to deploy files or software. + - Manage C(bzr) branches to deploy files or software. +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: none + diff_mode: + support: none options: - name: - description: - - SSH or HTTP protocol address of the parent branch. - aliases: [ parent ] - required: yes - type: str - dest: - description: - - Absolute path of where the branch should be cloned to. - required: yes - type: path - version: - description: - - What version of the branch to clone. This can be the - bzr revno or revid. 
- default: head - type: str - force: - description: - - If C(yes), any modified files in the working - tree will be discarded. Before 1.9 the default - value was C(yes). - type: bool - default: 'no' - executable: - description: - - Path to bzr executable to use. If not supplied, - the normal mechanism for resolving binary paths will be used. - type: str -''' + name: + description: + - SSH or HTTP protocol address of the parent branch. + aliases: [parent] + required: true + type: str + dest: + description: + - Absolute path of where the branch should be cloned to. + required: true + type: path + version: + description: + - What version of the branch to clone. This can be the bzr revno or revid. + default: head + type: str + force: + description: + - If V(true), any modified files in the working tree is discarded. + type: bool + default: false + executable: + description: + - Path to C(bzr) executable to use. If not supplied, the normal mechanism for resolving binary paths is used. + type: str +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: Checkout community.general.bzr: name: bzr+ssh://foosball.example.org/path/to/branch dest: /srv/checkout version: 22 -''' +""" import os import re @@ -77,7 +78,7 @@ class Bzr(object): def get_version(self): '''samples the version of the bzr branch''' - cmd = "%s revno" % self.bzr_path + cmd = [self.bzr_path, "revno"] rc, stdout, stderr = self.module.run_command(cmd, cwd=self.dest) revno = stdout.strip() return revno @@ -97,11 +98,12 @@ class Bzr(object): def has_local_mods(self): - cmd = "%s status -S" % self.bzr_path + cmd = [self.bzr_path, "status", "-S"] rc, stdout, stderr = self.module.run_command(cmd, cwd=self.dest) lines = stdout.splitlines() + mods_re = re.compile('^\\?\\?.*$') - lines = filter(lambda c: not re.search('^\\?\\?.*$', c), lines) + lines = [c for c in lines if not mods_re.search(c)] return len(lines) > 0 def reset(self, force): @@ -111,7 +113,7 @@ class Bzr(object): tree since that commit. 
''' if not force and self.has_local_mods(): - self.module.fail_json(msg="Local modifications exist in branch (force=no).") + self.module.fail_json(msg="Local modifications exist in branch (force=false).") return self._command(["revert"], check_rc=True, cwd=self.dest) def fetch(self): diff --git a/plugins/modules/notification/campfire.py b/plugins/modules/campfire.py similarity index 76% rename from plugins/modules/notification/campfire.py rename to plugins/modules/campfire.py index c684823889..c1da278634 100644 --- a/plugins/modules/notification/campfire.py +++ b/plugins/modules/campfire.py @@ -1,20 +1,25 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- # -# Copyright: Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# Copyright Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: campfire short_description: Send a message to Campfire description: - - Send a message to Campfire. - - Messages with newlines will result in a "Paste" message being sent. + - Send a message to Campfire. + - Messages with newlines result in a "Paste" message being sent. +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: none + diff_mode: + support: none options: subscription: type: str @@ -41,22 +46,58 @@ options: description: - Send a notification sound before the message. 
required: false - choices: ["56k", "bell", "bezos", "bueller", "clowntown", - "cottoneyejoe", "crickets", "dadgummit", "dangerzone", - "danielsan", "deeper", "drama", "greatjob", "greyjoy", - "guarantee", "heygirl", "horn", "horror", - "inconceivable", "live", "loggins", "makeitso", "noooo", - "nyan", "ohmy", "ohyeah", "pushit", "rimshot", - "rollout", "rumble", "sax", "secret", "sexyback", - "story", "tada", "tmyk", "trololo", "trombone", "unix", - "vuvuzela", "what", "whoomp", "yeah", "yodel"] + choices: + - 56k + - bell + - bezos + - bueller + - clowntown + - cottoneyejoe + - crickets + - dadgummit + - dangerzone + - danielsan + - deeper + - drama + - greatjob + - greyjoy + - guarantee + - heygirl + - horn + - horror + - inconceivable + - live + - loggins + - makeitso + - noooo + - nyan + - ohmy + - ohyeah + - pushit + - rimshot + - rollout + - rumble + - sax + - secret + - sexyback + - story + - tada + - tmyk + - trololo + - trombone + - unix + - vuvuzela + - what + - whoomp + - yeah + - yodel # informational: requirements for nodes -requirements: [ ] +requirements: [] author: "Adam Garside (@fabulops)" -''' +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: Send a message to Campfire community.general.campfire: subscription: foo @@ -71,7 +112,7 @@ EXAMPLES = ''' room: 123 notify: loggins msg: Task completed ... with feeling. 
-''' +""" try: from html import escape as html_escape @@ -94,8 +135,7 @@ def main(): token=dict(required=True, no_log=True), room=dict(required=True), msg=dict(required=True), - notify=dict(required=False, - choices=["56k", "bell", "bezos", "bueller", + notify=dict(choices=["56k", "bell", "bezos", "bueller", "clowntown", "cottoneyejoe", "crickets", "dadgummit", "dangerzone", "danielsan", "deeper", "drama", diff --git a/plugins/modules/system/capabilities.py b/plugins/modules/capabilities.py similarity index 72% rename from plugins/modules/system/capabilities.py rename to plugins/modules/capabilities.py index ac6dde6761..64df086d67 100644 --- a/plugins/modules/system/capabilities.py +++ b/plugins/modules/capabilities.py @@ -1,47 +1,52 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- -# Copyright: (c) 2014, Nate Coraor -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# Copyright (c) 2014, Nate Coraor +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = r''' ---- +DOCUMENTATION = r""" module: capabilities short_description: Manage Linux capabilities description: - - This module manipulates files privileges using the Linux capabilities(7) system. + - This module manipulates files privileges using the Linux capabilities(7) system. +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: none options: - path: - description: - - Specifies the path to the file to be managed. 
- type: str - required: yes - aliases: [ key ] - capability: - description: - - Desired capability to set (with operator and flags, if state is C(present)) or remove (if state is C(absent)) - type: str - required: yes - aliases: [ cap ] - state: - description: - - Whether the entry should be present or absent in the file's capabilities. - type: str - choices: [ absent, present ] - default: present + path: + description: + - Specifies the path to the file to be managed. + type: str + required: true + aliases: [key] + capability: + description: + - Desired capability to set (with operator and flags, if O(state=present)) or remove (if O(state=absent)). + type: str + required: true + aliases: [cap] + state: + description: + - Whether the entry should be present or absent in the file's capabilities. + type: str + choices: [absent, present] + default: present notes: - - The capabilities system will automatically transform operators and flags into the effective set, - so for example, C(cap_foo=ep) will probably become C(cap_foo+ep). - - This module does not attempt to determine the final operator and flags to compare, - so you will want to ensure that your capabilities argument matches the final capabilities. + - The capabilities system automatically transforms operators and flags into the effective set, so for example, C(cap_foo=ep) + probably becomes C(cap_foo+ep). + - This module does not attempt to determine the final operator and flags to compare, so you want to ensure that your capabilities + argument matches the final capabilities. 
author: -- Nate Coraor (@natefoo) -''' + - Nate Coraor (@natefoo) +""" -EXAMPLES = r''' +EXAMPLES = r""" - name: Set cap_sys_chroot+ep on /foo community.general.capabilities: path: /foo @@ -53,7 +58,7 @@ EXAMPLES = r''' path: /bar capability: cap_net_bind_service state: absent -''' +""" from ansible.module_utils.basic import AnsibleModule @@ -85,8 +90,8 @@ class CapabilitiesModule(object): if self.module.check_mode: self.module.exit_json(changed=True, msg='capabilities changed') else: - # remove from current cap list if it's already set (but op/flags differ) - current = list(filter(lambda x: x[0] != self.capability_tup[0], current)) + # remove from current cap list if it is already set (but op/flags differ) + current = [x for x in current if x[0] != self.capability_tup[0]] # add new cap with correct op/flags current.append(self.capability_tup) self.module.exit_json(changed=True, state=self.state, msg='capabilities changed', stdout=self.setcap(self.path, current)) @@ -96,13 +101,13 @@ class CapabilitiesModule(object): self.module.exit_json(changed=True, msg='capabilities changed') else: # remove from current cap list and then set current list - current = filter(lambda x: x[0] != self.capability_tup[0], current) + current = [x for x in current if x[0] != self.capability_tup[0]] self.module.exit_json(changed=True, state=self.state, msg='capabilities changed', stdout=self.setcap(self.path, current)) self.module.exit_json(changed=False, state=self.state) def getcap(self, path): rval = [] - cmd = "%s -v %s" % (self.getcap_cmd, path) + cmd = [self.getcap_cmd, "-v", path] rc, stdout, stderr = self.module.run_command(cmd) # If file xattrs are set but no caps are set the output will be: # '/foo =' @@ -116,6 +121,8 @@ class CapabilitiesModule(object): if ' =' in stdout: # process output of an older version of libcap caps = stdout.split(' =')[1].strip().split() + elif stdout.strip().endswith(")"): # '/foo (Error Message)' + self.module.fail_json(msg="Unable to get capabilities 
of %s" % path, stdout=stdout.strip(), stderr=stderr) else: # otherwise, we have a newer version here # see original commit message of cap/v0.2.40-18-g177cd41 in libcap.git @@ -135,7 +142,7 @@ class CapabilitiesModule(object): def setcap(self, path, caps): caps = ' '.join([''.join(cap) for cap in caps]) - cmd = "%s '%s' %s" % (self.setcap_cmd, caps, path) + cmd = [self.setcap_cmd, caps, path] rc, stdout, stderr = self.module.run_command(cmd) if rc != 0: self.module.fail_json(msg="Unable to set capabilities of %s" % path, stdout=stdout, stderr=stderr) diff --git a/plugins/modules/packaging/language/cargo.py b/plugins/modules/cargo.py similarity index 53% rename from plugins/modules/packaging/language/cargo.py rename to plugins/modules/cargo.py index d449f1020e..3ec0012ca0 100644 --- a/plugins/modules/packaging/language/cargo.py +++ b/plugins/modules/cargo.py @@ -1,22 +1,32 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- # Copyright (c) 2021 Radek Sprta -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function - -__metaclass__ = type +# Copyright (c) 2024 Colin Nolan +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later +from __future__ import annotations DOCUMENTATION = r""" ---- module: cargo short_description: Manage Rust packages with cargo version_added: 4.3.0 description: - Manage Rust packages with cargo. author: "Radek Sprta (@radek-sprta)" +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: none options: + executable: + description: + - Path to the C(cargo) installed in the system. + - If not specified, the module looks for C(cargo) in E(PATH). + type: path + version_added: 7.5.0 name: description: - The name of a Rust package to install. 
@@ -24,27 +34,47 @@ options: elements: str required: true path: - description: - -> - The base path where to install the Rust packages. Cargo automatically appends - C(/bin). In other words, C(/usr/local) will become C(/usr/local/bin). + description: The base path where to install the Rust packages. Cargo automatically appends V(/bin). In other words, V(/usr/local) + becomes V(/usr/local/bin). type: path version: - description: - -> - The version to install. If I(name) contains multiple values, the module will - try to install all of them in this version. + description: The version to install. If O(name) contains multiple values, the module tries to install all of them in this + version. type: str required: false + locked: + description: + - Install with locked dependencies. + - This is only used when installing packages. + required: false + type: bool + default: false + version_added: 7.5.0 state: description: - The state of the Rust package. required: false type: str default: present - choices: [ "present", "absent", "latest" ] + choices: ["present", "absent", "latest"] + directory: + description: + - Path to the source directory to install the Rust package from. + - This is only used when installing packages. + type: path + required: false + version_added: 9.1.0 + features: + description: + - List of features to activate. + - This is only used when installing packages. 
+ type: list + elements: str + required: false + default: [] + version_added: 11.0.0 requirements: - - cargo installed in bin path (recommended /usr/local/bin) + - cargo installed """ EXAMPLES = r""" @@ -52,6 +82,11 @@ EXAMPLES = r""" community.general.cargo: name: ludusavi +- name: Install "ludusavi" Rust package with locked dependencies + community.general.cargo: + name: ludusavi + locked: true + - name: Install "ludusavi" Rust package in version 0.10.0 community.general.cargo: name: ludusavi @@ -71,8 +106,20 @@ EXAMPLES = r""" community.general.cargo: name: ludusavi state: latest + +- name: Install "ludusavi" Rust package from source directory + community.general.cargo: + name: ludusavi + directory: /path/to/ludusavi/source + +- name: Install "serpl" Rust package with ast_grep feature + community.general.cargo: + name: serpl + features: + - ast_grep """ +import json import os import re @@ -82,12 +129,14 @@ from ansible.module_utils.basic import AnsibleModule class Cargo(object): def __init__(self, module, **kwargs): self.module = module + self.executable = [kwargs["executable"] or module.get_bin_path("cargo", True)] self.name = kwargs["name"] self.path = kwargs["path"] self.state = kwargs["state"] self.version = kwargs["version"] - - self.executable = [module.get_bin_path("cargo", True)] + self.locked = kwargs["locked"] + self.directory = kwargs["directory"] + self.features = kwargs["features"] @property def path(self): @@ -110,9 +159,13 @@ class Cargo(object): def get_installed(self): cmd = ["install", "--list"] + if self.path: + cmd.append("--root") + cmd.append(self.path) + data, dummy = self._exec(cmd, True, False, False) - package_regex = re.compile(r"^([\w\-]+) v(.+):$") + package_regex = re.compile(r"^([\w\-]+) v(\S+).*:$") installed = {} for line in data.splitlines(): package_info = package_regex.match(line) @@ -124,25 +177,63 @@ class Cargo(object): def install(self, packages=None): cmd = ["install"] cmd.extend(packages or self.name) + if self.locked: + 
cmd.append("--locked") if self.path: cmd.append("--root") cmd.append(self.path) if self.version: cmd.append("--version") cmd.append(self.version) + if self.directory: + cmd.append("--path") + cmd.append(self.directory) + if self.features: + cmd += ["--features", ",".join(self.features)] return self._exec(cmd) def is_outdated(self, name): installed_version = self.get_installed().get(name) + latest_version = ( + self.get_latest_published_version(name) + if not self.directory + else self.get_source_directory_version(name) + ) + return installed_version != latest_version + def get_latest_published_version(self, name): cmd = ["search", name, "--limit", "1"] data, dummy = self._exec(cmd, True, False, False) match = re.search(r'"(.+)"', data) - if match: - latest_version = match.group(1) + if not match: + self.module.fail_json( + msg="No published version for package %s found" % name + ) + return match.group(1) - return installed_version != latest_version + def get_source_directory_version(self, name): + cmd = [ + "metadata", + "--format-version", + "1", + "--no-deps", + "--manifest-path", + os.path.join(self.directory, "Cargo.toml"), + ] + data, dummy = self._exec(cmd, True, False, False) + manifest = json.loads(data) + + package = next( + (package for package in manifest["packages"] if package["name"] == name), + None, + ) + if not package: + self.module.fail_json( + msg="Package %s not defined in source, found: %s" + % (name, [x["name"] for x in manifest["packages"]]) + ) + return package["version"] def uninstall(self, packages=None): cmd = ["uninstall"] @@ -152,27 +243,34 @@ class Cargo(object): def main(): arg_spec = dict( + executable=dict(type="path"), name=dict(required=True, type="list", elements="str"), - path=dict(default=None, type="path"), + path=dict(type="path"), state=dict(default="present", choices=["present", "absent", "latest"]), - version=dict(default=None, type="str"), + version=dict(type="str"), + locked=dict(default=False, type="bool"), + 
directory=dict(type="path"), + features=dict(default=[], type="list", elements="str"), ) module = AnsibleModule(argument_spec=arg_spec, supports_check_mode=True) name = module.params["name"] - path = module.params["path"] state = module.params["state"] version = module.params["version"] + directory = module.params["directory"] if not name: module.fail_json(msg="Package name must be specified") + if directory is not None and not os.path.isdir(directory): + module.fail_json(msg="Source directory does not exist") + # Set LANG env since we parse stdout module.run_command_environ_update = dict( LANG="C", LC_ALL="C", LC_MESSAGES="C", LC_CTYPE="C" ) - cargo = Cargo(module, name=name, path=path, state=state, version=version) + cargo = Cargo(module, **module.params) changed, out, err = False, None, None installed_packages = cargo.get_installed() if state == "present": diff --git a/plugins/modules/notification/catapult.py b/plugins/modules/catapult.py similarity index 72% rename from plugins/modules/notification/catapult.py rename to plugins/modules/catapult.py index 1383362068..053eb4b51b 100644 --- a/plugins/modules/notification/catapult.py +++ b/plugins/modules/catapult.py @@ -1,32 +1,43 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- -# (c) 2016, Jonathan Mainguy -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# Copyright (c) 2016, Jonathan Mainguy +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later # # basis of code taken from the ansible twillio and nexmo modules -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: catapult -short_description: Send a sms / mms using the catapult bandwidth api +short_description: Send a sms / mms using the catapult bandwidth API description: - - Allows 
notifications to be sent using sms / mms via the catapult bandwidth api. + - Allows notifications to be sent using SMS / MMS using the catapult bandwidth API. +deprecated: + removed_in: 13.0.0 + why: >- + DNS fails to resolve the API endpoint used by the module since Oct 2024. + See L(the associated issue, https://github.com/ansible-collections/community.general/issues/10318) for details. + alternative: There is none. +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: none + diff_mode: + support: none options: src: type: str description: - - One of your catapult telephone numbers the message should come from (must be in E.164 format, like C(+19195551212)). + - One of your catapult telephone numbers the message should come from (must be in E.164 format, like V(+19195551212)). required: true dest: type: list elements: str description: - - The phone number or numbers the message should be sent to (must be in E.164 format, like C(+19195551212)). + - The phone number or numbers the message should be sent to (must be in E.164 format, like V(+19195551212)). required: true msg: type: str @@ -36,31 +47,30 @@ options: media: type: str description: - - For MMS messages, a media url to the location of the media to be sent with the message. + - For MMS messages, a media URL to the location of the media to be sent with the message. user_id: type: str description: - - User Id from Api account page. + - User ID from API account page. required: true api_token: type: str description: - - Api Token from Api account page. + - API Token from API account page. required: true api_secret: type: str description: - - Api Secret from Api account page. + - API Secret from API account page. required: true author: "Jonathan Mainguy (@Jmainguy)" notes: - - Will return changed even if the media url is wrong. - - Will return changed if the destination number is invalid. + - Will return changed even if the media URL is wrong. 
+ - Will return changed if the destination number is invalid. +""" -''' - -EXAMPLES = ''' +EXAMPLES = r""" - name: Send a mms to multiple users community.general.catapult: src: "+15035555555" @@ -81,16 +91,7 @@ EXAMPLES = ''' user_id: "{{ user_id }}" api_token: "{{ api_token }}" api_secret: "{{ api_secret }}" - -''' - -RETURN = ''' -changed: - description: Whether the api accepted the message. - returned: always - type: bool - sample: True -''' +""" import json @@ -127,7 +128,7 @@ def main(): user_id=dict(required=True), api_token=dict(required=True, no_log=True), api_secret=dict(required=True, no_log=True), - media=dict(default=None, required=False), + media=dict(), ), ) diff --git a/plugins/modules/monitoring/circonus_annotation.py b/plugins/modules/circonus_annotation.py similarity index 56% rename from plugins/modules/monitoring/circonus_annotation.py rename to plugins/modules/circonus_annotation.py index 40c7297dd7..4d00b6fb98 100644 --- a/plugins/modules/monitoring/circonus_annotation.py +++ b/plugins/modules/circonus_annotation.py @@ -1,62 +1,65 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- # -# (c) 2014-2015, Epic Games, Inc. -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# Copyright (c) 2014-2015, Epic Games, Inc. +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: circonus_annotation -short_description: create an annotation in circonus +short_description: Create an annotation in Circonus description: - - Create an annotation event with a given category, title and description. Optionally start, end or durations can be provided + - Create an annotation event with a given category, title and description. 
Optionally start, end or durations can be provided. author: "Nick Harring (@NickatEpic)" requirements: - - requests (either >= 2.0.0 for Python 3, or >= 1.0.0 for Python 2) -notes: - - Check mode isn't supported. + - requests >= 2.0.0 +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: none + diff_mode: + support: none options: - api_key: - type: str - description: - - Circonus API key - required: true - category: - type: str - description: - - Annotation Category - required: true + api_key: + type: str description: - type: str - description: - - Description of annotation - required: true - title: - type: str - description: - - Title of annotation - required: true - start: - type: int - description: - - Unix timestamp of event start - - If not specified, it defaults to I(now). - stop: - type: int - description: - - Unix timestamp of event end - - If not specified, it defaults to I(now) + I(duration). - duration: - type: int - description: - - Duration in seconds of annotation - default: 0 -''' -EXAMPLES = ''' + - Circonus API key. + required: true + category: + type: str + description: + - Annotation Category. + required: true + description: + type: str + description: + - Description of annotation. + required: true + title: + type: str + description: + - Title of annotation. + required: true + start: + type: int + description: + - Unix timestamp of event start. + - If not specified, it defaults to "now". + stop: + type: int + description: + - Unix timestamp of event end. + - If not specified, it defaults to "now" + O(duration). + duration: + type: int + description: + - Duration in seconds of annotation. 
+ default: 0 +""" +EXAMPLES = r""" - name: Create a simple annotation event with a source, defaults to start and end time of now community.general.circonus_annotation: api_key: XXXXXXXXXXXXXXXXX @@ -80,66 +83,67 @@ EXAMPLES = ''' category: This category groups like annotations start_time: 1395940006 end_time: 1395954407 -''' +""" -RETURN = ''' +RETURN = r""" annotation: - description: details about the created annotation - returned: success - type: complex - contains: - _cid: - description: annotation identifier - returned: success - type: str - sample: /annotation/100000 - _created: - description: creation timestamp - returned: success - type: int - sample: 1502236928 - _last_modified: - description: last modification timestamp - returned: success - type: int - sample: 1502236928 - _last_modified_by: - description: last modified by - returned: success - type: str - sample: /user/1000 - category: - description: category of the created annotation - returned: success - type: str - sample: alerts - title: - description: title of the created annotation - returned: success - type: str - sample: WARNING - description: - description: description of the created annotation - returned: success - type: str - sample: Host is down. - start: - description: timestamp, since annotation applies - returned: success - type: int - sample: Host is down. - stop: - description: timestamp, since annotation ends - returned: success - type: str - sample: Host is down. - rel_metrics: - description: Array of metrics related to this annotation, each metrics is a string. - returned: success - type: list - sample: - - 54321_kbps -''' + description: Details about the created annotation. + returned: success + type: complex + contains: + _cid: + description: Annotation identifier. + returned: success + type: str + sample: /annotation/100000 + _created: + description: Creation timestamp. 
+ returned: success + type: int + sample: 1502236928 + _last_modified: + description: Last modification timestamp. + returned: success + type: int + sample: 1502236928 + _last_modified_by: + description: Last modified by. + returned: success + type: str + sample: /user/1000 + category: + description: Category of the created annotation. + returned: success + type: str + sample: alerts + title: + description: Title of the created annotation. + returned: success + type: str + sample: WARNING + description: + description: Description of the created annotation. + returned: success + type: str + sample: Host is down. + start: + description: Timestamp, since annotation applies. + returned: success + type: int + sample: Host is down. + stop: + description: Timestamp, since annotation ends. + returned: success + type: str + sample: Host is down. + rel_metrics: + description: Array of metrics related to this annotation, each metrics is a string. + returned: success + type: list + sample: + - 54321_kbps +""" + import json import time import traceback @@ -155,7 +159,6 @@ except ImportError: HAS_REQUESTS = False from ansible.module_utils.basic import AnsibleModule, missing_required_lib -from ansible.module_utils.six import PY3 from ansible.module_utils.common.text.converters import to_native @@ -164,7 +167,7 @@ def check_requests_dep(module): if not HAS_REQUESTS: module.fail_json(msg=missing_required_lib('requests'), exception=REQUESTS_IMP_ERR) else: - required_version = '2.0.0' if PY3 else '1.0.0' + required_version = '2.0.0' if LooseVersion(requests.__version__) < LooseVersion(required_version): module.fail_json(msg="'requests' library version should be >= %s, found: %s." 
% (required_version, requests.__version__)) diff --git a/plugins/modules/notification/cisco_webex.py b/plugins/modules/cisco_webex.py similarity index 77% rename from plugins/modules/notification/cisco_webex.py rename to plugins/modules/cisco_webex.py index 8c1361fb14..bd9c148b53 100644 --- a/plugins/modules/notification/cisco_webex.py +++ b/plugins/modules/cisco_webex.py @@ -1,43 +1,48 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- -# Copyright: Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# Copyright Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: cisco_webex short_description: Send a message to a Cisco Webex Teams Room or Individual description: - - Send a message to a Cisco Webex Teams Room or Individual with options to control the formatting. + - Send a message to a Cisco Webex Teams Room or Individual with options to control the formatting. author: Drew Rusell (@drew-russell) notes: - - The C(recipient_id) type must be valid for the supplied C(recipient_id). + - The O(recipient_type) must be valid for the supplied O(recipient_id). - Full API documentation can be found at U(https://developer.webex.com/docs/api/basics). +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: none options: recipient_type: description: - - The request parameter you would like to send the message to. - - Messages can be sent to either a room or individual (by ID or E-Mail). - required: yes + - The request parameter you would like to send the message to. + - Messages can be sent to either a room or individual (by ID or E-Mail). 
+ required: true choices: ['roomId', 'toPersonEmail', 'toPersonId'] type: str recipient_id: description: - - The unique identifier associated with the supplied C(recipient_type). - required: yes + - The unique identifier associated with the supplied O(recipient_type). + required: true type: str msg_type: description: - - Specifies how you would like the message formatted. + - Specifies how you would like the message formatted. default: text choices: ['text', 'markdown'] type: str @@ -46,18 +51,18 @@ options: personal_token: description: - Your personal access token required to validate the Webex Teams API. - required: yes + required: true aliases: ['token'] type: str msg: description: - The message you would like to send. - required: yes + required: true type: str -''' +""" -EXAMPLES = """ +EXAMPLES = r""" # Note: The following examples assume a variable file has been imported # that contains the appropriate information. @@ -92,10 +97,9 @@ EXAMPLES = """ msg_type: text personal_token: "{{ token }}" msg: "Cisco Webex Teams Ansible Module - Text Message to Individual by E-Mail" - """ -RETURN = """ +RETURN = r""" status_code: description: - The Response Code returned by the Webex Teams API. @@ -105,12 +109,12 @@ status_code: sample: 200 message: - description: - - The Response Message returned by the Webex Teams API. - - Full Response Code explanations can be found at U(https://developer.webex.com/docs/api/basics). - returned: always - type: str - sample: OK (585 bytes) + description: + - The Response Message returned by the Webex Teams API. + - Full Response Code explanations can be found at U(https://developer.webex.com/docs/api/basics). 
+ returned: always + type: str + sample: OK (585 bytes) """ from ansible.module_utils.basic import AnsibleModule from ansible.module_utils.urls import fetch_url @@ -171,7 +175,7 @@ def main(): argument_spec=dict( recipient_type=dict(required=True, choices=['roomId', 'toPersonEmail', 'toPersonId']), recipient_id=dict(required=True, no_log=True), - msg_type=dict(required=False, default='text', aliases=['message_type'], choices=['text', 'markdown']), + msg_type=dict(default='text', aliases=['message_type'], choices=['text', 'markdown']), personal_token=dict(required=True, no_log=True, aliases=['token']), msg=dict(required=True), ), diff --git a/plugins/modules/cloud/alicloud/ali_instance_info.py b/plugins/modules/cloud/alicloud/ali_instance_info.py deleted file mode 100644 index 2331db69a6..0000000000 --- a/plugins/modules/cloud/alicloud/ali_instance_info.py +++ /dev/null @@ -1,407 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Copyright (c) 2017-present Alibaba Group Holding Limited. He Guimin -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -# -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see http://www.gnu.org/licenses/. - -from __future__ import (absolute_import, division, print_function) - -__metaclass__ = type - -DOCUMENTATION = ''' ---- -module: ali_instance_info -short_description: Gather information on instances of Alibaba Cloud ECS. 
-description: - - This module fetches data from the Open API in Alicloud. - The module must be called from within the ECS instance itself. - - This module was called C(ali_instance_facts) before Ansible 2.9. The usage did not change. - -options: - name_prefix: - description: - - Use a instance name prefix to filter ecs instances. - type: str - version_added: '0.2.0' - tags: - description: - - A hash/dictionaries of instance tags. C({"key":"value"}) - aliases: ["instance_tags"] - type: dict - filters: - description: - - A dict of filters to apply. Each dict item consists of a filter key and a filter value. The filter keys can be - all of request parameters. See U(https://www.alibabacloud.com/help/doc-detail/25506.htm) for parameter details. - Filter keys can be same as request parameter name or be lower case and use underscore ("_") or dash ("-") to - connect different words in one parameter. 'InstanceIds' should be a list. - 'Tag.n.Key' and 'Tag.n.Value' should be a dict and using I(tags) instead. 
- type: dict - version_added: '0.2.0' -author: - - "He Guimin (@xiaozhu36)" -requirements: - - "python >= 3.6" - - "footmark >= 1.13.0" -extends_documentation_fragment: - - community.general.alicloud -''' - -EXAMPLES = ''' -# Fetch instances details according to setting different filters - -- name: Find all instances in the specified region - community.general.ali_instance_info: - register: all_instances - -- name: Find all instances based on the specified ids - community.general.ali_instance_info: - instance_ids: - - "i-35b333d9" - - "i-ddav43kd" - register: instances_by_ids - -- name: Find all instances based on the specified name_prefix - community.general.ali_instance_info: - name_prefix: "ecs_instance_" - register: instances_by_name_prefix - -- name: Find instances based on tags - community.general.ali_instance_info: - tags: - Test: "add" -''' - -RETURN = ''' -instances: - description: List of ECS instances - returned: always - type: complex - contains: - availability_zone: - description: The availability zone of the instance is in. - returned: always - type: str - sample: cn-beijing-a - block_device_mappings: - description: Any block device mapping entries for the instance. - returned: always - type: complex - contains: - device_name: - description: The device name exposed to the instance (for example, /dev/xvda). - returned: always - type: str - sample: /dev/xvda - attach_time: - description: The time stamp when the attachment initiated. - returned: always - type: str - sample: "2018-06-25T04:08:26Z" - delete_on_termination: - description: Indicates whether the volume is deleted on instance termination. - returned: always - type: bool - sample: true - status: - description: The attachment state. - returned: always - type: str - sample: in_use - volume_id: - description: The ID of the cloud disk. - returned: always - type: str - sample: d-2zei53pjsi117y6gf9t6 - cpu: - description: The CPU core count of the instance. 
- returned: always - type: int - sample: 4 - creation_time: - description: The time the instance was created. - returned: always - type: str - sample: "2018-06-25T04:08Z" - description: - description: The instance description. - returned: always - type: str - sample: "my ansible instance" - eip: - description: The attribution of EIP associated with the instance. - returned: always - type: complex - contains: - allocation_id: - description: The ID of the EIP. - returned: always - type: str - sample: eip-12345 - internet_charge_type: - description: The internet charge type of the EIP. - returned: always - type: str - sample: "paybybandwidth" - ip_address: - description: EIP address. - returned: always - type: str - sample: 42.10.2.2 - expired_time: - description: The time the instance will expire. - returned: always - type: str - sample: "2099-12-31T15:59Z" - gpu: - description: The attribution of instance GPU. - returned: always - type: complex - contains: - amount: - description: The count of the GPU. - returned: always - type: int - sample: 0 - spec: - description: The specification of the GPU. - returned: always - type: str - sample: "" - host_name: - description: The host name of the instance. - returned: always - type: str - sample: iZ2zewaoZ - id: - description: Alias of instance_id. - returned: always - type: str - sample: i-abc12345 - instance_id: - description: ECS instance resource ID. - returned: always - type: str - sample: i-abc12345 - image_id: - description: The ID of the image used to launch the instance. - returned: always - type: str - sample: m-0011223344 - inner_ip_address: - description: The inner IPv4 address of the classic instance. - returned: always - type: str - sample: 10.0.0.2 - instance_charge_type: - description: The instance charge type. - returned: always - type: str - sample: PostPaid - instance_name: - description: The name of the instance. 
- returned: always - type: str - sample: my-ecs - instance_type_family: - description: The instance type family of the instance belongs. - returned: always - type: str - sample: ecs.sn1ne - instance_type: - description: The instance type of the running instance. - returned: always - type: str - sample: ecs.sn1ne.xlarge - internet_charge_type: - description: The billing method of the network bandwidth. - returned: always - type: str - sample: PayByBandwidth - internet_max_bandwidth_in: - description: Maximum incoming bandwidth from the internet network. - returned: always - type: int - sample: 200 - internet_max_bandwidth_out: - description: Maximum incoming bandwidth from the internet network. - returned: always - type: int - sample: 20 - io_optimized: - description: Indicates whether the instance is optimized for EBS I/O. - returned: always - type: bool - sample: false - memory: - description: Memory size of the instance. - returned: always - type: int - sample: 8192 - network_interfaces: - description: One or more network interfaces for the instance. - returned: always - type: complex - contains: - mac_address: - description: The MAC address. - returned: always - type: str - sample: "00:11:22:33:44:55" - network_interface_id: - description: The ID of the network interface. - returned: always - type: str - sample: eni-01234567 - primary_ip_address: - description: The primary IPv4 address of the network interface within the vswitch. - returned: always - type: str - sample: 10.0.0.1 - osname: - description: The operation system name of the instance owned. - returned: always - type: str - sample: CentOS - ostype: - description: The operation system type of the instance owned. - returned: always - type: str - sample: linux - private_ip_address: - description: The IPv4 address of the network interface within the subnet. 
- returned: always - type: str - sample: 10.0.0.1 - public_ip_address: - description: The public IPv4 address assigned to the instance or eip address - returned: always - type: str - sample: 43.0.0.1 - resource_group_id: - description: The id of the resource group to which the instance belongs. - returned: always - type: str - sample: my-ecs-group - security_groups: - description: One or more security groups for the instance. - returned: always - type: list - elements: dict - contains: - group_id: - description: The ID of the security group. - returned: always - type: str - sample: sg-0123456 - group_name: - description: The name of the security group. - returned: always - type: str - sample: my-security-group - status: - description: The current status of the instance. - returned: always - type: str - sample: running - tags: - description: Any tags assigned to the instance. - returned: always - type: dict - sample: - vswitch_id: - description: The ID of the vswitch in which the instance is running. - returned: always - type: str - sample: vsw-dew00abcdef - vpc_id: - description: The ID of the VPC the instance is in. 
- returned: always - type: str - sample: vpc-0011223344 -ids: - description: List of ECS instance IDs - returned: always - type: list - sample: [i-12345er, i-3245fs] -''' - -import traceback -from ansible.module_utils.basic import AnsibleModule, missing_required_lib -from ansible_collections.community.general.plugins.module_utils.alicloud_ecs import ecs_argument_spec, ecs_connect - -HAS_FOOTMARK = False -FOOTMARK_IMP_ERR = None -try: - from footmark.exception import ECSResponseError - HAS_FOOTMARK = True -except ImportError: - FOOTMARK_IMP_ERR = traceback.format_exc() - HAS_FOOTMARK = False - - -def main(): - argument_spec = ecs_argument_spec() - argument_spec.update(dict( - name_prefix=dict(type='str'), - tags=dict(type='dict', aliases=['instance_tags']), - filters=dict(type='dict') - ) - ) - module = AnsibleModule( - argument_spec=argument_spec, - supports_check_mode=True, - ) - - if HAS_FOOTMARK is False: - module.fail_json(msg=missing_required_lib('footmark'), exception=FOOTMARK_IMP_ERR) - - ecs = ecs_connect(module) - - instances = [] - instance_ids = [] - ids = [] - name_prefix = module.params['name_prefix'] - - filters = module.params['filters'] - if not filters: - filters = {} - for key, value in list(filters.items()): - if key in ["InstanceIds", "instance_ids", "instance-ids"] and isinstance(ids, list): - for id in value: - if id not in ids: - ids.append(value) - if ids: - filters['instance_ids'] = ids - if module.params['tags']: - filters['tags'] = module.params['tags'] - - for inst in ecs.describe_instances(**filters): - if name_prefix: - if not str(inst.instance_name).startswith(name_prefix): - continue - volumes = ecs.describe_disks(instance_id=inst.id) - setattr(inst, 'block_device_mappings', volumes) - setattr(inst, 'user_data', inst.describe_user_data()) - instances.append(inst.read()) - instance_ids.append(inst.id) - - module.exit_json(changed=False, ids=instance_ids, instances=instances) - - -if __name__ == '__main__': - main() diff --git 
a/plugins/modules/cloud/centurylink/clc_aa_policy.py b/plugins/modules/cloud/centurylink/clc_aa_policy.py deleted file mode 100644 index 416a4a6c1f..0000000000 --- a/plugins/modules/cloud/centurylink/clc_aa_policy.py +++ /dev/null @@ -1,345 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# -# Copyright (c) 2015 CenturyLink -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' -module: clc_aa_policy -short_description: Create or Delete Anti Affinity Policies at CenturyLink Cloud. -description: - - An Ansible module to Create or Delete Anti Affinity Policies at CenturyLink Cloud. -options: - name: - description: - - The name of the Anti Affinity Policy. - type: str - required: True - location: - description: - - Datacenter in which the policy lives/should live. - type: str - required: True - state: - description: - - Whether to create or delete the policy. - type: str - required: False - default: present - choices: ['present','absent'] -requirements: - - python = 2.7 - - requests >= 2.5.0 - - clc-sdk -author: "CLC Runner (@clc-runner)" -notes: - - To use this module, it is required to set the below environment variables which enables access to the - Centurylink Cloud - - CLC_V2_API_USERNAME, the account login id for the centurylink cloud - - CLC_V2_API_PASSWORD, the account password for the centurylink cloud - - Alternatively, the module accepts the API token and account alias. The API token can be generated using the - CLC account login and password via the HTTP api call @ https://api.ctl.io/v2/authentication/login - - CLC_V2_API_TOKEN, the API token generated from https://api.ctl.io/v2/authentication/login - - CLC_ACCT_ALIAS, the account alias associated with the centurylink cloud - - Users can set CLC_V2_API_URL to specify an endpoint for pointing to a different CLC environment. 
-''' - -EXAMPLES = ''' -# Note - You must set the CLC_V2_API_USERNAME And CLC_V2_API_PASSWD Environment variables before running these examples - ---- -- name: Create AA Policy - hosts: localhost - gather_facts: False - connection: local - tasks: - - name: Create an Anti Affinity Policy - community.general.clc_aa_policy: - name: Hammer Time - location: UK3 - state: present - register: policy - - - name: Debug - ansible.builtin.debug: - var: policy - -- name: Delete AA Policy - hosts: localhost - gather_facts: False - connection: local - tasks: - - name: Delete an Anti Affinity Policy - community.general.clc_aa_policy: - name: Hammer Time - location: UK3 - state: absent - register: policy - - - name: Debug - ansible.builtin.debug: - var: policy -''' - -RETURN = ''' -policy: - description: The anti affinity policy information - returned: success - type: dict - sample: - { - "id":"1a28dd0988984d87b9cd61fa8da15424", - "name":"test_aa_policy", - "location":"UC1", - "links":[ - { - "rel":"self", - "href":"/v2/antiAffinityPolicies/wfad/1a28dd0988984d87b9cd61fa8da15424", - "verbs":[ - "GET", - "DELETE", - "PUT" - ] - }, - { - "rel":"location", - "href":"/v2/datacenters/wfad/UC1", - "id":"uc1", - "name":"UC1 - US West (Santa Clara)" - } - ] - } -''' - -__version__ = '${version}' - -import os -import traceback - -from ansible_collections.community.general.plugins.module_utils.version import LooseVersion - -REQUESTS_IMP_ERR = None -try: - import requests -except ImportError: - REQUESTS_IMP_ERR = traceback.format_exc() - REQUESTS_FOUND = False -else: - REQUESTS_FOUND = True - -# -# Requires the clc-python-sdk: -# sudo pip install clc-sdk -# -CLC_IMP_ERR = None -try: - import clc as clc_sdk - from clc import CLCException -except ImportError: - CLC_IMP_ERR = traceback.format_exc() - CLC_FOUND = False - clc_sdk = None -else: - CLC_FOUND = True - -from ansible.module_utils.basic import AnsibleModule, missing_required_lib - - -class ClcAntiAffinityPolicy: - - clc = clc_sdk - module 
= None - - def __init__(self, module): - """ - Construct module - """ - self.module = module - self.policy_dict = {} - - if not CLC_FOUND: - self.module.fail_json(msg=missing_required_lib('clc-sdk'), - exception=CLC_IMP_ERR) - if not REQUESTS_FOUND: - self.module.fail_json(msg=missing_required_lib('requests'), - exception=REQUESTS_IMP_ERR) - if requests.__version__ and LooseVersion(requests.__version__) < LooseVersion('2.5.0'): - self.module.fail_json( - msg='requests library version should be >= 2.5.0') - - self._set_user_agent(self.clc) - - @staticmethod - def _define_module_argument_spec(): - """ - Define the argument spec for the ansible module - :return: argument spec dictionary - """ - argument_spec = dict( - name=dict(required=True), - location=dict(required=True), - state=dict(default='present', choices=['present', 'absent']), - ) - return argument_spec - - # Module Behavior Goodness - def process_request(self): - """ - Process the request - Main Code Path - :return: Returns with either an exit_json or fail_json - """ - p = self.module.params - - self._set_clc_credentials_from_env() - self.policy_dict = self._get_policies_for_datacenter(p) - - if p['state'] == "absent": - changed, policy = self._ensure_policy_is_absent(p) - else: - changed, policy = self._ensure_policy_is_present(p) - - if hasattr(policy, 'data'): - policy = policy.data - elif hasattr(policy, '__dict__'): - policy = policy.__dict__ - - self.module.exit_json(changed=changed, policy=policy) - - def _set_clc_credentials_from_env(self): - """ - Set the CLC Credentials on the sdk by reading environment variables - :return: none - """ - env = os.environ - v2_api_token = env.get('CLC_V2_API_TOKEN', False) - v2_api_username = env.get('CLC_V2_API_USERNAME', False) - v2_api_passwd = env.get('CLC_V2_API_PASSWD', False) - clc_alias = env.get('CLC_ACCT_ALIAS', False) - api_url = env.get('CLC_V2_API_URL', False) - - if api_url: - self.clc.defaults.ENDPOINT_URL_V2 = api_url - - if v2_api_token and 
clc_alias: - self.clc._LOGIN_TOKEN_V2 = v2_api_token - self.clc._V2_ENABLED = True - self.clc.ALIAS = clc_alias - elif v2_api_username and v2_api_passwd: - self.clc.v2.SetCredentials( - api_username=v2_api_username, - api_passwd=v2_api_passwd) - else: - return self.module.fail_json( - msg="You must set the CLC_V2_API_USERNAME and CLC_V2_API_PASSWD " - "environment variables") - - def _get_policies_for_datacenter(self, p): - """ - Get the Policies for a datacenter by calling the CLC API. - :param p: datacenter to get policies from - :return: policies in the datacenter - """ - response = {} - - policies = self.clc.v2.AntiAffinity.GetAll(location=p['location']) - - for policy in policies: - response[policy.name] = policy - return response - - def _create_policy(self, p): - """ - Create an Anti Affinity Policy using the CLC API. - :param p: datacenter to create policy in - :return: response dictionary from the CLC API. - """ - try: - return self.clc.v2.AntiAffinity.Create( - name=p['name'], - location=p['location']) - except CLCException as ex: - self.module.fail_json(msg='Failed to create anti affinity policy : {0}. {1}'.format( - p['name'], ex.response_text - )) - - def _delete_policy(self, p): - """ - Delete an Anti Affinity Policy using the CLC API. - :param p: datacenter to delete a policy from - :return: none - """ - try: - policy = self.policy_dict[p['name']] - policy.Delete() - except CLCException as ex: - self.module.fail_json(msg='Failed to delete anti affinity policy : {0}. 
{1}'.format( - p['name'], ex.response_text - )) - - def _policy_exists(self, policy_name): - """ - Check to see if an Anti Affinity Policy exists - :param policy_name: name of the policy - :return: boolean of if the policy exists - """ - if policy_name in self.policy_dict: - return self.policy_dict.get(policy_name) - - return False - - def _ensure_policy_is_absent(self, p): - """ - Makes sure that a policy is absent - :param p: dictionary of policy name - :return: tuple of if a deletion occurred and the name of the policy that was deleted - """ - changed = False - if self._policy_exists(policy_name=p['name']): - changed = True - if not self.module.check_mode: - self._delete_policy(p) - return changed, None - - def _ensure_policy_is_present(self, p): - """ - Ensures that a policy is present - :param p: dictionary of a policy name - :return: tuple of if an addition occurred and the name of the policy that was added - """ - changed = False - policy = self._policy_exists(policy_name=p['name']) - if not policy: - changed = True - policy = None - if not self.module.check_mode: - policy = self._create_policy(p) - return changed, policy - - @staticmethod - def _set_user_agent(clc): - if hasattr(clc, 'SetRequestsSession'): - agent_string = "ClcAnsibleModule/" + __version__ - ses = requests.Session() - ses.headers.update({"Api-Client": agent_string}) - ses.headers['User-Agent'] += " " + agent_string - clc.SetRequestsSession(ses) - - -def main(): - """ - The main function. Instantiates the module and calls process_request. 
- :return: none - """ - module = AnsibleModule( - argument_spec=ClcAntiAffinityPolicy._define_module_argument_spec(), - supports_check_mode=True) - clc_aa_policy = ClcAntiAffinityPolicy(module) - clc_aa_policy.process_request() - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/cloud/centurylink/clc_alert_policy.py b/plugins/modules/cloud/centurylink/clc_alert_policy.py deleted file mode 100644 index 424e73cce2..0000000000 --- a/plugins/modules/cloud/centurylink/clc_alert_policy.py +++ /dev/null @@ -1,528 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# -# Copyright (c) 2015 CenturyLink -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' -module: clc_alert_policy -short_description: Create or Delete Alert Policies at CenturyLink Cloud. -description: - - An Ansible module to Create or Delete Alert Policies at CenturyLink Cloud. -options: - alias: - description: - - The alias of your CLC Account - type: str - required: True - name: - description: - - The name of the alert policy. This is mutually exclusive with id - type: str - id: - description: - - The alert policy id. This is mutually exclusive with name - type: str - alert_recipients: - description: - - A list of recipient email ids to notify the alert. - This is required for state 'present' - type: list - elements: str - metric: - description: - - The metric on which to measure the condition that will trigger the alert. - This is required for state 'present' - type: str - choices: ['cpu','memory','disk'] - duration: - description: - - The length of time in minutes that the condition must exceed the threshold. - This is required for state 'present' - type: str - threshold: - description: - - The threshold that will trigger the alert when the metric equals or exceeds it. 
- This is required for state 'present' - This number represents a percentage and must be a value between 5.0 - 95.0 that is a multiple of 5.0 - type: int - state: - description: - - Whether to create or delete the policy. - type: str - default: present - choices: ['present','absent'] -requirements: - - python = 2.7 - - requests >= 2.5.0 - - clc-sdk -author: "CLC Runner (@clc-runner)" -notes: - - To use this module, it is required to set the below environment variables which enables access to the - Centurylink Cloud - - CLC_V2_API_USERNAME, the account login id for the centurylink cloud - - CLC_V2_API_PASSWORD, the account password for the centurylink cloud - - Alternatively, the module accepts the API token and account alias. The API token can be generated using the - CLC account login and password via the HTTP api call @ https://api.ctl.io/v2/authentication/login - - CLC_V2_API_TOKEN, the API token generated from https://api.ctl.io/v2/authentication/login - - CLC_ACCT_ALIAS, the account alias associated with the centurylink cloud - - Users can set CLC_V2_API_URL to specify an endpoint for pointing to a different CLC environment. 
-''' - -EXAMPLES = ''' -# Note - You must set the CLC_V2_API_USERNAME And CLC_V2_API_PASSWD Environment variables before running these examples - ---- -- name: Create Alert Policy Example - hosts: localhost - gather_facts: False - connection: local - tasks: - - name: Create an Alert Policy for disk above 80% for 5 minutes - community.general.clc_alert_policy: - alias: wfad - name: 'alert for disk > 80%' - alert_recipients: - - test1@centurylink.com - - test2@centurylink.com - metric: 'disk' - duration: '00:05:00' - threshold: 80 - state: present - register: policy - - - name: Debug - ansible.builtin.debug: var=policy - -- name: Delete Alert Policy Example - hosts: localhost - gather_facts: False - connection: local - tasks: - - name: Delete an Alert Policy - community.general.clc_alert_policy: - alias: wfad - name: 'alert for disk > 80%' - state: absent - register: policy - - - name: Debug - ansible.builtin.debug: var=policy -''' - -RETURN = ''' -policy: - description: The alert policy information - returned: success - type: dict - sample: - { - "actions": [ - { - "action": "email", - "settings": { - "recipients": [ - "user1@domain.com", - "user1@domain.com" - ] - } - } - ], - "id": "ba54ac54a60d4a4f1ed6d48c1ce240a7", - "links": [ - { - "href": "/v2/alertPolicies/alias/ba54ac54a60d4a4fb1d6d48c1ce240a7", - "rel": "self", - "verbs": [ - "GET", - "DELETE", - "PUT" - ] - } - ], - "name": "test_alert", - "triggers": [ - { - "duration": "00:05:00", - "metric": "disk", - "threshold": 80.0 - } - ] - } -''' - -__version__ = '${version}' - -import json -import os -import traceback - -from ansible_collections.community.general.plugins.module_utils.version import LooseVersion - -REQUESTS_IMP_ERR = None -try: - import requests -except ImportError: - REQUESTS_IMP_ERR = traceback.format_exc() - REQUESTS_FOUND = False -else: - REQUESTS_FOUND = True - -# -# Requires the clc-python-sdk. 
-# sudo pip install clc-sdk -# -CLC_IMP_ERR = None -try: - import clc as clc_sdk - from clc import APIFailedResponse -except ImportError: - CLC_IMP_ERR = traceback.format_exc() - CLC_FOUND = False - clc_sdk = None -else: - CLC_FOUND = True - -from ansible.module_utils.basic import AnsibleModule, missing_required_lib - - -class ClcAlertPolicy: - - clc = clc_sdk - module = None - - def __init__(self, module): - """ - Construct module - """ - self.module = module - self.policy_dict = {} - - if not CLC_FOUND: - self.module.fail_json(msg=missing_required_lib('clc-sdk'), exception=CLC_IMP_ERR) - if not REQUESTS_FOUND: - self.module.fail_json(msg=missing_required_lib('requests'), exception=REQUESTS_IMP_ERR) - if requests.__version__ and LooseVersion(requests.__version__) < LooseVersion('2.5.0'): - self.module.fail_json( - msg='requests library version should be >= 2.5.0') - - self._set_user_agent(self.clc) - - @staticmethod - def _define_module_argument_spec(): - """ - Define the argument spec for the ansible module - :return: argument spec dictionary - """ - argument_spec = dict( - name=dict(), - id=dict(), - alias=dict(required=True), - alert_recipients=dict(type='list', elements='str'), - metric=dict( - choices=[ - 'cpu', - 'memory', - 'disk']), - duration=dict(type='str'), - threshold=dict(type='int'), - state=dict(default='present', choices=['present', 'absent']) - ) - mutually_exclusive = [ - ['name', 'id'] - ] - return {'argument_spec': argument_spec, - 'mutually_exclusive': mutually_exclusive} - - # Module Behavior Goodness - def process_request(self): - """ - Process the request - Main Code Path - :return: Returns with either an exit_json or fail_json - """ - p = self.module.params - - self._set_clc_credentials_from_env() - self.policy_dict = self._get_alert_policies(p['alias']) - - if p['state'] == 'present': - changed, policy = self._ensure_alert_policy_is_present() - else: - changed, policy = self._ensure_alert_policy_is_absent() - - 
self.module.exit_json(changed=changed, policy=policy) - - def _set_clc_credentials_from_env(self): - """ - Set the CLC Credentials on the sdk by reading environment variables - :return: none - """ - env = os.environ - v2_api_token = env.get('CLC_V2_API_TOKEN', False) - v2_api_username = env.get('CLC_V2_API_USERNAME', False) - v2_api_passwd = env.get('CLC_V2_API_PASSWD', False) - clc_alias = env.get('CLC_ACCT_ALIAS', False) - api_url = env.get('CLC_V2_API_URL', False) - - if api_url: - self.clc.defaults.ENDPOINT_URL_V2 = api_url - - if v2_api_token and clc_alias: - self.clc._LOGIN_TOKEN_V2 = v2_api_token - self.clc._V2_ENABLED = True - self.clc.ALIAS = clc_alias - elif v2_api_username and v2_api_passwd: - self.clc.v2.SetCredentials( - api_username=v2_api_username, - api_passwd=v2_api_passwd) - else: - return self.module.fail_json( - msg="You must set the CLC_V2_API_USERNAME and CLC_V2_API_PASSWD " - "environment variables") - - def _ensure_alert_policy_is_present(self): - """ - Ensures that the alert policy is present - :return: (changed, policy) - changed: A flag representing if anything is modified - policy: the created/updated alert policy - """ - changed = False - p = self.module.params - policy_name = p.get('name') - - if not policy_name: - self.module.fail_json(msg='Policy name is a required') - policy = self._alert_policy_exists(policy_name) - if not policy: - changed = True - policy = None - if not self.module.check_mode: - policy = self._create_alert_policy() - else: - changed_u, policy = self._ensure_alert_policy_is_updated(policy) - if changed_u: - changed = True - return changed, policy - - def _ensure_alert_policy_is_absent(self): - """ - Ensures that the alert policy is absent - :return: (changed, None) - changed: A flag representing if anything is modified - """ - changed = False - p = self.module.params - alert_policy_id = p.get('id') - alert_policy_name = p.get('name') - alias = p.get('alias') - if not alert_policy_id and not alert_policy_name: - 
self.module.fail_json( - msg='Either alert policy id or policy name is required') - if not alert_policy_id and alert_policy_name: - alert_policy_id = self._get_alert_policy_id( - self.module, - alert_policy_name) - if alert_policy_id and alert_policy_id in self.policy_dict: - changed = True - if not self.module.check_mode: - self._delete_alert_policy(alias, alert_policy_id) - return changed, None - - def _ensure_alert_policy_is_updated(self, alert_policy): - """ - Ensures the alert policy is updated if anything is changed in the alert policy configuration - :param alert_policy: the target alert policy - :return: (changed, policy) - changed: A flag representing if anything is modified - policy: the updated the alert policy - """ - changed = False - p = self.module.params - alert_policy_id = alert_policy.get('id') - email_list = p.get('alert_recipients') - metric = p.get('metric') - duration = p.get('duration') - threshold = p.get('threshold') - policy = alert_policy - if (metric and metric != str(alert_policy.get('triggers')[0].get('metric'))) or \ - (duration and duration != str(alert_policy.get('triggers')[0].get('duration'))) or \ - (threshold and float(threshold) != float(alert_policy.get('triggers')[0].get('threshold'))): - changed = True - elif email_list: - t_email_list = list( - alert_policy.get('actions')[0].get('settings').get('recipients')) - if set(email_list) != set(t_email_list): - changed = True - if changed and not self.module.check_mode: - policy = self._update_alert_policy(alert_policy_id) - return changed, policy - - def _get_alert_policies(self, alias): - """ - Get the alert policies for account alias by calling the CLC API. 
- :param alias: the account alias - :return: the alert policies for the account alias - """ - response = {} - - policies = self.clc.v2.API.Call('GET', - '/v2/alertPolicies/%s' - % alias) - - for policy in policies.get('items'): - response[policy.get('id')] = policy - return response - - def _create_alert_policy(self): - """ - Create an alert Policy using the CLC API. - :return: response dictionary from the CLC API. - """ - p = self.module.params - alias = p['alias'] - email_list = p['alert_recipients'] - metric = p['metric'] - duration = p['duration'] - threshold = p['threshold'] - policy_name = p['name'] - arguments = json.dumps( - { - 'name': policy_name, - 'actions': [{ - 'action': 'email', - 'settings': { - 'recipients': email_list - } - }], - 'triggers': [{ - 'metric': metric, - 'duration': duration, - 'threshold': threshold - }] - } - ) - try: - result = self.clc.v2.API.Call( - 'POST', - '/v2/alertPolicies/%s' % alias, - arguments) - except APIFailedResponse as e: - return self.module.fail_json( - msg='Unable to create alert policy "{0}". {1}'.format( - policy_name, str(e.response_text))) - return result - - def _update_alert_policy(self, alert_policy_id): - """ - Update alert policy using the CLC API. - :param alert_policy_id: The clc alert policy id - :return: response dictionary from the CLC API. - """ - p = self.module.params - alias = p['alias'] - email_list = p['alert_recipients'] - metric = p['metric'] - duration = p['duration'] - threshold = p['threshold'] - policy_name = p['name'] - arguments = json.dumps( - { - 'name': policy_name, - 'actions': [{ - 'action': 'email', - 'settings': { - 'recipients': email_list - } - }], - 'triggers': [{ - 'metric': metric, - 'duration': duration, - 'threshold': threshold - }] - } - ) - try: - result = self.clc.v2.API.Call( - 'PUT', '/v2/alertPolicies/%s/%s' % - (alias, alert_policy_id), arguments) - except APIFailedResponse as e: - return self.module.fail_json( - msg='Unable to update alert policy "{0}". 
{1}'.format( - policy_name, str(e.response_text))) - return result - - def _delete_alert_policy(self, alias, policy_id): - """ - Delete an alert policy using the CLC API. - :param alias : the account alias - :param policy_id: the alert policy id - :return: response dictionary from the CLC API. - """ - try: - result = self.clc.v2.API.Call( - 'DELETE', '/v2/alertPolicies/%s/%s' % - (alias, policy_id), None) - except APIFailedResponse as e: - return self.module.fail_json( - msg='Unable to delete alert policy id "{0}". {1}'.format( - policy_id, str(e.response_text))) - return result - - def _alert_policy_exists(self, policy_name): - """ - Check to see if an alert policy exists - :param policy_name: name of the alert policy - :return: boolean of if the policy exists - """ - result = False - for policy_id in self.policy_dict: - if self.policy_dict.get(policy_id).get('name') == policy_name: - result = self.policy_dict.get(policy_id) - return result - - def _get_alert_policy_id(self, module, alert_policy_name): - """ - retrieves the alert policy id of the account based on the name of the policy - :param module: the AnsibleModule object - :param alert_policy_name: the alert policy name - :return: alert_policy_id: The alert policy id - """ - alert_policy_id = None - for policy_id in self.policy_dict: - if self.policy_dict.get(policy_id).get('name') == alert_policy_name: - if not alert_policy_id: - alert_policy_id = policy_id - else: - return module.fail_json( - msg='multiple alert policies were found with policy name : %s' % alert_policy_name) - return alert_policy_id - - @staticmethod - def _set_user_agent(clc): - if hasattr(clc, 'SetRequestsSession'): - agent_string = "ClcAnsibleModule/" + __version__ - ses = requests.Session() - ses.headers.update({"Api-Client": agent_string}) - ses.headers['User-Agent'] += " " + agent_string - clc.SetRequestsSession(ses) - - -def main(): - """ - The main function. Instantiates the module and calls process_request. 
- :return: none - """ - argument_dict = ClcAlertPolicy._define_module_argument_spec() - module = AnsibleModule(supports_check_mode=True, **argument_dict) - clc_alert_policy = ClcAlertPolicy(module) - clc_alert_policy.process_request() - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/cloud/centurylink/clc_blueprint_package.py b/plugins/modules/cloud/centurylink/clc_blueprint_package.py deleted file mode 100644 index 9e0bfa809c..0000000000 --- a/plugins/modules/cloud/centurylink/clc_blueprint_package.py +++ /dev/null @@ -1,301 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# -# Copyright (c) 2015 CenturyLink -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' -module: clc_blueprint_package -short_description: deploys a blue print package on a set of servers in CenturyLink Cloud. -description: - - An Ansible module to deploy blue print package on a set of servers in CenturyLink Cloud. -options: - server_ids: - description: - - A list of server Ids to deploy the blue print package. - type: list - required: True - elements: str - package_id: - description: - - The package id of the blue print. - type: str - required: True - package_params: - description: - - The dictionary of arguments required to deploy the blue print. - type: dict - default: {} - required: False - state: - description: - - Whether to install or uninstall the package. Currently it supports only "present" for install action. - type: str - required: False - default: present - choices: ['present'] - wait: - description: - - Whether to wait for the tasks to finish before returning. 
- type: str - default: 'True' - required: False -requirements: - - python = 2.7 - - requests >= 2.5.0 - - clc-sdk -author: "CLC Runner (@clc-runner)" -notes: - - To use this module, it is required to set the below environment variables which enables access to the - Centurylink Cloud - - CLC_V2_API_USERNAME, the account login id for the centurylink cloud - - CLC_V2_API_PASSWORD, the account password for the centurylink cloud - - Alternatively, the module accepts the API token and account alias. The API token can be generated using the - CLC account login and password via the HTTP api call @ https://api.ctl.io/v2/authentication/login - - CLC_V2_API_TOKEN, the API token generated from https://api.ctl.io/v2/authentication/login - - CLC_ACCT_ALIAS, the account alias associated with the centurylink cloud - - Users can set CLC_V2_API_URL to specify an endpoint for pointing to a different CLC environment. -''' - -EXAMPLES = ''' -# Note - You must set the CLC_V2_API_USERNAME And CLC_V2_API_PASSWD Environment variables before running these examples - -- name: Deploy package - community.general.clc_blueprint_package: - server_ids: - - UC1TEST-SERVER1 - - UC1TEST-SERVER2 - package_id: 77abb844-579d-478d-3955-c69ab4a7ba1a - package_params: {} -''' - -RETURN = ''' -server_ids: - description: The list of server ids that are changed - returned: success - type: list - sample: - [ - "UC1TEST-SERVER1", - "UC1TEST-SERVER2" - ] -''' - -__version__ = '${version}' - -import os -import traceback - -from ansible_collections.community.general.plugins.module_utils.version import LooseVersion - -REQUESTS_IMP_ERR = None -try: - import requests -except ImportError: - REQUESTS_IMP_ERR = traceback.format_exc() - REQUESTS_FOUND = False -else: - REQUESTS_FOUND = True - -# -# Requires the clc-python-sdk. 
-# sudo pip install clc-sdk -# -CLC_IMP_ERR = None -try: - import clc as clc_sdk - from clc import CLCException -except ImportError: - CLC_IMP_ERR = traceback.format_exc() - CLC_FOUND = False - clc_sdk = None -else: - CLC_FOUND = True - -from ansible.module_utils.basic import AnsibleModule, missing_required_lib - - -class ClcBlueprintPackage: - - clc = clc_sdk - module = None - - def __init__(self, module): - """ - Construct module - """ - self.module = module - if not CLC_FOUND: - self.module.fail_json(msg=missing_required_lib('clc-sdk'), exception=CLC_IMP_ERR) - if not REQUESTS_FOUND: - self.module.fail_json(msg=missing_required_lib('requests'), exception=REQUESTS_IMP_ERR) - if requests.__version__ and LooseVersion(requests.__version__) < LooseVersion('2.5.0'): - self.module.fail_json( - msg='requests library version should be >= 2.5.0') - - self._set_user_agent(self.clc) - - def process_request(self): - """ - Process the request - Main Code Path - :return: Returns with either an exit_json or fail_json - """ - p = self.module.params - changed = False - changed_server_ids = [] - self._set_clc_credentials_from_env() - server_ids = p['server_ids'] - package_id = p['package_id'] - package_params = p['package_params'] - state = p['state'] - if state == 'present': - changed, changed_server_ids, request_list = self.ensure_package_installed( - server_ids, package_id, package_params) - self._wait_for_requests_to_complete(request_list) - self.module.exit_json(changed=changed, server_ids=changed_server_ids) - - @staticmethod - def define_argument_spec(): - """ - This function defines the dictionary object required for - package module - :return: the package dictionary object - """ - argument_spec = dict( - server_ids=dict(type='list', elements='str', required=True), - package_id=dict(required=True), - package_params=dict(type='dict', default={}), - wait=dict(default=True), # @FIXME should be bool? 
- state=dict(default='present', choices=['present']) - ) - return argument_spec - - def ensure_package_installed(self, server_ids, package_id, package_params): - """ - Ensure the package is installed in the given list of servers - :param server_ids: the server list where the package needs to be installed - :param package_id: the blueprint package id - :param package_params: the package arguments - :return: (changed, server_ids, request_list) - changed: A flag indicating if a change was made - server_ids: The list of servers modified - request_list: The list of request objects from clc-sdk - """ - changed = False - request_list = [] - servers = self._get_servers_from_clc( - server_ids, - 'Failed to get servers from CLC') - for server in servers: - if not self.module.check_mode: - request = self.clc_install_package( - server, - package_id, - package_params) - request_list.append(request) - changed = True - return changed, server_ids, request_list - - def clc_install_package(self, server, package_id, package_params): - """ - Install the package to a given clc server - :param server: The server object where the package needs to be installed - :param package_id: The blue print package id - :param package_params: the required argument dict for the package installation - :return: The result object from the CLC API call - """ - result = None - try: - result = server.ExecutePackage( - package_id=package_id, - parameters=package_params) - except CLCException as ex: - self.module.fail_json(msg='Failed to install package : {0} to server {1}. 
{2}'.format( - package_id, server.id, ex.message - )) - return result - - def _wait_for_requests_to_complete(self, request_lst): - """ - Waits until the CLC requests are complete if the wait argument is True - :param request_lst: The list of CLC request objects - :return: none - """ - if not self.module.params['wait']: - return - for request in request_lst: - request.WaitUntilComplete() - for request_details in request.requests: - if request_details.Status() != 'succeeded': - self.module.fail_json( - msg='Unable to process package install request') - - def _get_servers_from_clc(self, server_list, message): - """ - Internal function to fetch list of CLC server objects from a list of server ids - :param server_list: the list of server ids - :param message: the error message to raise if there is any error - :return the list of CLC server objects - """ - try: - return self.clc.v2.Servers(server_list).servers - except CLCException as ex: - self.module.fail_json(msg=message + ': %s' % ex) - - def _set_clc_credentials_from_env(self): - """ - Set the CLC Credentials on the sdk by reading environment variables - :return: none - """ - env = os.environ - v2_api_token = env.get('CLC_V2_API_TOKEN', False) - v2_api_username = env.get('CLC_V2_API_USERNAME', False) - v2_api_passwd = env.get('CLC_V2_API_PASSWD', False) - clc_alias = env.get('CLC_ACCT_ALIAS', False) - api_url = env.get('CLC_V2_API_URL', False) - - if api_url: - self.clc.defaults.ENDPOINT_URL_V2 = api_url - - if v2_api_token and clc_alias: - self.clc._LOGIN_TOKEN_V2 = v2_api_token - self.clc._V2_ENABLED = True - self.clc.ALIAS = clc_alias - elif v2_api_username and v2_api_passwd: - self.clc.v2.SetCredentials( - api_username=v2_api_username, - api_passwd=v2_api_passwd) - else: - return self.module.fail_json( - msg="You must set the CLC_V2_API_USERNAME and CLC_V2_API_PASSWD " - "environment variables") - - @staticmethod - def _set_user_agent(clc): - if hasattr(clc, 'SetRequestsSession'): - agent_string = 
"ClcAnsibleModule/" + __version__ - ses = requests.Session() - ses.headers.update({"Api-Client": agent_string}) - ses.headers['User-Agent'] += " " + agent_string - clc.SetRequestsSession(ses) - - -def main(): - """ - Main function - :return: None - """ - module = AnsibleModule( - argument_spec=ClcBlueprintPackage.define_argument_spec(), - supports_check_mode=True - ) - clc_blueprint_package = ClcBlueprintPackage(module) - clc_blueprint_package.process_request() - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/cloud/centurylink/clc_firewall_policy.py b/plugins/modules/cloud/centurylink/clc_firewall_policy.py deleted file mode 100644 index f1f4a2f22a..0000000000 --- a/plugins/modules/cloud/centurylink/clc_firewall_policy.py +++ /dev/null @@ -1,588 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# -# Copyright (c) 2015 CenturyLink -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' -module: clc_firewall_policy -short_description: Create/delete/update firewall policies -description: - - Create or delete or update firewall policies on Centurylink Cloud -options: - location: - description: - - Target datacenter for the firewall policy - type: str - required: True - state: - description: - - Whether to create or delete the firewall policy - type: str - default: present - choices: ['present', 'absent'] - source: - description: - - The list of source addresses for traffic on the originating firewall. - This is required when state is 'present' - type: list - elements: str - destination: - description: - - The list of destination addresses for traffic on the terminating firewall. - This is required when state is 'present' - type: list - elements: str - ports: - description: - - The list of ports associated with the policy. - TCP and UDP can take in single ports or port ranges. 
- - "Example: C(['any', 'icmp', 'TCP/123', 'UDP/123', 'TCP/123-456', 'UDP/123-456'])." - type: list - elements: str - firewall_policy_id: - description: - - Id of the firewall policy. This is required to update or delete an existing firewall policy - type: str - source_account_alias: - description: - - CLC alias for the source account - type: str - required: True - destination_account_alias: - description: - - CLC alias for the destination account - type: str - wait: - description: - - Whether to wait for the provisioning tasks to finish before returning. - type: str - default: 'True' - enabled: - description: - - Whether the firewall policy is enabled or disabled - type: str - choices: ['True', 'False'] - default: 'True' -requirements: - - python = 2.7 - - requests >= 2.5.0 - - clc-sdk -author: "CLC Runner (@clc-runner)" -notes: - - To use this module, it is required to set the below environment variables which enables access to the - Centurylink Cloud - - CLC_V2_API_USERNAME, the account login id for the centurylink cloud - - CLC_V2_API_PASSWORD, the account password for the centurylink cloud - - Alternatively, the module accepts the API token and account alias. The API token can be generated using the - CLC account login and password via the HTTP api call @ https://api.ctl.io/v2/authentication/login - - CLC_V2_API_TOKEN, the API token generated from https://api.ctl.io/v2/authentication/login - - CLC_ACCT_ALIAS, the account alias associated with the centurylink cloud - - Users can set CLC_V2_API_URL to specify an endpoint for pointing to a different CLC environment. 
-''' - -EXAMPLES = ''' ---- -- name: Create Firewall Policy - hosts: localhost - gather_facts: False - connection: local - tasks: - - name: Create / Verify an Firewall Policy at CenturyLink Cloud - clc_firewall: - source_account_alias: WFAD - location: VA1 - state: present - source: 10.128.216.0/24 - destination: 10.128.216.0/24 - ports: Any - destination_account_alias: WFAD - -- name: Delete Firewall Policy - hosts: localhost - gather_facts: False - connection: local - tasks: - - name: Delete an Firewall Policy at CenturyLink Cloud - clc_firewall: - source_account_alias: WFAD - location: VA1 - state: absent - firewall_policy_id: c62105233d7a4231bd2e91b9c791e43e1 -''' - -RETURN = ''' -firewall_policy_id: - description: The fire wall policy id - returned: success - type: str - sample: fc36f1bfd47242e488a9c44346438c05 -firewall_policy: - description: The fire wall policy information - returned: success - type: dict - sample: - { - "destination":[ - "10.1.1.0/24", - "10.2.2.0/24" - ], - "destinationAccount":"wfad", - "enabled":true, - "id":"fc36f1bfd47242e488a9c44346438c05", - "links":[ - { - "href":"http://api.ctl.io/v2-experimental/firewallPolicies/wfad/uc1/fc36f1bfd47242e488a9c44346438c05", - "rel":"self", - "verbs":[ - "GET", - "PUT", - "DELETE" - ] - } - ], - "ports":[ - "any" - ], - "source":[ - "10.1.1.0/24", - "10.2.2.0/24" - ], - "status":"active" - } -''' - -__version__ = '${version}' - -import os -import traceback -from ansible.module_utils.six.moves.urllib.parse import urlparse -from time import sleep - -from ansible_collections.community.general.plugins.module_utils.version import LooseVersion - -REQUESTS_IMP_ERR = None -try: - import requests -except ImportError: - REQUESTS_IMP_ERR = traceback.format_exc() - REQUESTS_FOUND = False -else: - REQUESTS_FOUND = True - -CLC_IMP_ERR = None -try: - import clc as clc_sdk - from clc import APIFailedResponse -except ImportError: - CLC_IMP_ERR = traceback.format_exc() - CLC_FOUND = False - clc_sdk = None -else: - 
CLC_FOUND = True - -from ansible.module_utils.basic import AnsibleModule, missing_required_lib - - -class ClcFirewallPolicy: - - clc = None - - def __init__(self, module): - """ - Construct module - """ - self.clc = clc_sdk - self.module = module - self.firewall_dict = {} - - if not CLC_FOUND: - self.module.fail_json(msg=missing_required_lib('clc-sdk'), exception=CLC_IMP_ERR) - if not REQUESTS_FOUND: - self.module.fail_json(msg=missing_required_lib('requests'), exception=REQUESTS_IMP_ERR) - if requests.__version__ and LooseVersion(requests.__version__) < LooseVersion('2.5.0'): - self.module.fail_json( - msg='requests library version should be >= 2.5.0') - - self._set_user_agent(self.clc) - - @staticmethod - def _define_module_argument_spec(): - """ - Define the argument spec for the ansible module - :return: argument spec dictionary - """ - argument_spec = dict( - location=dict(required=True), - source_account_alias=dict(required=True), - destination_account_alias=dict(), - firewall_policy_id=dict(), - ports=dict(type='list', elements='str'), - source=dict(type='list', elements='str'), - destination=dict(type='list', elements='str'), - wait=dict(default=True), # @FIXME type=bool - state=dict(default='present', choices=['present', 'absent']), - enabled=dict(default=True, choices=[True, False]) - ) - return argument_spec - - def process_request(self): - """ - Execute the main code path, and handle the request - :return: none - """ - changed = False - firewall_policy = None - location = self.module.params.get('location') - source_account_alias = self.module.params.get('source_account_alias') - destination_account_alias = self.module.params.get( - 'destination_account_alias') - firewall_policy_id = self.module.params.get('firewall_policy_id') - ports = self.module.params.get('ports') - source = self.module.params.get('source') - destination = self.module.params.get('destination') - wait = self.module.params.get('wait') - state = self.module.params.get('state') - 
enabled = self.module.params.get('enabled') - - self.firewall_dict = { - 'location': location, - 'source_account_alias': source_account_alias, - 'destination_account_alias': destination_account_alias, - 'firewall_policy_id': firewall_policy_id, - 'ports': ports, - 'source': source, - 'destination': destination, - 'wait': wait, - 'state': state, - 'enabled': enabled} - - self._set_clc_credentials_from_env() - - if state == 'absent': - changed, firewall_policy_id, firewall_policy = self._ensure_firewall_policy_is_absent( - source_account_alias, location, self.firewall_dict) - - elif state == 'present': - changed, firewall_policy_id, firewall_policy = self._ensure_firewall_policy_is_present( - source_account_alias, location, self.firewall_dict) - - return self.module.exit_json( - changed=changed, - firewall_policy_id=firewall_policy_id, - firewall_policy=firewall_policy) - - @staticmethod - def _get_policy_id_from_response(response): - """ - Method to parse out the policy id from creation response - :param response: response from firewall creation API call - :return: policy_id: firewall policy id from creation call - """ - url = response.get('links')[0]['href'] - path = urlparse(url).path - path_list = os.path.split(path) - policy_id = path_list[-1] - return policy_id - - def _set_clc_credentials_from_env(self): - """ - Set the CLC Credentials on the sdk by reading environment variables - :return: none - """ - env = os.environ - v2_api_token = env.get('CLC_V2_API_TOKEN', False) - v2_api_username = env.get('CLC_V2_API_USERNAME', False) - v2_api_passwd = env.get('CLC_V2_API_PASSWD', False) - clc_alias = env.get('CLC_ACCT_ALIAS', False) - api_url = env.get('CLC_V2_API_URL', False) - - if api_url: - self.clc.defaults.ENDPOINT_URL_V2 = api_url - - if v2_api_token and clc_alias: - self.clc._LOGIN_TOKEN_V2 = v2_api_token - self.clc._V2_ENABLED = True - self.clc.ALIAS = clc_alias - elif v2_api_username and v2_api_passwd: - self.clc.v2.SetCredentials( - 
api_username=v2_api_username, - api_passwd=v2_api_passwd) - else: - return self.module.fail_json( - msg="You must set the CLC_V2_API_USERNAME and CLC_V2_API_PASSWD " - "environment variables") - - def _ensure_firewall_policy_is_present( - self, - source_account_alias, - location, - firewall_dict): - """ - Ensures that a given firewall policy is present - :param source_account_alias: the source account alias for the firewall policy - :param location: datacenter of the firewall policy - :param firewall_dict: dictionary of request parameters for firewall policy - :return: (changed, firewall_policy_id, firewall_policy) - changed: flag for if a change occurred - firewall_policy_id: the firewall policy id that was created/updated - firewall_policy: The firewall_policy object - """ - firewall_policy = None - firewall_policy_id = firewall_dict.get('firewall_policy_id') - - if firewall_policy_id is None: - if not self.module.check_mode: - response = self._create_firewall_policy( - source_account_alias, - location, - firewall_dict) - firewall_policy_id = self._get_policy_id_from_response( - response) - changed = True - else: - firewall_policy = self._get_firewall_policy( - source_account_alias, location, firewall_policy_id) - if not firewall_policy: - return self.module.fail_json( - msg='Unable to find the firewall policy id : {0}'.format( - firewall_policy_id)) - changed = self._compare_get_request_with_dict( - firewall_policy, - firewall_dict) - if not self.module.check_mode and changed: - self._update_firewall_policy( - source_account_alias, - location, - firewall_policy_id, - firewall_dict) - if changed and firewall_policy_id: - firewall_policy = self._wait_for_requests_to_complete( - source_account_alias, - location, - firewall_policy_id) - return changed, firewall_policy_id, firewall_policy - - def _ensure_firewall_policy_is_absent( - self, - source_account_alias, - location, - firewall_dict): - """ - Ensures that a given firewall policy is removed if present - :param 
source_account_alias: the source account alias for the firewall policy - :param location: datacenter of the firewall policy - :param firewall_dict: firewall policy to delete - :return: (changed, firewall_policy_id, response) - changed: flag for if a change occurred - firewall_policy_id: the firewall policy id that was deleted - response: response from CLC API call - """ - changed = False - response = [] - firewall_policy_id = firewall_dict.get('firewall_policy_id') - result = self._get_firewall_policy( - source_account_alias, location, firewall_policy_id) - if result: - if not self.module.check_mode: - response = self._delete_firewall_policy( - source_account_alias, - location, - firewall_policy_id) - changed = True - return changed, firewall_policy_id, response - - def _create_firewall_policy( - self, - source_account_alias, - location, - firewall_dict): - """ - Creates the firewall policy for the given account alias - :param source_account_alias: the source account alias for the firewall policy - :param location: datacenter of the firewall policy - :param firewall_dict: dictionary of request parameters for firewall policy - :return: response from CLC API call - """ - payload = { - 'destinationAccount': firewall_dict.get('destination_account_alias'), - 'source': firewall_dict.get('source'), - 'destination': firewall_dict.get('destination'), - 'ports': firewall_dict.get('ports')} - try: - response = self.clc.v2.API.Call( - 'POST', '/v2-experimental/firewallPolicies/%s/%s' % - (source_account_alias, location), payload) - except APIFailedResponse as e: - return self.module.fail_json( - msg="Unable to create firewall policy. 
%s" % - str(e.response_text)) - return response - - def _delete_firewall_policy( - self, - source_account_alias, - location, - firewall_policy_id): - """ - Deletes a given firewall policy for an account alias in a datacenter - :param source_account_alias: the source account alias for the firewall policy - :param location: datacenter of the firewall policy - :param firewall_policy_id: firewall policy id to delete - :return: response: response from CLC API call - """ - try: - response = self.clc.v2.API.Call( - 'DELETE', '/v2-experimental/firewallPolicies/%s/%s/%s' % - (source_account_alias, location, firewall_policy_id)) - except APIFailedResponse as e: - return self.module.fail_json( - msg="Unable to delete the firewall policy id : {0}. {1}".format( - firewall_policy_id, str(e.response_text))) - return response - - def _update_firewall_policy( - self, - source_account_alias, - location, - firewall_policy_id, - firewall_dict): - """ - Updates a firewall policy for a given datacenter and account alias - :param source_account_alias: the source account alias for the firewall policy - :param location: datacenter of the firewall policy - :param firewall_policy_id: firewall policy id to update - :param firewall_dict: dictionary of request parameters for firewall policy - :return: response: response from CLC API call - """ - try: - response = self.clc.v2.API.Call( - 'PUT', - '/v2-experimental/firewallPolicies/%s/%s/%s' % - (source_account_alias, - location, - firewall_policy_id), - firewall_dict) - except APIFailedResponse as e: - return self.module.fail_json( - msg="Unable to update the firewall policy id : {0}. 
{1}".format( - firewall_policy_id, str(e.response_text))) - return response - - @staticmethod - def _compare_get_request_with_dict(response, firewall_dict): - """ - Helper method to compare the json response for getting the firewall policy with the request parameters - :param response: response from the get method - :param firewall_dict: dictionary of request parameters for firewall policy - :return: changed: Boolean that returns true if there are differences between - the response parameters and the playbook parameters - """ - - changed = False - - response_dest_account_alias = response.get('destinationAccount') - response_enabled = response.get('enabled') - response_source = response.get('source') - response_dest = response.get('destination') - response_ports = response.get('ports') - request_dest_account_alias = firewall_dict.get( - 'destination_account_alias') - request_enabled = firewall_dict.get('enabled') - if request_enabled is None: - request_enabled = True - request_source = firewall_dict.get('source') - request_dest = firewall_dict.get('destination') - request_ports = firewall_dict.get('ports') - - if ( - response_dest_account_alias and str(response_dest_account_alias) != str(request_dest_account_alias)) or ( - response_enabled != request_enabled) or ( - response_source and response_source != request_source) or ( - response_dest and response_dest != request_dest) or ( - response_ports and response_ports != request_ports): - changed = True - return changed - - def _get_firewall_policy( - self, - source_account_alias, - location, - firewall_policy_id): - """ - Get back details for a particular firewall policy - :param source_account_alias: the source account alias for the firewall policy - :param location: datacenter of the firewall policy - :param firewall_policy_id: id of the firewall policy to get - :return: response - The response from CLC API call - """ - response = None - try: - response = self.clc.v2.API.Call( - 'GET', 
'/v2-experimental/firewallPolicies/%s/%s/%s' % - (source_account_alias, location, firewall_policy_id)) - except APIFailedResponse as e: - if e.response_status_code != 404: - self.module.fail_json( - msg="Unable to fetch the firewall policy with id : {0}. {1}".format( - firewall_policy_id, str(e.response_text))) - return response - - def _wait_for_requests_to_complete( - self, - source_account_alias, - location, - firewall_policy_id, - wait_limit=50): - """ - Waits until the CLC requests are complete if the wait argument is True - :param source_account_alias: The source account alias for the firewall policy - :param location: datacenter of the firewall policy - :param firewall_policy_id: The firewall policy id - :param wait_limit: The number of times to check the status for completion - :return: the firewall_policy object - """ - wait = self.module.params.get('wait') - count = 0 - firewall_policy = None - while wait: - count += 1 - firewall_policy = self._get_firewall_policy( - source_account_alias, location, firewall_policy_id) - status = firewall_policy.get('status') - if status == 'active' or count > wait_limit: - wait = False - else: - # wait for 2 seconds - sleep(2) - return firewall_policy - - @staticmethod - def _set_user_agent(clc): - if hasattr(clc, 'SetRequestsSession'): - agent_string = "ClcAnsibleModule/" + __version__ - ses = requests.Session() - ses.headers.update({"Api-Client": agent_string}) - ses.headers['User-Agent'] += " " + agent_string - clc.SetRequestsSession(ses) - - -def main(): - """ - The main function. Instantiates the module and calls process_request. 
- :return: none - """ - module = AnsibleModule( - argument_spec=ClcFirewallPolicy._define_module_argument_spec(), - supports_check_mode=True) - - clc_firewall = ClcFirewallPolicy(module) - clc_firewall.process_request() - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/cloud/centurylink/clc_group.py b/plugins/modules/cloud/centurylink/clc_group.py deleted file mode 100644 index 5e131719f5..0000000000 --- a/plugins/modules/cloud/centurylink/clc_group.py +++ /dev/null @@ -1,514 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# -# Copyright (c) 2015 CenturyLink -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' -module: clc_group -short_description: Create/delete Server Groups at Centurylink Cloud -description: - - Create or delete Server Groups at Centurylink Centurylink Cloud -options: - name: - description: - - The name of the Server Group - type: str - required: True - description: - description: - - A description of the Server Group - type: str - required: False - parent: - description: - - The parent group of the server group. If parent is not provided, it creates the group at top level. - type: str - required: False - location: - description: - - Datacenter to create the group in. If location is not provided, the group gets created in the default datacenter - associated with the account - type: str - required: False - state: - description: - - Whether to create or delete the group - type: str - default: present - choices: ['present', 'absent'] - wait: - description: - - Whether to wait for the tasks to finish before returning. 
- type: bool - default: True - required: False -requirements: - - python = 2.7 - - requests >= 2.5.0 - - clc-sdk -author: "CLC Runner (@clc-runner)" -notes: - - To use this module, it is required to set the below environment variables which enables access to the - Centurylink Cloud - - CLC_V2_API_USERNAME, the account login id for the centurylink cloud - - CLC_V2_API_PASSWORD, the account password for the centurylink cloud - - Alternatively, the module accepts the API token and account alias. The API token can be generated using the - CLC account login and password via the HTTP api call @ https://api.ctl.io/v2/authentication/login - - CLC_V2_API_TOKEN, the API token generated from https://api.ctl.io/v2/authentication/login - - CLC_ACCT_ALIAS, the account alias associated with the centurylink cloud - - Users can set CLC_V2_API_URL to specify an endpoint for pointing to a different CLC environment. -''' - -EXAMPLES = ''' - -# Create a Server Group - ---- -- name: Create Server Group - hosts: localhost - gather_facts: False - connection: local - tasks: - - name: Create / Verify a Server Group at CenturyLink Cloud - community.general.clc_group: - name: My Cool Server Group - parent: Default Group - state: present - register: clc - - - name: Debug - ansible.builtin.debug: - var: clc - -# Delete a Server Group -- name: Delete Server Group - hosts: localhost - gather_facts: False - connection: local - tasks: - - name: Delete / Verify Absent a Server Group at CenturyLink Cloud - community.general.clc_group: - name: My Cool Server Group - parent: Default Group - state: absent - register: clc - - - name: Debug - ansible.builtin.debug: - var: clc -''' - -RETURN = ''' -group: - description: The group information - returned: success - type: dict - sample: - { - "changeInfo":{ - "createdBy":"service.wfad", - "createdDate":"2015-07-29T18:52:47Z", - "modifiedBy":"service.wfad", - "modifiedDate":"2015-07-29T18:52:47Z" - }, - "customFields":[ - - ], - "description":"test group", - 
"groups":[ - - ], - "id":"bb5f12a3c6044ae4ad0a03e73ae12cd1", - "links":[ - { - "href":"/v2/groups/wfad", - "rel":"createGroup", - "verbs":[ - "POST" - ] - }, - { - "href":"/v2/servers/wfad", - "rel":"createServer", - "verbs":[ - "POST" - ] - }, - { - "href":"/v2/groups/wfad/bb5f12a3c6044ae4ad0a03e73ae12cd1", - "rel":"self", - "verbs":[ - "GET", - "PATCH", - "DELETE" - ] - }, - { - "href":"/v2/groups/wfad/086ac1dfe0b6411989e8d1b77c4065f0", - "id":"086ac1dfe0b6411989e8d1b77c4065f0", - "rel":"parentGroup" - }, - { - "href":"/v2/groups/wfad/bb5f12a3c6044ae4ad0a03e73ae12cd1/defaults", - "rel":"defaults", - "verbs":[ - "GET", - "POST" - ] - }, - { - "href":"/v2/groups/wfad/bb5f12a3c6044ae4ad0a03e73ae12cd1/billing", - "rel":"billing" - }, - { - "href":"/v2/groups/wfad/bb5f12a3c6044ae4ad0a03e73ae12cd1/archive", - "rel":"archiveGroupAction" - }, - { - "href":"/v2/groups/wfad/bb5f12a3c6044ae4ad0a03e73ae12cd1/statistics", - "rel":"statistics" - }, - { - "href":"/v2/groups/wfad/bb5f12a3c6044ae4ad0a03e73ae12cd1/upcomingScheduledActivities", - "rel":"upcomingScheduledActivities" - }, - { - "href":"/v2/groups/wfad/bb5f12a3c6044ae4ad0a03e73ae12cd1/horizontalAutoscalePolicy", - "rel":"horizontalAutoscalePolicyMapping", - "verbs":[ - "GET", - "PUT", - "DELETE" - ] - }, - { - "href":"/v2/groups/wfad/bb5f12a3c6044ae4ad0a03e73ae12cd1/scheduledActivities", - "rel":"scheduledActivities", - "verbs":[ - "GET", - "POST" - ] - } - ], - "locationId":"UC1", - "name":"test group", - "status":"active", - "type":"default" - } -''' - -__version__ = '${version}' - -import os -import traceback - -from ansible_collections.community.general.plugins.module_utils.version import LooseVersion - -REQUESTS_IMP_ERR = None -try: - import requests -except ImportError: - REQUESTS_IMP_ERR = traceback.format_exc() - REQUESTS_FOUND = False -else: - REQUESTS_FOUND = True - -# -# Requires the clc-python-sdk. 
-# sudo pip install clc-sdk -# -CLC_IMP_ERR = None -try: - import clc as clc_sdk - from clc import CLCException -except ImportError: - CLC_IMP_ERR = traceback.format_exc() - CLC_FOUND = False - clc_sdk = None -else: - CLC_FOUND = True - -from ansible.module_utils.basic import AnsibleModule, missing_required_lib - - -class ClcGroup(object): - - clc = None - root_group = None - - def __init__(self, module): - """ - Construct module - """ - self.clc = clc_sdk - self.module = module - self.group_dict = {} - - if not CLC_FOUND: - self.module.fail_json(msg=missing_required_lib('clc-sdk'), exception=CLC_IMP_ERR) - if not REQUESTS_FOUND: - self.module.fail_json(msg=missing_required_lib('requests'), exception=REQUESTS_IMP_ERR) - if requests.__version__ and LooseVersion(requests.__version__) < LooseVersion('2.5.0'): - self.module.fail_json( - msg='requests library version should be >= 2.5.0') - - self._set_user_agent(self.clc) - - def process_request(self): - """ - Execute the main code path, and handle the request - :return: none - """ - location = self.module.params.get('location') - group_name = self.module.params.get('name') - parent_name = self.module.params.get('parent') - group_description = self.module.params.get('description') - state = self.module.params.get('state') - - self._set_clc_credentials_from_env() - self.group_dict = self._get_group_tree_for_datacenter( - datacenter=location) - - if state == "absent": - changed, group, requests = self._ensure_group_is_absent( - group_name=group_name, parent_name=parent_name) - if requests: - self._wait_for_requests_to_complete(requests) - else: - changed, group = self._ensure_group_is_present( - group_name=group_name, parent_name=parent_name, group_description=group_description) - try: - group = group.data - except AttributeError: - group = group_name - self.module.exit_json(changed=changed, group=group) - - @staticmethod - def _define_module_argument_spec(): - """ - Define the argument spec for the ansible module - 
:return: argument spec dictionary - """ - argument_spec = dict( - name=dict(required=True), - description=dict(), - parent=dict(), - location=dict(), - state=dict(default='present', choices=['present', 'absent']), - wait=dict(type='bool', default=True)) - - return argument_spec - - def _set_clc_credentials_from_env(self): - """ - Set the CLC Credentials on the sdk by reading environment variables - :return: none - """ - env = os.environ - v2_api_token = env.get('CLC_V2_API_TOKEN', False) - v2_api_username = env.get('CLC_V2_API_USERNAME', False) - v2_api_passwd = env.get('CLC_V2_API_PASSWD', False) - clc_alias = env.get('CLC_ACCT_ALIAS', False) - api_url = env.get('CLC_V2_API_URL', False) - - if api_url: - self.clc.defaults.ENDPOINT_URL_V2 = api_url - - if v2_api_token and clc_alias: - self.clc._LOGIN_TOKEN_V2 = v2_api_token - self.clc._V2_ENABLED = True - self.clc.ALIAS = clc_alias - elif v2_api_username and v2_api_passwd: - self.clc.v2.SetCredentials( - api_username=v2_api_username, - api_passwd=v2_api_passwd) - else: - return self.module.fail_json( - msg="You must set the CLC_V2_API_USERNAME and CLC_V2_API_PASSWD " - "environment variables") - - def _ensure_group_is_absent(self, group_name, parent_name): - """ - Ensure that group_name is absent by deleting it if necessary - :param group_name: string - the name of the clc server group to delete - :param parent_name: string - the name of the parent group for group_name - :return: changed, group - """ - changed = False - group = [] - results = [] - - if self._group_exists(group_name=group_name, parent_name=parent_name): - if not self.module.check_mode: - group.append(group_name) - result = self._delete_group(group_name) - results.append(result) - changed = True - return changed, group, results - - def _delete_group(self, group_name): - """ - Delete the provided server group - :param group_name: string - the server group to delete - :return: none - """ - response = None - group, parent = 
self.group_dict.get(group_name) - try: - response = group.Delete() - except CLCException as ex: - self.module.fail_json(msg='Failed to delete group :{0}. {1}'.format( - group_name, ex.response_text - )) - return response - - def _ensure_group_is_present( - self, - group_name, - parent_name, - group_description): - """ - Checks to see if a server group exists, creates it if it doesn't. - :param group_name: the name of the group to validate/create - :param parent_name: the name of the parent group for group_name - :param group_description: a short description of the server group (used when creating) - :return: (changed, group) - - changed: Boolean- whether a change was made, - group: A clc group object for the group - """ - if not self.root_group: - raise AssertionError("Implementation Error: Root Group not set") - parent = parent_name if parent_name is not None else self.root_group.name - description = group_description - changed = False - group = group_name - - parent_exists = self._group_exists(group_name=parent, parent_name=None) - child_exists = self._group_exists( - group_name=group_name, - parent_name=parent) - - if parent_exists and child_exists: - group, parent = self.group_dict[group_name] - changed = False - elif parent_exists and not child_exists: - if not self.module.check_mode: - group = self._create_group( - group=group, - parent=parent, - description=description) - changed = True - else: - self.module.fail_json( - msg="parent group: " + - parent + - " does not exist") - - return changed, group - - def _create_group(self, group, parent, description): - """ - Create the provided server group - :param group: clc_sdk.Group - the group to create - :param parent: clc_sdk.Parent - the parent group for {group} - :param description: string - a text description of the group - :return: clc_sdk.Group - the created group - """ - response = None - (parent, grandparent) = self.group_dict[parent] - try: - response = parent.Create(name=group, description=description) 
- except CLCException as ex: - self.module.fail_json(msg='Failed to create group :{0}. {1}'.format( - group, ex.response_text)) - return response - - def _group_exists(self, group_name, parent_name): - """ - Check to see if a group exists - :param group_name: string - the group to check - :param parent_name: string - the parent of group_name - :return: boolean - whether the group exists - """ - result = False - if group_name in self.group_dict: - (group, parent) = self.group_dict[group_name] - if parent_name is None or parent_name == parent.name: - result = True - return result - - def _get_group_tree_for_datacenter(self, datacenter=None): - """ - Walk the tree of groups for a datacenter - :param datacenter: string - the datacenter to walk (ex: 'UC1') - :return: a dictionary of groups and parents - """ - self.root_group = self.clc.v2.Datacenter( - location=datacenter).RootGroup() - return self._walk_groups_recursive( - parent_group=None, - child_group=self.root_group) - - def _walk_groups_recursive(self, parent_group, child_group): - """ - Walk a parent-child tree of groups, starting with the provided child group - :param parent_group: clc_sdk.Group - the parent group to start the walk - :param child_group: clc_sdk.Group - the child group to start the walk - :return: a dictionary of groups and parents - """ - result = {str(child_group): (child_group, parent_group)} - groups = child_group.Subgroups().groups - if len(groups) > 0: - for group in groups: - if group.type != 'default': - continue - - result.update(self._walk_groups_recursive(child_group, group)) - return result - - def _wait_for_requests_to_complete(self, requests_lst): - """ - Waits until the CLC requests are complete if the wait argument is True - :param requests_lst: The list of CLC request objects - :return: none - """ - if not self.module.params['wait']: - return - for request in requests_lst: - request.WaitUntilComplete() - for request_details in request.requests: - if request_details.Status() != 
'succeeded': - self.module.fail_json( - msg='Unable to process group request') - - @staticmethod - def _set_user_agent(clc): - if hasattr(clc, 'SetRequestsSession'): - agent_string = "ClcAnsibleModule/" + __version__ - ses = requests.Session() - ses.headers.update({"Api-Client": agent_string}) - ses.headers['User-Agent'] += " " + agent_string - clc.SetRequestsSession(ses) - - -def main(): - """ - The main function. Instantiates the module and calls process_request. - :return: none - """ - module = AnsibleModule( - argument_spec=ClcGroup._define_module_argument_spec(), - supports_check_mode=True) - - clc_group = ClcGroup(module) - clc_group.process_request() - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/cloud/centurylink/clc_loadbalancer.py b/plugins/modules/cloud/centurylink/clc_loadbalancer.py deleted file mode 100644 index 94a815e6ef..0000000000 --- a/plugins/modules/cloud/centurylink/clc_loadbalancer.py +++ /dev/null @@ -1,937 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# -# Copyright (c) 2015 CenturyLink -# -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' -module: clc_loadbalancer -short_description: Create, Delete shared loadbalancers in CenturyLink Cloud. -description: - - An Ansible module to Create, Delete shared loadbalancers in CenturyLink Cloud. 
-options: - name: - description: - - The name of the loadbalancer - type: str - required: True - description: - description: - - A description for the loadbalancer - type: str - alias: - description: - - The alias of your CLC Account - type: str - required: True - location: - description: - - The location of the datacenter where the load balancer resides in - type: str - required: True - method: - description: - -The balancing method for the load balancer pool - type: str - choices: ['leastConnection', 'roundRobin'] - persistence: - description: - - The persistence method for the load balancer - type: str - choices: ['standard', 'sticky'] - port: - description: - - Port to configure on the public-facing side of the load balancer pool - type: str - choices: ['80', '443'] - nodes: - description: - - A list of nodes that needs to be added to the load balancer pool - type: list - default: [] - elements: dict - status: - description: - - The status of the loadbalancer - type: str - default: enabled - choices: ['enabled', 'disabled'] - state: - description: - - Whether to create or delete the load balancer pool - type: str - default: present - choices: ['present', 'absent', 'port_absent', 'nodes_present', 'nodes_absent'] -requirements: - - python = 2.7 - - requests >= 2.5.0 - - clc-sdk -author: "CLC Runner (@clc-runner)" -notes: - - To use this module, it is required to set the below environment variables which enables access to the - Centurylink Cloud - - CLC_V2_API_USERNAME, the account login id for the centurylink cloud - - CLC_V2_API_PASSWORD, the account password for the centurylink cloud - - Alternatively, the module accepts the API token and account alias. 
The API token can be generated using the - CLC account login and password via the HTTP api call @ https://api.ctl.io/v2/authentication/login - - CLC_V2_API_TOKEN, the API token generated from https://api.ctl.io/v2/authentication/login - - CLC_ACCT_ALIAS, the account alias associated with the centurylink cloud - - Users can set CLC_V2_API_URL to specify an endpoint for pointing to a different CLC environment. -''' - -EXAMPLES = ''' -# Note - You must set the CLC_V2_API_USERNAME And CLC_V2_API_PASSWD Environment variables before running these examples -- name: Create Loadbalancer - hosts: localhost - connection: local - tasks: - - name: Actually Create things - community.general.clc_loadbalancer: - name: test - description: test - alias: TEST - location: WA1 - port: 443 - nodes: - - ipAddress: 10.11.22.123 - privatePort: 80 - state: present - -- name: Add node to an existing loadbalancer pool - hosts: localhost - connection: local - tasks: - - name: Actually Create things - community.general.clc_loadbalancer: - name: test - description: test - alias: TEST - location: WA1 - port: 443 - nodes: - - ipAddress: 10.11.22.234 - privatePort: 80 - state: nodes_present - -- name: Remove node from an existing loadbalancer pool - hosts: localhost - connection: local - tasks: - - name: Actually Create things - community.general.clc_loadbalancer: - name: test - description: test - alias: TEST - location: WA1 - port: 443 - nodes: - - ipAddress: 10.11.22.234 - privatePort: 80 - state: nodes_absent - -- name: Delete LoadbalancerPool - hosts: localhost - connection: local - tasks: - - name: Actually Delete things - community.general.clc_loadbalancer: - name: test - description: test - alias: TEST - location: WA1 - port: 443 - nodes: - - ipAddress: 10.11.22.123 - privatePort: 80 - state: port_absent - -- name: Delete Loadbalancer - hosts: localhost - connection: local - tasks: - - name: Actually Delete things - community.general.clc_loadbalancer: - name: test - description: test - 
alias: TEST - location: WA1 - port: 443 - nodes: - - ipAddress: 10.11.22.123 - privatePort: 80 - state: absent -''' - -RETURN = ''' -loadbalancer: - description: The load balancer result object from CLC - returned: success - type: dict - sample: - { - "description":"test-lb", - "id":"ab5b18cb81e94ab9925b61d1ca043fb5", - "ipAddress":"66.150.174.197", - "links":[ - { - "href":"/v2/sharedLoadBalancers/wfad/wa1/ab5b18cb81e94ab9925b61d1ca043fb5", - "rel":"self", - "verbs":[ - "GET", - "PUT", - "DELETE" - ] - }, - { - "href":"/v2/sharedLoadBalancers/wfad/wa1/ab5b18cb81e94ab9925b61d1ca043fb5/pools", - "rel":"pools", - "verbs":[ - "GET", - "POST" - ] - } - ], - "name":"test-lb", - "pools":[ - - ], - "status":"enabled" - } -''' - -__version__ = '${version}' - -import json -import os -import traceback -from time import sleep - -from ansible_collections.community.general.plugins.module_utils.version import LooseVersion - -REQUESTS_IMP_ERR = None -try: - import requests -except ImportError: - REQUESTS_IMP_ERR = traceback.format_exc() - REQUESTS_FOUND = False -else: - REQUESTS_FOUND = True - -# -# Requires the clc-python-sdk. 
-# sudo pip install clc-sdk -# -CLC_IMP_ERR = None -try: - import clc as clc_sdk - from clc import APIFailedResponse -except ImportError: - CLC_IMP_ERR = traceback.format_exc() - CLC_FOUND = False - clc_sdk = None -else: - CLC_FOUND = True - -from ansible.module_utils.basic import AnsibleModule, missing_required_lib - - -class ClcLoadBalancer: - - clc = None - - def __init__(self, module): - """ - Construct module - """ - self.clc = clc_sdk - self.module = module - self.lb_dict = {} - - if not CLC_FOUND: - self.module.fail_json(msg=missing_required_lib('clc-sdk'), exception=CLC_IMP_ERR) - if not REQUESTS_FOUND: - self.module.fail_json(msg=missing_required_lib('requests'), exception=REQUESTS_IMP_ERR) - if requests.__version__ and LooseVersion(requests.__version__) < LooseVersion('2.5.0'): - self.module.fail_json( - msg='requests library version should be >= 2.5.0') - - self._set_user_agent(self.clc) - - def process_request(self): - """ - Execute the main code path, and handle the request - :return: none - """ - changed = False - result_lb = None - loadbalancer_name = self.module.params.get('name') - loadbalancer_alias = self.module.params.get('alias') - loadbalancer_location = self.module.params.get('location') - loadbalancer_description = self.module.params.get('description') - loadbalancer_port = self.module.params.get('port') - loadbalancer_method = self.module.params.get('method') - loadbalancer_persistence = self.module.params.get('persistence') - loadbalancer_nodes = self.module.params.get('nodes') - loadbalancer_status = self.module.params.get('status') - state = self.module.params.get('state') - - if loadbalancer_description is None: - loadbalancer_description = loadbalancer_name - - self._set_clc_credentials_from_env() - - self.lb_dict = self._get_loadbalancer_list( - alias=loadbalancer_alias, - location=loadbalancer_location) - - if state == 'present': - changed, result_lb, lb_id = self.ensure_loadbalancer_present( - name=loadbalancer_name, - 
alias=loadbalancer_alias, - location=loadbalancer_location, - description=loadbalancer_description, - status=loadbalancer_status) - if loadbalancer_port: - changed, result_pool, pool_id = self.ensure_loadbalancerpool_present( - lb_id=lb_id, - alias=loadbalancer_alias, - location=loadbalancer_location, - method=loadbalancer_method, - persistence=loadbalancer_persistence, - port=loadbalancer_port) - - if loadbalancer_nodes: - changed, result_nodes = self.ensure_lbpool_nodes_set( - alias=loadbalancer_alias, - location=loadbalancer_location, - name=loadbalancer_name, - port=loadbalancer_port, - nodes=loadbalancer_nodes) - elif state == 'absent': - changed, result_lb = self.ensure_loadbalancer_absent( - name=loadbalancer_name, - alias=loadbalancer_alias, - location=loadbalancer_location) - - elif state == 'port_absent': - changed, result_lb = self.ensure_loadbalancerpool_absent( - alias=loadbalancer_alias, - location=loadbalancer_location, - name=loadbalancer_name, - port=loadbalancer_port) - - elif state == 'nodes_present': - changed, result_lb = self.ensure_lbpool_nodes_present( - alias=loadbalancer_alias, - location=loadbalancer_location, - name=loadbalancer_name, - port=loadbalancer_port, - nodes=loadbalancer_nodes) - - elif state == 'nodes_absent': - changed, result_lb = self.ensure_lbpool_nodes_absent( - alias=loadbalancer_alias, - location=loadbalancer_location, - name=loadbalancer_name, - port=loadbalancer_port, - nodes=loadbalancer_nodes) - - self.module.exit_json(changed=changed, loadbalancer=result_lb) - - def ensure_loadbalancer_present( - self, name, alias, location, description, status): - """ - Checks to see if a load balancer exists and creates one if it does not. 
- :param name: Name of loadbalancer - :param alias: Alias of account - :param location: Datacenter - :param description: Description of loadbalancer - :param status: Enabled / Disabled - :return: (changed, result, lb_id) - changed: Boolean whether a change was made - result: The result object from the CLC load balancer request - lb_id: The load balancer id - """ - changed = False - result = name - lb_id = self._loadbalancer_exists(name=name) - if not lb_id: - if not self.module.check_mode: - result = self.create_loadbalancer(name=name, - alias=alias, - location=location, - description=description, - status=status) - lb_id = result.get('id') - changed = True - - return changed, result, lb_id - - def ensure_loadbalancerpool_present( - self, lb_id, alias, location, method, persistence, port): - """ - Checks to see if a load balancer pool exists and creates one if it does not. - :param lb_id: The loadbalancer id - :param alias: The account alias - :param location: the datacenter the load balancer resides in - :param method: the load balancing method - :param persistence: the load balancing persistence type - :param port: the port that the load balancer will listen on - :return: (changed, group, pool_id) - - changed: Boolean whether a change was made - result: The result from the CLC API call - pool_id: The string id of the load balancer pool - """ - changed = False - result = port - if not lb_id: - return changed, None, None - pool_id = self._loadbalancerpool_exists( - alias=alias, - location=location, - port=port, - lb_id=lb_id) - if not pool_id: - if not self.module.check_mode: - result = self.create_loadbalancerpool( - alias=alias, - location=location, - lb_id=lb_id, - method=method, - persistence=persistence, - port=port) - pool_id = result.get('id') - changed = True - - return changed, result, pool_id - - def ensure_loadbalancer_absent(self, name, alias, location): - """ - Checks to see if a load balancer exists and deletes it if it does - :param name: Name of the 
load balancer - :param alias: Alias of account - :param location: Datacenter - :return: (changed, result) - changed: Boolean whether a change was made - result: The result from the CLC API Call - """ - changed = False - result = name - lb_exists = self._loadbalancer_exists(name=name) - if lb_exists: - if not self.module.check_mode: - result = self.delete_loadbalancer(alias=alias, - location=location, - name=name) - changed = True - return changed, result - - def ensure_loadbalancerpool_absent(self, alias, location, name, port): - """ - Checks to see if a load balancer pool exists and deletes it if it does - :param alias: The account alias - :param location: the datacenter the load balancer resides in - :param name: the name of the load balancer - :param port: the port that the load balancer listens on - :return: (changed, result) - - changed: Boolean whether a change was made - result: The result from the CLC API call - """ - changed = False - result = None - lb_exists = self._loadbalancer_exists(name=name) - if lb_exists: - lb_id = self._get_loadbalancer_id(name=name) - pool_id = self._loadbalancerpool_exists( - alias=alias, - location=location, - port=port, - lb_id=lb_id) - if pool_id: - changed = True - if not self.module.check_mode: - result = self.delete_loadbalancerpool( - alias=alias, - location=location, - lb_id=lb_id, - pool_id=pool_id) - else: - result = "Pool doesn't exist" - else: - result = "LB Doesn't Exist" - return changed, result - - def ensure_lbpool_nodes_set(self, alias, location, name, port, nodes): - """ - Checks to see if the provided list of nodes exist for the pool - and set the nodes if any in the list those doesn't exist - :param alias: The account alias - :param location: the datacenter the load balancer resides in - :param name: the name of the load balancer - :param port: the port that the load balancer will listen on - :param nodes: The list of nodes to be updated to the pool - :return: (changed, result) - - changed: Boolean whether a 
change was made - result: The result from the CLC API call - """ - result = {} - changed = False - lb_exists = self._loadbalancer_exists(name=name) - if lb_exists: - lb_id = self._get_loadbalancer_id(name=name) - pool_id = self._loadbalancerpool_exists( - alias=alias, - location=location, - port=port, - lb_id=lb_id) - if pool_id: - nodes_exist = self._loadbalancerpool_nodes_exists(alias=alias, - location=location, - lb_id=lb_id, - pool_id=pool_id, - nodes_to_check=nodes) - if not nodes_exist: - changed = True - result = self.set_loadbalancernodes(alias=alias, - location=location, - lb_id=lb_id, - pool_id=pool_id, - nodes=nodes) - else: - result = "Pool doesn't exist" - else: - result = "Load balancer doesn't Exist" - return changed, result - - def ensure_lbpool_nodes_present(self, alias, location, name, port, nodes): - """ - Checks to see if the provided list of nodes exist for the pool and add the missing nodes to the pool - :param alias: The account alias - :param location: the datacenter the load balancer resides in - :param name: the name of the load balancer - :param port: the port that the load balancer will listen on - :param nodes: the list of nodes to be added - :return: (changed, result) - - changed: Boolean whether a change was made - result: The result from the CLC API call - """ - changed = False - lb_exists = self._loadbalancer_exists(name=name) - if lb_exists: - lb_id = self._get_loadbalancer_id(name=name) - pool_id = self._loadbalancerpool_exists( - alias=alias, - location=location, - port=port, - lb_id=lb_id) - if pool_id: - changed, result = self.add_lbpool_nodes(alias=alias, - location=location, - lb_id=lb_id, - pool_id=pool_id, - nodes_to_add=nodes) - else: - result = "Pool doesn't exist" - else: - result = "Load balancer doesn't Exist" - return changed, result - - def ensure_lbpool_nodes_absent(self, alias, location, name, port, nodes): - """ - Checks to see if the provided list of nodes exist for the pool and removes them if found any - :param 
alias: The account alias - :param location: the datacenter the load balancer resides in - :param name: the name of the load balancer - :param port: the port that the load balancer will listen on - :param nodes: the list of nodes to be removed - :return: (changed, result) - - changed: Boolean whether a change was made - result: The result from the CLC API call - """ - changed = False - lb_exists = self._loadbalancer_exists(name=name) - if lb_exists: - lb_id = self._get_loadbalancer_id(name=name) - pool_id = self._loadbalancerpool_exists( - alias=alias, - location=location, - port=port, - lb_id=lb_id) - if pool_id: - changed, result = self.remove_lbpool_nodes(alias=alias, - location=location, - lb_id=lb_id, - pool_id=pool_id, - nodes_to_remove=nodes) - else: - result = "Pool doesn't exist" - else: - result = "Load balancer doesn't Exist" - return changed, result - - def create_loadbalancer(self, name, alias, location, description, status): - """ - Create a loadbalancer w/ params - :param name: Name of loadbalancer - :param alias: Alias of account - :param location: Datacenter - :param description: Description for loadbalancer to be created - :param status: Enabled / Disabled - :return: result: The result from the CLC API call - """ - result = None - try: - result = self.clc.v2.API.Call('POST', - '/v2/sharedLoadBalancers/%s/%s' % (alias, - location), - json.dumps({"name": name, - "description": description, - "status": status})) - sleep(1) - except APIFailedResponse as e: - self.module.fail_json( - msg='Unable to create load balancer "{0}". 
{1}'.format( - name, str(e.response_text))) - return result - - def create_loadbalancerpool( - self, alias, location, lb_id, method, persistence, port): - """ - Creates a pool on the provided load balancer - :param alias: the account alias - :param location: the datacenter the load balancer resides in - :param lb_id: the id string of the load balancer - :param method: the load balancing method - :param persistence: the load balancing persistence type - :param port: the port that the load balancer will listen on - :return: result: The result from the create API call - """ - result = None - try: - result = self.clc.v2.API.Call( - 'POST', '/v2/sharedLoadBalancers/%s/%s/%s/pools' % - (alias, location, lb_id), json.dumps( - { - "port": port, "method": method, "persistence": persistence - })) - except APIFailedResponse as e: - self.module.fail_json( - msg='Unable to create pool for load balancer id "{0}". {1}'.format( - lb_id, str(e.response_text))) - return result - - def delete_loadbalancer(self, alias, location, name): - """ - Delete CLC loadbalancer - :param alias: Alias for account - :param location: Datacenter - :param name: Name of the loadbalancer to delete - :return: result: The result from the CLC API call - """ - result = None - lb_id = self._get_loadbalancer_id(name=name) - try: - result = self.clc.v2.API.Call( - 'DELETE', '/v2/sharedLoadBalancers/%s/%s/%s' % - (alias, location, lb_id)) - except APIFailedResponse as e: - self.module.fail_json( - msg='Unable to delete load balancer "{0}". 
{1}'.format( - name, str(e.response_text))) - return result - - def delete_loadbalancerpool(self, alias, location, lb_id, pool_id): - """ - Delete the pool on the provided load balancer - :param alias: The account alias - :param location: the datacenter the load balancer resides in - :param lb_id: the id string of the load balancer - :param pool_id: the id string of the load balancer pool - :return: result: The result from the delete API call - """ - result = None - try: - result = self.clc.v2.API.Call( - 'DELETE', '/v2/sharedLoadBalancers/%s/%s/%s/pools/%s' % - (alias, location, lb_id, pool_id)) - except APIFailedResponse as e: - self.module.fail_json( - msg='Unable to delete pool for load balancer id "{0}". {1}'.format( - lb_id, str(e.response_text))) - return result - - def _get_loadbalancer_id(self, name): - """ - Retrieves unique ID of loadbalancer - :param name: Name of loadbalancer - :return: Unique ID of the loadbalancer - """ - id = None - for lb in self.lb_dict: - if lb.get('name') == name: - id = lb.get('id') - return id - - def _get_loadbalancer_list(self, alias, location): - """ - Retrieve a list of loadbalancers - :param alias: Alias for account - :param location: Datacenter - :return: JSON data for all loadbalancers at datacenter - """ - result = None - try: - result = self.clc.v2.API.Call( - 'GET', '/v2/sharedLoadBalancers/%s/%s' % (alias, location)) - except APIFailedResponse as e: - self.module.fail_json( - msg='Unable to fetch load balancers for account: {0}. 
{1}'.format( - alias, str(e.response_text))) - return result - - def _loadbalancer_exists(self, name): - """ - Verify a loadbalancer exists - :param name: Name of loadbalancer - :return: False or the ID of the existing loadbalancer - """ - result = False - - for lb in self.lb_dict: - if lb.get('name') == name: - result = lb.get('id') - return result - - def _loadbalancerpool_exists(self, alias, location, port, lb_id): - """ - Checks to see if a pool exists on the specified port on the provided load balancer - :param alias: the account alias - :param location: the datacenter the load balancer resides in - :param port: the port to check and see if it exists - :param lb_id: the id string of the provided load balancer - :return: result: The id string of the pool or False - """ - result = False - try: - pool_list = self.clc.v2.API.Call( - 'GET', '/v2/sharedLoadBalancers/%s/%s/%s/pools' % - (alias, location, lb_id)) - except APIFailedResponse as e: - return self.module.fail_json( - msg='Unable to fetch the load balancer pools for for load balancer id: {0}. 
{1}'.format( - lb_id, str(e.response_text))) - for pool in pool_list: - if int(pool.get('port')) == int(port): - result = pool.get('id') - return result - - def _loadbalancerpool_nodes_exists( - self, alias, location, lb_id, pool_id, nodes_to_check): - """ - Checks to see if a set of nodes exists on the specified port on the provided load balancer - :param alias: the account alias - :param location: the datacenter the load balancer resides in - :param lb_id: the id string of the provided load balancer - :param pool_id: the id string of the load balancer pool - :param nodes_to_check: the list of nodes to check for - :return: result: True / False indicating if the given nodes exist - """ - result = False - nodes = self._get_lbpool_nodes(alias, location, lb_id, pool_id) - for node in nodes_to_check: - if not node.get('status'): - node['status'] = 'enabled' - if node in nodes: - result = True - else: - result = False - return result - - def set_loadbalancernodes(self, alias, location, lb_id, pool_id, nodes): - """ - Updates nodes to the provided pool - :param alias: the account alias - :param location: the datacenter the load balancer resides in - :param lb_id: the id string of the load balancer - :param pool_id: the id string of the pool - :param nodes: a list of dictionaries containing the nodes to set - :return: result: The result from the CLC API call - """ - result = None - if not lb_id: - return result - if not self.module.check_mode: - try: - result = self.clc.v2.API.Call('PUT', - '/v2/sharedLoadBalancers/%s/%s/%s/pools/%s/nodes' - % (alias, location, lb_id, pool_id), json.dumps(nodes)) - except APIFailedResponse as e: - self.module.fail_json( - msg='Unable to set nodes for the load balancer pool id "{0}". 
{1}'.format( - pool_id, str(e.response_text))) - return result - - def add_lbpool_nodes(self, alias, location, lb_id, pool_id, nodes_to_add): - """ - Add nodes to the provided pool - :param alias: the account alias - :param location: the datacenter the load balancer resides in - :param lb_id: the id string of the load balancer - :param pool_id: the id string of the pool - :param nodes_to_add: a list of dictionaries containing the nodes to add - :return: (changed, result) - - changed: Boolean whether a change was made - result: The result from the CLC API call - """ - changed = False - result = {} - nodes = self._get_lbpool_nodes(alias, location, lb_id, pool_id) - for node in nodes_to_add: - if not node.get('status'): - node['status'] = 'enabled' - if node not in nodes: - changed = True - nodes.append(node) - if changed is True and not self.module.check_mode: - result = self.set_loadbalancernodes( - alias, - location, - lb_id, - pool_id, - nodes) - return changed, result - - def remove_lbpool_nodes( - self, alias, location, lb_id, pool_id, nodes_to_remove): - """ - Removes nodes from the provided pool - :param alias: the account alias - :param location: the datacenter the load balancer resides in - :param lb_id: the id string of the load balancer - :param pool_id: the id string of the pool - :param nodes_to_remove: a list of dictionaries containing the nodes to remove - :return: (changed, result) - - changed: Boolean whether a change was made - result: The result from the CLC API call - """ - changed = False - result = {} - nodes = self._get_lbpool_nodes(alias, location, lb_id, pool_id) - for node in nodes_to_remove: - if not node.get('status'): - node['status'] = 'enabled' - if node in nodes: - changed = True - nodes.remove(node) - if changed is True and not self.module.check_mode: - result = self.set_loadbalancernodes( - alias, - location, - lb_id, - pool_id, - nodes) - return changed, result - - def _get_lbpool_nodes(self, alias, location, lb_id, pool_id): - """ 
- Return the list of nodes available to the provided load balancer pool - :param alias: the account alias - :param location: the datacenter the load balancer resides in - :param lb_id: the id string of the load balancer - :param pool_id: the id string of the pool - :return: result: The list of nodes - """ - result = None - try: - result = self.clc.v2.API.Call('GET', - '/v2/sharedLoadBalancers/%s/%s/%s/pools/%s/nodes' - % (alias, location, lb_id, pool_id)) - except APIFailedResponse as e: - self.module.fail_json( - msg='Unable to fetch list of available nodes for load balancer pool id: {0}. {1}'.format( - pool_id, str(e.response_text))) - return result - - @staticmethod - def define_argument_spec(): - """ - Define the argument spec for the ansible module - :return: argument spec dictionary - """ - argument_spec = dict( - name=dict(required=True), - description=dict(), - location=dict(required=True), - alias=dict(required=True), - port=dict(choices=[80, 443]), - method=dict(choices=['leastConnection', 'roundRobin']), - persistence=dict(choices=['standard', 'sticky']), - nodes=dict(type='list', default=[], elements='dict'), - status=dict(default='enabled', choices=['enabled', 'disabled']), - state=dict( - default='present', - choices=[ - 'present', - 'absent', - 'port_absent', - 'nodes_present', - 'nodes_absent']) - ) - return argument_spec - - def _set_clc_credentials_from_env(self): - """ - Set the CLC Credentials on the sdk by reading environment variables - :return: none - """ - env = os.environ - v2_api_token = env.get('CLC_V2_API_TOKEN', False) - v2_api_username = env.get('CLC_V2_API_USERNAME', False) - v2_api_passwd = env.get('CLC_V2_API_PASSWD', False) - clc_alias = env.get('CLC_ACCT_ALIAS', False) - api_url = env.get('CLC_V2_API_URL', False) - - if api_url: - self.clc.defaults.ENDPOINT_URL_V2 = api_url - - if v2_api_token and clc_alias: - self.clc._LOGIN_TOKEN_V2 = v2_api_token - self.clc._V2_ENABLED = True - self.clc.ALIAS = clc_alias - elif v2_api_username 
and v2_api_passwd: - self.clc.v2.SetCredentials( - api_username=v2_api_username, - api_passwd=v2_api_passwd) - else: - return self.module.fail_json( - msg="You must set the CLC_V2_API_USERNAME and CLC_V2_API_PASSWD " - "environment variables") - - @staticmethod - def _set_user_agent(clc): - if hasattr(clc, 'SetRequestsSession'): - agent_string = "ClcAnsibleModule/" + __version__ - ses = requests.Session() - ses.headers.update({"Api-Client": agent_string}) - ses.headers['User-Agent'] += " " + agent_string - clc.SetRequestsSession(ses) - - -def main(): - """ - The main function. Instantiates the module and calls process_request. - :return: none - """ - module = AnsibleModule(argument_spec=ClcLoadBalancer.define_argument_spec(), - supports_check_mode=True) - clc_loadbalancer = ClcLoadBalancer(module) - clc_loadbalancer.process_request() - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/cloud/centurylink/clc_modify_server.py b/plugins/modules/cloud/centurylink/clc_modify_server.py deleted file mode 100644 index 27cdf614ec..0000000000 --- a/plugins/modules/cloud/centurylink/clc_modify_server.py +++ /dev/null @@ -1,967 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# -# Copyright (c) 2015 CenturyLink -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' -module: clc_modify_server -short_description: modify servers in CenturyLink Cloud. -description: - - An Ansible module to modify servers in CenturyLink Cloud. -options: - server_ids: - description: - - A list of server Ids to modify. - type: list - required: True - elements: str - cpu: - description: - - How many CPUs to update on the server - type: str - memory: - description: - - Memory (in GB) to set to the server. - type: str - anti_affinity_policy_id: - description: - - The anti affinity policy id to be set for a hyper scale server. 
- This is mutually exclusive with 'anti_affinity_policy_name' - type: str - anti_affinity_policy_name: - description: - - The anti affinity policy name to be set for a hyper scale server. - This is mutually exclusive with 'anti_affinity_policy_id' - type: str - alert_policy_id: - description: - - The alert policy id to be associated to the server. - This is mutually exclusive with 'alert_policy_name' - type: str - alert_policy_name: - description: - - The alert policy name to be associated to the server. - This is mutually exclusive with 'alert_policy_id' - type: str - state: - description: - - The state to insure that the provided resources are in. - type: str - default: 'present' - choices: ['present', 'absent'] - wait: - description: - - Whether to wait for the provisioning tasks to finish before returning. - type: bool - default: 'yes' -requirements: - - python = 2.7 - - requests >= 2.5.0 - - clc-sdk -author: "CLC Runner (@clc-runner)" -notes: - - To use this module, it is required to set the below environment variables which enables access to the - Centurylink Cloud - - CLC_V2_API_USERNAME, the account login id for the centurylink cloud - - CLC_V2_API_PASSWORD, the account password for the centurylink cloud - - Alternatively, the module accepts the API token and account alias. The API token can be generated using the - CLC account login and password via the HTTP api call @ https://api.ctl.io/v2/authentication/login - - CLC_V2_API_TOKEN, the API token generated from https://api.ctl.io/v2/authentication/login - - CLC_ACCT_ALIAS, the account alias associated with the centurylink cloud - - Users can set CLC_V2_API_URL to specify an endpoint for pointing to a different CLC environment. 
-''' - -EXAMPLES = ''' -# Note - You must set the CLC_V2_API_USERNAME And CLC_V2_API_PASSWD Environment variables before running these examples - -- name: Set the cpu count to 4 on a server - community.general.clc_modify_server: - server_ids: - - UC1TESTSVR01 - - UC1TESTSVR02 - cpu: 4 - state: present - -- name: Set the memory to 8GB on a server - community.general.clc_modify_server: - server_ids: - - UC1TESTSVR01 - - UC1TESTSVR02 - memory: 8 - state: present - -- name: Set the anti affinity policy on a server - community.general.clc_modify_server: - server_ids: - - UC1TESTSVR01 - - UC1TESTSVR02 - anti_affinity_policy_name: 'aa_policy' - state: present - -- name: Remove the anti affinity policy on a server - community.general.clc_modify_server: - server_ids: - - UC1TESTSVR01 - - UC1TESTSVR02 - anti_affinity_policy_name: 'aa_policy' - state: absent - -- name: Add the alert policy on a server - community.general.clc_modify_server: - server_ids: - - UC1TESTSVR01 - - UC1TESTSVR02 - alert_policy_name: 'alert_policy' - state: present - -- name: Remove the alert policy on a server - community.general.clc_modify_server: - server_ids: - - UC1TESTSVR01 - - UC1TESTSVR02 - alert_policy_name: 'alert_policy' - state: absent - -- name: Ret the memory to 16GB and cpu to 8 core on a lust if servers - community.general.clc_modify_server: - server_ids: - - UC1TESTSVR01 - - UC1TESTSVR02 - cpu: 8 - memory: 16 - state: present -''' - -RETURN = ''' -server_ids: - description: The list of server ids that are changed - returned: success - type: list - sample: - [ - "UC1TEST-SVR01", - "UC1TEST-SVR02" - ] -servers: - description: The list of server objects that are changed - returned: success - type: list - sample: - [ - { - "changeInfo":{ - "createdBy":"service.wfad", - "createdDate":1438196820, - "modifiedBy":"service.wfad", - "modifiedDate":1438196820 - }, - "description":"test-server", - "details":{ - "alertPolicies":[ - - ], - "cpu":1, - "customFields":[ - - ], - "diskCount":3, - 
"disks":[ - { - "id":"0:0", - "partitionPaths":[ - - ], - "sizeGB":1 - }, - { - "id":"0:1", - "partitionPaths":[ - - ], - "sizeGB":2 - }, - { - "id":"0:2", - "partitionPaths":[ - - ], - "sizeGB":14 - } - ], - "hostName":"", - "inMaintenanceMode":false, - "ipAddresses":[ - { - "internal":"10.1.1.1" - } - ], - "memoryGB":1, - "memoryMB":1024, - "partitions":[ - - ], - "powerState":"started", - "snapshots":[ - - ], - "storageGB":17 - }, - "groupId":"086ac1dfe0b6411989e8d1b77c4065f0", - "id":"test-server", - "ipaddress":"10.120.45.23", - "isTemplate":false, - "links":[ - { - "href":"/v2/servers/wfad/test-server", - "id":"test-server", - "rel":"self", - "verbs":[ - "GET", - "PATCH", - "DELETE" - ] - }, - { - "href":"/v2/groups/wfad/086ac1dfe0b6411989e8d1b77c4065f0", - "id":"086ac1dfe0b6411989e8d1b77c4065f0", - "rel":"group" - }, - { - "href":"/v2/accounts/wfad", - "id":"wfad", - "rel":"account" - }, - { - "href":"/v2/billing/wfad/serverPricing/test-server", - "rel":"billing" - }, - { - "href":"/v2/servers/wfad/test-server/publicIPAddresses", - "rel":"publicIPAddresses", - "verbs":[ - "POST" - ] - }, - { - "href":"/v2/servers/wfad/test-server/credentials", - "rel":"credentials" - }, - { - "href":"/v2/servers/wfad/test-server/statistics", - "rel":"statistics" - }, - { - "href":"/v2/servers/wfad/510ec21ae82d4dc89d28479753bf736a/upcomingScheduledActivities", - "rel":"upcomingScheduledActivities" - }, - { - "href":"/v2/servers/wfad/510ec21ae82d4dc89d28479753bf736a/scheduledActivities", - "rel":"scheduledActivities", - "verbs":[ - "GET", - "POST" - ] - }, - { - "href":"/v2/servers/wfad/test-server/capabilities", - "rel":"capabilities" - }, - { - "href":"/v2/servers/wfad/test-server/alertPolicies", - "rel":"alertPolicyMappings", - "verbs":[ - "POST" - ] - }, - { - "href":"/v2/servers/wfad/test-server/antiAffinityPolicy", - "rel":"antiAffinityPolicyMapping", - "verbs":[ - "PUT", - "DELETE" - ] - }, - { - "href":"/v2/servers/wfad/test-server/cpuAutoscalePolicy", - 
"rel":"cpuAutoscalePolicyMapping", - "verbs":[ - "PUT", - "DELETE" - ] - } - ], - "locationId":"UC1", - "name":"test-server", - "os":"ubuntu14_64Bit", - "osType":"Ubuntu 14 64-bit", - "status":"active", - "storageType":"standard", - "type":"standard" - } - ] -''' - -__version__ = '${version}' - -import json -import os -import traceback - -from ansible_collections.community.general.plugins.module_utils.version import LooseVersion - -REQUESTS_IMP_ERR = None -try: - import requests -except ImportError: - REQUESTS_IMP_ERR = traceback.format_exc() - REQUESTS_FOUND = False -else: - REQUESTS_FOUND = True - -# -# Requires the clc-python-sdk. -# sudo pip install clc-sdk -# -CLC_IMP_ERR = None -try: - import clc as clc_sdk - from clc import CLCException - from clc import APIFailedResponse -except ImportError: - CLC_IMP_ERR = traceback.format_exc() - CLC_FOUND = False - clc_sdk = None -else: - CLC_FOUND = True - -from ansible.module_utils.basic import AnsibleModule, missing_required_lib - - -class ClcModifyServer: - clc = clc_sdk - - def __init__(self, module): - """ - Construct module - """ - self.clc = clc_sdk - self.module = module - - if not CLC_FOUND: - self.module.fail_json(msg=missing_required_lib('clc-sdk'), exception=CLC_IMP_ERR) - if not REQUESTS_FOUND: - self.module.fail_json(msg=missing_required_lib('requests'), exception=REQUESTS_IMP_ERR) - if requests.__version__ and LooseVersion(requests.__version__) < LooseVersion('2.5.0'): - self.module.fail_json( - msg='requests library version should be >= 2.5.0') - - self._set_user_agent(self.clc) - - def process_request(self): - """ - Process the request - Main Code Path - :return: Returns with either an exit_json or fail_json - """ - self._set_clc_credentials_from_env() - - p = self.module.params - cpu = p.get('cpu') - memory = p.get('memory') - state = p.get('state') - if state == 'absent' and (cpu or memory): - return self.module.fail_json( - msg='\'absent\' state is not supported for \'cpu\' and \'memory\' arguments') 
- - server_ids = p['server_ids'] - if not isinstance(server_ids, list): - return self.module.fail_json( - msg='server_ids needs to be a list of instances to modify: %s' % - server_ids) - - (changed, server_dict_array, changed_server_ids) = self._modify_servers( - server_ids=server_ids) - - self.module.exit_json( - changed=changed, - server_ids=changed_server_ids, - servers=server_dict_array) - - @staticmethod - def _define_module_argument_spec(): - """ - Define the argument spec for the ansible module - :return: argument spec dictionary - """ - argument_spec = dict( - server_ids=dict(type='list', required=True, elements='str'), - state=dict(default='present', choices=['present', 'absent']), - cpu=dict(), - memory=dict(), - anti_affinity_policy_id=dict(), - anti_affinity_policy_name=dict(), - alert_policy_id=dict(), - alert_policy_name=dict(), - wait=dict(type='bool', default=True) - ) - mutually_exclusive = [ - ['anti_affinity_policy_id', 'anti_affinity_policy_name'], - ['alert_policy_id', 'alert_policy_name'] - ] - return {"argument_spec": argument_spec, - "mutually_exclusive": mutually_exclusive} - - def _set_clc_credentials_from_env(self): - """ - Set the CLC Credentials on the sdk by reading environment variables - :return: none - """ - env = os.environ - v2_api_token = env.get('CLC_V2_API_TOKEN', False) - v2_api_username = env.get('CLC_V2_API_USERNAME', False) - v2_api_passwd = env.get('CLC_V2_API_PASSWD', False) - clc_alias = env.get('CLC_ACCT_ALIAS', False) - api_url = env.get('CLC_V2_API_URL', False) - - if api_url: - self.clc.defaults.ENDPOINT_URL_V2 = api_url - - if v2_api_token and clc_alias: - self.clc._LOGIN_TOKEN_V2 = v2_api_token - self.clc._V2_ENABLED = True - self.clc.ALIAS = clc_alias - elif v2_api_username and v2_api_passwd: - self.clc.v2.SetCredentials( - api_username=v2_api_username, - api_passwd=v2_api_passwd) - else: - return self.module.fail_json( - msg="You must set the CLC_V2_API_USERNAME and CLC_V2_API_PASSWD " - "environment variables") 
- - def _get_servers_from_clc(self, server_list, message): - """ - Internal function to fetch list of CLC server objects from a list of server ids - :param server_list: The list of server ids - :param message: the error message to throw in case of any error - :return the list of CLC server objects - """ - try: - return self.clc.v2.Servers(server_list).servers - except CLCException as ex: - return self.module.fail_json(msg=message + ': %s' % ex.message) - - def _modify_servers(self, server_ids): - """ - modify the servers configuration on the provided list - :param server_ids: list of servers to modify - :return: a list of dictionaries with server information about the servers that were modified - """ - p = self.module.params - state = p.get('state') - server_params = { - 'cpu': p.get('cpu'), - 'memory': p.get('memory'), - 'anti_affinity_policy_id': p.get('anti_affinity_policy_id'), - 'anti_affinity_policy_name': p.get('anti_affinity_policy_name'), - 'alert_policy_id': p.get('alert_policy_id'), - 'alert_policy_name': p.get('alert_policy_name'), - } - changed = False - server_changed = False - aa_changed = False - ap_changed = False - server_dict_array = [] - result_server_ids = [] - request_list = [] - changed_servers = [] - - if not isinstance(server_ids, list) or len(server_ids) < 1: - return self.module.fail_json( - msg='server_ids should be a list of servers, aborting') - - servers = self._get_servers_from_clc( - server_ids, - 'Failed to obtain server list from the CLC API') - for server in servers: - if state == 'present': - server_changed, server_result = self._ensure_server_config( - server, server_params) - if server_result: - request_list.append(server_result) - aa_changed = self._ensure_aa_policy_present( - server, - server_params) - ap_changed = self._ensure_alert_policy_present( - server, - server_params) - elif state == 'absent': - aa_changed = self._ensure_aa_policy_absent( - server, - server_params) - ap_changed = self._ensure_alert_policy_absent( - 
server, - server_params) - if server_changed or aa_changed or ap_changed: - changed_servers.append(server) - changed = True - - self._wait_for_requests(self.module, request_list) - self._refresh_servers(self.module, changed_servers) - - for server in changed_servers: - server_dict_array.append(server.data) - result_server_ids.append(server.id) - - return changed, server_dict_array, result_server_ids - - def _ensure_server_config( - self, server, server_params): - """ - ensures the server is updated with the provided cpu and memory - :param server: the CLC server object - :param server_params: the dictionary of server parameters - :return: (changed, group) - - changed: Boolean whether a change was made - result: The result from the CLC API call - """ - cpu = server_params.get('cpu') - memory = server_params.get('memory') - changed = False - result = None - - if not cpu: - cpu = server.cpu - if not memory: - memory = server.memory - if memory != server.memory or cpu != server.cpu: - if not self.module.check_mode: - result = self._modify_clc_server( - self.clc, - self.module, - server.id, - cpu, - memory) - changed = True - return changed, result - - @staticmethod - def _modify_clc_server(clc, module, server_id, cpu, memory): - """ - Modify the memory or CPU of a clc server. 
- :param clc: the clc-sdk instance to use - :param module: the AnsibleModule object - :param server_id: id of the server to modify - :param cpu: the new cpu value - :param memory: the new memory value - :return: the result of CLC API call - """ - result = None - acct_alias = clc.v2.Account.GetAlias() - try: - # Update the server configuration - job_obj = clc.v2.API.Call('PATCH', - 'servers/%s/%s' % (acct_alias, - server_id), - json.dumps([{"op": "set", - "member": "memory", - "value": memory}, - {"op": "set", - "member": "cpu", - "value": cpu}])) - result = clc.v2.Requests(job_obj) - except APIFailedResponse as ex: - module.fail_json( - msg='Unable to update the server configuration for server : "{0}". {1}'.format( - server_id, str(ex.response_text))) - return result - - @staticmethod - def _wait_for_requests(module, request_list): - """ - Block until server provisioning requests are completed. - :param module: the AnsibleModule object - :param request_list: a list of clc-sdk.Request instances - :return: none - """ - wait = module.params.get('wait') - if wait: - # Requests.WaitUntilComplete() returns the count of failed requests - failed_requests_count = sum( - [request.WaitUntilComplete() for request in request_list]) - - if failed_requests_count > 0: - module.fail_json( - msg='Unable to process modify server request') - - @staticmethod - def _refresh_servers(module, servers): - """ - Loop through a list of servers and refresh them. - :param module: the AnsibleModule object - :param servers: list of clc-sdk.Server instances to refresh - :return: none - """ - for server in servers: - try: - server.Refresh() - except CLCException as ex: - module.fail_json(msg='Unable to refresh the server {0}. 
{1}'.format( - server.id, ex.message - )) - - def _ensure_aa_policy_present( - self, server, server_params): - """ - ensures the server is updated with the provided anti affinity policy - :param server: the CLC server object - :param server_params: the dictionary of server parameters - :return: (changed, group) - - changed: Boolean whether a change was made - result: The result from the CLC API call - """ - changed = False - acct_alias = self.clc.v2.Account.GetAlias() - - aa_policy_id = server_params.get('anti_affinity_policy_id') - aa_policy_name = server_params.get('anti_affinity_policy_name') - if not aa_policy_id and aa_policy_name: - aa_policy_id = self._get_aa_policy_id_by_name( - self.clc, - self.module, - acct_alias, - aa_policy_name) - current_aa_policy_id = self._get_aa_policy_id_of_server( - self.clc, - self.module, - acct_alias, - server.id) - - if aa_policy_id and aa_policy_id != current_aa_policy_id: - self._modify_aa_policy( - self.clc, - self.module, - acct_alias, - server.id, - aa_policy_id) - changed = True - return changed - - def _ensure_aa_policy_absent( - self, server, server_params): - """ - ensures the provided anti affinity policy is removed from the server - :param server: the CLC server object - :param server_params: the dictionary of server parameters - :return: (changed, group) - - changed: Boolean whether a change was made - result: The result from the CLC API call - """ - changed = False - acct_alias = self.clc.v2.Account.GetAlias() - aa_policy_id = server_params.get('anti_affinity_policy_id') - aa_policy_name = server_params.get('anti_affinity_policy_name') - if not aa_policy_id and aa_policy_name: - aa_policy_id = self._get_aa_policy_id_by_name( - self.clc, - self.module, - acct_alias, - aa_policy_name) - current_aa_policy_id = self._get_aa_policy_id_of_server( - self.clc, - self.module, - acct_alias, - server.id) - - if aa_policy_id and aa_policy_id == current_aa_policy_id: - self._delete_aa_policy( - self.clc, - self.module, - 
acct_alias, - server.id) - changed = True - return changed - - @staticmethod - def _modify_aa_policy(clc, module, acct_alias, server_id, aa_policy_id): - """ - modifies the anti affinity policy of the CLC server - :param clc: the clc-sdk instance to use - :param module: the AnsibleModule object - :param acct_alias: the CLC account alias - :param server_id: the CLC server id - :param aa_policy_id: the anti affinity policy id - :return: result: The result from the CLC API call - """ - result = None - if not module.check_mode: - try: - result = clc.v2.API.Call('PUT', - 'servers/%s/%s/antiAffinityPolicy' % ( - acct_alias, - server_id), - json.dumps({"id": aa_policy_id})) - except APIFailedResponse as ex: - module.fail_json( - msg='Unable to modify anti affinity policy to server : "{0}". {1}'.format( - server_id, str(ex.response_text))) - return result - - @staticmethod - def _delete_aa_policy(clc, module, acct_alias, server_id): - """ - Delete the anti affinity policy of the CLC server - :param clc: the clc-sdk instance to use - :param module: the AnsibleModule object - :param acct_alias: the CLC account alias - :param server_id: the CLC server id - :return: result: The result from the CLC API call - """ - result = None - if not module.check_mode: - try: - result = clc.v2.API.Call('DELETE', - 'servers/%s/%s/antiAffinityPolicy' % ( - acct_alias, - server_id), - json.dumps({})) - except APIFailedResponse as ex: - module.fail_json( - msg='Unable to delete anti affinity policy to server : "{0}". 
{1}'.format( - server_id, str(ex.response_text))) - return result - - @staticmethod - def _get_aa_policy_id_by_name(clc, module, alias, aa_policy_name): - """ - retrieves the anti affinity policy id of the server based on the name of the policy - :param clc: the clc-sdk instance to use - :param module: the AnsibleModule object - :param alias: the CLC account alias - :param aa_policy_name: the anti affinity policy name - :return: aa_policy_id: The anti affinity policy id - """ - aa_policy_id = None - try: - aa_policies = clc.v2.API.Call(method='GET', - url='antiAffinityPolicies/%s' % alias) - except APIFailedResponse as ex: - return module.fail_json( - msg='Unable to fetch anti affinity policies from account alias : "{0}". {1}'.format( - alias, str(ex.response_text))) - for aa_policy in aa_policies.get('items'): - if aa_policy.get('name') == aa_policy_name: - if not aa_policy_id: - aa_policy_id = aa_policy.get('id') - else: - return module.fail_json( - msg='multiple anti affinity policies were found with policy name : %s' % aa_policy_name) - if not aa_policy_id: - module.fail_json( - msg='No anti affinity policy was found with policy name : %s' % aa_policy_name) - return aa_policy_id - - @staticmethod - def _get_aa_policy_id_of_server(clc, module, alias, server_id): - """ - retrieves the anti affinity policy id of the server based on the CLC server id - :param clc: the clc-sdk instance to use - :param module: the AnsibleModule object - :param alias: the CLC account alias - :param server_id: the CLC server id - :return: aa_policy_id: The anti affinity policy id - """ - aa_policy_id = None - try: - result = clc.v2.API.Call( - method='GET', url='servers/%s/%s/antiAffinityPolicy' % - (alias, server_id)) - aa_policy_id = result.get('id') - except APIFailedResponse as ex: - if ex.response_status_code != 404: - module.fail_json(msg='Unable to fetch anti affinity policy for server "{0}". 
{1}'.format( - server_id, str(ex.response_text))) - return aa_policy_id - - def _ensure_alert_policy_present( - self, server, server_params): - """ - ensures the server is updated with the provided alert policy - :param server: the CLC server object - :param server_params: the dictionary of server parameters - :return: (changed, group) - - changed: Boolean whether a change was made - result: The result from the CLC API call - """ - changed = False - acct_alias = self.clc.v2.Account.GetAlias() - alert_policy_id = server_params.get('alert_policy_id') - alert_policy_name = server_params.get('alert_policy_name') - if not alert_policy_id and alert_policy_name: - alert_policy_id = self._get_alert_policy_id_by_name( - self.clc, - self.module, - acct_alias, - alert_policy_name) - if alert_policy_id and not self._alert_policy_exists( - server, alert_policy_id): - self._add_alert_policy_to_server( - self.clc, - self.module, - acct_alias, - server.id, - alert_policy_id) - changed = True - return changed - - def _ensure_alert_policy_absent( - self, server, server_params): - """ - ensures the alert policy is removed from the server - :param server: the CLC server object - :param server_params: the dictionary of server parameters - :return: (changed, group) - - changed: Boolean whether a change was made - result: The result from the CLC API call - """ - changed = False - - acct_alias = self.clc.v2.Account.GetAlias() - alert_policy_id = server_params.get('alert_policy_id') - alert_policy_name = server_params.get('alert_policy_name') - if not alert_policy_id and alert_policy_name: - alert_policy_id = self._get_alert_policy_id_by_name( - self.clc, - self.module, - acct_alias, - alert_policy_name) - - if alert_policy_id and self._alert_policy_exists( - server, alert_policy_id): - self._remove_alert_policy_to_server( - self.clc, - self.module, - acct_alias, - server.id, - alert_policy_id) - changed = True - return changed - - @staticmethod - def _add_alert_policy_to_server( - clc, 
module, acct_alias, server_id, alert_policy_id): - """ - add the alert policy to CLC server - :param clc: the clc-sdk instance to use - :param module: the AnsibleModule object - :param acct_alias: the CLC account alias - :param server_id: the CLC server id - :param alert_policy_id: the alert policy id - :return: result: The result from the CLC API call - """ - result = None - if not module.check_mode: - try: - result = clc.v2.API.Call('POST', - 'servers/%s/%s/alertPolicies' % ( - acct_alias, - server_id), - json.dumps({"id": alert_policy_id})) - except APIFailedResponse as ex: - module.fail_json(msg='Unable to set alert policy to the server : "{0}". {1}'.format( - server_id, str(ex.response_text))) - return result - - @staticmethod - def _remove_alert_policy_to_server( - clc, module, acct_alias, server_id, alert_policy_id): - """ - remove the alert policy to the CLC server - :param clc: the clc-sdk instance to use - :param module: the AnsibleModule object - :param acct_alias: the CLC account alias - :param server_id: the CLC server id - :param alert_policy_id: the alert policy id - :return: result: The result from the CLC API call - """ - result = None - if not module.check_mode: - try: - result = clc.v2.API.Call('DELETE', - 'servers/%s/%s/alertPolicies/%s' - % (acct_alias, server_id, alert_policy_id)) - except APIFailedResponse as ex: - module.fail_json(msg='Unable to remove alert policy from the server : "{0}". 
{1}'.format( - server_id, str(ex.response_text))) - return result - - @staticmethod - def _get_alert_policy_id_by_name(clc, module, alias, alert_policy_name): - """ - retrieves the alert policy id of the server based on the name of the policy - :param clc: the clc-sdk instance to use - :param module: the AnsibleModule object - :param alias: the CLC account alias - :param alert_policy_name: the alert policy name - :return: alert_policy_id: The alert policy id - """ - alert_policy_id = None - try: - alert_policies = clc.v2.API.Call(method='GET', - url='alertPolicies/%s' % alias) - except APIFailedResponse as ex: - return module.fail_json(msg='Unable to fetch alert policies for account : "{0}". {1}'.format( - alias, str(ex.response_text))) - for alert_policy in alert_policies.get('items'): - if alert_policy.get('name') == alert_policy_name: - if not alert_policy_id: - alert_policy_id = alert_policy.get('id') - else: - return module.fail_json( - msg='multiple alert policies were found with policy name : %s' % alert_policy_name) - return alert_policy_id - - @staticmethod - def _alert_policy_exists(server, alert_policy_id): - """ - Checks if the alert policy exists for the server - :param server: the clc server object - :param alert_policy_id: the alert policy - :return: True: if the given alert policy id associated to the server, False otherwise - """ - result = False - alert_policies = server.alertPolicies - if alert_policies: - for alert_policy in alert_policies: - if alert_policy.get('id') == alert_policy_id: - result = True - return result - - @staticmethod - def _set_user_agent(clc): - if hasattr(clc, 'SetRequestsSession'): - agent_string = "ClcAnsibleModule/" + __version__ - ses = requests.Session() - ses.headers.update({"Api-Client": agent_string}) - ses.headers['User-Agent'] += " " + agent_string - clc.SetRequestsSession(ses) - - -def main(): - """ - The main function. Instantiates the module and calls process_request. 
- :return: none - """ - - argument_dict = ClcModifyServer._define_module_argument_spec() - module = AnsibleModule(supports_check_mode=True, **argument_dict) - clc_modify_server = ClcModifyServer(module) - clc_modify_server.process_request() - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/cloud/centurylink/clc_publicip.py b/plugins/modules/cloud/centurylink/clc_publicip.py deleted file mode 100644 index 3b4fcc4eed..0000000000 --- a/plugins/modules/cloud/centurylink/clc_publicip.py +++ /dev/null @@ -1,361 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# -# Copyright (c) 2015 CenturyLink -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' -module: clc_publicip -short_description: Add and Delete public ips on servers in CenturyLink Cloud. -description: - - An Ansible module to add or delete public ip addresses on an existing server or servers in CenturyLink Cloud. -options: - protocol: - description: - - The protocol that the public IP will listen for. - type: str - default: TCP - choices: ['TCP', 'UDP', 'ICMP'] - ports: - description: - - A list of ports to expose. This is required when state is 'present' - type: list - elements: int - server_ids: - description: - - A list of servers to create public ips on. - type: list - required: True - elements: str - state: - description: - - Determine whether to create or delete public IPs. If present module will not create a second public ip if one - already exists. - type: str - default: present - choices: ['present', 'absent'] - wait: - description: - - Whether to wait for the tasks to finish before returning. 
- type: bool - default: 'yes' -requirements: - - python = 2.7 - - requests >= 2.5.0 - - clc-sdk -author: "CLC Runner (@clc-runner)" -notes: - - To use this module, it is required to set the below environment variables which enables access to the - Centurylink Cloud - - CLC_V2_API_USERNAME, the account login id for the centurylink cloud - - CLC_V2_API_PASSWORD, the account password for the centurylink cloud - - Alternatively, the module accepts the API token and account alias. The API token can be generated using the - CLC account login and password via the HTTP api call @ https://api.ctl.io/v2/authentication/login - - CLC_V2_API_TOKEN, the API token generated from https://api.ctl.io/v2/authentication/login - - CLC_ACCT_ALIAS, the account alias associated with the centurylink cloud - - Users can set CLC_V2_API_URL to specify an endpoint for pointing to a different CLC environment. -''' - -EXAMPLES = ''' -# Note - You must set the CLC_V2_API_USERNAME And CLC_V2_API_PASSWD Environment variables before running these examples - -- name: Add Public IP to Server - hosts: localhost - gather_facts: False - connection: local - tasks: - - name: Create Public IP For Servers - community.general.clc_publicip: - protocol: TCP - ports: - - 80 - server_ids: - - UC1TEST-SVR01 - - UC1TEST-SVR02 - state: present - register: clc - - - name: Debug - ansible.builtin.debug: - var: clc - -- name: Delete Public IP from Server - hosts: localhost - gather_facts: False - connection: local - tasks: - - name: Create Public IP For Servers - community.general.clc_publicip: - server_ids: - - UC1TEST-SVR01 - - UC1TEST-SVR02 - state: absent - register: clc - - - name: Debug - ansible.builtin.debug: - var: clc -''' - -RETURN = ''' -server_ids: - description: The list of server ids that are changed - returned: success - type: list - sample: - [ - "UC1TEST-SVR01", - "UC1TEST-SVR02" - ] -''' - -__version__ = '${version}' - -import os -import traceback - -from 
ansible_collections.community.general.plugins.module_utils.version import LooseVersion - -REQUESTS_IMP_ERR = None -try: - import requests -except ImportError: - REQUESTS_IMP_ERR = traceback.format_exc() - REQUESTS_FOUND = False -else: - REQUESTS_FOUND = True - -# -# Requires the clc-python-sdk. -# sudo pip install clc-sdk -# -CLC_IMP_ERR = None -try: - import clc as clc_sdk - from clc import CLCException -except ImportError: - CLC_IMP_ERR = traceback.format_exc() - CLC_FOUND = False - clc_sdk = None -else: - CLC_FOUND = True - -from ansible.module_utils.basic import AnsibleModule, missing_required_lib - - -class ClcPublicIp(object): - clc = clc_sdk - module = None - - def __init__(self, module): - """ - Construct module - """ - self.module = module - if not CLC_FOUND: - self.module.fail_json(msg=missing_required_lib('clc-sdk'), exception=CLC_IMP_ERR) - if not REQUESTS_FOUND: - self.module.fail_json(msg=missing_required_lib('requests'), exception=REQUESTS_IMP_ERR) - if requests.__version__ and LooseVersion(requests.__version__) < LooseVersion('2.5.0'): - self.module.fail_json( - msg='requests library version should be >= 2.5.0') - - self._set_user_agent(self.clc) - - def process_request(self): - """ - Process the request - Main Code Path - :return: Returns with either an exit_json or fail_json - """ - self._set_clc_credentials_from_env() - params = self.module.params - server_ids = params['server_ids'] - ports = params['ports'] - protocol = params['protocol'] - state = params['state'] - - if state == 'present': - changed, changed_server_ids, requests = self.ensure_public_ip_present( - server_ids=server_ids, protocol=protocol, ports=ports) - elif state == 'absent': - changed, changed_server_ids, requests = self.ensure_public_ip_absent( - server_ids=server_ids) - else: - return self.module.fail_json(msg="Unknown State: " + state) - self._wait_for_requests_to_complete(requests) - return self.module.exit_json(changed=changed, - server_ids=changed_server_ids) - - 
@staticmethod - def _define_module_argument_spec(): - """ - Define the argument spec for the ansible module - :return: argument spec dictionary - """ - argument_spec = dict( - server_ids=dict(type='list', required=True, elements='str'), - protocol=dict(default='TCP', choices=['TCP', 'UDP', 'ICMP']), - ports=dict(type='list', elements='int'), - wait=dict(type='bool', default=True), - state=dict(default='present', choices=['present', 'absent']), - ) - return argument_spec - - def ensure_public_ip_present(self, server_ids, protocol, ports): - """ - Ensures the given server ids having the public ip available - :param server_ids: the list of server ids - :param protocol: the ip protocol - :param ports: the list of ports to expose - :return: (changed, changed_server_ids, results) - changed: A flag indicating if there is any change - changed_server_ids : the list of server ids that are changed - results: The result list from clc public ip call - """ - changed = False - results = [] - changed_server_ids = [] - servers = self._get_servers_from_clc( - server_ids, - 'Failed to obtain server list from the CLC API') - servers_to_change = [ - server for server in servers if len( - server.PublicIPs().public_ips) == 0] - ports_to_expose = [{'protocol': protocol, 'port': port} - for port in ports] - for server in servers_to_change: - if not self.module.check_mode: - result = self._add_publicip_to_server(server, ports_to_expose) - results.append(result) - changed_server_ids.append(server.id) - changed = True - return changed, changed_server_ids, results - - def _add_publicip_to_server(self, server, ports_to_expose): - result = None - try: - result = server.PublicIPs().Add(ports_to_expose) - except CLCException as ex: - self.module.fail_json(msg='Failed to add public ip to the server : {0}. 
{1}'.format( - server.id, ex.response_text - )) - return result - - def ensure_public_ip_absent(self, server_ids): - """ - Ensures the given server ids having the public ip removed if there is any - :param server_ids: the list of server ids - :return: (changed, changed_server_ids, results) - changed: A flag indicating if there is any change - changed_server_ids : the list of server ids that are changed - results: The result list from clc public ip call - """ - changed = False - results = [] - changed_server_ids = [] - servers = self._get_servers_from_clc( - server_ids, - 'Failed to obtain server list from the CLC API') - servers_to_change = [ - server for server in servers if len( - server.PublicIPs().public_ips) > 0] - for server in servers_to_change: - if not self.module.check_mode: - result = self._remove_publicip_from_server(server) - results.append(result) - changed_server_ids.append(server.id) - changed = True - return changed, changed_server_ids, results - - def _remove_publicip_from_server(self, server): - result = None - try: - for ip_address in server.PublicIPs().public_ips: - result = ip_address.Delete() - except CLCException as ex: - self.module.fail_json(msg='Failed to remove public ip from the server : {0}. 
{1}'.format( - server.id, ex.response_text - )) - return result - - def _wait_for_requests_to_complete(self, requests_lst): - """ - Waits until the CLC requests are complete if the wait argument is True - :param requests_lst: The list of CLC request objects - :return: none - """ - if not self.module.params['wait']: - return - for request in requests_lst: - request.WaitUntilComplete() - for request_details in request.requests: - if request_details.Status() != 'succeeded': - self.module.fail_json( - msg='Unable to process public ip request') - - def _set_clc_credentials_from_env(self): - """ - Set the CLC Credentials on the sdk by reading environment variables - :return: none - """ - env = os.environ - v2_api_token = env.get('CLC_V2_API_TOKEN', False) - v2_api_username = env.get('CLC_V2_API_USERNAME', False) - v2_api_passwd = env.get('CLC_V2_API_PASSWD', False) - clc_alias = env.get('CLC_ACCT_ALIAS', False) - api_url = env.get('CLC_V2_API_URL', False) - - if api_url: - self.clc.defaults.ENDPOINT_URL_V2 = api_url - - if v2_api_token and clc_alias: - self.clc._LOGIN_TOKEN_V2 = v2_api_token - self.clc._V2_ENABLED = True - self.clc.ALIAS = clc_alias - elif v2_api_username and v2_api_passwd: - self.clc.v2.SetCredentials( - api_username=v2_api_username, - api_passwd=v2_api_passwd) - else: - return self.module.fail_json( - msg="You must set the CLC_V2_API_USERNAME and CLC_V2_API_PASSWD " - "environment variables") - - def _get_servers_from_clc(self, server_ids, message): - """ - Gets list of servers form CLC api - """ - try: - return self.clc.v2.Servers(server_ids).servers - except CLCException as exception: - self.module.fail_json(msg=message + ': %s' % exception) - - @staticmethod - def _set_user_agent(clc): - if hasattr(clc, 'SetRequestsSession'): - agent_string = "ClcAnsibleModule/" + __version__ - ses = requests.Session() - ses.headers.update({"Api-Client": agent_string}) - ses.headers['User-Agent'] += " " + agent_string - clc.SetRequestsSession(ses) - - -def main(): - 
""" - The main function. Instantiates the module and calls process_request. - :return: none - """ - module = AnsibleModule( - argument_spec=ClcPublicIp._define_module_argument_spec(), - supports_check_mode=True - ) - clc_public_ip = ClcPublicIp(module) - clc_public_ip.process_request() - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/cloud/centurylink/clc_server.py b/plugins/modules/cloud/centurylink/clc_server.py deleted file mode 100644 index b58e39edd7..0000000000 --- a/plugins/modules/cloud/centurylink/clc_server.py +++ /dev/null @@ -1,1562 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# -# Copyright (c) 2015 CenturyLink -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' -module: clc_server -short_description: Create, Delete, Start and Stop servers in CenturyLink Cloud. -description: - - An Ansible module to Create, Delete, Start and Stop servers in CenturyLink Cloud. -options: - additional_disks: - description: - - The list of additional disks for the server - type: list - elements: dict - default: [] - add_public_ip: - description: - - Whether to add a public ip to the server - type: bool - default: 'no' - alias: - description: - - The account alias to provision the servers under. - type: str - anti_affinity_policy_id: - description: - - The anti-affinity policy to assign to the server. This is mutually exclusive with 'anti_affinity_policy_name'. - type: str - anti_affinity_policy_name: - description: - - The anti-affinity policy to assign to the server. This is mutually exclusive with 'anti_affinity_policy_id'. - type: str - alert_policy_id: - description: - - The alert policy to assign to the server. This is mutually exclusive with 'alert_policy_name'. - type: str - alert_policy_name: - description: - - The alert policy to assign to the server. 
This is mutually exclusive with 'alert_policy_id'. - type: str - count: - description: - - The number of servers to build (mutually exclusive with exact_count) - default: 1 - type: int - count_group: - description: - - Required when exact_count is specified. The Server Group use to determine how many servers to deploy. - type: str - cpu: - description: - - How many CPUs to provision on the server - default: 1 - type: int - cpu_autoscale_policy_id: - description: - - The autoscale policy to assign to the server. - type: str - custom_fields: - description: - - The list of custom fields to set on the server. - type: list - default: [] - elements: dict - description: - description: - - The description to set for the server. - type: str - exact_count: - description: - - Run in idempotent mode. Will insure that this exact number of servers are running in the provided group, - creating and deleting them to reach that count. Requires count_group to be set. - type: int - group: - description: - - The Server Group to create servers under. - type: str - default: 'Default Group' - ip_address: - description: - - The IP Address for the server. One is assigned if not provided. - type: str - location: - description: - - The Datacenter to create servers in. - type: str - managed_os: - description: - - Whether to create the server as 'Managed' or not. - type: bool - default: 'no' - required: False - memory: - description: - - Memory in GB. - type: int - default: 1 - name: - description: - - A 1 to 6 character identifier to use for the server. This is required when state is 'present' - type: str - network_id: - description: - - The network UUID on which to create servers. - type: str - packages: - description: - - The list of blue print packages to run on the server after its created. - type: list - elements: dict - default: [] - password: - description: - - Password for the administrator / root user - type: str - primary_dns: - description: - - Primary DNS used by the server. 
- type: str - public_ip_protocol: - description: - - The protocol to use for the public ip if add_public_ip is set to True. - type: str - default: 'TCP' - choices: ['TCP', 'UDP', 'ICMP'] - public_ip_ports: - description: - - A list of ports to allow on the firewall to the servers public ip, if add_public_ip is set to True. - type: list - elements: dict - default: [] - secondary_dns: - description: - - Secondary DNS used by the server. - type: str - server_ids: - description: - - Required for started, stopped, and absent states. - A list of server Ids to insure are started, stopped, or absent. - type: list - default: [] - elements: str - source_server_password: - description: - - The password for the source server if a clone is specified. - type: str - state: - description: - - The state to insure that the provided resources are in. - type: str - default: 'present' - choices: ['present', 'absent', 'started', 'stopped'] - storage_type: - description: - - The type of storage to attach to the server. - type: str - default: 'standard' - choices: ['standard', 'hyperscale'] - template: - description: - - The template to use for server creation. Will search for a template if a partial string is provided. - This is required when state is 'present' - type: str - ttl: - description: - - The time to live for the server in seconds. The server will be deleted when this time expires. - type: str - type: - description: - - The type of server to create. - type: str - default: 'standard' - choices: ['standard', 'hyperscale', 'bareMetal'] - configuration_id: - description: - - Only required for bare metal servers. - Specifies the identifier for the specific configuration type of bare metal server to deploy. - type: str - os_type: - description: - - Only required for bare metal servers. - Specifies the OS to provision with the bare metal server. 
- type: str - choices: ['redHat6_64Bit', 'centOS6_64Bit', 'windows2012R2Standard_64Bit', 'ubuntu14_64Bit'] - wait: - description: - - Whether to wait for the provisioning tasks to finish before returning. - type: bool - default: 'yes' -requirements: - - python = 2.7 - - requests >= 2.5.0 - - clc-sdk -author: "CLC Runner (@clc-runner)" -notes: - - To use this module, it is required to set the below environment variables which enables access to the - Centurylink Cloud - - CLC_V2_API_USERNAME, the account login id for the centurylink cloud - - CLC_V2_API_PASSWORD, the account password for the centurylink cloud - - Alternatively, the module accepts the API token and account alias. The API token can be generated using the - CLC account login and password via the HTTP api call @ https://api.ctl.io/v2/authentication/login - - CLC_V2_API_TOKEN, the API token generated from https://api.ctl.io/v2/authentication/login - - CLC_ACCT_ALIAS, the account alias associated with the centurylink cloud - - Users can set CLC_V2_API_URL to specify an endpoint for pointing to a different CLC environment. 
-''' - -EXAMPLES = ''' -# Note - You must set the CLC_V2_API_USERNAME And CLC_V2_API_PASSWD Environment variables before running these examples - -- name: Provision a single Ubuntu Server - community.general.clc_server: - name: test - template: ubuntu-14-64 - count: 1 - group: Default Group - state: present - -- name: Ensure 'Default Group' has exactly 5 servers - community.general.clc_server: - name: test - template: ubuntu-14-64 - exact_count: 5 - count_group: Default Group - group: Default Group - -- name: Stop a Server - community.general.clc_server: - server_ids: - - UC1ACCT-TEST01 - state: stopped - -- name: Start a Server - community.general.clc_server: - server_ids: - - UC1ACCT-TEST01 - state: started - -- name: Delete a Server - community.general.clc_server: - server_ids: - - UC1ACCT-TEST01 - state: absent -''' - -RETURN = ''' -server_ids: - description: The list of server ids that are created - returned: success - type: list - sample: - [ - "UC1TEST-SVR01", - "UC1TEST-SVR02" - ] -partially_created_server_ids: - description: The list of server ids that are partially created - returned: success - type: list - sample: - [ - "UC1TEST-SVR01", - "UC1TEST-SVR02" - ] -servers: - description: The list of server objects returned from CLC - returned: success - type: list - sample: - [ - { - "changeInfo":{ - "createdBy":"service.wfad", - "createdDate":1438196820, - "modifiedBy":"service.wfad", - "modifiedDate":1438196820 - }, - "description":"test-server", - "details":{ - "alertPolicies":[ - - ], - "cpu":1, - "customFields":[ - - ], - "diskCount":3, - "disks":[ - { - "id":"0:0", - "partitionPaths":[ - - ], - "sizeGB":1 - }, - { - "id":"0:1", - "partitionPaths":[ - - ], - "sizeGB":2 - }, - { - "id":"0:2", - "partitionPaths":[ - - ], - "sizeGB":14 - } - ], - "hostName":"", - "inMaintenanceMode":false, - "ipAddresses":[ - { - "internal":"10.1.1.1" - } - ], - "memoryGB":1, - "memoryMB":1024, - "partitions":[ - - ], - "powerState":"started", - "snapshots":[ - - ], - 
"storageGB":17 - }, - "groupId":"086ac1dfe0b6411989e8d1b77c4065f0", - "id":"test-server", - "ipaddress":"10.120.45.23", - "isTemplate":false, - "links":[ - { - "href":"/v2/servers/wfad/test-server", - "id":"test-server", - "rel":"self", - "verbs":[ - "GET", - "PATCH", - "DELETE" - ] - }, - { - "href":"/v2/groups/wfad/086ac1dfe0b6411989e8d1b77c4065f0", - "id":"086ac1dfe0b6411989e8d1b77c4065f0", - "rel":"group" - }, - { - "href":"/v2/accounts/wfad", - "id":"wfad", - "rel":"account" - }, - { - "href":"/v2/billing/wfad/serverPricing/test-server", - "rel":"billing" - }, - { - "href":"/v2/servers/wfad/test-server/publicIPAddresses", - "rel":"publicIPAddresses", - "verbs":[ - "POST" - ] - }, - { - "href":"/v2/servers/wfad/test-server/credentials", - "rel":"credentials" - }, - { - "href":"/v2/servers/wfad/test-server/statistics", - "rel":"statistics" - }, - { - "href":"/v2/servers/wfad/510ec21ae82d4dc89d28479753bf736a/upcomingScheduledActivities", - "rel":"upcomingScheduledActivities" - }, - { - "href":"/v2/servers/wfad/510ec21ae82d4dc89d28479753bf736a/scheduledActivities", - "rel":"scheduledActivities", - "verbs":[ - "GET", - "POST" - ] - }, - { - "href":"/v2/servers/wfad/test-server/capabilities", - "rel":"capabilities" - }, - { - "href":"/v2/servers/wfad/test-server/alertPolicies", - "rel":"alertPolicyMappings", - "verbs":[ - "POST" - ] - }, - { - "href":"/v2/servers/wfad/test-server/antiAffinityPolicy", - "rel":"antiAffinityPolicyMapping", - "verbs":[ - "PUT", - "DELETE" - ] - }, - { - "href":"/v2/servers/wfad/test-server/cpuAutoscalePolicy", - "rel":"cpuAutoscalePolicyMapping", - "verbs":[ - "PUT", - "DELETE" - ] - } - ], - "locationId":"UC1", - "name":"test-server", - "os":"ubuntu14_64Bit", - "osType":"Ubuntu 14 64-bit", - "status":"active", - "storageType":"standard", - "type":"standard" - } - ] -''' - -__version__ = '${version}' - -import json -import os -import time -import traceback - -from ansible_collections.community.general.plugins.module_utils.version import 
LooseVersion - -REQUESTS_IMP_ERR = None -try: - import requests -except ImportError: - REQUESTS_IMP_ERR = traceback.format_exc() - REQUESTS_FOUND = False -else: - REQUESTS_FOUND = True - -# -# Requires the clc-python-sdk. -# sudo pip install clc-sdk -# -CLC_IMP_ERR = None -try: - import clc as clc_sdk - from clc import CLCException - from clc import APIFailedResponse -except ImportError: - CLC_IMP_ERR = traceback.format_exc() - CLC_FOUND = False - clc_sdk = None -else: - CLC_FOUND = True - -from ansible.module_utils.basic import AnsibleModule, missing_required_lib - - -class ClcServer: - clc = clc_sdk - - def __init__(self, module): - """ - Construct module - """ - self.clc = clc_sdk - self.module = module - self.group_dict = {} - - if not CLC_FOUND: - self.module.fail_json(msg=missing_required_lib('clc-sdk'), exception=CLC_IMP_ERR) - if not REQUESTS_FOUND: - self.module.fail_json(msg=missing_required_lib('requests'), exception=REQUESTS_IMP_ERR) - if requests.__version__ and LooseVersion(requests.__version__) < LooseVersion('2.5.0'): - self.module.fail_json( - msg='requests library version should be >= 2.5.0') - - self._set_user_agent(self.clc) - - def process_request(self): - """ - Process the request - Main Code Path - :return: Returns with either an exit_json or fail_json - """ - changed = False - new_server_ids = [] - server_dict_array = [] - - self._set_clc_credentials_from_env() - self.module.params = self._validate_module_params( - self.clc, - self.module) - p = self.module.params - state = p.get('state') - - # - # Handle each state - # - partial_servers_ids = [] - if state == 'absent': - server_ids = p['server_ids'] - if not isinstance(server_ids, list): - return self.module.fail_json( - msg='server_ids needs to be a list of instances to delete: %s' % - server_ids) - - (changed, - server_dict_array, - new_server_ids) = self._delete_servers(module=self.module, - clc=self.clc, - server_ids=server_ids) - - elif state in ('started', 'stopped'): - server_ids = 
p.get('server_ids') - if not isinstance(server_ids, list): - return self.module.fail_json( - msg='server_ids needs to be a list of servers to run: %s' % - server_ids) - - (changed, - server_dict_array, - new_server_ids) = self._start_stop_servers(self.module, - self.clc, - server_ids) - - elif state == 'present': - # Changed is always set to true when provisioning new instances - if not p.get('template') and p.get('type') != 'bareMetal': - return self.module.fail_json( - msg='template parameter is required for new instance') - - if p.get('exact_count') is None: - (server_dict_array, - new_server_ids, - partial_servers_ids, - changed) = self._create_servers(self.module, - self.clc) - else: - (server_dict_array, - new_server_ids, - partial_servers_ids, - changed) = self._enforce_count(self.module, - self.clc) - - self.module.exit_json( - changed=changed, - server_ids=new_server_ids, - partially_created_server_ids=partial_servers_ids, - servers=server_dict_array) - - @staticmethod - def _define_module_argument_spec(): - """ - Define the argument spec for the ansible module - :return: argument spec dictionary - """ - argument_spec = dict( - name=dict(), - template=dict(), - group=dict(default='Default Group'), - network_id=dict(), - location=dict(), - cpu=dict(default=1, type='int'), - memory=dict(default=1, type='int'), - alias=dict(), - password=dict(no_log=True), - ip_address=dict(), - storage_type=dict( - default='standard', - choices=[ - 'standard', - 'hyperscale']), - type=dict(default='standard', choices=['standard', 'hyperscale', 'bareMetal']), - primary_dns=dict(), - secondary_dns=dict(), - additional_disks=dict(type='list', default=[], elements='dict'), - custom_fields=dict(type='list', default=[], elements='dict'), - ttl=dict(), - managed_os=dict(type='bool', default=False), - description=dict(), - source_server_password=dict(no_log=True), - cpu_autoscale_policy_id=dict(), - anti_affinity_policy_id=dict(), - anti_affinity_policy_name=dict(), - 
alert_policy_id=dict(), - alert_policy_name=dict(), - packages=dict(type='list', default=[], elements='dict'), - state=dict( - default='present', - choices=[ - 'present', - 'absent', - 'started', - 'stopped']), - count=dict(type='int', default=1), - exact_count=dict(type='int', ), - count_group=dict(), - server_ids=dict(type='list', default=[], elements='str'), - add_public_ip=dict(type='bool', default=False), - public_ip_protocol=dict( - default='TCP', - choices=[ - 'TCP', - 'UDP', - 'ICMP']), - public_ip_ports=dict(type='list', default=[], elements='dict'), - configuration_id=dict(), - os_type=dict(choices=[ - 'redHat6_64Bit', - 'centOS6_64Bit', - 'windows2012R2Standard_64Bit', - 'ubuntu14_64Bit' - ]), - wait=dict(type='bool', default=True)) - - mutually_exclusive = [ - ['exact_count', 'count'], - ['exact_count', 'state'], - ['anti_affinity_policy_id', 'anti_affinity_policy_name'], - ['alert_policy_id', 'alert_policy_name'], - ] - return {"argument_spec": argument_spec, - "mutually_exclusive": mutually_exclusive} - - def _set_clc_credentials_from_env(self): - """ - Set the CLC Credentials on the sdk by reading environment variables - :return: none - """ - env = os.environ - v2_api_token = env.get('CLC_V2_API_TOKEN', False) - v2_api_username = env.get('CLC_V2_API_USERNAME', False) - v2_api_passwd = env.get('CLC_V2_API_PASSWD', False) - clc_alias = env.get('CLC_ACCT_ALIAS', False) - api_url = env.get('CLC_V2_API_URL', False) - if api_url: - self.clc.defaults.ENDPOINT_URL_V2 = api_url - - if v2_api_token and clc_alias: - self.clc._LOGIN_TOKEN_V2 = v2_api_token - self.clc._V2_ENABLED = True - self.clc.ALIAS = clc_alias - elif v2_api_username and v2_api_passwd: - self.clc.v2.SetCredentials( - api_username=v2_api_username, - api_passwd=v2_api_passwd) - else: - return self.module.fail_json( - msg="You must set the CLC_V2_API_USERNAME and CLC_V2_API_PASSWD " - "environment variables") - - @staticmethod - def _validate_module_params(clc, module): - """ - Validate the 
module params, and lookup default values. - :param clc: clc-sdk instance to use - :param module: module to validate - :return: dictionary of validated params - """ - params = module.params - datacenter = ClcServer._find_datacenter(clc, module) - - ClcServer._validate_types(module) - ClcServer._validate_name(module) - - params['alias'] = ClcServer._find_alias(clc, module) - params['cpu'] = ClcServer._find_cpu(clc, module) - params['memory'] = ClcServer._find_memory(clc, module) - params['description'] = ClcServer._find_description(module) - params['ttl'] = ClcServer._find_ttl(clc, module) - params['template'] = ClcServer._find_template_id(module, datacenter) - params['group'] = ClcServer._find_group(module, datacenter).id - params['network_id'] = ClcServer._find_network_id(module, datacenter) - params['anti_affinity_policy_id'] = ClcServer._find_aa_policy_id( - clc, - module) - params['alert_policy_id'] = ClcServer._find_alert_policy_id( - clc, - module) - - return params - - @staticmethod - def _find_datacenter(clc, module): - """ - Find the datacenter by calling the CLC API. - :param clc: clc-sdk instance to use - :param module: module to validate - :return: clc-sdk.Datacenter instance - """ - location = module.params.get('location') - try: - if not location: - account = clc.v2.Account() - location = account.data.get('primaryDataCenter') - data_center = clc.v2.Datacenter(location) - return data_center - except CLCException: - module.fail_json(msg="Unable to find location: {0}".format(location)) - - @staticmethod - def _find_alias(clc, module): - """ - Find or Validate the Account Alias by calling the CLC API - :param clc: clc-sdk instance to use - :param module: module to validate - :return: clc-sdk.Account instance - """ - alias = module.params.get('alias') - if not alias: - try: - alias = clc.v2.Account.GetAlias() - except CLCException as ex: - module.fail_json(msg='Unable to find account alias. 
{0}'.format( - ex.message - )) - return alias - - @staticmethod - def _find_cpu(clc, module): - """ - Find or validate the CPU value by calling the CLC API - :param clc: clc-sdk instance to use - :param module: module to validate - :return: Int value for CPU - """ - cpu = module.params.get('cpu') - group_id = module.params.get('group_id') - alias = module.params.get('alias') - state = module.params.get('state') - - if not cpu and state == 'present': - group = clc.v2.Group(id=group_id, - alias=alias) - if group.Defaults("cpu"): - cpu = group.Defaults("cpu") - else: - module.fail_json( - msg=str("Can\'t determine a default cpu value. Please provide a value for cpu.")) - return cpu - - @staticmethod - def _find_memory(clc, module): - """ - Find or validate the Memory value by calling the CLC API - :param clc: clc-sdk instance to use - :param module: module to validate - :return: Int value for Memory - """ - memory = module.params.get('memory') - group_id = module.params.get('group_id') - alias = module.params.get('alias') - state = module.params.get('state') - - if not memory and state == 'present': - group = clc.v2.Group(id=group_id, - alias=alias) - if group.Defaults("memory"): - memory = group.Defaults("memory") - else: - module.fail_json(msg=str( - "Can\'t determine a default memory value. 
Please provide a value for memory.")) - return memory - - @staticmethod - def _find_description(module): - """ - Set the description module param to name if description is blank - :param module: the module to validate - :return: string description - """ - description = module.params.get('description') - if not description: - description = module.params.get('name') - return description - - @staticmethod - def _validate_types(module): - """ - Validate that type and storage_type are set appropriately, and fail if not - :param module: the module to validate - :return: none - """ - state = module.params.get('state') - server_type = module.params.get( - 'type').lower() if module.params.get('type') else None - storage_type = module.params.get( - 'storage_type').lower() if module.params.get('storage_type') else None - - if state == "present": - if server_type == "standard" and storage_type not in ( - "standard", "premium"): - module.fail_json( - msg=str("Standard VMs must have storage_type = 'standard' or 'premium'")) - - if server_type == "hyperscale" and storage_type != "hyperscale": - module.fail_json( - msg=str("Hyperscale VMs must have storage_type = 'hyperscale'")) - - @staticmethod - def _validate_name(module): - """ - Validate that name is the correct length if provided, fail if it's not - :param module: the module to validate - :return: none - """ - server_name = module.params.get('name') - state = module.params.get('state') - - if state == 'present' and ( - len(server_name) < 1 or len(server_name) > 6): - module.fail_json(msg=str( - "When state = 'present', name must be a string with a minimum length of 1 and a maximum length of 6")) - - @staticmethod - def _find_ttl(clc, module): - """ - Validate that TTL is > 3600 if set, and fail if not - :param clc: clc-sdk instance to use - :param module: module to validate - :return: validated ttl - """ - ttl = module.params.get('ttl') - - if ttl: - if ttl <= 3600: - return module.fail_json(msg=str("Ttl cannot be <= 3600")) 
- else: - ttl = clc.v2.time_utils.SecondsToZuluTS(int(time.time()) + ttl) - return ttl - - @staticmethod - def _find_template_id(module, datacenter): - """ - Find the template id by calling the CLC API. - :param module: the module to validate - :param datacenter: the datacenter to search for the template - :return: a valid clc template id - """ - lookup_template = module.params.get('template') - state = module.params.get('state') - type = module.params.get('type') - result = None - - if state == 'present' and type != 'bareMetal': - try: - result = datacenter.Templates().Search(lookup_template)[0].id - except CLCException: - module.fail_json( - msg=str( - "Unable to find a template: " + - lookup_template + - " in location: " + - datacenter.id)) - return result - - @staticmethod - def _find_network_id(module, datacenter): - """ - Validate the provided network id or return a default. - :param module: the module to validate - :param datacenter: the datacenter to search for a network id - :return: a valid network id - """ - network_id = module.params.get('network_id') - - if not network_id: - try: - network_id = datacenter.Networks().networks[0].id - # -- added for clc-sdk 2.23 compatibility - # datacenter_networks = clc_sdk.v2.Networks( - # networks_lst=datacenter._DeploymentCapabilities()['deployableNetworks']) - # network_id = datacenter_networks.networks[0].id - # -- end - except CLCException: - module.fail_json( - msg=str( - "Unable to find a network in location: " + - datacenter.id)) - - return network_id - - @staticmethod - def _find_aa_policy_id(clc, module): - """ - Validate if the anti affinity policy exist for the given name and throw error if not - :param clc: the clc-sdk instance - :param module: the module to validate - :return: aa_policy_id: the anti affinity policy id of the given name. 
- """ - aa_policy_id = module.params.get('anti_affinity_policy_id') - aa_policy_name = module.params.get('anti_affinity_policy_name') - if not aa_policy_id and aa_policy_name: - alias = module.params.get('alias') - aa_policy_id = ClcServer._get_anti_affinity_policy_id( - clc, - module, - alias, - aa_policy_name) - if not aa_policy_id: - module.fail_json( - msg='No anti affinity policy was found with policy name : %s' % aa_policy_name) - return aa_policy_id - - @staticmethod - def _find_alert_policy_id(clc, module): - """ - Validate if the alert policy exist for the given name and throw error if not - :param clc: the clc-sdk instance - :param module: the module to validate - :return: alert_policy_id: the alert policy id of the given name. - """ - alert_policy_id = module.params.get('alert_policy_id') - alert_policy_name = module.params.get('alert_policy_name') - if not alert_policy_id and alert_policy_name: - alias = module.params.get('alias') - alert_policy_id = ClcServer._get_alert_policy_id_by_name( - clc=clc, - module=module, - alias=alias, - alert_policy_name=alert_policy_name - ) - if not alert_policy_id: - module.fail_json( - msg='No alert policy exist with name : %s' % alert_policy_name) - return alert_policy_id - - def _create_servers(self, module, clc, override_count=None): - """ - Create New Servers in CLC cloud - :param module: the AnsibleModule object - :param clc: the clc-sdk instance to use - :return: a list of dictionaries with server information about the servers that were created - """ - p = module.params - request_list = [] - servers = [] - server_dict_array = [] - created_server_ids = [] - partial_created_servers_ids = [] - - add_public_ip = p.get('add_public_ip') - public_ip_protocol = p.get('public_ip_protocol') - public_ip_ports = p.get('public_ip_ports') - - params = { - 'name': p.get('name'), - 'template': p.get('template'), - 'group_id': p.get('group'), - 'network_id': p.get('network_id'), - 'cpu': p.get('cpu'), - 'memory': p.get('memory'), 
- 'alias': p.get('alias'), - 'password': p.get('password'), - 'ip_address': p.get('ip_address'), - 'storage_type': p.get('storage_type'), - 'type': p.get('type'), - 'primary_dns': p.get('primary_dns'), - 'secondary_dns': p.get('secondary_dns'), - 'additional_disks': p.get('additional_disks'), - 'custom_fields': p.get('custom_fields'), - 'ttl': p.get('ttl'), - 'managed_os': p.get('managed_os'), - 'description': p.get('description'), - 'source_server_password': p.get('source_server_password'), - 'cpu_autoscale_policy_id': p.get('cpu_autoscale_policy_id'), - 'anti_affinity_policy_id': p.get('anti_affinity_policy_id'), - 'packages': p.get('packages'), - 'configuration_id': p.get('configuration_id'), - 'os_type': p.get('os_type') - } - - count = override_count if override_count else p.get('count') - - changed = False if count == 0 else True - - if not changed: - return server_dict_array, created_server_ids, partial_created_servers_ids, changed - for i in range(0, count): - if not module.check_mode: - req = self._create_clc_server(clc=clc, - module=module, - server_params=params) - server = req.requests[0].Server() - request_list.append(req) - servers.append(server) - - self._wait_for_requests(module, request_list) - self._refresh_servers(module, servers) - - ip_failed_servers = self._add_public_ip_to_servers( - module=module, - should_add_public_ip=add_public_ip, - servers=servers, - public_ip_protocol=public_ip_protocol, - public_ip_ports=public_ip_ports) - ap_failed_servers = self._add_alert_policy_to_servers(clc=clc, - module=module, - servers=servers) - - for server in servers: - if server in ip_failed_servers or server in ap_failed_servers: - partial_created_servers_ids.append(server.id) - else: - # reload server details - server = clc.v2.Server(server.id) - server.data['ipaddress'] = server.details[ - 'ipAddresses'][0]['internal'] - - if add_public_ip and len(server.PublicIPs().public_ips) > 0: - server.data['publicip'] = str( - server.PublicIPs().public_ips[0]) - 
created_server_ids.append(server.id) - server_dict_array.append(server.data) - - return server_dict_array, created_server_ids, partial_created_servers_ids, changed - - def _enforce_count(self, module, clc): - """ - Enforce that there is the right number of servers in the provided group. - Starts or stops servers as necessary. - :param module: the AnsibleModule object - :param clc: the clc-sdk instance to use - :return: a list of dictionaries with server information about the servers that were created or deleted - """ - p = module.params - changed = False - count_group = p.get('count_group') - datacenter = ClcServer._find_datacenter(clc, module) - exact_count = p.get('exact_count') - server_dict_array = [] - partial_servers_ids = [] - changed_server_ids = [] - - # fail here if the exact count was specified without filtering - # on a group, as this may lead to a undesired removal of instances - if exact_count and count_group is None: - return module.fail_json( - msg="you must use the 'count_group' option with exact_count") - - servers, running_servers = ClcServer._find_running_servers_by_group( - module, datacenter, count_group) - - if len(running_servers) == exact_count: - changed = False - - elif len(running_servers) < exact_count: - to_create = exact_count - len(running_servers) - server_dict_array, changed_server_ids, partial_servers_ids, changed \ - = self._create_servers(module, clc, override_count=to_create) - - for server in server_dict_array: - running_servers.append(server) - - elif len(running_servers) > exact_count: - to_remove = len(running_servers) - exact_count - all_server_ids = sorted([x.id for x in running_servers]) - remove_ids = all_server_ids[0:to_remove] - - (changed, server_dict_array, changed_server_ids) \ - = ClcServer._delete_servers(module, clc, remove_ids) - - return server_dict_array, changed_server_ids, partial_servers_ids, changed - - @staticmethod - def _wait_for_requests(module, request_list): - """ - Block until server provisioning 
requests are completed. - :param module: the AnsibleModule object - :param request_list: a list of clc-sdk.Request instances - :return: none - """ - wait = module.params.get('wait') - if wait: - # Requests.WaitUntilComplete() returns the count of failed requests - failed_requests_count = sum( - [request.WaitUntilComplete() for request in request_list]) - - if failed_requests_count > 0: - module.fail_json( - msg='Unable to process server request') - - @staticmethod - def _refresh_servers(module, servers): - """ - Loop through a list of servers and refresh them. - :param module: the AnsibleModule object - :param servers: list of clc-sdk.Server instances to refresh - :return: none - """ - for server in servers: - try: - server.Refresh() - except CLCException as ex: - module.fail_json(msg='Unable to refresh the server {0}. {1}'.format( - server.id, ex.message - )) - - @staticmethod - def _add_public_ip_to_servers( - module, - should_add_public_ip, - servers, - public_ip_protocol, - public_ip_ports): - """ - Create a public IP for servers - :param module: the AnsibleModule object - :param should_add_public_ip: boolean - whether or not to provision a public ip for servers. 
Skipped if False - :param servers: List of servers to add public ips to - :param public_ip_protocol: a protocol to allow for the public ips - :param public_ip_ports: list of ports to allow for the public ips - :return: none - """ - failed_servers = [] - if not should_add_public_ip: - return failed_servers - - ports_lst = [] - request_list = [] - server = None - - for port in public_ip_ports: - ports_lst.append( - {'protocol': public_ip_protocol, 'port': port}) - try: - if not module.check_mode: - for server in servers: - request = server.PublicIPs().Add(ports_lst) - request_list.append(request) - except APIFailedResponse: - failed_servers.append(server) - ClcServer._wait_for_requests(module, request_list) - return failed_servers - - @staticmethod - def _add_alert_policy_to_servers(clc, module, servers): - """ - Associate the alert policy to servers - :param clc: the clc-sdk instance to use - :param module: the AnsibleModule object - :param servers: List of servers to add alert policy to - :return: failed_servers: the list of servers which failed while associating alert policy - """ - failed_servers = [] - p = module.params - alert_policy_id = p.get('alert_policy_id') - alias = p.get('alias') - - if alert_policy_id and not module.check_mode: - for server in servers: - try: - ClcServer._add_alert_policy_to_server( - clc=clc, - alias=alias, - server_id=server.id, - alert_policy_id=alert_policy_id) - except CLCException: - failed_servers.append(server) - return failed_servers - - @staticmethod - def _add_alert_policy_to_server( - clc, alias, server_id, alert_policy_id): - """ - Associate an alert policy to a clc server - :param clc: the clc-sdk instance to use - :param alias: the clc account alias - :param server_id: The clc server id - :param alert_policy_id: the alert policy id to be associated to the server - :return: none - """ - try: - clc.v2.API.Call( - method='POST', - url='servers/%s/%s/alertPolicies' % (alias, server_id), - payload=json.dumps( - { - 'id': 
alert_policy_id - })) - except APIFailedResponse as e: - raise CLCException( - 'Failed to associate alert policy to the server : {0} with Error {1}'.format( - server_id, str(e.response_text))) - - @staticmethod - def _get_alert_policy_id_by_name(clc, module, alias, alert_policy_name): - """ - Returns the alert policy id for the given alert policy name - :param clc: the clc-sdk instance to use - :param module: the AnsibleModule object - :param alias: the clc account alias - :param alert_policy_name: the name of the alert policy - :return: alert_policy_id: the alert policy id - """ - alert_policy_id = None - policies = clc.v2.API.Call('GET', '/v2/alertPolicies/%s' % alias) - if not policies: - return alert_policy_id - for policy in policies.get('items'): - if policy.get('name') == alert_policy_name: - if not alert_policy_id: - alert_policy_id = policy.get('id') - else: - return module.fail_json( - msg='multiple alert policies were found with policy name : %s' % alert_policy_name) - return alert_policy_id - - @staticmethod - def _delete_servers(module, clc, server_ids): - """ - Delete the servers on the provided list - :param module: the AnsibleModule object - :param clc: the clc-sdk instance to use - :param server_ids: list of servers to delete - :return: a list of dictionaries with server information about the servers that were deleted - """ - terminated_server_ids = [] - server_dict_array = [] - request_list = [] - - if not isinstance(server_ids, list) or len(server_ids) < 1: - return module.fail_json( - msg='server_ids should be a list of servers, aborting') - - servers = clc.v2.Servers(server_ids).Servers() - for server in servers: - if not module.check_mode: - request_list.append(server.Delete()) - ClcServer._wait_for_requests(module, request_list) - - for server in servers: - terminated_server_ids.append(server.id) - - return True, server_dict_array, terminated_server_ids - - @staticmethod - def _start_stop_servers(module, clc, server_ids): - """ - Start or 
Stop the servers on the provided list - :param module: the AnsibleModule object - :param clc: the clc-sdk instance to use - :param server_ids: list of servers to start or stop - :return: a list of dictionaries with server information about the servers that were started or stopped - """ - p = module.params - state = p.get('state') - changed = False - changed_servers = [] - server_dict_array = [] - result_server_ids = [] - request_list = [] - - if not isinstance(server_ids, list) or len(server_ids) < 1: - return module.fail_json( - msg='server_ids should be a list of servers, aborting') - - servers = clc.v2.Servers(server_ids).Servers() - for server in servers: - if server.powerState != state: - changed_servers.append(server) - if not module.check_mode: - request_list.append( - ClcServer._change_server_power_state( - module, - server, - state)) - changed = True - - ClcServer._wait_for_requests(module, request_list) - ClcServer._refresh_servers(module, changed_servers) - - for server in set(changed_servers + servers): - try: - server.data['ipaddress'] = server.details[ - 'ipAddresses'][0]['internal'] - server.data['publicip'] = str( - server.PublicIPs().public_ips[0]) - except (KeyError, IndexError): - pass - - server_dict_array.append(server.data) - result_server_ids.append(server.id) - - return changed, server_dict_array, result_server_ids - - @staticmethod - def _change_server_power_state(module, server, state): - """ - Change the server powerState - :param module: the module to check for intended state - :param server: the server to start or stop - :param state: the intended powerState for the server - :return: the request object from clc-sdk call - """ - result = None - try: - if state == 'started': - result = server.PowerOn() - else: - # Try to shut down the server and fall back to power off when unable to shut down. 
- result = server.ShutDown() - if result and hasattr(result, 'requests') and result.requests[0]: - return result - else: - result = server.PowerOff() - except CLCException: - module.fail_json( - msg='Unable to change power state for server {0}'.format( - server.id)) - return result - - @staticmethod - def _find_running_servers_by_group(module, datacenter, count_group): - """ - Find a list of running servers in the provided group - :param module: the AnsibleModule object - :param datacenter: the clc-sdk.Datacenter instance to use to lookup the group - :param count_group: the group to count the servers - :return: list of servers, and list of running servers - """ - group = ClcServer._find_group( - module=module, - datacenter=datacenter, - lookup_group=count_group) - - servers = group.Servers().Servers() - running_servers = [] - - for server in servers: - if server.status == 'active' and server.powerState == 'started': - running_servers.append(server) - - return servers, running_servers - - @staticmethod - def _find_group(module, datacenter, lookup_group=None): - """ - Find a server group in a datacenter by calling the CLC API - :param module: the AnsibleModule instance - :param datacenter: clc-sdk.Datacenter instance to search for the group - :param lookup_group: string name of the group to search for - :return: clc-sdk.Group instance - """ - if not lookup_group: - lookup_group = module.params.get('group') - try: - return datacenter.Groups().Get(lookup_group) - except CLCException: - pass - - # The search above only acts on the main - result = ClcServer._find_group_recursive( - module, - datacenter.Groups(), - lookup_group) - - if result is None: - module.fail_json( - msg=str( - "Unable to find group: " + - lookup_group + - " in location: " + - datacenter.id)) - - return result - - @staticmethod - def _find_group_recursive(module, group_list, lookup_group): - """ - Find a server group by recursively walking the tree - :param module: the AnsibleModule instance to use 
- :param group_list: a list of groups to search - :param lookup_group: the group to look for - :return: list of groups - """ - result = None - for group in group_list.groups: - subgroups = group.Subgroups() - try: - return subgroups.Get(lookup_group) - except CLCException: - result = ClcServer._find_group_recursive( - module, - subgroups, - lookup_group) - - if result is not None: - break - - return result - - @staticmethod - def _create_clc_server( - clc, - module, - server_params): - """ - Call the CLC Rest API to Create a Server - :param clc: the clc-python-sdk instance to use - :param module: the AnsibleModule instance to use - :param server_params: a dictionary of params to use to create the servers - :return: clc-sdk.Request object linked to the queued server request - """ - - try: - res = clc.v2.API.Call( - method='POST', - url='servers/%s' % - (server_params.get('alias')), - payload=json.dumps( - { - 'name': server_params.get('name'), - 'description': server_params.get('description'), - 'groupId': server_params.get('group_id'), - 'sourceServerId': server_params.get('template'), - 'isManagedOS': server_params.get('managed_os'), - 'primaryDNS': server_params.get('primary_dns'), - 'secondaryDNS': server_params.get('secondary_dns'), - 'networkId': server_params.get('network_id'), - 'ipAddress': server_params.get('ip_address'), - 'password': server_params.get('password'), - 'sourceServerPassword': server_params.get('source_server_password'), - 'cpu': server_params.get('cpu'), - 'cpuAutoscalePolicyId': server_params.get('cpu_autoscale_policy_id'), - 'memoryGB': server_params.get('memory'), - 'type': server_params.get('type'), - 'storageType': server_params.get('storage_type'), - 'antiAffinityPolicyId': server_params.get('anti_affinity_policy_id'), - 'customFields': server_params.get('custom_fields'), - 'additionalDisks': server_params.get('additional_disks'), - 'ttl': server_params.get('ttl'), - 'packages': server_params.get('packages'), - 'configurationId': 
server_params.get('configuration_id'), - 'osType': server_params.get('os_type')})) - - result = clc.v2.Requests(res) - except APIFailedResponse as ex: - return module.fail_json(msg='Unable to create the server: {0}. {1}'.format( - server_params.get('name'), - ex.response_text - )) - - # - # Patch the Request object so that it returns a valid server - - # Find the server's UUID from the API response - server_uuid = [obj['id'] - for obj in res['links'] if obj['rel'] == 'self'][0] - - # Change the request server method to a _find_server_by_uuid closure so - # that it will work - result.requests[0].Server = lambda: ClcServer._find_server_by_uuid_w_retry( - clc, - module, - server_uuid, - server_params.get('alias')) - - return result - - @staticmethod - def _get_anti_affinity_policy_id(clc, module, alias, aa_policy_name): - """ - retrieves the anti affinity policy id of the server based on the name of the policy - :param clc: the clc-sdk instance to use - :param module: the AnsibleModule object - :param alias: the CLC account alias - :param aa_policy_name: the anti affinity policy name - :return: aa_policy_id: The anti affinity policy id - """ - aa_policy_id = None - try: - aa_policies = clc.v2.API.Call(method='GET', - url='antiAffinityPolicies/%s' % alias) - except APIFailedResponse as ex: - return module.fail_json(msg='Unable to fetch anti affinity policies for account: {0}. 
{1}'.format( - alias, ex.response_text)) - for aa_policy in aa_policies.get('items'): - if aa_policy.get('name') == aa_policy_name: - if not aa_policy_id: - aa_policy_id = aa_policy.get('id') - else: - return module.fail_json( - msg='multiple anti affinity policies were found with policy name : %s' % aa_policy_name) - return aa_policy_id - - # - # This is the function that gets patched to the Request.server object using a lamda closure - # - - @staticmethod - def _find_server_by_uuid_w_retry( - clc, module, svr_uuid, alias=None, retries=5, back_out=2): - """ - Find the clc server by the UUID returned from the provisioning request. Retry the request if a 404 is returned. - :param clc: the clc-sdk instance to use - :param module: the AnsibleModule object - :param svr_uuid: UUID of the server - :param retries: the number of retry attempts to make prior to fail. default is 5 - :param alias: the Account Alias to search - :return: a clc-sdk.Server instance - """ - if not alias: - alias = clc.v2.Account.GetAlias() - - # Wait and retry if the api returns a 404 - while True: - retries -= 1 - try: - server_obj = clc.v2.API.Call( - method='GET', url='servers/%s/%s?uuid=true' % - (alias, svr_uuid)) - server_id = server_obj['id'] - server = clc.v2.Server( - id=server_id, - alias=alias, - server_obj=server_obj) - return server - - except APIFailedResponse as e: - if e.response_status_code != 404: - return module.fail_json( - msg='A failure response was received from CLC API when ' - 'attempting to get details for a server: UUID=%s, Code=%i, Message=%s' % - (svr_uuid, e.response_status_code, e.message)) - if retries == 0: - return module.fail_json( - msg='Unable to reach the CLC API after 5 attempts') - time.sleep(back_out) - back_out *= 2 - - @staticmethod - def _set_user_agent(clc): - if hasattr(clc, 'SetRequestsSession'): - agent_string = "ClcAnsibleModule/" + __version__ - ses = requests.Session() - ses.headers.update({"Api-Client": agent_string}) - ses.headers['User-Agent'] 
+= " " + agent_string - clc.SetRequestsSession(ses) - - -def main(): - """ - The main function. Instantiates the module and calls process_request. - :return: none - """ - argument_dict = ClcServer._define_module_argument_spec() - module = AnsibleModule(supports_check_mode=True, **argument_dict) - clc_server = ClcServer(module) - clc_server.process_request() - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/cloud/centurylink/clc_server_snapshot.py b/plugins/modules/cloud/centurylink/clc_server_snapshot.py deleted file mode 100644 index 4de4c9936e..0000000000 --- a/plugins/modules/cloud/centurylink/clc_server_snapshot.py +++ /dev/null @@ -1,411 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# -# Copyright (c) 2015 CenturyLink -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' -module: clc_server_snapshot -short_description: Create, Delete and Restore server snapshots in CenturyLink Cloud. -description: - - An Ansible module to Create, Delete and Restore server snapshots in CenturyLink Cloud. -options: - server_ids: - description: - - The list of CLC server Ids. - type: list - required: True - elements: str - expiration_days: - description: - - The number of days to keep the server snapshot before it expires. - type: int - default: 7 - required: False - state: - description: - - The state to insure that the provided resources are in. - type: str - default: 'present' - required: False - choices: ['present', 'absent', 'restore'] - wait: - description: - - Whether to wait for the provisioning tasks to finish before returning. 
- default: 'True' - required: False - type: str -requirements: - - python = 2.7 - - requests >= 2.5.0 - - clc-sdk -author: "CLC Runner (@clc-runner)" -notes: - - To use this module, it is required to set the below environment variables which enables access to the - Centurylink Cloud - - CLC_V2_API_USERNAME, the account login id for the centurylink cloud - - CLC_V2_API_PASSWORD, the account password for the centurylink cloud - - Alternatively, the module accepts the API token and account alias. The API token can be generated using the - CLC account login and password via the HTTP api call @ https://api.ctl.io/v2/authentication/login - - CLC_V2_API_TOKEN, the API token generated from https://api.ctl.io/v2/authentication/login - - CLC_ACCT_ALIAS, the account alias associated with the centurylink cloud - - Users can set CLC_V2_API_URL to specify an endpoint for pointing to a different CLC environment. -''' - -EXAMPLES = ''' -# Note - You must set the CLC_V2_API_USERNAME And CLC_V2_API_PASSWD Environment variables before running these examples - -- name: Create server snapshot - community.general.clc_server_snapshot: - server_ids: - - UC1TEST-SVR01 - - UC1TEST-SVR02 - expiration_days: 10 - wait: True - state: present - -- name: Restore server snapshot - community.general.clc_server_snapshot: - server_ids: - - UC1TEST-SVR01 - - UC1TEST-SVR02 - wait: True - state: restore - -- name: Delete server snapshot - community.general.clc_server_snapshot: - server_ids: - - UC1TEST-SVR01 - - UC1TEST-SVR02 - wait: True - state: absent -''' - -RETURN = ''' -server_ids: - description: The list of server ids that are changed - returned: success - type: list - sample: - [ - "UC1TEST-SVR01", - "UC1TEST-SVR02" - ] -''' - -__version__ = '${version}' - -import os -import traceback - -from ansible_collections.community.general.plugins.module_utils.version import LooseVersion - -REQUESTS_IMP_ERR = None -try: - import requests -except ImportError: - REQUESTS_IMP_ERR = traceback.format_exc() - 
REQUESTS_FOUND = False -else: - REQUESTS_FOUND = True - -# -# Requires the clc-python-sdk. -# sudo pip install clc-sdk -# -CLC_IMP_ERR = None -try: - import clc as clc_sdk - from clc import CLCException -except ImportError: - CLC_IMP_ERR = traceback.format_exc() - CLC_FOUND = False - clc_sdk = None -else: - CLC_FOUND = True - -from ansible.module_utils.basic import AnsibleModule, missing_required_lib - - -class ClcSnapshot: - - clc = clc_sdk - module = None - - def __init__(self, module): - """ - Construct module - """ - self.module = module - - if not CLC_FOUND: - self.module.fail_json(msg=missing_required_lib('clc-sdk'), exception=CLC_IMP_ERR) - if not REQUESTS_FOUND: - self.module.fail_json(msg=missing_required_lib('requests'), exception=REQUESTS_IMP_ERR) - if requests.__version__ and LooseVersion(requests.__version__) < LooseVersion('2.5.0'): - self.module.fail_json( - msg='requests library version should be >= 2.5.0') - - self._set_user_agent(self.clc) - - def process_request(self): - """ - Process the request - Main Code Path - :return: Returns with either an exit_json or fail_json - """ - p = self.module.params - server_ids = p['server_ids'] - expiration_days = p['expiration_days'] - state = p['state'] - request_list = [] - changed = False - changed_servers = [] - - self._set_clc_credentials_from_env() - if state == 'present': - changed, request_list, changed_servers = self.ensure_server_snapshot_present( - server_ids=server_ids, - expiration_days=expiration_days) - elif state == 'absent': - changed, request_list, changed_servers = self.ensure_server_snapshot_absent( - server_ids=server_ids) - elif state == 'restore': - changed, request_list, changed_servers = self.ensure_server_snapshot_restore( - server_ids=server_ids) - - self._wait_for_requests_to_complete(request_list) - return self.module.exit_json( - changed=changed, - server_ids=changed_servers) - - def ensure_server_snapshot_present(self, server_ids, expiration_days): - """ - Ensures the given set 
of server_ids have the snapshots created - :param server_ids: The list of server_ids to create the snapshot - :param expiration_days: The number of days to keep the snapshot - :return: (changed, request_list, changed_servers) - changed: A flag indicating whether any change was made - request_list: the list of clc request objects from CLC API call - changed_servers: The list of servers ids that are modified - """ - request_list = [] - changed = False - servers = self._get_servers_from_clc( - server_ids, - 'Failed to obtain server list from the CLC API') - servers_to_change = [ - server for server in servers if len( - server.GetSnapshots()) == 0] - for server in servers_to_change: - changed = True - if not self.module.check_mode: - request = self._create_server_snapshot(server, expiration_days) - request_list.append(request) - changed_servers = [ - server.id for server in servers_to_change if server.id] - return changed, request_list, changed_servers - - def _create_server_snapshot(self, server, expiration_days): - """ - Create the snapshot for the CLC server - :param server: the CLC server object - :param expiration_days: The number of days to keep the snapshot - :return: the create request object from CLC API Call - """ - result = None - try: - result = server.CreateSnapshot( - delete_existing=True, - expiration_days=expiration_days) - except CLCException as ex: - self.module.fail_json(msg='Failed to create snapshot for server : {0}. 
{1}'.format( - server.id, ex.response_text - )) - return result - - def ensure_server_snapshot_absent(self, server_ids): - """ - Ensures the given set of server_ids have the snapshots removed - :param server_ids: The list of server_ids to delete the snapshot - :return: (changed, request_list, changed_servers) - changed: A flag indicating whether any change was made - request_list: the list of clc request objects from CLC API call - changed_servers: The list of servers ids that are modified - """ - request_list = [] - changed = False - servers = self._get_servers_from_clc( - server_ids, - 'Failed to obtain server list from the CLC API') - servers_to_change = [ - server for server in servers if len( - server.GetSnapshots()) > 0] - for server in servers_to_change: - changed = True - if not self.module.check_mode: - request = self._delete_server_snapshot(server) - request_list.append(request) - changed_servers = [ - server.id for server in servers_to_change if server.id] - return changed, request_list, changed_servers - - def _delete_server_snapshot(self, server): - """ - Delete snapshot for the CLC server - :param server: the CLC server object - :return: the delete snapshot request object from CLC API - """ - result = None - try: - result = server.DeleteSnapshot() - except CLCException as ex: - self.module.fail_json(msg='Failed to delete snapshot for server : {0}. 
{1}'.format( - server.id, ex.response_text - )) - return result - - def ensure_server_snapshot_restore(self, server_ids): - """ - Ensures the given set of server_ids have the snapshots restored - :param server_ids: The list of server_ids to delete the snapshot - :return: (changed, request_list, changed_servers) - changed: A flag indicating whether any change was made - request_list: the list of clc request objects from CLC API call - changed_servers: The list of servers ids that are modified - """ - request_list = [] - changed = False - servers = self._get_servers_from_clc( - server_ids, - 'Failed to obtain server list from the CLC API') - servers_to_change = [ - server for server in servers if len( - server.GetSnapshots()) > 0] - for server in servers_to_change: - changed = True - if not self.module.check_mode: - request = self._restore_server_snapshot(server) - request_list.append(request) - changed_servers = [ - server.id for server in servers_to_change if server.id] - return changed, request_list, changed_servers - - def _restore_server_snapshot(self, server): - """ - Restore snapshot for the CLC server - :param server: the CLC server object - :return: the restore snapshot request object from CLC API - """ - result = None - try: - result = server.RestoreSnapshot() - except CLCException as ex: - self.module.fail_json(msg='Failed to restore snapshot for server : {0}. 
{1}'.format( - server.id, ex.response_text - )) - return result - - def _wait_for_requests_to_complete(self, requests_lst): - """ - Waits until the CLC requests are complete if the wait argument is True - :param requests_lst: The list of CLC request objects - :return: none - """ - if not self.module.params['wait']: - return - for request in requests_lst: - request.WaitUntilComplete() - for request_details in request.requests: - if request_details.Status() != 'succeeded': - self.module.fail_json( - msg='Unable to process server snapshot request') - - @staticmethod - def define_argument_spec(): - """ - This function defines the dictionary object required for - package module - :return: the package dictionary object - """ - argument_spec = dict( - server_ids=dict(type='list', required=True, elements='str'), - expiration_days=dict(default=7, type='int'), - wait=dict(default=True), - state=dict( - default='present', - choices=[ - 'present', - 'absent', - 'restore']), - ) - return argument_spec - - def _get_servers_from_clc(self, server_list, message): - """ - Internal function to fetch list of CLC server objects from a list of server ids - :param server_list: The list of server ids - :param message: The error message to throw in case of any error - :return the list of CLC server objects - """ - try: - return self.clc.v2.Servers(server_list).servers - except CLCException as ex: - return self.module.fail_json(msg=message + ': %s' % ex) - - def _set_clc_credentials_from_env(self): - """ - Set the CLC Credentials on the sdk by reading environment variables - :return: none - """ - env = os.environ - v2_api_token = env.get('CLC_V2_API_TOKEN', False) - v2_api_username = env.get('CLC_V2_API_USERNAME', False) - v2_api_passwd = env.get('CLC_V2_API_PASSWD', False) - clc_alias = env.get('CLC_ACCT_ALIAS', False) - api_url = env.get('CLC_V2_API_URL', False) - - if api_url: - self.clc.defaults.ENDPOINT_URL_V2 = api_url - - if v2_api_token and clc_alias: - self.clc._LOGIN_TOKEN_V2 = 
v2_api_token - self.clc._V2_ENABLED = True - self.clc.ALIAS = clc_alias - elif v2_api_username and v2_api_passwd: - self.clc.v2.SetCredentials( - api_username=v2_api_username, - api_passwd=v2_api_passwd) - else: - return self.module.fail_json( - msg="You must set the CLC_V2_API_USERNAME and CLC_V2_API_PASSWD " - "environment variables") - - @staticmethod - def _set_user_agent(clc): - if hasattr(clc, 'SetRequestsSession'): - agent_string = "ClcAnsibleModule/" + __version__ - ses = requests.Session() - ses.headers.update({"Api-Client": agent_string}) - ses.headers['User-Agent'] += " " + agent_string - clc.SetRequestsSession(ses) - - -def main(): - """ - Main function - :return: None - """ - module = AnsibleModule( - argument_spec=ClcSnapshot.define_argument_spec(), - supports_check_mode=True - ) - clc_snapshot = ClcSnapshot(module) - clc_snapshot.process_request() - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/cloud/misc/proxmox.py b/plugins/modules/cloud/misc/proxmox.py deleted file mode 100644 index da8783e16d..0000000000 --- a/plugins/modules/cloud/misc/proxmox.py +++ /dev/null @@ -1,780 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# -# Copyright: Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - -DOCUMENTATION = ''' ---- -module: proxmox -short_description: management of instances in Proxmox VE cluster -description: - - allows you to create/delete/stop instances in Proxmox VE cluster - - Starting in Ansible 2.1, it automatically detects containerization type (lxc for PVE 4, openvz for older) - - Since community.general 4.0.0 on, there are no more default values, see I(proxmox_default_behavior). 
-options: - password: - description: - - the instance root password - type: str - hostname: - description: - - the instance hostname - - required only for C(state=present) - - must be unique if vmid is not passed - type: str - ostemplate: - description: - - the template for VM creating - - required only for C(state=present) - type: str - disk: - description: - - This option was previously described as "hard disk size in GB for instance" however several formats describing - a lxc mount are permitted. - - Older versions of Proxmox will accept a numeric value for size using the I(storage) parameter to automatically - choose which storage to allocate from, however new versions enforce the C(:) syntax. - - "Additional options are available by using some combination of the following key-value pairs as a - comma-delimited list C([volume=] [,acl=<1|0>] [,mountoptions=] [,quota=<1|0>] - [,replicate=<1|0>] [,ro=<1|0>] [,shared=<1|0>] [,size=])." - - See U(https://pve.proxmox.com/wiki/Linux_Container) for a full description. - - This option has no default unless I(proxmox_default_behavior) is set to C(compatiblity); then the default is C(3). - type: str - cores: - description: - - Specify number of cores per socket. - - This option has no default unless I(proxmox_default_behavior) is set to C(compatiblity); then the default is C(1). - type: int - cpus: - description: - - numbers of allocated cpus for instance - - This option has no default unless I(proxmox_default_behavior) is set to C(compatiblity); then the default is C(1). - type: int - memory: - description: - - memory size in MB for instance - - This option has no default unless I(proxmox_default_behavior) is set to C(compatiblity); then the default is C(512). - type: int - swap: - description: - - swap memory size in MB for instance - - This option has no default unless I(proxmox_default_behavior) is set to C(compatiblity); then the default is C(0). 
- type: int - netif: - description: - - specifies network interfaces for the container. As a hash/dictionary defining interfaces. - type: dict - features: - description: - - Specifies a list of features to be enabled. For valid options, see U(https://pve.proxmox.com/wiki/Linux_Container#pct_options). - - Some features require the use of a privileged container. - type: list - elements: str - version_added: 2.0.0 - mounts: - description: - - specifies additional mounts (separate disks) for the container. As a hash/dictionary defining mount points - type: dict - ip_address: - description: - - specifies the address the container will be assigned - type: str - onboot: - description: - - specifies whether a VM will be started during system bootup - - This option has no default unless I(proxmox_default_behavior) is set to C(compatiblity); then the default is C(no). - type: bool - storage: - description: - - target storage - type: str - default: 'local' - cpuunits: - description: - - CPU weight for a VM - - This option has no default unless I(proxmox_default_behavior) is set to C(compatiblity); then the default is C(1000). - type: int - nameserver: - description: - - sets DNS server IP address for a container - type: str - searchdomain: - description: - - sets DNS search domain for a container - type: str - timeout: - description: - - timeout for operations - type: int - default: 30 - force: - description: - - forcing operations - - can be used only with states C(present), C(stopped), C(restarted) - - with C(state=present) force option allow to overwrite existing container - - with states C(stopped) , C(restarted) allow to force stop instance - type: bool - default: 'no' - purge: - description: - - Remove container from all related configurations. - - For example backup jobs, replication jobs, or HA. - - Related ACLs and Firewall entries will always be removed. - - Used with state C(absent). 
- type: bool - default: false - version_added: 2.3.0 - state: - description: - - Indicate desired state of the instance - type: str - choices: ['present', 'started', 'absent', 'stopped', 'restarted'] - default: present - pubkey: - description: - - Public key to add to /root/.ssh/authorized_keys. This was added on Proxmox 4.2, it is ignored for earlier versions - type: str - unprivileged: - description: - - Indicate if the container should be unprivileged - type: bool - default: 'no' - description: - description: - - Specify the description for the container. Only used on the configuration web interface. - - This is saved as a comment inside the configuration file. - type: str - version_added: '0.2.0' - hookscript: - description: - - Script that will be executed during various steps in the containers lifetime. - type: str - version_added: '0.2.0' - proxmox_default_behavior: - description: - - As of community.general 4.0.0, various options no longer have default values. - These default values caused problems when users expected different behavior from Proxmox - by default or filled options which caused problems when set. - - The value C(compatibility) (default before community.general 4.0.0) will ensure that the default values - are used when the values are not explicitly specified by the user. The new default is C(no_defaults), - which makes sure these options have no defaults. - - This affects the I(disk), I(cores), I(cpus), I(memory), I(onboot), I(swap), I(cpuunits) options. - type: str - default: no_defaults - choices: - - compatibility - - no_defaults - version_added: "1.3.0" - clone: - description: - - ID of the container to be cloned. - - I(description), I(hostname), and I(pool) will be copied from the cloned container if not specified. - - The type of clone created is defined by the I(clone_type) parameter. - - This operator is only supported for Proxmox clusters that use LXC containerization (PVE version >= 4). 
- type: int - version_added: 4.3.0 - clone_type: - description: - - Type of the clone created. - - C(full) creates a full clone, and I(storage) must be specified. - - C(linked) creates a linked clone, and the cloned container must be a template container. - - C(opportunistic) creates a linked clone if the cloned container is a template container, and a full clone if not. - I(storage) may be specified, if not it will fall back to the default. - type: str - choices: ['full', 'linked', 'opportunistic'] - default: opportunistic - version_added: 4.3.0 -author: Sergei Antipov (@UnderGreen) -extends_documentation_fragment: - - community.general.proxmox.documentation - - community.general.proxmox.selection -''' - -EXAMPLES = r''' -- name: Create new container with minimal options - community.general.proxmox: - vmid: 100 - node: uk-mc02 - api_user: root@pam - api_password: 1q2w3e - api_host: node1 - password: 123456 - hostname: example.org - ostemplate: 'local:vztmpl/ubuntu-14.04-x86_64.tar.gz' - -- name: Create new container with hookscript and description - community.general.proxmox: - vmid: 100 - node: uk-mc02 - api_user: root@pam - api_password: 1q2w3e - api_host: node1 - password: 123456 - hostname: example.org - ostemplate: 'local:vztmpl/ubuntu-14.04-x86_64.tar.gz' - hookscript: 'local:snippets/vm_hook.sh' - description: created with ansible - -- name: Create new container automatically selecting the next available vmid. 
- community.general.proxmox: - node: 'uk-mc02' - api_user: 'root@pam' - api_password: '1q2w3e' - api_host: 'node1' - password: '123456' - hostname: 'example.org' - ostemplate: 'local:vztmpl/ubuntu-14.04-x86_64.tar.gz' - -- name: Create new container with minimal options with force(it will rewrite existing container) - community.general.proxmox: - vmid: 100 - node: uk-mc02 - api_user: root@pam - api_password: 1q2w3e - api_host: node1 - password: 123456 - hostname: example.org - ostemplate: 'local:vztmpl/ubuntu-14.04-x86_64.tar.gz' - force: yes - -- name: Create new container with minimal options use environment PROXMOX_PASSWORD variable(you should export it before) - community.general.proxmox: - vmid: 100 - node: uk-mc02 - api_user: root@pam - api_host: node1 - password: 123456 - hostname: example.org - ostemplate: 'local:vztmpl/ubuntu-14.04-x86_64.tar.gz' - -- name: Create new container with minimal options defining network interface with dhcp - community.general.proxmox: - vmid: 100 - node: uk-mc02 - api_user: root@pam - api_password: 1q2w3e - api_host: node1 - password: 123456 - hostname: example.org - ostemplate: 'local:vztmpl/ubuntu-14.04-x86_64.tar.gz' - netif: '{"net0":"name=eth0,ip=dhcp,ip6=dhcp,bridge=vmbr0"}' - -- name: Create new container with minimal options defining network interface with static ip - community.general.proxmox: - vmid: 100 - node: uk-mc02 - api_user: root@pam - api_password: 1q2w3e - api_host: node1 - password: 123456 - hostname: example.org - ostemplate: 'local:vztmpl/ubuntu-14.04-x86_64.tar.gz' - netif: '{"net0":"name=eth0,gw=192.168.0.1,ip=192.168.0.2/24,bridge=vmbr0"}' - -- name: Create new container with minimal options defining a mount with 8GB - community.general.proxmox: - vmid: 100 - node: uk-mc02 - api_user: root@pam - api_password: 1q2w3e - api_host: node1 - password: 123456 - hostname: example.org - ostemplate: local:vztmpl/ubuntu-14.04-x86_64.tar.gz' - mounts: '{"mp0":"local:8,mp=/mnt/test/"}' - -- name: Create new 
container with minimal options defining a cpu core limit - community.general.proxmox: - vmid: 100 - node: uk-mc02 - api_user: root@pam - api_password: 1q2w3e - api_host: node1 - password: 123456 - hostname: example.org - ostemplate: local:vztmpl/ubuntu-14.04-x86_64.tar.gz' - cores: 2 - -- name: Create a new container with nesting enabled and allows the use of CIFS/NFS inside the container. - community.general.proxmox: - vmid: 100 - node: uk-mc02 - api_user: root@pam - api_password: 1q2w3e - api_host: node1 - password: 123456 - hostname: example.org - ostemplate: local:vztmpl/ubuntu-14.04-x86_64.tar.gz' - features: - - nesting=1 - - mount=cifs,nfs - -- name: > - Create a linked clone of the template container with id 100. The newly created container with be a - linked clone, because no storage parameter is defined - community.general.proxmox: - vmid: 201 - node: uk-mc02 - api_user: root@pam - api_password: 1q2w3e - api_host: node1 - clone: 100 - hostname: clone.example.org - -- name: Create a full clone of the container with id 100 - community.general.proxmox: - vmid: 201 - node: uk-mc02 - api_user: root@pam - api_password: 1q2w3e - api_host: node1 - clone: 100 - hostname: clone.example.org - storage: local - -- name: Start container - community.general.proxmox: - vmid: 100 - api_user: root@pam - api_password: 1q2w3e - api_host: node1 - state: started - -- name: > - Start container with mount. 
You should enter a 90-second timeout because servers - with additional disks take longer to boot - community.general.proxmox: - vmid: 100 - api_user: root@pam - api_password: 1q2w3e - api_host: node1 - state: started - timeout: 90 - -- name: Stop container - community.general.proxmox: - vmid: 100 - api_user: root@pam - api_password: 1q2w3e - api_host: node1 - state: stopped - -- name: Stop container with force - community.general.proxmox: - vmid: 100 - api_user: root@pam - api_password: 1q2w3e - api_host: node1 - force: yes - state: stopped - -- name: Restart container(stopped or mounted container you can't restart) - community.general.proxmox: - vmid: 100 - api_user: root@pam - api_password: 1q2w3e - api_host: node1 - state: restarted - -- name: Remove container - community.general.proxmox: - vmid: 100 - api_user: root@pam - api_password: 1q2w3e - api_host: node1 - state: absent -''' - -import time -import traceback - -from ansible_collections.community.general.plugins.module_utils.version import LooseVersion - -from ansible.module_utils.basic import AnsibleModule, env_fallback -from ansible.module_utils.common.text.converters import to_native - -from ansible_collections.community.general.plugins.module_utils.proxmox import ( - ansible_to_proxmox_bool, proxmox_auth_argument_spec, ProxmoxAnsible) - -VZ_TYPE = None - - -class ProxmoxLxcAnsible(ProxmoxAnsible): - def content_check(self, node, ostemplate, template_store): - return [True for cnt in self.proxmox_api.nodes(node).storage(template_store).content.get() if cnt['volid'] == ostemplate] - - def is_template_container(self, node, vmid): - """Check if the specified container is a template.""" - proxmox_node = self.proxmox_api.nodes(node) - config = getattr(proxmox_node, VZ_TYPE)(vmid).config.get() - return config['template'] - - def create_instance(self, vmid, node, disk, storage, cpus, memory, swap, timeout, clone, **kwargs): - proxmox_node = self.proxmox_api.nodes(node) - - # Remove all empty kwarg entries - 
kwargs = dict((k, v) for k, v in kwargs.items() if v is not None) - - if VZ_TYPE == 'lxc': - kwargs['cpulimit'] = cpus - kwargs['rootfs'] = disk - if 'netif' in kwargs: - kwargs.update(kwargs['netif']) - del kwargs['netif'] - if 'mounts' in kwargs: - kwargs.update(kwargs['mounts']) - del kwargs['mounts'] - if 'pubkey' in kwargs: - if self.version() >= LooseVersion('4.2'): - kwargs['ssh-public-keys'] = kwargs['pubkey'] - del kwargs['pubkey'] - else: - kwargs['cpus'] = cpus - kwargs['disk'] = disk - - if clone is not None: - if VZ_TYPE != 'lxc': - self.module.fail_json(changed=False, msg="Clone operator is only supported for LXC enabled proxmox clusters.") - - clone_is_template = self.is_template_container(node, clone) - - # By default, create a full copy only when the cloned container is not a template. - create_full_copy = not clone_is_template - - # Only accept parameters that are compatible with the clone endpoint. - valid_clone_parameters = ['hostname', 'pool', 'description'] - if self.module.params['storage'] is not None and clone_is_template: - # Cloning a template, so create a full copy instead of a linked copy - create_full_copy = True - elif self.module.params['storage'] is None and not clone_is_template: - # Not cloning a template, but also no defined storage. This isn't possible. 
- self.module.fail_json(changed=False, msg="Cloned container is not a template, storage needs to be specified.") - - if self.module.params['clone_type'] == 'linked': - if not clone_is_template: - self.module.fail_json(changed=False, msg="'linked' clone type is specified, but cloned container is not a template container.") - # Don't need to do more, by default create_full_copy is set to false already - elif self.module.params['clone_type'] == 'opportunistic': - if not clone_is_template: - # Cloned container is not a template, so we need our 'storage' parameter - valid_clone_parameters.append('storage') - elif self.module.params['clone_type'] == 'full': - create_full_copy = True - valid_clone_parameters.append('storage') - - clone_parameters = {} - - if create_full_copy: - clone_parameters['full'] = '1' - else: - clone_parameters['full'] = '0' - for param in valid_clone_parameters: - if self.module.params[param] is not None: - clone_parameters[param] = self.module.params[param] - - taskid = getattr(proxmox_node, VZ_TYPE)(clone).clone.post(newid=vmid, **clone_parameters) - else: - taskid = getattr(proxmox_node, VZ_TYPE).create(vmid=vmid, storage=storage, memory=memory, swap=swap, **kwargs) - - while timeout: - if (proxmox_node.tasks(taskid).status.get()['status'] == 'stopped' and - proxmox_node.tasks(taskid).status.get()['exitstatus'] == 'OK'): - return True - timeout -= 1 - if timeout == 0: - self.module.fail_json(msg='Reached timeout while waiting for creating VM. 
Last line in task before timeout: %s' % - proxmox_node.tasks(taskid).log.get()[:1]) - - time.sleep(1) - return False - - def start_instance(self, vm, vmid, timeout): - taskid = getattr(self.proxmox_api.nodes(vm['node']), VZ_TYPE)(vmid).status.start.post() - while timeout: - if (self.proxmox_api.nodes(vm['node']).tasks(taskid).status.get()['status'] == 'stopped' and - self.proxmox_api.nodes(vm['node']).tasks(taskid).status.get()['exitstatus'] == 'OK'): - return True - timeout -= 1 - if timeout == 0: - self.module.fail_json(msg='Reached timeout while waiting for starting VM. Last line in task before timeout: %s' % - self.proxmox_api.nodes(vm['node']).tasks(taskid).log.get()[:1]) - - time.sleep(1) - return False - - def stop_instance(self, vm, vmid, timeout, force): - if force: - taskid = getattr(self.proxmox_api.nodes(vm['node']), VZ_TYPE)(vmid).status.shutdown.post(forceStop=1) - else: - taskid = getattr(self.proxmox_api.nodes(vm['node']), VZ_TYPE)(vmid).status.shutdown.post() - while timeout: - if (self.proxmox_api.nodes(vm['node']).tasks(taskid).status.get()['status'] == 'stopped' and - self.proxmox_api.nodes(vm['node']).tasks(taskid).status.get()['exitstatus'] == 'OK'): - return True - timeout -= 1 - if timeout == 0: - self.module.fail_json(msg='Reached timeout while waiting for stopping VM. Last line in task before timeout: %s' % - self.proxmox_api.nodes(vm['node']).tasks(taskid).log.get()[:1]) - - time.sleep(1) - return False - - def umount_instance(self, vm, vmid, timeout): - taskid = getattr(self.proxmox_api.nodes(vm['node']), VZ_TYPE)(vmid).status.umount.post() - while timeout: - if (self.proxmox_api.nodes(vm['node']).tasks(taskid).status.get()['status'] == 'stopped' and - self.proxmox_api.nodes(vm['node']).tasks(taskid).status.get()['exitstatus'] == 'OK'): - return True - timeout -= 1 - if timeout == 0: - self.module.fail_json(msg='Reached timeout while waiting for unmounting VM. 
Last line in task before timeout: %s' % - self.proxmox_api.nodes(vm['node']).tasks(taskid).log.get()[:1]) - - time.sleep(1) - return False - - -def main(): - module_args = proxmox_auth_argument_spec() - proxmox_args = dict( - vmid=dict(type='int', required=False), - node=dict(), - pool=dict(), - password=dict(no_log=True), - hostname=dict(), - ostemplate=dict(), - disk=dict(type='str'), - cores=dict(type='int'), - cpus=dict(type='int'), - memory=dict(type='int'), - swap=dict(type='int'), - netif=dict(type='dict'), - mounts=dict(type='dict'), - ip_address=dict(), - onboot=dict(type='bool'), - features=dict(type='list', elements='str'), - storage=dict(default='local'), - cpuunits=dict(type='int'), - nameserver=dict(), - searchdomain=dict(), - timeout=dict(type='int', default=30), - force=dict(type='bool', default=False), - purge=dict(type='bool', default=False), - state=dict(default='present', choices=['present', 'absent', 'stopped', 'started', 'restarted']), - pubkey=dict(type='str'), - unprivileged=dict(type='bool', default=False), - description=dict(type='str'), - hookscript=dict(type='str'), - proxmox_default_behavior=dict(type='str', default='no_defaults', choices=['compatibility', 'no_defaults']), - clone=dict(type='int'), - clone_type=dict(default='opportunistic', choices=['full', 'linked', 'opportunistic']), - ) - module_args.update(proxmox_args) - - module = AnsibleModule( - argument_spec=module_args, - required_if=[ - ('state', 'present', ['node', 'hostname']), - ('state', 'present', ('clone', 'ostemplate'), True), # Require one of clone and ostemplate. Together with mutually_exclusive this ensures that we - # either clone a container or create a new one from a template file. - ], - required_together=[ - ('api_token_id', 'api_token_secret') - ], - required_one_of=[('api_password', 'api_token_id')], - mutually_exclusive=[('clone', 'ostemplate')], # Creating a new container is done either by cloning an existing one, or based on a template. 
- ) - - proxmox = ProxmoxLxcAnsible(module) - - global VZ_TYPE - VZ_TYPE = 'openvz' if proxmox.version() < LooseVersion('4.0') else 'lxc' - - state = module.params['state'] - vmid = module.params['vmid'] - node = module.params['node'] - disk = module.params['disk'] - cpus = module.params['cpus'] - memory = module.params['memory'] - swap = module.params['swap'] - storage = module.params['storage'] - hostname = module.params['hostname'] - if module.params['ostemplate'] is not None: - template_store = module.params['ostemplate'].split(":")[0] - timeout = module.params['timeout'] - clone = module.params['clone'] - - if module.params['proxmox_default_behavior'] == 'compatibility': - old_default_values = dict( - disk="3", - cores=1, - cpus=1, - memory=512, - swap=0, - onboot=False, - cpuunits=1000, - ) - for param, value in old_default_values.items(): - if module.params[param] is None: - module.params[param] = value - - # If vmid not set get the Next VM id from ProxmoxAPI - # If hostname is set get the VM id from ProxmoxAPI - if not vmid and state == 'present': - vmid = proxmox.get_nextvmid() - elif not vmid and hostname: - vmid = proxmox.get_vmid(hostname) - elif not vmid: - module.exit_json(changed=False, msg="Vmid could not be fetched for the following action: %s" % state) - - # Create a new container - if state == 'present' and clone is None: - try: - if proxmox.get_vm(vmid, ignore_missing=True) and not module.params['force']: - module.exit_json(changed=False, msg="VM with vmid = %s is already exists" % vmid) - # If no vmid was passed, there cannot be another VM named 'hostname' - if (not module.params['vmid'] and - proxmox.get_vmid(hostname, ignore_missing=True) and - not module.params['force']): - vmid = proxmox.get_vmid(hostname) - module.exit_json(changed=False, msg="VM with hostname %s already exists and has ID number %s" % (hostname, vmid)) - elif not proxmox.get_node(node): - module.fail_json(msg="node '%s' not exists in cluster" % node) - elif not 
proxmox.content_check(node, module.params['ostemplate'], template_store): - module.fail_json(msg="ostemplate '%s' not exists on node %s and storage %s" - % (module.params['ostemplate'], node, template_store)) - except Exception as e: - module.fail_json(msg="Pre-creation checks of {VZ_TYPE} VM {vmid} failed with exception: {e}".format(VZ_TYPE=VZ_TYPE, vmid=vmid, e=e)) - - try: - proxmox.create_instance(vmid, node, disk, storage, cpus, memory, swap, timeout, clone, - cores=module.params['cores'], - pool=module.params['pool'], - password=module.params['password'], - hostname=module.params['hostname'], - ostemplate=module.params['ostemplate'], - netif=module.params['netif'], - mounts=module.params['mounts'], - ip_address=module.params['ip_address'], - onboot=ansible_to_proxmox_bool(module.params['onboot']), - cpuunits=module.params['cpuunits'], - nameserver=module.params['nameserver'], - searchdomain=module.params['searchdomain'], - force=ansible_to_proxmox_bool(module.params['force']), - pubkey=module.params['pubkey'], - features=",".join(module.params['features']) if module.params['features'] is not None else None, - unprivileged=ansible_to_proxmox_bool(module.params['unprivileged']), - description=module.params['description'], - hookscript=module.params['hookscript']) - - module.exit_json(changed=True, msg="Deployed VM %s from template %s" % (vmid, module.params['ostemplate'])) - except Exception as e: - module.fail_json(msg="Creation of %s VM %s failed with exception: %s" % (VZ_TYPE, vmid, e)) - - # Clone a container - elif state == 'present' and clone is not None: - try: - if proxmox.get_vm(vmid, ignore_missing=True) and not module.params['force']: - module.exit_json(changed=False, msg="VM with vmid = %s is already exists" % vmid) - # If no vmid was passed, there cannot be another VM named 'hostname' - if (not module.params['vmid'] and - proxmox.get_vmid(hostname, ignore_missing=True) and - not module.params['force']): - vmid = proxmox.get_vmid(hostname) - 
module.exit_json(changed=False, msg="VM with hostname %s already exists and has ID number %s" % (hostname, vmid)) - if not proxmox.get_vm(clone, ignore_missing=True): - module.exit_json(changed=False, msg="Container to be cloned does not exist") - except Exception as e: - module.fail_json(msg="Pre-clone checks of {VZ_TYPE} VM {vmid} failed with exception: {e}".format(VZ_TYPE=VZ_TYPE, vmid=vmid, e=e)) - - try: - proxmox.create_instance(vmid, node, disk, storage, cpus, memory, swap, timeout, clone) - - module.exit_json(changed=True, msg="Cloned VM %s from %s" % (vmid, clone)) - except Exception as e: - module.fail_json(msg="Cloning %s VM %s failed with exception: %s" % (VZ_TYPE, vmid, e)) - - elif state == 'started': - try: - vm = proxmox.get_vm(vmid) - if getattr(proxmox.proxmox_api.nodes(vm['node']), VZ_TYPE)(vmid).status.current.get()['status'] == 'running': - module.exit_json(changed=False, msg="VM %s is already running" % vmid) - - if proxmox.start_instance(vm, vmid, timeout): - module.exit_json(changed=True, msg="VM %s started" % vmid) - except Exception as e: - module.fail_json(msg="starting of VM %s failed with exception: %s" % (vmid, e)) - - elif state == 'stopped': - try: - vm = proxmox.get_vm(vmid) - - if getattr(proxmox.proxmox_api.nodes(vm['node']), VZ_TYPE)(vmid).status.current.get()['status'] == 'mounted': - if module.params['force']: - if proxmox.umount_instance(vm, vmid, timeout): - module.exit_json(changed=True, msg="VM %s is shutting down" % vmid) - else: - module.exit_json(changed=False, msg=("VM %s is already shutdown, but mounted. 
" - "You can use force option to umount it.") % vmid) - - if getattr(proxmox.proxmox_api.nodes(vm['node']), VZ_TYPE)(vmid).status.current.get()['status'] == 'stopped': - module.exit_json(changed=False, msg="VM %s is already shutdown" % vmid) - - if proxmox.stop_instance(vm, vmid, timeout, force=module.params['force']): - module.exit_json(changed=True, msg="VM %s is shutting down" % vmid) - except Exception as e: - module.fail_json(msg="stopping of VM %s failed with exception: %s" % (vmid, e)) - - elif state == 'restarted': - try: - vm = proxmox.get_vm(vmid) - - vm_status = getattr(proxmox.proxmox_api.nodes(vm['node']), VZ_TYPE)(vmid).status.current.get()['status'] - if vm_status in ['stopped', 'mounted']: - module.exit_json(changed=False, msg="VM %s is not running" % vmid) - - if (proxmox.stop_instance(vm, vmid, timeout, force=module.params['force']) and - proxmox.start_instance(vm, vmid, timeout)): - module.exit_json(changed=True, msg="VM %s is restarted" % vmid) - except Exception as e: - module.fail_json(msg="restarting of VM %s failed with exception: %s" % (vmid, e)) - - elif state == 'absent': - try: - vm = proxmox.get_vm(vmid, ignore_missing=True) - if not vm: - module.exit_json(changed=False, msg="VM %s does not exist" % vmid) - - vm_status = getattr(proxmox.proxmox_api.nodes(vm['node']), VZ_TYPE)(vmid).status.current.get()['status'] - if vm_status == 'running': - module.exit_json(changed=False, msg="VM %s is running. Stop it before deletion." % vmid) - - if vm_status == 'mounted': - module.exit_json(changed=False, msg="VM %s is mounted. Stop it with force option before deletion." 
% vmid) - - delete_params = {} - - if module.params['purge']: - delete_params['purge'] = 1 - - taskid = getattr(proxmox.proxmox_api.nodes(vm['node']), VZ_TYPE).delete(vmid, **delete_params) - - while timeout: - task_status = proxmox.proxmox_api.nodes(vm['node']).tasks(taskid).status.get() - if (task_status['status'] == 'stopped' and task_status['exitstatus'] == 'OK'): - module.exit_json(changed=True, msg="VM %s removed" % vmid) - timeout -= 1 - if timeout == 0: - module.fail_json(msg='Reached timeout while waiting for removing VM. Last line in task before timeout: %s' - % proxmox.proxmox_api.nodes(vm['node']).tasks(taskid).log.get()[:1]) - - time.sleep(1) - except Exception as e: - module.fail_json(msg="deletion of VM %s failed with exception: %s" % (vmid, to_native(e))) - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/cloud/misc/proxmox_domain_info.py b/plugins/modules/cloud/misc/proxmox_domain_info.py deleted file mode 100644 index 675b04a41e..0000000000 --- a/plugins/modules/cloud/misc/proxmox_domain_info.py +++ /dev/null @@ -1,130 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# -# Copyright: Tristan Le Guern (@tleguern) -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' ---- -module: proxmox_domain_info -short_description: Retrieve information about one or more Proxmox VE domains -version_added: 1.3.0 -description: - - Retrieve information about one or more Proxmox VE domains. -options: - domain: - description: - - Restrict results to a specific authentication realm. 
- aliases: ['realm', 'name'] - type: str -author: Tristan Le Guern (@tleguern) -extends_documentation_fragment: community.general.proxmox.documentation -''' - - -EXAMPLES = ''' -- name: List existing domains - community.general.proxmox_domain_info: - api_host: helldorado - api_user: root@pam - api_password: "{{ password | default(omit) }}" - api_token_id: "{{ token_id | default(omit) }}" - api_token_secret: "{{ token_secret | default(omit) }}" - register: proxmox_domains - -- name: Retrieve information about the pve domain - community.general.proxmox_domain_info: - api_host: helldorado - api_user: root@pam - api_password: "{{ password | default(omit) }}" - api_token_id: "{{ token_id | default(omit) }}" - api_token_secret: "{{ token_secret | default(omit) }}" - domain: pve - register: proxmox_domain_pve -''' - - -RETURN = ''' -proxmox_domains: - description: List of authentication domains. - returned: always, but can be empty - type: list - elements: dict - contains: - comment: - description: Short description of the realm. - returned: on success - type: str - realm: - description: Realm name. - returned: on success - type: str - type: - description: Realm type. - returned: on success - type: str - digest: - description: Realm hash. 
- returned: on success, can be absent - type: str -''' - - -from ansible.module_utils.basic import AnsibleModule, missing_required_lib -from ansible_collections.community.general.plugins.module_utils.proxmox import ( - proxmox_auth_argument_spec, ProxmoxAnsible) - - -class ProxmoxDomainInfoAnsible(ProxmoxAnsible): - def get_domain(self, realm): - try: - domain = self.proxmox_api.access.domains.get(realm) - except Exception: - self.module.fail_json(msg="Domain '%s' does not exist" % realm) - domain['realm'] = realm - return domain - - def get_domains(self): - domains = self.proxmox_api.access.domains.get() - return domains - - -def proxmox_domain_info_argument_spec(): - return dict( - domain=dict(type='str', aliases=['realm', 'name']), - ) - - -def main(): - module_args = proxmox_auth_argument_spec() - domain_info_args = proxmox_domain_info_argument_spec() - module_args.update(domain_info_args) - - module = AnsibleModule( - argument_spec=module_args, - required_one_of=[('api_password', 'api_token_id')], - required_together=[('api_token_id', 'api_token_secret')], - supports_check_mode=True - ) - result = dict( - changed=False - ) - - proxmox = ProxmoxDomainInfoAnsible(module) - domain = module.params['domain'] - - if domain: - domains = [proxmox.get_domain(realm=domain)] - else: - domains = proxmox.get_domains() - result['proxmox_domains'] = domains - - module.exit_json(**result) - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/cloud/misc/proxmox_group_info.py b/plugins/modules/cloud/misc/proxmox_group_info.py deleted file mode 100644 index 58b56e856c..0000000000 --- a/plugins/modules/cloud/misc/proxmox_group_info.py +++ /dev/null @@ -1,140 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# -# Copyright: Tristan Le Guern -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' ---- -module: 
proxmox_group_info -short_description: Retrieve information about one or more Proxmox VE groups -version_added: 1.3.0 -description: - - Retrieve information about one or more Proxmox VE groups -options: - group: - description: - - Restrict results to a specific group. - aliases: ['groupid', 'name'] - type: str -author: Tristan Le Guern (@tleguern) -extends_documentation_fragment: community.general.proxmox.documentation -''' - - -EXAMPLES = ''' -- name: List existing groups - community.general.proxmox_group_info: - api_host: helldorado - api_user: root@pam - api_password: "{{ password | default(omit) }}" - api_token_id: "{{ token_id | default(omit) }}" - api_token_secret: "{{ token_secret | default(omit) }}" - register: proxmox_groups - -- name: Retrieve information about the admin group - community.general.proxmox_group_info: - api_host: helldorado - api_user: root@pam - api_password: "{{ password | default(omit) }}" - api_token_id: "{{ token_id | default(omit) }}" - api_token_secret: "{{ token_secret | default(omit) }}" - group: admin - register: proxmox_group_admin -''' - - -RETURN = ''' -proxmox_groups: - description: List of groups. - returned: always, but can be empty - type: list - elements: dict - contains: - comment: - description: Short description of the group. - returned: on success, can be absent - type: str - groupid: - description: Group name. - returned: on success - type: str - users: - description: List of users in the group. 
- returned: on success - type: list - elements: str -''' - - -from ansible.module_utils.basic import AnsibleModule, missing_required_lib -from ansible_collections.community.general.plugins.module_utils.proxmox import ( - proxmox_auth_argument_spec, ProxmoxAnsible) - - -class ProxmoxGroupInfoAnsible(ProxmoxAnsible): - def get_group(self, groupid): - try: - group = self.proxmox_api.access.groups.get(groupid) - except Exception: - self.module.fail_json(msg="Group '%s' does not exist" % groupid) - group['groupid'] = groupid - return ProxmoxGroup(group) - - def get_groups(self): - groups = self.proxmox_api.access.groups.get() - return [ProxmoxGroup(group) for group in groups] - - -class ProxmoxGroup: - def __init__(self, group): - self.group = dict() - # Data representation is not the same depending on API calls - for k, v in group.items(): - if k == 'users' and isinstance(v, str): - self.group['users'] = v.split(',') - elif k == 'members': - self.group['users'] = group['members'] - else: - self.group[k] = v - - -def proxmox_group_info_argument_spec(): - return dict( - group=dict(type='str', aliases=['groupid', 'name']), - ) - - -def main(): - module_args = proxmox_auth_argument_spec() - group_info_args = proxmox_group_info_argument_spec() - module_args.update(group_info_args) - - module = AnsibleModule( - argument_spec=module_args, - required_one_of=[('api_password', 'api_token_id')], - required_together=[('api_token_id', 'api_token_secret')], - supports_check_mode=True - ) - result = dict( - changed=False - ) - - proxmox = ProxmoxGroupInfoAnsible(module) - group = module.params['group'] - - if group: - groups = [proxmox.get_group(groupid=group)] - else: - groups = proxmox.get_groups() - result['proxmox_groups'] = [group.group for group in groups] - - module.exit_json(**result) - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/cloud/misc/proxmox_kvm.py b/plugins/modules/cloud/misc/proxmox_kvm.py deleted file mode 100644 index 2354cb916f..0000000000 
--- a/plugins/modules/cloud/misc/proxmox_kvm.py +++ /dev/null @@ -1,1408 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# -# Copyright: (c) 2016, Abdoul Bah (@helldorado) -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - -DOCUMENTATION = r''' ---- -module: proxmox_kvm -short_description: Management of Qemu(KVM) Virtual Machines in Proxmox VE cluster. -description: - - Allows you to create/delete/stop Qemu(KVM) Virtual Machines in Proxmox VE cluster. - - Since community.general 4.0.0 on, there are no more default values, see I(proxmox_default_behavior). -author: "Abdoul Bah (@helldorado) " -options: - acpi: - description: - - Specify if ACPI should be enabled/disabled. - - This option has no default unless I(proxmox_default_behavior) is set to C(compatiblity); then the default is C(yes). - type: bool - agent: - description: - - Specify if the QEMU Guest Agent should be enabled/disabled. - type: bool - args: - description: - - Pass arbitrary arguments to kvm. - - This option is for experts only! - - If I(proxmox_default_behavior) is set to C(compatiblity), this option has a default of - C(-serial unix:/var/run/qemu-server/.serial,server,nowait). - type: str - autostart: - description: - - Specify if the VM should be automatically restarted after crash (currently ignored in PVE API). - - This option has no default unless I(proxmox_default_behavior) is set to C(compatiblity); then the default is C(no). - type: bool - balloon: - description: - - Specify the amount of RAM for the VM in MB. - - Using zero disables the balloon driver. - - This option has no default unless I(proxmox_default_behavior) is set to C(compatiblity); then the default is C(0). - type: int - bios: - description: - - Specify the BIOS implementation. 
- type: str - choices: ['seabios', 'ovmf'] - boot: - description: - - Specify the boot order -> boot on floppy C(a), hard disk C(c), CD-ROM C(d), or network C(n). - - You can combine to set order. - - This option has no default unless I(proxmox_default_behavior) is set to C(compatiblity); then the default is C(cnd). - type: str - bootdisk: - description: - - Enable booting from specified disk. C((ide|sata|scsi|virtio)\d+) - type: str - cicustom: - description: - - 'cloud-init: Specify custom files to replace the automatically generated ones at start.' - type: str - version_added: 1.3.0 - cipassword: - description: - - 'cloud-init: password of default user to create.' - type: str - version_added: 1.3.0 - citype: - description: - - 'cloud-init: Specifies the cloud-init configuration format.' - - The default depends on the configured operating system type (C(ostype)). - - We use the C(nocloud) format for Linux, and C(configdrive2) for Windows. - type: str - choices: ['nocloud', 'configdrive2'] - version_added: 1.3.0 - ciuser: - description: - - 'cloud-init: username of default user to create.' - type: str - version_added: 1.3.0 - clone: - description: - - Name of VM to be cloned. If C(vmid) is setted, C(clone) can take arbitrary value but required for initiating the clone. - type: str - cores: - description: - - Specify number of cores per socket. - - This option has no default unless I(proxmox_default_behavior) is set to C(compatiblity); then the default is C(1). - type: int - cpu: - description: - - Specify emulated CPU type. - - This option has no default unless I(proxmox_default_behavior) is set to C(compatiblity); then the default is C(kvm64). - type: str - cpulimit: - description: - - Specify if CPU usage will be limited. Value 0 indicates no CPU limit. - - If the computer has 2 CPUs, it has total of '2' CPU time - type: int - cpuunits: - description: - - Specify CPU weight for a VM. 
- - You can disable fair-scheduler configuration by setting this to 0 - - This option has no default unless I(proxmox_default_behavior) is set to C(compatiblity); then the default is C(1000). - type: int - delete: - description: - - Specify a list of settings you want to delete. - type: str - description: - description: - - Specify the description for the VM. Only used on the configuration web interface. - - This is saved as comment inside the configuration file. - type: str - digest: - description: - - Specify if to prevent changes if current configuration file has different SHA1 digest. - - This can be used to prevent concurrent modifications. - type: str - efidisk0: - description: - - Specify a hash/dictionary of EFI disk options. - - Requires I(bios=ovmf) to be set to be able to use it. - type: dict - suboptions: - storage: - description: - - C(storage) is the storage identifier where to create the disk. - type: str - format: - description: - - C(format) is the drive's backing file's data format. Please refer to the Proxmox VE Administrator Guide, - section Proxmox VE Storage (see U(https://pve.proxmox.com/pve-docs/chapter-pvesm.html) for the latest - version, tables 3 to 14) to find out format supported by the provided storage backend. - type: str - efitype: - description: - - C(efitype) indicates the size of the EFI disk. - - C(2m) will allow for a 2MB EFI disk, which will be enough to persist boot order and new boot entries. - - C(4m) will allow for a 4MB EFI disk, which will additionally allow to store EFI keys in order to enable - Secure Boot - type: str - choices: - - 2m - - 4m - pre_enrolled_keys: - description: - - C(pre_enrolled_keys) indicates whether EFI keys for Secure Boot should be enrolled C(1) in the VM firmware - upon creation or not (0). - - If set to C(1), Secure Boot will also be enabled by default when the VM is created. - type: bool - version_added: 4.5.0 - force: - description: - - Allow to force stop VM. 
- - Can be used with states C(stopped), C(restarted) and C(absent). - - This option has no default unless I(proxmox_default_behavior) is set to C(compatiblity); then the default is C(no). - type: bool - format: - description: - - Target drive's backing file's data format. - - Used only with clone - - Use I(format=unspecified) and I(full=false) for a linked clone. - - Please refer to the Proxmox VE Administrator Guide, section Proxmox VE Storage (see - U(https://pve.proxmox.com/pve-docs/chapter-pvesm.html) for the latest version, tables 3 to 14) to find out format - supported by the provided storage backend. - - This option has no default unless I(proxmox_default_behavior) is set to C(compatiblity); then the default is C(qcow2). - If I(proxmox_default_behavior) is set to C(no_defaults), not specifying this option is equivalent to setting it to C(unspecified). - type: str - choices: [ "cloop", "cow", "qcow", "qcow2", "qed", "raw", "vmdk", "unspecified" ] - freeze: - description: - - Specify if PVE should freeze CPU at startup (use 'c' monitor command to start execution). - type: bool - full: - description: - - Create a full copy of all disk. This is always done when you clone a normal VM. - - For VM templates, we try to create a linked clone by default. - - Used only with clone - type: bool - default: 'yes' - hostpci: - description: - - Specify a hash/dictionary of map host pci devices into guest. C(hostpci='{"key":"value", "key":"value"}'). - - Keys allowed are - C(hostpci[n]) where 0 ≤ n ≤ N. - - Values allowed are - C("host="HOSTPCIID[;HOSTPCIID2...]",pcie="1|0",rombar="1|0",x-vga="1|0""). - - The C(host) parameter is Host PCI device pass through. HOSTPCIID syntax is C(bus:dev.func) (hexadecimal numbers). - - C(pcie=boolean) I(default=0) Choose the PCI-express bus (needs the q35 machine model). - - C(rombar=boolean) I(default=1) Specify whether or not the device's ROM will be visible in the guest's memory map. 
- - C(x-vga=boolean) I(default=0) Enable vfio-vga device support. - - /!\ This option allows direct access to host hardware. So it is no longer possible to migrate such machines - use with special care. - type: dict - hotplug: - description: - - Selectively enable hotplug features. - - This is a comma separated list of hotplug features C('network', 'disk', 'cpu', 'memory' and 'usb'). - - Value 0 disables hotplug completely and value 1 is an alias for the default C('network,disk,usb'). - type: str - hugepages: - description: - - Enable/disable hugepages memory. - type: str - choices: ['any', '2', '1024'] - ide: - description: - - A hash/dictionary of volume used as IDE hard disk or CD-ROM. C(ide='{"key":"value", "key":"value"}'). - - Keys allowed are - C(ide[n]) where 0 ≤ n ≤ 3. - - Values allowed are - C("storage:size,format=value"). - - C(storage) is the storage identifier where to create the disk. - - C(size) is the size of the disk in GB. - - C(format) is the drive's backing file's data format. C(qcow2|raw|subvol). Please refer to the Proxmox VE - Administrator Guide, section Proxmox VE Storage (see U(https://pve.proxmox.com/pve-docs/chapter-pvesm.html) for - the latest version, tables 3 to 14) to find out format supported by the provided storage backend. - type: dict - ipconfig: - description: - - 'cloud-init: Set the IP configuration.' - - A hash/dictionary of network ip configurations. C(ipconfig='{"key":"value", "key":"value"}'). - - Keys allowed are - C(ipconfig[n]) where 0 ≤ n ≤ network interfaces. - - Values allowed are - C("[gw=] [,gw6=] [,ip=] [,ip6=]"). - - 'cloud-init: Specify IP addresses and gateways for the corresponding interface.' - - IP addresses use CIDR notation, gateways are optional but they should be in the same subnet of specified IP address. - - The special string 'dhcp' can be used for IP addresses to use DHCP, in which case no explicit gateway should be provided. 
- - For IPv6 the special string 'auto' can be used to use stateless autoconfiguration. - - If cloud-init is enabled and neither an IPv4 nor an IPv6 address is specified, it defaults to using dhcp on IPv4. - type: dict - version_added: 1.3.0 - keyboard: - description: - - Sets the keyboard layout for VNC server. - type: str - kvm: - description: - - Enable/disable KVM hardware virtualization. - - This option has no default unless I(proxmox_default_behavior) is set to C(compatiblity); then the default is C(yes). - type: bool - localtime: - description: - - Sets the real time clock to local time. - - This is enabled by default if ostype indicates a Microsoft OS. - type: bool - lock: - description: - - Lock/unlock the VM. - type: str - choices: ['migrate', 'backup', 'snapshot', 'rollback'] - machine: - description: - - Specifies the Qemu machine type. - - type => C((pc|pc(-i440fx)?-\d+\.\d+(\.pxe)?|q35|pc-q35-\d+\.\d+(\.pxe)?)) - type: str - memory: - description: - - Memory size in MB for instance. - - This option has no default unless I(proxmox_default_behavior) is set to C(compatiblity); then the default is C(512). - type: int - migrate_downtime: - description: - - Sets maximum tolerated downtime (in seconds) for migrations. - type: int - migrate_speed: - description: - - Sets maximum speed (in MB/s) for migrations. - - A value of 0 is no limit. - type: int - name: - description: - - Specifies the VM name. Only used on the configuration web interface. - - Required only for C(state=present). - type: str - nameservers: - description: - - 'cloud-init: DNS server IP address(es).' - - If unset, PVE host settings are used. - type: list - elements: str - version_added: 1.3.0 - net: - description: - - A hash/dictionary of network interfaces for the VM. C(net='{"key":"value", "key":"value"}'). - - Keys allowed are - C(net[n]) where 0 ≤ n ≤ N. - - Values allowed are - C("model="XX:XX:XX:XX:XX:XX",bridge="value",rate="value",tag="value",firewall="1|0",trunks="vlanid""). 
- - Model is one of C(e1000 e1000-82540em e1000-82544gc e1000-82545em i82551 i82557b i82559er ne2k_isa ne2k_pci pcnet rtl8139 virtio vmxnet3). - - C(XX:XX:XX:XX:XX:XX) should be an unique MAC address. This is automatically generated if not specified. - - The C(bridge) parameter can be used to automatically add the interface to a bridge device. The Proxmox VE standard bridge is called 'vmbr0'. - - Option C(rate) is used to limit traffic bandwidth from and to this interface. It is specified as floating point number, unit is 'Megabytes per second'. - - If you specify no bridge, we create a kvm 'user' (NATed) network device, which provides DHCP and DNS services. - type: dict - newid: - description: - - VMID for the clone. Used only with clone. - - If newid is not set, the next available VM ID will be fetched from ProxmoxAPI. - type: int - numa: - description: - - A hash/dictionaries of NUMA topology. C(numa='{"key":"value", "key":"value"}'). - - Keys allowed are - C(numa[n]) where 0 ≤ n ≤ N. - - Values allowed are - C("cpu="",hostnodes="",memory="number",policy="(bind|interleave|preferred)""). - - C(cpus) CPUs accessing this NUMA node. - - C(hostnodes) Host NUMA nodes to use. - - C(memory) Amount of memory this NUMA node provides. - - C(policy) NUMA allocation policy. - type: dict - numa_enabled: - description: - - Enables NUMA. - type: bool - onboot: - description: - - Specifies whether a VM will be started during system bootup. - - This option has no default unless I(proxmox_default_behavior) is set to C(compatiblity); then the default is C(yes). - type: bool - ostype: - description: - - Specifies guest operating system. This is used to enable special optimization/features for specific operating systems. - - The l26 is Linux 2.6/3.X Kernel. - - This option has no default unless I(proxmox_default_behavior) is set to C(compatiblity); then the default is C(l26). 
- type: str - choices: ['other', 'wxp', 'w2k', 'w2k3', 'w2k8', 'wvista', 'win7', 'win8', 'win10', 'win11', 'l24', 'l26', 'solaris'] - parallel: - description: - - A hash/dictionary of map host parallel devices. C(parallel='{"key":"value", "key":"value"}'). - - Keys allowed are - (parallel[n]) where 0 ≤ n ≤ 2. - - Values allowed are - C("/dev/parport\d+|/dev/usb/lp\d+"). - type: dict - protection: - description: - - Enable/disable the protection flag of the VM. This will enable/disable the remove VM and remove disk operations. - type: bool - reboot: - description: - - Allow reboot. If set to C(yes), the VM exit on reboot. - type: bool - revert: - description: - - Revert a pending change. - type: str - sata: - description: - - A hash/dictionary of volume used as sata hard disk or CD-ROM. C(sata='{"key":"value", "key":"value"}'). - - Keys allowed are - C(sata[n]) where 0 ≤ n ≤ 5. - - Values allowed are - C("storage:size,format=value"). - - C(storage) is the storage identifier where to create the disk. - - C(size) is the size of the disk in GB. - - C(format) is the drive's backing file's data format. C(qcow2|raw|subvol). Please refer to the Proxmox VE - Administrator Guide, section Proxmox VE Storage (see U(https://pve.proxmox.com/pve-docs/chapter-pvesm.html) for - the latest version, tables 3 to 14) to find out format supported by the provided storage backend. - type: dict - scsi: - description: - - A hash/dictionary of volume used as SCSI hard disk or CD-ROM. C(scsi='{"key":"value", "key":"value"}'). - - Keys allowed are - C(sata[n]) where 0 ≤ n ≤ 13. - - Values allowed are - C("storage:size,format=value"). - - C(storage) is the storage identifier where to create the disk. - - C(size) is the size of the disk in GB. - - C(format) is the drive's backing file's data format. C(qcow2|raw|subvol). 
Please refer to the Proxmox VE - Administrator Guide, section Proxmox VE Storage (see U(https://pve.proxmox.com/pve-docs/chapter-pvesm.html) for - the latest version, tables 3 to 14) to find out format supported by the provided storage backend. - type: dict - scsihw: - description: - - Specifies the SCSI controller model. - type: str - choices: ['lsi', 'lsi53c810', 'virtio-scsi-pci', 'virtio-scsi-single', 'megasas', 'pvscsi'] - searchdomains: - description: - - 'cloud-init: Sets DNS search domain(s).' - - If unset, PVE host settings are used. - type: list - elements: str - version_added: 1.3.0 - serial: - description: - - A hash/dictionary of serial device to create inside the VM. C('{"key":"value", "key":"value"}'). - - Keys allowed are - serial[n](str; required) where 0 ≤ n ≤ 3. - - Values allowed are - C((/dev/.+|socket)). - - /!\ If you pass through a host serial device, it is no longer possible to migrate such machines - use with special care. - type: dict - shares: - description: - - Rets amount of memory shares for auto-ballooning. (0 - 50000). - - The larger the number is, the more memory this VM gets. - - The number is relative to weights of all other running VMs. - - Using 0 disables auto-ballooning, this means no limit. - type: int - skiplock: - description: - - Ignore locks - - Only root is allowed to use this option. - type: bool - smbios: - description: - - Specifies SMBIOS type 1 fields. - type: str - snapname: - description: - - The name of the snapshot. Used only with clone. - type: str - sockets: - description: - - Sets the number of CPU sockets. (1 - N). - - This option has no default unless I(proxmox_default_behavior) is set to C(compatiblity); then the default is C(1). - type: int - sshkeys: - description: - - 'cloud-init: SSH key to assign to the default user. NOT TESTED with multiple keys but a multi-line value should work.' - type: str - version_added: 1.3.0 - startdate: - description: - - Sets the initial date of the real time clock. 
- - Valid format for date are C('now') or C('2016-09-25T16:01:21') or C('2016-09-25'). - type: str - startup: - description: - - Startup and shutdown behavior. C([[order=]\d+] [,up=\d+] [,down=\d+]). - - Order is a non-negative number defining the general startup order. - - Shutdown in done with reverse ordering. - type: str - state: - description: - - Indicates desired state of the instance. - - If C(current), the current state of the VM will be fetched. You can access it with C(results.status) - type: str - choices: ['present', 'started', 'absent', 'stopped', 'restarted','current'] - default: present - storage: - description: - - Target storage for full clone. - type: str - tablet: - description: - - Enables/disables the USB tablet device. - - This option has no default unless I(proxmox_default_behavior) is set to C(compatiblity); then the default is C(no). - type: bool - tags: - description: - - List of tags to apply to the VM instance. - - Tags must start with C([a-z0-9_]) followed by zero or more of the following characters C([a-z0-9_-+.]). - - Tags are only available in Proxmox 6+. - type: list - elements: str - version_added: 2.3.0 - target: - description: - - Target node. Only allowed if the original VM is on shared storage. - - Used only with clone - type: str - tdf: - description: - - Enables/disables time drift fix. - type: bool - template: - description: - - Enables/disables the template. - - This option has no default unless I(proxmox_default_behavior) is set to C(compatiblity); then the default is C(no). - type: bool - timeout: - description: - - Timeout for operations. - type: int - default: 30 - update: - description: - - If C(yes), the VM will be updated with new value. - - Cause of the operations of the API and security reasons, I have disabled the update of the following parameters - - C(net, virtio, ide, sata, scsi). Per example updating C(net) update the MAC address and C(virtio) create always new disk... - - Update of C(pool) is disabled. 
It needs an additional API endpoint not covered by this module. - type: bool - default: 'no' - vcpus: - description: - - Sets number of hotplugged vcpus. - type: int - vga: - description: - - Select VGA type. If you want to use high resolution modes (>= 1280x1024x16) then you should use option 'std' or 'vmware'. - - This option has no default unless I(proxmox_default_behavior) is set to C(compatiblity); then the default is C(std). - type: str - choices: ['std', 'cirrus', 'vmware', 'qxl', 'serial0', 'serial1', 'serial2', 'serial3', 'qxl2', 'qxl3', 'qxl4'] - virtio: - description: - - A hash/dictionary of volume used as VIRTIO hard disk. C(virtio='{"key":"value", "key":"value"}'). - - Keys allowed are - C(virto[n]) where 0 ≤ n ≤ 15. - - Values allowed are - C("storage:size,format=value"). - - C(storage) is the storage identifier where to create the disk. - - C(size) is the size of the disk in GB. - - C(format) is the drive's backing file's data format. C(qcow2|raw|subvol). Please refer to the Proxmox VE - Administrator Guide, section Proxmox VE Storage (see U(https://pve.proxmox.com/pve-docs/chapter-pvesm.html) - for the latest version, tables 3 to 14) to find out format supported by the provided storage backend. - type: dict - watchdog: - description: - - Creates a virtual hardware watchdog device. - type: str - proxmox_default_behavior: - description: - - As of community.general 4.0.0, various options no longer have default values. - These default values caused problems when users expected different behavior from Proxmox - by default or filled options which caused problems when set. - - The value C(compatibility) (default before community.general 4.0.0) will ensure that the default values - are used when the values are not explicitly specified by the user. The new default is C(no_defaults), - which makes sure these options have no defaults. 
- - This affects the I(acpi), I(autostart), I(balloon), I(boot), I(cores), I(cpu), - I(cpuunits), I(force), I(format), I(kvm), I(memory), I(onboot), I(ostype), I(sockets), - I(tablet), I(template), I(vga), options. - type: str - default: no_defaults - choices: - - compatibility - - no_defaults - version_added: "1.3.0" -extends_documentation_fragment: - - community.general.proxmox.documentation - - community.general.proxmox.selection -''' - -EXAMPLES = ''' -- name: Create new VM with minimal options - community.general.proxmox_kvm: - api_user: root@pam - api_password: secret - api_host: helldorado - name: spynal - node: sabrewulf - -- name: Create new VM with minimal options and given vmid - community.general.proxmox_kvm: - api_user: root@pam - api_password: secret - api_host: helldorado - name: spynal - node: sabrewulf - vmid: 100 - -- name: Create new VM with two network interface options - community.general.proxmox_kvm: - api_user: root@pam - api_password: secret - api_host: helldorado - name: spynal - node: sabrewulf - net: - net0: 'virtio,bridge=vmbr1,rate=200' - net1: 'e1000,bridge=vmbr2' - -- name: Create new VM with one network interface, three virto hard disk, 4 cores, and 2 vcpus - community.general.proxmox_kvm: - api_user: root@pam - api_password: secret - api_host: helldorado - name: spynal - node: sabrewulf - net: - net0: 'virtio,bridge=vmbr1,rate=200' - virtio: - virtio0: 'VMs_LVM:10' - virtio1: 'VMs:2,format=qcow2' - virtio2: 'VMs:5,format=raw' - cores: 4 - vcpus: 2 - -- name: Create VM with 1 10GB SATA disk and an EFI disk, with Secure Boot disabled by default - community.general.proxmox_kvm: - api_user: root@pam - api_password: secret - api_host: helldorado - name: spynal - node: sabrewulf - sata: - sata0: 'VMs_LVM:10,format=raw' - bios: ovmf - efidisk0: - storage: VMs_LVM_thin - format: raw - efitype: 4m - pre_enrolled_keys: False - -- name: Create VM with 1 10GB SATA disk and an EFI disk, with Secure Boot enabled by default - 
community.general.proxmox_kvm: - api_user: root@pam - api_password: secret - api_host: helldorado - name: spynal - node: sabrewulf - sata: - sata0: 'VMs_LVM:10,format=raw' - bios: ovmf - efidisk0: - storage: VMs_LVM - format: raw - efitype: 4m - pre_enrolled_keys: 1 - -- name: > - Clone VM with only source VM name. - The VM source is spynal. - The target VM name is zavala - community.general.proxmox_kvm: - api_user: root@pam - api_password: secret - api_host: helldorado - clone: spynal - name: zavala - node: sabrewulf - storage: VMs - format: qcow2 - timeout: 500 - -- name: > - Create linked clone VM with only source VM name. - The VM source is spynal. - The target VM name is zavala - community.general.proxmox_kvm: - api_user: root@pam - api_password: secret - api_host: helldorado - clone: spynal - name: zavala - node: sabrewulf - storage: VMs - full: no - format: unspecified - timeout: 500 - -- name: Clone VM with source vmid and target newid and raw format - community.general.proxmox_kvm: - api_user: root@pam - api_password: secret - api_host: helldorado - clone: arbitrary_name - vmid: 108 - newid: 152 - name: zavala - node: sabrewulf - storage: LVM_STO - format: raw - timeout: 300 - -- name: Create new VM and lock it for snapshot - community.general.proxmox_kvm: - api_user: root@pam - api_password: secret - api_host: helldorado - name: spynal - node: sabrewulf - lock: snapshot - -- name: Create new VM and set protection to disable the remove VM and remove disk operations - community.general.proxmox_kvm: - api_user: root@pam - api_password: secret - api_host: helldorado - name: spynal - node: sabrewulf - protection: yes - -- name: Create new VM using cloud-init with a username and password - community.general.proxmox_kvm: - node: sabrewulf - api_user: root@pam - api_password: secret - api_host: helldorado - name: spynal - ide: - ide2: 'local:cloudinit,format=qcow2' - ciuser: mylinuxuser - cipassword: supersecret - searchdomains: 'mydomain.internal' - nameservers: 
1.1.1.1 - net: - net0: 'virtio,bridge=vmbr1,tag=77' - ipconfig: - ipconfig0: 'ip=192.168.1.1/24,gw=192.168.1.1' - -- name: Create new VM using Cloud-Init with an ssh key - community.general.proxmox_kvm: - node: sabrewulf - api_user: root@pam - api_password: secret - api_host: helldorado - name: spynal - ide: - ide2: 'local:cloudinit,format=qcow2' - sshkeys: 'ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAILJkVm98B71lD5XHfihwcYHE9TVpsJmK1vR1JcaU82L+' - searchdomains: 'mydomain.internal' - nameservers: - - '1.1.1.1' - - '8.8.8.8' - net: - net0: 'virtio,bridge=vmbr1,tag=77' - ipconfig: - ipconfig0: 'ip=192.168.1.1/24' - -- name: Start VM - community.general.proxmox_kvm: - api_user: root@pam - api_password: secret - api_host: helldorado - name: spynal - node: sabrewulf - state: started - -- name: Stop VM - community.general.proxmox_kvm: - api_user: root@pam - api_password: secret - api_host: helldorado - name: spynal - node: sabrewulf - state: stopped - -- name: Stop VM with force - community.general.proxmox_kvm: - api_user: root@pam - api_password: secret - api_host: helldorado - name: spynal - node: sabrewulf - state: stopped - force: yes - -- name: Restart VM - community.general.proxmox_kvm: - api_user: root@pam - api_password: secret - api_host: helldorado - name: spynal - node: sabrewulf - state: restarted - -- name: Remove VM - community.general.proxmox_kvm: - api_user: root@pam - api_password: secret - api_host: helldorado - name: spynal - node: sabrewulf - state: absent - -- name: Get VM current state - community.general.proxmox_kvm: - api_user: root@pam - api_password: secret - api_host: helldorado - name: spynal - node: sabrewulf - state: current - -- name: Update VM configuration - community.general.proxmox_kvm: - api_user: root@pam - api_password: secret - api_host: helldorado - name: spynal - node: sabrewulf - cores: 8 - memory: 16384 - update: yes - -- name: Delete QEMU parameters - community.general.proxmox_kvm: - api_user: root@pam - api_password: secret - 
api_host: helldorado - name: spynal - node: sabrewulf - delete: 'args,template,cpulimit' - -- name: Revert a pending change - community.general.proxmox_kvm: - api_user: root@pam - api_password: secret - api_host: helldorado - name: spynal - node: sabrewulf - revert: 'template,cpulimit' -''' - -RETURN = ''' -vmid: - description: The VM vmid. - returned: success - type: int - sample: 115 -status: - description: The current virtual machine status. - returned: success, not clone, not absent, not update - type: str - sample: running -msg: - description: A short message - returned: always - type: str - sample: "VM kropta with vmid = 110 is running" -''' - -import re -import time -import traceback -from ansible.module_utils.six.moves.urllib.parse import quote - -from ansible_collections.community.general.plugins.module_utils.version import LooseVersion -from ansible_collections.community.general.plugins.module_utils.proxmox import (proxmox_auth_argument_spec, ProxmoxAnsible) - -from ansible.module_utils.basic import AnsibleModule, missing_required_lib -from ansible.module_utils.common.text.converters import to_native - - -def parse_mac(netstr): - return re.search('=(.*?),', netstr).group(1) - - -def parse_dev(devstr): - return re.search('(.*?)(,|$)', devstr).group(1) - - -class ProxmoxKvmAnsible(ProxmoxAnsible): - def get_vminfo(self, node, vmid, **kwargs): - global results - results = {} - mac = {} - devices = {} - try: - vm = self.proxmox_api.nodes(node).qemu(vmid).config.get() - except Exception as e: - self.module.fail_json(msg='Getting information for VM with vmid = %s failed with exception: %s' % (vmid, e)) - - # Sanitize kwargs. Remove not defined args and ensure True and False converted to int. - kwargs = dict((k, v) for k, v in kwargs.items() if v is not None) - - # Convert all dict in kwargs to elements. 
- # For hostpci[n], ide[n], net[n], numa[n], parallel[n], sata[n], scsi[n], serial[n], virtio[n] - for k in list(kwargs.keys()): - if isinstance(kwargs[k], dict): - kwargs.update(kwargs[k]) - del kwargs[k] - - # Split information by type - re_net = re.compile(r'net[0-9]') - re_dev = re.compile(r'(virtio|ide|scsi|sata|efidisk)[0-9]') - for k in kwargs.keys(): - if re_net.match(k): - mac[k] = parse_mac(vm[k]) - elif re_dev.match(k): - devices[k] = parse_dev(vm[k]) - - results['mac'] = mac - results['devices'] = devices - results['vmid'] = int(vmid) - - def settings(self, vmid, node, **kwargs): - proxmox_node = self.proxmox_api.nodes(node) - - # Sanitize kwargs. Remove not defined args and ensure True and False converted to int. - kwargs = dict((k, v) for k, v in kwargs.items() if v is not None) - - return proxmox_node.qemu(vmid).config.set(**kwargs) is None - - def wait_for_task(self, node, taskid): - timeout = self.module.params['timeout'] - - while timeout: - task = self.proxmox_api.nodes(node).tasks(taskid).status.get() - if task['status'] == 'stopped' and task['exitstatus'] == 'OK': - # Wait an extra second as the API can be a ahead of the hypervisor - time.sleep(1) - return True - timeout = timeout - 1 - if timeout == 0: - break - time.sleep(1) - return False - - def create_vm(self, vmid, newid, node, name, memory, cpu, cores, sockets, update, **kwargs): - # Available only in PVE 4 - only_v4 = ['force', 'protection', 'skiplock'] - only_v6 = ['ciuser', 'cipassword', 'sshkeys', 'ipconfig', 'tags'] - - # valide clone parameters - valid_clone_params = ['format', 'full', 'pool', 'snapname', 'storage', 'target'] - clone_params = {} - # Default args for vm. Note: -args option is for experts only. It allows you to pass arbitrary arguments to kvm. - vm_args = "-serial unix:/var/run/qemu-server/{0}.serial,server,nowait".format(vmid) - - proxmox_node = self.proxmox_api.nodes(node) - - # Sanitize kwargs. Remove not defined args and ensure True and False converted to int. 
- kwargs = dict((k, v) for k, v in kwargs.items() if v is not None) - kwargs.update(dict([k, int(v)] for k, v in kwargs.items() if isinstance(v, bool))) - - version = self.version() - pve_major_version = 3 if version < LooseVersion('4.0') else version.version[0] - - # The features work only on PVE 4+ - if pve_major_version < 4: - for p in only_v4: - if p in kwargs: - del kwargs[p] - - # The features work only on PVE 6 - if pve_major_version < 6: - for p in only_v6: - if p in kwargs: - del kwargs[p] - - # 'sshkeys' param expects an urlencoded string - if 'sshkeys' in kwargs: - urlencoded_ssh_keys = quote(kwargs['sshkeys'], safe='') - kwargs['sshkeys'] = str(urlencoded_ssh_keys) - - # If update, don't update disk (virtio, efidisk0, ide, sata, scsi) and network interface - # pool parameter not supported by qemu//config endpoint on "update" (PVE 6.2) - only with "create" - if update: - if 'virtio' in kwargs: - del kwargs['virtio'] - if 'sata' in kwargs: - del kwargs['sata'] - if 'scsi' in kwargs: - del kwargs['scsi'] - if 'ide' in kwargs: - del kwargs['ide'] - if 'efidisk0' in kwargs: - del kwargs['efidisk0'] - if 'net' in kwargs: - del kwargs['net'] - if 'force' in kwargs: - del kwargs['force'] - if 'pool' in kwargs: - del kwargs['pool'] - - # Check that the bios option is set to ovmf if the efidisk0 option is present - if 'efidisk0' in kwargs: - if ('bios' not in kwargs) or ('ovmf' != kwargs['bios']): - self.module.fail_json(msg='efidisk0 cannot be used if bios is not set to ovmf. 
') - - # Flatten efidisk0 option to a string so that it's a string which is what Proxmoxer and the API expect - if 'efidisk0' in kwargs: - efidisk0_str = '' - # Regexp to catch underscores in keys name, to replace them after by hypens - hyphen_re = re.compile(r'_') - # If present, the storage definition should be the first argument - if 'storage' in kwargs['efidisk0']: - efidisk0_str += kwargs['efidisk0'].get('storage') + ':1,' - kwargs['efidisk0'].pop('storage') - # Join other elements from the dict as key=value using commas as separator, replacing any underscore in key - # by hyphens (needed for pre_enrolled_keys to pre-enrolled-keys) - efidisk0_str += ','.join([hyphen_re.sub('-', k) + "=" + str(v) for k, v in kwargs['efidisk0'].items() - if 'storage' != k]) - kwargs['efidisk0'] = efidisk0_str - - # Convert all dict in kwargs to elements. - # For hostpci[n], ide[n], net[n], numa[n], parallel[n], sata[n], scsi[n], serial[n], virtio[n], ipconfig[n] - for k in list(kwargs.keys()): - if isinstance(kwargs[k], dict): - kwargs.update(kwargs[k]) - del kwargs[k] - - # Rename numa_enabled to numa. 
According the API documentation - if 'numa_enabled' in kwargs: - kwargs['numa'] = kwargs['numa_enabled'] - del kwargs['numa_enabled'] - - # PVE api expects strings for the following params - if 'nameservers' in self.module.params: - nameservers = self.module.params.pop('nameservers') - if nameservers: - kwargs['nameserver'] = ' '.join(nameservers) - if 'searchdomains' in self.module.params: - searchdomains = self.module.params.pop('searchdomains') - if searchdomains: - kwargs['searchdomain'] = ' '.join(searchdomains) - - # VM tags are expected to be valid and presented as a comma/semi-colon delimited string - if 'tags' in kwargs: - re_tag = re.compile(r'^[a-z0-9_][a-z0-9_\-\+\.]*$') - for tag in kwargs['tags']: - if not re_tag.match(tag): - self.module.fail_json(msg='%s is not a valid tag' % tag) - kwargs['tags'] = ",".join(kwargs['tags']) - - # -args and skiplock require root@pam user - but can not use api tokens - if self.module.params['api_user'] == "root@pam" and self.module.params['args'] is None: - if not update and self.module.params['proxmox_default_behavior'] == 'compatibility': - kwargs['args'] = vm_args - elif self.module.params['api_user'] == "root@pam" and self.module.params['args'] is not None: - kwargs['args'] = self.module.params['args'] - elif self.module.params['api_user'] != "root@pam" and self.module.params['args'] is not None: - self.module.fail_json(msg='args parameter require root@pam user. ') - - if self.module.params['api_user'] != "root@pam" and self.module.params['skiplock'] is not None: - self.module.fail_json(msg='skiplock parameter require root@pam user. 
') - - if update: - if proxmox_node.qemu(vmid).config.set(name=name, memory=memory, cpu=cpu, cores=cores, sockets=sockets, **kwargs) is None: - return True - else: - return False - elif self.module.params['clone'] is not None: - for param in valid_clone_params: - if self.module.params[param] is not None: - clone_params[param] = self.module.params[param] - clone_params.update(dict([k, int(v)] for k, v in clone_params.items() if isinstance(v, bool))) - taskid = proxmox_node.qemu(vmid).clone.post(newid=newid, name=name, **clone_params) - else: - taskid = proxmox_node.qemu.create(vmid=vmid, name=name, memory=memory, cpu=cpu, cores=cores, sockets=sockets, **kwargs) - - if not self.wait_for_task(node, taskid): - self.module.fail_json(msg='Reached timeout while waiting for creating VM. Last line in task before timeout: %s' % - proxmox_node.tasks(taskid).log.get()[:1]) - return False - return True - - def start_vm(self, vm): - vmid = vm['vmid'] - proxmox_node = self.proxmox_api.nodes(vm['node']) - taskid = proxmox_node.qemu(vmid).status.start.post() - if not self.wait_for_task(vm['node'], taskid): - self.module.fail_json(msg='Reached timeout while waiting for starting VM. Last line in task before timeout: %s' % - proxmox_node.tasks(taskid).log.get()[:1]) - return False - return True - - def stop_vm(self, vm, force): - vmid = vm['vmid'] - proxmox_node = self.proxmox_api.nodes(vm['node']) - taskid = proxmox_node.qemu(vmid).status.shutdown.post(forceStop=(1 if force else 0)) - if not self.wait_for_task(vm['node'], taskid): - self.module.fail_json(msg='Reached timeout while waiting for stopping VM. 
Last line in task before timeout: %s' % - proxmox_node.tasks(taskid).log.get()[:1]) - return False - return True - - -def main(): - module_args = proxmox_auth_argument_spec() - kvm_args = dict( - acpi=dict(type='bool'), - agent=dict(type='bool'), - args=dict(type='str'), - autostart=dict(type='bool'), - balloon=dict(type='int'), - bios=dict(choices=['seabios', 'ovmf']), - boot=dict(type='str'), - bootdisk=dict(type='str'), - cicustom=dict(type='str'), - cipassword=dict(type='str', no_log=True), - citype=dict(type='str', choices=['nocloud', 'configdrive2']), - ciuser=dict(type='str'), - clone=dict(type='str'), - cores=dict(type='int'), - cpu=dict(type='str'), - cpulimit=dict(type='int'), - cpuunits=dict(type='int'), - delete=dict(type='str'), - description=dict(type='str'), - digest=dict(type='str'), - efidisk0=dict(type='dict', - options=dict( - storage=dict(type='str'), - format=dict(type='str'), - efitype=dict(type='str', choices=['2m', '4m']), - pre_enrolled_keys=dict(type='bool'), - )), - force=dict(type='bool'), - format=dict(type='str', choices=['cloop', 'cow', 'qcow', 'qcow2', 'qed', 'raw', 'vmdk', 'unspecified']), - freeze=dict(type='bool'), - full=dict(type='bool', default=True), - hostpci=dict(type='dict'), - hotplug=dict(type='str'), - hugepages=dict(choices=['any', '2', '1024']), - ide=dict(type='dict'), - ipconfig=dict(type='dict'), - keyboard=dict(type='str'), - kvm=dict(type='bool'), - localtime=dict(type='bool'), - lock=dict(choices=['migrate', 'backup', 'snapshot', 'rollback']), - machine=dict(type='str'), - memory=dict(type='int'), - migrate_downtime=dict(type='int'), - migrate_speed=dict(type='int'), - name=dict(type='str'), - nameservers=dict(type='list', elements='str'), - net=dict(type='dict'), - newid=dict(type='int'), - node=dict(), - numa=dict(type='dict'), - numa_enabled=dict(type='bool'), - onboot=dict(type='bool'), - ostype=dict(choices=['other', 'wxp', 'w2k', 'w2k3', 'w2k8', 'wvista', 'win7', 'win8', 'win10', 'win11', 'l24', 'l26', 
'solaris']), - parallel=dict(type='dict'), - pool=dict(type='str'), - protection=dict(type='bool'), - reboot=dict(type='bool'), - revert=dict(type='str'), - sata=dict(type='dict'), - scsi=dict(type='dict'), - scsihw=dict(choices=['lsi', 'lsi53c810', 'virtio-scsi-pci', 'virtio-scsi-single', 'megasas', 'pvscsi']), - serial=dict(type='dict'), - searchdomains=dict(type='list', elements='str'), - shares=dict(type='int'), - skiplock=dict(type='bool'), - smbios=dict(type='str'), - snapname=dict(type='str'), - sockets=dict(type='int'), - sshkeys=dict(type='str', no_log=False), - startdate=dict(type='str'), - startup=dict(), - state=dict(default='present', choices=['present', 'absent', 'stopped', 'started', 'restarted', 'current']), - storage=dict(type='str'), - tablet=dict(type='bool'), - tags=dict(type='list', elements='str'), - target=dict(type='str'), - tdf=dict(type='bool'), - template=dict(type='bool'), - timeout=dict(type='int', default=30), - update=dict(type='bool', default=False), - vcpus=dict(type='int'), - vga=dict(choices=['std', 'cirrus', 'vmware', 'qxl', 'serial0', 'serial1', 'serial2', 'serial3', 'qxl2', 'qxl3', 'qxl4']), - virtio=dict(type='dict'), - vmid=dict(type='int'), - watchdog=dict(), - proxmox_default_behavior=dict(type='str', default='no_defaults', choices=['compatibility', 'no_defaults']), - ) - module_args.update(kvm_args) - - module = AnsibleModule( - argument_spec=module_args, - mutually_exclusive=[('delete', 'revert'), ('delete', 'update'), ('revert', 'update'), ('clone', 'update'), ('clone', 'delete'), ('clone', 'revert')], - required_together=[('api_token_id', 'api_token_secret')], - required_one_of=[('name', 'vmid'), ('api_password', 'api_token_id')], - required_if=[('state', 'present', ['node'])], - ) - - clone = module.params['clone'] - cpu = module.params['cpu'] - cores = module.params['cores'] - delete = module.params['delete'] - memory = module.params['memory'] - name = module.params['name'] - newid = module.params['newid'] - node = 
module.params['node'] - revert = module.params['revert'] - sockets = module.params['sockets'] - state = module.params['state'] - update = bool(module.params['update']) - vmid = module.params['vmid'] - validate_certs = module.params['validate_certs'] - - if module.params['proxmox_default_behavior'] == 'compatibility': - old_default_values = dict( - acpi=True, - autostart=False, - balloon=0, - boot='cnd', - cores=1, - cpu='kvm64', - cpuunits=1000, - format='qcow2', - kvm=True, - memory=512, - ostype='l26', - sockets=1, - tablet=False, - template=False, - vga='std', - ) - for param, value in old_default_values.items(): - if module.params[param] is None: - module.params[param] = value - - if module.params['format'] == 'unspecified': - module.params['format'] = None - - proxmox = ProxmoxKvmAnsible(module) - - # If vmid is not defined then retrieve its value from the vm name, - # the cloned vm name or retrieve the next free VM id from ProxmoxAPI. - if not vmid: - if state == 'present' and not update and not clone and not delete and not revert: - try: - vmid = proxmox.get_nextvmid() - except Exception: - module.fail_json(msg="Can't get the next vmid for VM {0} automatically. Ensure your cluster state is good".format(name)) - else: - clone_target = clone or name - vmid = proxmox.get_vmid(clone_target, ignore_missing=True) - - if clone is not None: - # If newid is not defined then retrieve the next free id from ProxmoxAPI - if not newid: - try: - newid = proxmox.get_nextvmid() - except Exception: - module.fail_json(msg="Can't get the next vmid for VM {0} automatically. 
Ensure your cluster state is good".format(name)) - - # Ensure source VM name exists when cloning - if not vmid: - module.fail_json(msg='VM with name = %s does not exist in cluster' % clone) - - # Ensure source VM id exists when cloning - proxmox.get_vm(vmid) - - # Ensure the choosen VM name doesn't already exist when cloning - existing_vmid = proxmox.get_vmid(name, ignore_missing=True) - if existing_vmid: - module.exit_json(changed=False, vmid=existing_vmid, msg="VM with name <%s> already exists" % name) - - # Ensure the choosen VM id doesn't already exist when cloning - if proxmox.get_vm(newid, ignore_missing=True): - module.exit_json(changed=False, vmid=vmid, msg="vmid %s with VM name %s already exists" % (newid, name)) - - if delete is not None: - try: - proxmox.settings(vmid, node, delete=delete) - module.exit_json(changed=True, vmid=vmid, msg="Settings has deleted on VM {0} with vmid {1}".format(name, vmid)) - except Exception as e: - module.fail_json(vmid=vmid, msg='Unable to delete settings on VM {0} with vmid {1}: '.format(name, vmid) + str(e)) - - if revert is not None: - try: - proxmox.settings(vmid, node, revert=revert) - module.exit_json(changed=True, vmid=vmid, msg="Settings has reverted on VM {0} with vmid {1}".format(name, vmid)) - except Exception as e: - module.fail_json(vmid=vmid, msg='Unable to revert settings on VM {0} with vmid {1}: Maybe is not a pending task... 
'.format(name, vmid) + str(e)) - - if state == 'present': - try: - if proxmox.get_vm(vmid, ignore_missing=True) and not (update or clone): - module.exit_json(changed=False, vmid=vmid, msg="VM with vmid <%s> already exists" % vmid) - elif proxmox.get_vmid(name, ignore_missing=True) and not (update or clone): - module.exit_json(changed=False, vmid=proxmox.get_vmid(name), msg="VM with name <%s> already exists" % name) - elif not (node, name): - module.fail_json(msg='node, name is mandatory for creating/updating vm') - elif not proxmox.get_node(node): - module.fail_json(msg="node '%s' does not exist in cluster" % node) - - proxmox.create_vm(vmid, newid, node, name, memory, cpu, cores, sockets, update, - acpi=module.params['acpi'], - agent=module.params['agent'], - autostart=module.params['autostart'], - balloon=module.params['balloon'], - bios=module.params['bios'], - boot=module.params['boot'], - bootdisk=module.params['bootdisk'], - cicustom=module.params['cicustom'], - cipassword=module.params['cipassword'], - citype=module.params['citype'], - ciuser=module.params['ciuser'], - cpulimit=module.params['cpulimit'], - cpuunits=module.params['cpuunits'], - description=module.params['description'], - digest=module.params['digest'], - efidisk0=module.params['efidisk0'], - force=module.params['force'], - freeze=module.params['freeze'], - hostpci=module.params['hostpci'], - hotplug=module.params['hotplug'], - hugepages=module.params['hugepages'], - ide=module.params['ide'], - ipconfig=module.params['ipconfig'], - keyboard=module.params['keyboard'], - kvm=module.params['kvm'], - localtime=module.params['localtime'], - lock=module.params['lock'], - machine=module.params['machine'], - migrate_downtime=module.params['migrate_downtime'], - migrate_speed=module.params['migrate_speed'], - net=module.params['net'], - numa=module.params['numa'], - numa_enabled=module.params['numa_enabled'], - onboot=module.params['onboot'], - ostype=module.params['ostype'], - 
parallel=module.params['parallel'], - pool=module.params['pool'], - protection=module.params['protection'], - reboot=module.params['reboot'], - sata=module.params['sata'], - scsi=module.params['scsi'], - scsihw=module.params['scsihw'], - serial=module.params['serial'], - shares=module.params['shares'], - skiplock=module.params['skiplock'], - smbios1=module.params['smbios'], - snapname=module.params['snapname'], - sshkeys=module.params['sshkeys'], - startdate=module.params['startdate'], - startup=module.params['startup'], - tablet=module.params['tablet'], - tags=module.params['tags'], - target=module.params['target'], - tdf=module.params['tdf'], - template=module.params['template'], - vcpus=module.params['vcpus'], - vga=module.params['vga'], - virtio=module.params['virtio'], - watchdog=module.params['watchdog']) - - if not clone: - proxmox.get_vminfo(node, vmid, - ide=module.params['ide'], - net=module.params['net'], - sata=module.params['sata'], - scsi=module.params['scsi'], - virtio=module.params['virtio']) - if update: - module.exit_json(changed=True, vmid=vmid, msg="VM %s with vmid %s updated" % (name, vmid)) - elif clone is not None: - module.exit_json(changed=True, vmid=newid, msg="VM %s with newid %s cloned from vm with vmid %s" % (name, newid, vmid)) - else: - module.exit_json(changed=True, msg="VM %s with vmid %s deployed" % (name, vmid), **results) - except Exception as e: - if update: - module.fail_json(vmid=vmid, msg="Unable to update vm {0} with vmid {1}=".format(name, vmid) + str(e)) - elif clone is not None: - module.fail_json(vmid=vmid, msg="Unable to clone vm {0} from vmid {1}=".format(name, vmid) + str(e)) - else: - module.fail_json(vmid=vmid, msg="creation of qemu VM %s with vmid %s failed with exception=%s" % (name, vmid, e)) - - elif state == 'started': - status = {} - try: - if not vmid: - module.fail_json(msg='VM with name = %s does not exist in cluster' % name) - vm = proxmox.get_vm(vmid) - status['status'] = vm['status'] - if vm['status'] == 
'running': - module.exit_json(changed=False, vmid=vmid, msg="VM %s is already running" % vmid, **status) - - if proxmox.start_vm(vm): - module.exit_json(changed=True, vmid=vmid, msg="VM %s started" % vmid, **status) - except Exception as e: - module.fail_json(vmid=vmid, msg="starting of VM %s failed with exception: %s" % (vmid, e), **status) - - elif state == 'stopped': - status = {} - try: - if not vmid: - module.fail_json(msg='VM with name = %s does not exist in cluster' % name) - - vm = proxmox.get_vm(vmid) - - status['status'] = vm['status'] - if vm['status'] == 'stopped': - module.exit_json(changed=False, vmid=vmid, msg="VM %s is already stopped" % vmid, **status) - - if proxmox.stop_vm(vm, force=module.params['force']): - module.exit_json(changed=True, vmid=vmid, msg="VM %s is shutting down" % vmid, **status) - except Exception as e: - module.fail_json(vmid=vmid, msg="stopping of VM %s failed with exception: %s" % (vmid, e), **status) - - elif state == 'restarted': - status = {} - try: - if not vmid: - module.fail_json(msg='VM with name = %s does not exist in cluster' % name) - - vm = proxmox.get_vm(vmid) - status['status'] = vm['status'] - if vm['status'] == 'stopped': - module.exit_json(changed=False, vmid=vmid, msg="VM %s is not running" % vmid, **status) - - if proxmox.stop_vm(vm, force=module.params['force']) and proxmox.start_vm(vm): - module.exit_json(changed=True, vmid=vmid, msg="VM %s is restarted" % vmid, **status) - except Exception as e: - module.fail_json(vmid=vmid, msg="restarting of VM %s failed with exception: %s" % (vmid, e), **status) - - elif state == 'absent': - status = {} - try: - vm = proxmox.get_vm(vmid, ignore_missing=True) - if not vm: - module.exit_json(changed=False, vmid=vmid) - - proxmox_node = proxmox.proxmox_api.nodes(vm['node']) - status['status'] = vm['status'] - if vm['status'] == 'running': - if module.params['force']: - proxmox.stop_vm(vm, True) - else: - module.exit_json(changed=False, vmid=vmid, msg="VM %s is running. 
Stop it before deletion or use force=yes." % vmid) - taskid = proxmox_node.qemu.delete(vmid) - if not proxmox.wait_for_task(vm['node'], taskid): - module.fail_json(msg='Reached timeout while waiting for removing VM. Last line in task before timeout: %s' % - proxmox_node.tasks(taskid).log.get()[:1]) - else: - module.exit_json(changed=True, vmid=vmid, msg="VM %s removed" % vmid) - except Exception as e: - module.fail_json(msg="deletion of VM %s failed with exception: %s" % (vmid, e)) - - elif state == 'current': - status = {} - if not vmid: - module.fail_json(msg='VM with name = %s does not exist in cluster' % name) - vm = proxmox.get_vm(vmid) - if not name: - name = vm.get('name', '(unnamed)') - current = proxmox.proxmox_api.nodes(vm['node']).qemu(vmid).status.current.get()['status'] - status['status'] = current - if status: - module.exit_json(changed=False, vmid=vmid, msg="VM %s with vmid = %s is %s" % (name, vmid, current), **status) - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/cloud/misc/proxmox_nic.py b/plugins/modules/cloud/misc/proxmox_nic.py deleted file mode 100644 index e83d0dfef1..0000000000 --- a/plugins/modules/cloud/misc/proxmox_nic.py +++ /dev/null @@ -1,304 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# -# Copyright: (c) 2021, Lammert Hellinga (@Kogelvis) -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - -DOCUMENTATION = r''' ---- -module: proxmox_nic -short_description: Management of a NIC of a Qemu(KVM) VM in a Proxmox VE cluster. -version_added: 3.1.0 -description: - - Allows you to create/update/delete a NIC on Qemu(KVM) Virtual Machines in a Proxmox VE cluster. -author: "Lammert Hellinga (@Kogelvis) " -options: - bridge: - description: - - Add this interface to the specified bridge device. The Proxmox VE default bridge is called C(vmbr0). 
- type: str - firewall: - description: - - Whether this interface should be protected by the firewall. - type: bool - default: false - interface: - description: - - Name of the interface, should be C(net[n]) where C(1 ≤ n ≤ 31). - type: str - required: true - link_down: - description: - - Whether this interface should be disconnected (like pulling the plug). - type: bool - default: false - mac: - description: - - C(XX:XX:XX:XX:XX:XX) should be a unique MAC address. This is automatically generated if not specified. - - When not specified this module will keep the MAC address the same when changing an existing interface. - type: str - model: - description: - - The NIC emulator model. - type: str - choices: ['e1000', 'e1000-82540em', 'e1000-82544gc', 'e1000-82545em', 'i82551', 'i82557b', 'i82559er', 'ne2k_isa', 'ne2k_pci', 'pcnet', - 'rtl8139', 'virtio', 'vmxnet3'] - default: virtio - mtu: - description: - - Force MTU, for C(virtio) model only, setting will be ignored otherwise. - - Set to C(1) to use the bridge MTU. - - Value should be C(1 ≤ n ≤ 65520). - type: int - name: - description: - - Specifies the VM name. Only used on the configuration web interface. - - Required only for I(state=present). - type: str - queues: - description: - - Number of packet queues to be used on the device. - - Value should be C(0 ≤ n ≤ 16). - type: int - rate: - description: - - Rate limit in MBps (MegaBytes per second) as floating point number. - type: float - state: - description: - - Indicates desired state of the NIC. - type: str - choices: ['present', 'absent'] - default: present - tag: - description: - - VLAN tag to apply to packets on this interface. - - Value should be C(1 ≤ n ≤ 4094). - type: int - trunks: - description: - - List of VLAN trunks to pass through this interface. - type: list - elements: int - vmid: - description: - - Specifies the instance ID. 
- type: int -extends_documentation_fragment: - - community.general.proxmox.documentation -''' - -EXAMPLES = ''' -- name: Create NIC net0 targeting the vm by name - community.general.proxmox_nic: - api_user: root@pam - api_password: secret - api_host: proxmoxhost - name: my_vm - interface: net0 - bridge: vmbr0 - tag: 3 - -- name: Create NIC net0 targeting the vm by id - community.general.proxmox_nic: - api_user: root@pam - api_password: secret - api_host: proxmoxhost - vmid: 103 - interface: net0 - bridge: vmbr0 - mac: "12:34:56:C0:FF:EE" - firewall: true - -- name: Delete NIC net0 targeting the vm by name - community.general.proxmox_nic: - api_user: root@pam - api_password: secret - api_host: proxmoxhost - name: my_vm - interface: net0 - state: absent -''' - -RETURN = ''' -vmid: - description: The VM vmid. - returned: success - type: int - sample: 115 -msg: - description: A short message - returned: always - type: str - sample: "Nic net0 unchanged on VM with vmid 103" -''' - -from ansible.module_utils.basic import AnsibleModule, env_fallback -from ansible_collections.community.general.plugins.module_utils.proxmox import (proxmox_auth_argument_spec, ProxmoxAnsible) - - -class ProxmoxNicAnsible(ProxmoxAnsible): - def update_nic(self, vmid, interface, model, **kwargs): - vm = self.get_vm(vmid) - - try: - vminfo = self.proxmox_api.nodes(vm['node']).qemu(vmid).config.get() - except Exception as e: - self.module.fail_json(msg='Getting information for VM with vmid = %s failed with exception: %s' % (vmid, e)) - - if interface in vminfo: - # Convert the current config to a dictionary - config = vminfo[interface].split(',') - config.sort() - - config_current = {} - - for i in config: - kv = i.split('=') - try: - config_current[kv[0]] = kv[1] - except IndexError: - config_current[kv[0]] = '' - - # determine the current model nic and mac-address - models = ['e1000', 'e1000-82540em', 'e1000-82544gc', 'e1000-82545em', 'i82551', 'i82557b', - 'i82559er', 'ne2k_isa', 'ne2k_pci', 
'pcnet', 'rtl8139', 'virtio', 'vmxnet3'] - current_model = set(models) & set(config_current.keys()) - current_model = current_model.pop() - current_mac = config_current[current_model] - - # build nic config string - config_provided = "{0}={1}".format(model, current_mac) - else: - config_provided = model - - if kwargs['mac']: - config_provided = "{0}={1}".format(model, kwargs['mac']) - - if kwargs['bridge']: - config_provided += ",bridge={0}".format(kwargs['bridge']) - - if kwargs['firewall']: - config_provided += ",firewall=1" - - if kwargs['link_down']: - config_provided += ',link_down=1' - - if kwargs['mtu']: - config_provided += ",mtu={0}".format(kwargs['mtu']) - if model != 'virtio': - self.module.warn( - 'Ignoring MTU for nic {0} on VM with vmid {1}, ' - 'model should be set to \'virtio\': '.format(interface, vmid)) - - if kwargs['queues']: - config_provided += ",queues={0}".format(kwargs['queues']) - - if kwargs['rate']: - config_provided += ",rate={0}".format(kwargs['rate']) - - if kwargs['tag']: - config_provided += ",tag={0}".format(kwargs['tag']) - - if kwargs['trunks']: - config_provided += ",trunks={0}".format(';'.join(str(x) for x in kwargs['trunks'])) - - net = {interface: config_provided} - vm = self.get_vm(vmid) - - if ((interface not in vminfo) or (vminfo[interface] != config_provided)): - if not self.module.check_mode: - self.proxmox_api.nodes(vm['node']).qemu(vmid).config.set(**net) - return True - - return False - - def delete_nic(self, vmid, interface): - vm = self.get_vm(vmid) - vminfo = self.proxmox_api.nodes(vm['node']).qemu(vmid).config.get() - - if interface in vminfo: - if not self.module.check_mode: - self.proxmox_api.nodes(vm['node']).qemu(vmid).config.set(vmid=vmid, delete=interface) - return True - - return False - - -def main(): - module_args = proxmox_auth_argument_spec() - nic_args = dict( - bridge=dict(type='str'), - firewall=dict(type='bool', default=False), - interface=dict(type='str', required=True), - 
link_down=dict(type='bool', default=False), - mac=dict(type='str'), - model=dict(choices=['e1000', 'e1000-82540em', 'e1000-82544gc', 'e1000-82545em', - 'i82551', 'i82557b', 'i82559er', 'ne2k_isa', 'ne2k_pci', 'pcnet', - 'rtl8139', 'virtio', 'vmxnet3'], default='virtio'), - mtu=dict(type='int'), - name=dict(type='str'), - queues=dict(type='int'), - rate=dict(type='float'), - state=dict(default='present', choices=['present', 'absent']), - tag=dict(type='int'), - trunks=dict(type='list', elements='int'), - vmid=dict(type='int'), - ) - module_args.update(nic_args) - - module = AnsibleModule( - argument_spec=module_args, - required_together=[('api_token_id', 'api_token_secret')], - required_one_of=[('name', 'vmid'), ('api_password', 'api_token_id')], - supports_check_mode=True, - ) - - proxmox = ProxmoxNicAnsible(module) - - interface = module.params['interface'] - model = module.params['model'] - name = module.params['name'] - state = module.params['state'] - vmid = module.params['vmid'] - - # If vmid is not defined then retrieve its value from the vm name, - if not vmid: - vmid = proxmox.get_vmid(name) - - # Ensure VM id exists - proxmox.get_vm(vmid) - - if state == 'present': - try: - if proxmox.update_nic(vmid, interface, model, - bridge=module.params['bridge'], - firewall=module.params['firewall'], - link_down=module.params['link_down'], - mac=module.params['mac'], - mtu=module.params['mtu'], - queues=module.params['queues'], - rate=module.params['rate'], - tag=module.params['tag'], - trunks=module.params['trunks']): - module.exit_json(changed=True, vmid=vmid, msg="Nic {0} updated on VM with vmid {1}".format(interface, vmid)) - else: - module.exit_json(vmid=vmid, msg="Nic {0} unchanged on VM with vmid {1}".format(interface, vmid)) - except Exception as e: - module.fail_json(vmid=vmid, msg='Unable to change nic {0} on VM with vmid {1}: '.format(interface, vmid) + str(e)) - - elif state == 'absent': - try: - if proxmox.delete_nic(vmid, interface): - 
module.exit_json(changed=True, vmid=vmid, msg="Nic {0} deleted on VM with vmid {1}".format(interface, vmid)) - else: - module.exit_json(vmid=vmid, msg="Nic {0} does not exist on VM with vmid {1}".format(interface, vmid)) - except Exception as e: - module.fail_json(vmid=vmid, msg='Unable to delete nic {0} on VM with vmid {1}: '.format(interface, vmid) + str(e)) - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/cloud/misc/proxmox_snap.py b/plugins/modules/cloud/misc/proxmox_snap.py deleted file mode 100644 index bf845e5103..0000000000 --- a/plugins/modules/cloud/misc/proxmox_snap.py +++ /dev/null @@ -1,263 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# -# Copyright: (c) 2020, Jeffrey van Pelt (@Thulium-Drake) -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - -DOCUMENTATION = r''' ---- -module: proxmox_snap -short_description: Snapshot management of instances in Proxmox VE cluster -version_added: 2.0.0 -description: - - Allows you to create/delete/restore snapshots from instances in Proxmox VE cluster. - - Supports both KVM and LXC, OpenVZ has not been tested, as it is no longer supported on Proxmox VE. -options: - hostname: - description: - - The instance name. - type: str - vmid: - description: - - The instance id. - - If not set, will be fetched from PromoxAPI based on the hostname. - type: str - state: - description: - - Indicate desired state of the instance snapshot. - - The C(rollback) value was added in community.general 4.8.0. - choices: ['present', 'absent', 'rollback'] - default: present - type: str - force: - description: - - For removal from config file, even if removing disk snapshot fails. - default: no - type: bool - vmstate: - description: - - Snapshot includes RAM. - default: no - type: bool - description: - description: - - Specify the description for the snapshot. 
Only used on the configuration web interface. - - This is saved as a comment inside the configuration file. - type: str - timeout: - description: - - Timeout for operations. - default: 30 - type: int - snapname: - description: - - Name of the snapshot that has to be created/deleted/restored. - default: 'ansible_snap' - type: str - -notes: - - Requires proxmoxer and requests modules on host. These modules can be installed with pip. - - Supports C(check_mode). -requirements: [ "proxmoxer", "python >= 2.7", "requests" ] -author: Jeffrey van Pelt (@Thulium-Drake) -extends_documentation_fragment: - - community.general.proxmox.documentation -''' - -EXAMPLES = r''' -- name: Create new container snapshot - community.general.proxmox_snap: - api_user: root@pam - api_password: 1q2w3e - api_host: node1 - vmid: 100 - state: present - snapname: pre-updates - -- name: Remove container snapshot - community.general.proxmox_snap: - api_user: root@pam - api_password: 1q2w3e - api_host: node1 - vmid: 100 - state: absent - snapname: pre-updates - -- name: Rollback container snapshot - community.general.proxmox_snap: - api_user: root@pam - api_password: 1q2w3e - api_host: node1 - vmid: 100 - state: rollback - snapname: pre-updates -''' - -RETURN = r'''#''' - -import time -import traceback - -from ansible.module_utils.basic import AnsibleModule, missing_required_lib, env_fallback -from ansible.module_utils.common.text.converters import to_native -from ansible_collections.community.general.plugins.module_utils.proxmox import (proxmox_auth_argument_spec, ProxmoxAnsible, HAS_PROXMOXER, PROXMOXER_IMP_ERR) - - -class ProxmoxSnapAnsible(ProxmoxAnsible): - def snapshot(self, vm, vmid): - return getattr(self.proxmox_api.nodes(vm['node']), vm['type'])(vmid).snapshot - - def snapshot_create(self, vm, vmid, timeout, snapname, description, vmstate): - if self.module.check_mode: - return True - - if vm['type'] == 'lxc': - taskid = self.snapshot(vm, vmid).post(snapname=snapname, 
description=description) - else: - taskid = self.snapshot(vm, vmid).post(snapname=snapname, description=description, vmstate=int(vmstate)) - while timeout: - status_data = self.proxmox_api.nodes(vm['node']).tasks(taskid).status.get() - if status_data['status'] == 'stopped' and status_data['exitstatus'] == 'OK': - return True - if timeout == 0: - self.module.fail_json(msg='Reached timeout while waiting for creating VM snapshot. Last line in task before timeout: %s' % - self.proxmox_api.nodes(vm['node']).tasks(taskid).log.get()[:1]) - - time.sleep(1) - timeout -= 1 - return False - - def snapshot_remove(self, vm, vmid, timeout, snapname, force): - if self.module.check_mode: - return True - - taskid = self.snapshot(vm, vmid).delete(snapname, force=int(force)) - while timeout: - status_data = self.proxmox_api.nodes(vm['node']).tasks(taskid).status.get() - if status_data['status'] == 'stopped' and status_data['exitstatus'] == 'OK': - return True - if timeout == 0: - self.module.fail_json(msg='Reached timeout while waiting for removing VM snapshot. Last line in task before timeout: %s' % - self.proxmox_api.nodes(vm['node']).tasks(taskid).log.get()[:1]) - - time.sleep(1) - timeout -= 1 - return False - - def snapshot_rollback(self, vm, vmid, timeout, snapname): - if self.module.check_mode: - return True - - taskid = self.snapshot(vm, vmid)(snapname).post("rollback") - while timeout: - status_data = self.proxmox_api.nodes(vm['node']).tasks(taskid).status.get() - if status_data['status'] == 'stopped' and status_data['exitstatus'] == 'OK': - return True - if timeout == 0: - self.module.fail_json(msg='Reached timeout while waiting for rolling back VM snapshot. 
Last line in task before timeout: %s' % - self.proxmox_api.nodes(vm['node']).tasks(taskid).log.get()[:1]) - - time.sleep(1) - timeout -= 1 - return False - - -def main(): - module_args = proxmox_auth_argument_spec() - snap_args = dict( - vmid=dict(required=False), - hostname=dict(), - timeout=dict(type='int', default=30), - state=dict(default='present', choices=['present', 'absent', 'rollback']), - description=dict(type='str'), - snapname=dict(type='str', default='ansible_snap'), - force=dict(type='bool', default='no'), - vmstate=dict(type='bool', default='no'), - ) - module_args.update(snap_args) - - module = AnsibleModule( - argument_spec=module_args, - supports_check_mode=True - ) - - proxmox = ProxmoxSnapAnsible(module) - - state = module.params['state'] - vmid = module.params['vmid'] - hostname = module.params['hostname'] - description = module.params['description'] - snapname = module.params['snapname'] - timeout = module.params['timeout'] - force = module.params['force'] - vmstate = module.params['vmstate'] - - # If hostname is set get the VM id from ProxmoxAPI - if not vmid and hostname: - vmid = proxmox.get_vmid(hostname) - elif not vmid: - module.exit_json(changed=False, msg="Vmid could not be fetched for the following action: %s" % state) - - vm = proxmox.get_vm(vmid) - - if state == 'present': - try: - for i in proxmox.snapshot(vm, vmid).get(): - if i['name'] == snapname: - module.exit_json(changed=False, msg="Snapshot %s is already present" % snapname) - - if proxmox.snapshot_create(vm, vmid, timeout, snapname, description, vmstate): - if module.check_mode: - module.exit_json(changed=False, msg="Snapshot %s would be created" % snapname) - else: - module.exit_json(changed=True, msg="Snapshot %s created" % snapname) - - except Exception as e: - module.fail_json(msg="Creating snapshot %s of VM %s failed with exception: %s" % (snapname, vmid, to_native(e))) - - elif state == 'absent': - try: - snap_exist = False - - for i in proxmox.snapshot(vm, 
vmid).get(): - if i['name'] == snapname: - snap_exist = True - continue - - if not snap_exist: - module.exit_json(changed=False, msg="Snapshot %s does not exist" % snapname) - else: - if proxmox.snapshot_remove(vm, vmid, timeout, snapname, force): - if module.check_mode: - module.exit_json(changed=False, msg="Snapshot %s would be removed" % snapname) - else: - module.exit_json(changed=True, msg="Snapshot %s removed" % snapname) - - except Exception as e: - module.fail_json(msg="Removing snapshot %s of VM %s failed with exception: %s" % (snapname, vmid, to_native(e))) - elif state == 'rollback': - try: - snap_exist = False - - for i in proxmox.snapshot(vm, vmid).get(): - if i['name'] == snapname: - snap_exist = True - continue - - if not snap_exist: - module.exit_json(changed=False, msg="Snapshot %s does not exist" % snapname) - if proxmox.snapshot_rollback(vm, vmid, timeout, snapname): - if module.check_mode: - module.exit_json(changed=True, msg="Snapshot %s would be rolled back" % snapname) - else: - module.exit_json(changed=True, msg="Snapshot %s rolled back" % snapname) - - except Exception as e: - module.fail_json(msg="Rollback of snapshot %s of VM %s failed with exception: %s" % (snapname, vmid, to_native(e))) - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/cloud/misc/proxmox_storage_info.py b/plugins/modules/cloud/misc/proxmox_storage_info.py deleted file mode 100644 index 265b6fbaf1..0000000000 --- a/plugins/modules/cloud/misc/proxmox_storage_info.py +++ /dev/null @@ -1,187 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# -# Copyright: Tristan Le Guern (@tleguern) -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' ---- -module: proxmox_storage_info -short_description: Retrieve information about one or more Proxmox VE storages -version_added: 2.2.0 -description: - - Retrieve 
information about one or more Proxmox VE storages. -options: - storage: - description: - - Only return informations on a specific storage. - aliases: ['name'] - type: str - type: - description: - - Filter on a specifc storage type. - type: str -author: Tristan Le Guern (@tleguern) -extends_documentation_fragment: community.general.proxmox.documentation -notes: - - Storage specific options can be returned by this module, please look at the documentation at U(https://pve.proxmox.com/wiki/Storage). -''' - - -EXAMPLES = ''' -- name: List existing storages - community.general.proxmox_storage_info: - api_host: helldorado - api_user: root@pam - api_password: "{{ password | default(omit) }}" - api_token_id: "{{ token_id | default(omit) }}" - api_token_secret: "{{ token_secret | default(omit) }}" - register: proxmox_storages - -- name: List NFS storages only - community.general.proxmox_storage_info: - api_host: helldorado - api_user: root@pam - api_password: "{{ password | default(omit) }}" - api_token_id: "{{ token_id | default(omit) }}" - api_token_secret: "{{ token_secret | default(omit) }}" - type: nfs - register: proxmox_storages_nfs - -- name: Retrieve information about the lvm2 storage - community.general.proxmox_storage_info: - api_host: helldorado - api_user: root@pam - api_password: "{{ password | default(omit) }}" - api_token_id: "{{ token_id | default(omit) }}" - api_token_secret: "{{ token_secret | default(omit) }}" - storage: lvm2 - register: proxmox_storage_lvm -''' - - -RETURN = ''' -proxmox_storages: - description: List of storage pools. 
- returned: on success - type: list - elements: dict - contains: - content: - description: Proxmox content types available in this storage - returned: on success - type: list - elements: str - digest: - description: Storage's digest - returned: on success - type: str - nodes: - description: List of nodes associated to this storage - returned: on success, if storage is not local - type: list - elements: str - path: - description: Physical path to this storage - returned: on success - type: str - prune-backups: - description: Backup retention options - returned: on success - type: list - elements: dict - shared: - description: Is this storage shared - returned: on success - type: bool - storage: - description: Storage name - returned: on success - type: str - type: - description: Storage type - returned: on success - type: str -''' - - -from ansible.module_utils.basic import AnsibleModule, missing_required_lib -from ansible_collections.community.general.plugins.module_utils.proxmox import ( - proxmox_auth_argument_spec, ProxmoxAnsible, proxmox_to_ansible_bool) - - -class ProxmoxStorageInfoAnsible(ProxmoxAnsible): - def get_storage(self, storage): - try: - storage = self.proxmox_api.storage.get(storage) - except Exception: - self.module.fail_json(msg="Storage '%s' does not exist" % storage) - return ProxmoxStorage(storage) - - def get_storages(self, type=None): - storages = self.proxmox_api.storage.get(type=type) - storages = [ProxmoxStorage(storage) for storage in storages] - return storages - - -class ProxmoxStorage: - def __init__(self, storage): - self.storage = storage - # Convert proxmox representation of lists, dicts and boolean for easier - # manipulation within ansible. 
- if 'shared' in self.storage: - self.storage['shared'] = proxmox_to_ansible_bool(self.storage['shared']) - if 'content' in self.storage: - self.storage['content'] = self.storage['content'].split(',') - if 'nodes' in self.storage: - self.storage['nodes'] = self.storage['nodes'].split(',') - if 'prune-backups' in storage: - options = storage['prune-backups'].split(',') - self.storage['prune-backups'] = dict() - for option in options: - k, v = option.split('=') - self.storage['prune-backups'][k] = v - - -def proxmox_storage_info_argument_spec(): - return dict( - storage=dict(type='str', aliases=['name']), - type=dict(type='str'), - ) - - -def main(): - module_args = proxmox_auth_argument_spec() - storage_info_args = proxmox_storage_info_argument_spec() - module_args.update(storage_info_args) - - module = AnsibleModule( - argument_spec=module_args, - required_one_of=[('api_password', 'api_token_id')], - required_together=[('api_token_id', 'api_token_secret')], - mutually_exclusive=[('storage', 'type')], - supports_check_mode=True - ) - result = dict( - changed=False - ) - - proxmox = ProxmoxStorageInfoAnsible(module) - storage = module.params['storage'] - storagetype = module.params['type'] - - if storage: - storages = [proxmox.get_storage(storage)] - else: - storages = proxmox.get_storages(type=storagetype) - result['proxmox_storages'] = [storage.storage for storage in storages] - - module.exit_json(**result) - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/cloud/misc/proxmox_tasks_info.py b/plugins/modules/cloud/misc/proxmox_tasks_info.py deleted file mode 100644 index ff3bf6869a..0000000000 --- a/plugins/modules/cloud/misc/proxmox_tasks_info.py +++ /dev/null @@ -1,183 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Copyright: (c) 2021, Andreas Botzner (@paginabianca) -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function 
-__metaclass__ = type - - -DOCUMENTATION = r''' ---- -module: proxmox_tasks_info -short_description: Retrieve information about one or more Proxmox VE tasks -version_added: 3.8.0 -description: - - Retrieve information about one or more Proxmox VE tasks. -author: 'Andreas Botzner (@paginabianca) ' -options: - node: - description: - - Node where to get tasks. - required: true - type: str - task: - description: - - Return specific task. - aliases: ['upid', 'name'] - type: str -extends_documentation_fragment: - - community.general.proxmox.documentation -''' - - -EXAMPLES = ''' -- name: List tasks on node01 - community.general.proxmox_task_info: - api_host: proxmoxhost - api_user: root@pam - api_password: '{{ password | default(omit) }}' - api_token_id: '{{ token_id | default(omit) }}' - api_token_secret: '{{ token_secret | default(omit) }}' - node: node01 - register: result - -- name: Retrieve information about specific tasks on node01 - community.general.proxmox_task_info: - api_host: proxmoxhost - api_user: root@pam - api_password: '{{ password | default(omit) }}' - api_token_id: '{{ token_id | default(omit) }}' - api_token_secret: '{{ token_secret | default(omit) }}' - task: 'UPID:node01:00003263:16167ACE:621EE230:srvreload:networking:root@pam:' - node: node01 - register: proxmox_tasks -''' - - -RETURN = ''' -proxmox_tasks: - description: List of tasks. - returned: on success - type: list - elements: dict - contains: - id: - description: ID of the task. - returned: on success - type: str - node: - description: Node name. - returned: on success - type: str - pid: - description: PID of the task. - returned: on success - type: int - pstart: - description: pastart of the task. - returned: on success - type: int - starttime: - description: Starting time of the task. - returned: on success - type: int - type: - description: Type of the task. - returned: on success - type: str - upid: - description: UPID of the task. 
- returned: on success - type: str - user: - description: User that owns the task. - returned: on success - type: str - endtime: - description: Endtime of the task. - returned: on success, can be absent - type: int - status: - description: Status of the task. - returned: on success, can be absent - type: str - failed: - description: If the task failed. - returned: when status is defined - type: bool -msg: - description: Short message. - returned: on failure - type: str - sample: 'Task: UPID:xyz:xyz does not exist on node: proxmoxnode' -''' - -from ansible.module_utils.basic import AnsibleModule, missing_required_lib -from ansible_collections.community.general.plugins.module_utils.proxmox import ( - proxmox_auth_argument_spec, ProxmoxAnsible) - - -class ProxmoxTaskInfoAnsible(ProxmoxAnsible): - def get_task(self, upid, node): - tasks = self.get_tasks(node) - for task in tasks: - if task.info['upid'] == upid: - return [task] - - def get_tasks(self, node): - tasks = self.proxmox_api.nodes(node).tasks.get() - return [ProxmoxTask(task) for task in tasks] - - -class ProxmoxTask: - def __init__(self, task): - self.info = dict() - for k, v in task.items(): - if k == 'status' and isinstance(v, str): - self.info[k] = v - if v != 'OK': - self.info['failed'] = True - else: - self.info[k] = v - - -def proxmox_task_info_argument_spec(): - return dict( - task=dict(type='str', aliases=['upid', 'name'], required=False), - node=dict(type='str', required=True), - ) - - -def main(): - module_args = proxmox_auth_argument_spec() - task_info_args = proxmox_task_info_argument_spec() - module_args.update(task_info_args) - - module = AnsibleModule( - argument_spec=module_args, - required_together=[('api_token_id', 'api_token_secret'), - ('api_user', 'api_password')], - required_one_of=[('api_password', 'api_token_id')], - supports_check_mode=True) - result = dict(changed=False) - - proxmox = ProxmoxTaskInfoAnsible(module) - upid = module.params['task'] - node = module.params['node'] - if 
upid: - tasks = proxmox.get_task(upid=upid, node=node) - else: - tasks = proxmox.get_tasks(node=node) - if tasks is not None: - result['proxmox_tasks'] = [task.info for task in tasks] - module.exit_json(**result) - else: - result['msg'] = 'Task: {0} does not exist on node: {1}.'.format( - upid, node) - module.fail_json(**result) - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/cloud/misc/proxmox_template.py b/plugins/modules/cloud/misc/proxmox_template.py deleted file mode 100644 index 32ff8e7edb..0000000000 --- a/plugins/modules/cloud/misc/proxmox_template.py +++ /dev/null @@ -1,240 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# -# Copyright: Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' ---- -module: proxmox_template -short_description: management of OS templates in Proxmox VE cluster -description: - - allows you to upload/delete templates in Proxmox VE cluster -options: - node: - description: - - Proxmox VE node on which to operate. - type: str - src: - description: - - path to uploaded file - - required only for C(state=present) - type: path - template: - description: - - the template name - - Required for state C(absent) to delete a template. - - Required for state C(present) to download an appliance container template (pveam). 
- type: str - content_type: - description: - - content type - - required only for C(state=present) - type: str - default: 'vztmpl' - choices: ['vztmpl', 'iso'] - storage: - description: - - target storage - type: str - default: 'local' - timeout: - description: - - timeout for operations - type: int - default: 30 - force: - description: - - can be used only with C(state=present), exists template will be overwritten - type: bool - default: 'no' - state: - description: - - Indicate desired state of the template - type: str - choices: ['present', 'absent'] - default: present -notes: - - Requires proxmoxer and requests modules on host. This modules can be installed with pip. -author: Sergei Antipov (@UnderGreen) -extends_documentation_fragment: community.general.proxmox.documentation -''' - -EXAMPLES = ''' -- name: Upload new openvz template with minimal options - community.general.proxmox_template: - node: uk-mc02 - api_user: root@pam - api_password: 1q2w3e - api_host: node1 - src: ~/ubuntu-14.04-x86_64.tar.gz - -- name: > - Upload new openvz template with minimal options use environment - PROXMOX_PASSWORD variable(you should export it before) - community.general.proxmox_template: - node: uk-mc02 - api_user: root@pam - api_host: node1 - src: ~/ubuntu-14.04-x86_64.tar.gz - -- name: Upload new openvz template with all options and force overwrite - community.general.proxmox_template: - node: uk-mc02 - api_user: root@pam - api_password: 1q2w3e - api_host: node1 - storage: local - content_type: vztmpl - src: ~/ubuntu-14.04-x86_64.tar.gz - force: yes - -- name: Delete template with minimal options - community.general.proxmox_template: - node: uk-mc02 - api_user: root@pam - api_password: 1q2w3e - api_host: node1 - template: ubuntu-14.04-x86_64.tar.gz - state: absent - -- name: Download proxmox appliance container template - community.general.proxmox_template: - node: uk-mc02 - api_user: root@pam - api_password: 1q2w3e - api_host: node1 - storage: local - content_type: vztmpl 
- template: ubuntu-20.04-standard_20.04-1_amd64.tar.gz -''' - -import os -import time - -from ansible.module_utils.basic import AnsibleModule, env_fallback -from ansible_collections.community.general.plugins.module_utils.proxmox import (proxmox_auth_argument_spec, ProxmoxAnsible) - - -class ProxmoxTemplateAnsible(ProxmoxAnsible): - def get_template(self, node, storage, content_type, template): - return [True for tmpl in self.proxmox_api.nodes(node).storage(storage).content.get() - if tmpl['volid'] == '%s:%s/%s' % (storage, content_type, template)] - - def task_status(self, node, taskid, timeout): - """ - Check the task status and wait until the task is completed or the timeout is reached. - """ - while timeout: - task_status = self.proxmox_api.nodes(node).tasks(taskid).status.get() - if task_status['status'] == 'stopped' and task_status['exitstatus'] == 'OK': - return True - timeout = timeout - 1 - if timeout == 0: - self.module.fail_json(msg='Reached timeout while waiting for uploading/downloading template. 
Last line in task before timeout: %s' % - self.proxmox_api.node(node).tasks(taskid).log.get()[:1]) - - time.sleep(1) - return False - - def upload_template(self, node, storage, content_type, realpath, timeout): - taskid = self.proxmox_api.nodes(node).storage(storage).upload.post(content=content_type, filename=open(realpath, 'rb')) - return self.task_status(node, taskid, timeout) - - def download_template(self, node, storage, template, timeout): - taskid = self.proxmox_api.nodes(node).aplinfo.post(storage=storage, template=template) - return self.task_status(node, taskid, timeout) - - def delete_template(self, node, storage, content_type, template, timeout): - volid = '%s:%s/%s' % (storage, content_type, template) - self.proxmox_api.nodes(node).storage(storage).content.delete(volid) - while timeout: - if not self.get_template(node, storage, content_type, template): - return True - timeout = timeout - 1 - if timeout == 0: - self.module.fail_json(msg='Reached timeout while waiting for deleting template.') - - time.sleep(1) - return False - - -def main(): - module_args = proxmox_auth_argument_spec() - template_args = dict( - node=dict(), - src=dict(type='path'), - template=dict(), - content_type=dict(default='vztmpl', choices=['vztmpl', 'iso']), - storage=dict(default='local'), - timeout=dict(type='int', default=30), - force=dict(type='bool', default=False), - state=dict(default='present', choices=['present', 'absent']), - ) - module_args.update(template_args) - - module = AnsibleModule( - argument_spec=module_args, - required_together=[('api_token_id', 'api_token_secret')], - required_one_of=[('api_password', 'api_token_id')], - required_if=[('state', 'absent', ['template'])] - ) - - proxmox = ProxmoxTemplateAnsible(module) - - state = module.params['state'] - node = module.params['node'] - storage = module.params['storage'] - timeout = module.params['timeout'] - - if state == 'present': - try: - content_type = module.params['content_type'] - src = 
module.params['src'] - - # download appliance template - if content_type == 'vztmpl' and not src: - template = module.params['template'] - - if not template: - module.fail_json(msg='template param for downloading appliance template is mandatory') - - if proxmox.get_template(node, storage, content_type, template) and not module.params['force']: - module.exit_json(changed=False, msg='template with volid=%s:%s/%s already exists' % (storage, content_type, template)) - - if proxmox.download_template(node, storage, template, timeout): - module.exit_json(changed=True, msg='template with volid=%s:%s/%s downloaded' % (storage, content_type, template)) - - template = os.path.basename(src) - if proxmox.get_template(node, storage, content_type, template) and not module.params['force']: - module.exit_json(changed=False, msg='template with volid=%s:%s/%s is already exists' % (storage, content_type, template)) - elif not src: - module.fail_json(msg='src param to uploading template file is mandatory') - elif not (os.path.exists(src) and os.path.isfile(src)): - module.fail_json(msg='template file on path %s not exists' % src) - - if proxmox.upload_template(node, storage, content_type, src, timeout): - module.exit_json(changed=True, msg='template with volid=%s:%s/%s uploaded' % (storage, content_type, template)) - except Exception as e: - module.fail_json(msg="uploading/downloading of template %s failed with exception: %s" % (template, e)) - - elif state == 'absent': - try: - content_type = module.params['content_type'] - template = module.params['template'] - - if not proxmox.get_template(node, storage, content_type, template): - module.exit_json(changed=False, msg='template with volid=%s:%s/%s is already deleted' % (storage, content_type, template)) - - if proxmox.delete_template(node, storage, content_type, template, timeout): - module.exit_json(changed=True, msg='template with volid=%s:%s/%s deleted' % (storage, content_type, template)) - except Exception as e: - 
module.fail_json(msg="deleting of template %s failed with exception: %s" % (template, e)) - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/cloud/misc/proxmox_user_info.py b/plugins/modules/cloud/misc/proxmox_user_info.py deleted file mode 100644 index d0ee365b7f..0000000000 --- a/plugins/modules/cloud/misc/proxmox_user_info.py +++ /dev/null @@ -1,253 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# -# Copyright: Tristan Le Guern -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' ---- -module: proxmox_user_info -short_description: Retrieve information about one or more Proxmox VE users -version_added: 1.3.0 -description: - - Retrieve information about one or more Proxmox VE users -options: - domain: - description: - - Restrict results to a specific authentication realm. - aliases: ['realm'] - type: str - user: - description: - - Restrict results to a specific user. - aliases: ['name'] - type: str - userid: - description: - - Restrict results to a specific user ID, which is a concatenation of a user and domain parts. 
- type: str -author: Tristan Le Guern (@tleguern) -extends_documentation_fragment: community.general.proxmox.documentation -''' - -EXAMPLES = ''' -- name: List existing users - community.general.proxmox_user_info: - api_host: helldorado - api_user: root@pam - api_password: "{{ password | default(omit) }}" - api_token_id: "{{ token_id | default(omit) }}" - api_token_secret: "{{ token_secret | default(omit) }}" - register: proxmox_users - -- name: List existing users in the pve authentication realm - community.general.proxmox_user_info: - api_host: helldorado - api_user: root@pam - api_password: "{{ password | default(omit) }}" - api_token_id: "{{ token_id | default(omit) }}" - api_token_secret: "{{ token_secret | default(omit) }}" - domain: pve - register: proxmox_users_pve - -- name: Retrieve information about admin@pve - community.general.proxmox_user_info: - api_host: helldorado - api_user: root@pam - api_password: "{{ password | default(omit) }}" - api_token_id: "{{ token_id | default(omit) }}" - api_token_secret: "{{ token_secret | default(omit) }}" - userid: admin@pve - register: proxmox_user_admin - -- name: Alternative way to retrieve information about admin@pve - community.general.proxmox_user_info: - api_host: helldorado - api_user: root@pam - api_password: "{{ password | default(omit) }}" - api_token_id: "{{ token_id | default(omit) }}" - api_token_secret: "{{ token_secret | default(omit) }}" - user: admin - domain: pve - register: proxmox_user_admin -''' - - -RETURN = ''' -proxmox_users: - description: List of users. - returned: always, but can be empty - type: list - elements: dict - contains: - comment: - description: Short description of the user. - returned: on success - type: str - domain: - description: User's authentication realm, also the right part of the user ID. - returned: on success - type: str - email: - description: User's email address. - returned: on success - type: str - enabled: - description: User's account state. 
- returned: on success - type: bool - expire: - description: Expiration date in seconds since EPOCH. Zero means no expiration. - returned: on success - type: int - firstname: - description: User's first name. - returned: on success - type: str - groups: - description: List of groups which the user is a member of. - returned: on success - type: list - elements: str - keys: - description: User's two factor authentication keys. - returned: on success - type: str - lastname: - description: User's last name. - returned: on success - type: str - tokens: - description: List of API tokens associated to the user. - returned: on success - type: list - elements: dict - contains: - comment: - description: Short description of the token. - returned: on success - type: str - expire: - description: Expiration date in seconds since EPOCH. Zero means no expiration. - returned: on success - type: int - privsep: - description: Describe if the API token is further restricted with ACLs or is fully privileged. - returned: on success - type: bool - tokenid: - description: Token name. - returned: on success - type: str - user: - description: User's login name, also the left part of the user ID. - returned: on success - type: str - userid: - description: Proxmox user ID, represented as user@realm. 
- returned: on success - type: str -''' - - -from ansible.module_utils.basic import AnsibleModule, missing_required_lib -from ansible_collections.community.general.plugins.module_utils.proxmox import ( - proxmox_auth_argument_spec, ProxmoxAnsible, proxmox_to_ansible_bool) - - -class ProxmoxUserInfoAnsible(ProxmoxAnsible): - def get_user(self, userid): - try: - user = self.proxmox_api.access.users.get(userid) - except Exception: - self.module.fail_json(msg="User '%s' does not exist" % userid) - user['userid'] = userid - return ProxmoxUser(user) - - def get_users(self, domain=None): - users = self.proxmox_api.access.users.get(full=1) - users = [ProxmoxUser(user) for user in users] - if domain: - return [user for user in users if user.user['domain'] == domain] - return users - - -class ProxmoxUser: - def __init__(self, user): - self.user = dict() - # Data representation is not the same depending on API calls - for k, v in user.items(): - if k == 'enable': - self.user['enabled'] = proxmox_to_ansible_bool(user['enable']) - elif k == 'userid': - self.user['user'] = user['userid'].split('@')[0] - self.user['domain'] = user['userid'].split('@')[1] - self.user[k] = v - elif k in ['groups', 'tokens'] and (v == '' or v is None): - self.user[k] = [] - elif k == 'groups' and type(v) == str: - self.user['groups'] = v.split(',') - elif k == 'tokens' and type(v) == list: - for token in v: - if 'privsep' in token: - token['privsep'] = proxmox_to_ansible_bool(token['privsep']) - self.user['tokens'] = v - elif k == 'tokens' and type(v) == dict: - self.user['tokens'] = list() - for tokenid, tokenvalues in v.items(): - t = tokenvalues - t['tokenid'] = tokenid - if 'privsep' in tokenvalues: - t['privsep'] = proxmox_to_ansible_bool(tokenvalues['privsep']) - self.user['tokens'].append(t) - else: - self.user[k] = v - - -def proxmox_user_info_argument_spec(): - return dict( - domain=dict(type='str', aliases=['realm']), - user=dict(type='str', aliases=['name']), - userid=dict(type='str'), - 
) - - -def main(): - module_args = proxmox_auth_argument_spec() - user_info_args = proxmox_user_info_argument_spec() - module_args.update(user_info_args) - - module = AnsibleModule( - argument_spec=module_args, - required_one_of=[('api_password', 'api_token_id')], - required_together=[('api_token_id', 'api_token_secret')], - mutually_exclusive=[('user', 'userid'), ('domain', 'userid')], - supports_check_mode=True - ) - result = dict( - changed=False - ) - - proxmox = ProxmoxUserInfoAnsible(module) - domain = module.params['domain'] - user = module.params['user'] - if user and domain: - userid = user + '@' + domain - else: - userid = module.params['userid'] - - if userid: - users = [proxmox.get_user(userid=userid)] - else: - users = proxmox.get_users(domain=domain) - result['proxmox_users'] = [user.user for user in users] - - module.exit_json(**result) - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/cloud/online/online_server_info.py b/plugins/modules/cloud/online/online_server_info.py deleted file mode 100644 index cf218efd29..0000000000 --- a/plugins/modules/cloud/online/online_server_info.py +++ /dev/null @@ -1,177 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import (absolute_import, division, print_function) - -__metaclass__ = type - -DOCUMENTATION = r''' ---- -module: online_server_info -short_description: Gather information about Online servers. -description: - - Gather information about the servers. 
- - U(https://www.online.net/en/dedicated-server) -author: - - "Remy Leone (@remyleone)" -extends_documentation_fragment: -- community.general.online - -''' - -EXAMPLES = r''' -- name: Gather Online server information - community.general.online_server_info: - api_token: '0d1627e8-bbf0-44c5-a46f-5c4d3aef033f' - register: result - -- ansible.builtin.debug: - msg: "{{ result.online_server_info }}" -''' - -RETURN = r''' -online_server_info: - description: - - Response from Online API. - - "For more details please refer to: U(https://console.online.net/en/api/)." - returned: success - type: list - elements: dict - sample: - "online_server_info": [ - { - "abuse": "abuse@example.com", - "anti_ddos": false, - "bmc": { - "session_key": null - }, - "boot_mode": "normal", - "contacts": { - "owner": "foobar", - "tech": "foobar" - }, - "disks": [ - { - "$ref": "/api/v1/server/hardware/disk/68452" - }, - { - "$ref": "/api/v1/server/hardware/disk/68453" - } - ], - "drive_arrays": [ - { - "disks": [ - { - "$ref": "/api/v1/server/hardware/disk/68452" - }, - { - "$ref": "/api/v1/server/hardware/disk/68453" - } - ], - "raid_controller": { - "$ref": "/api/v1/server/hardware/raidController/9910" - }, - "raid_level": "RAID1" - } - ], - "hardware_watch": true, - "hostname": "sd-42", - "id": 42, - "ip": [ - { - "address": "195.154.172.149", - "mac": "28:92:4a:33:5e:c6", - "reverse": "195-154-172-149.rev.poneytelecom.eu.", - "switch_port_state": "up", - "type": "public" - }, - { - "address": "10.90.53.212", - "mac": "28:92:4a:33:5e:c7", - "reverse": null, - "switch_port_state": "up", - "type": "private" - } - ], - "last_reboot": "2018-08-23T08:32:03.000Z", - "location": { - "block": "A", - "datacenter": "DC3", - "position": 19, - "rack": "A23", - "room": "4 4-4" - }, - "network": { - "ip": [ - "195.154.172.149" - ], - "ipfo": [], - "private": [ - "10.90.53.212" - ] - }, - "offer": "Pro-1-S-SATA", - "os": { - "name": "FreeBSD", - "version": "11.1-RELEASE" - }, - "power": "ON", - 
"proactive_monitoring": false, - "raid_controllers": [ - { - "$ref": "/api/v1/server/hardware/raidController/9910" - } - ], - "support": "Basic service level" - } - ] -''' - -from ansible.module_utils.basic import AnsibleModule -from ansible_collections.community.general.plugins.module_utils.online import ( - Online, OnlineException, online_argument_spec -) - - -class OnlineServerInfo(Online): - - def __init__(self, module): - super(OnlineServerInfo, self).__init__(module) - self.name = 'api/v1/server' - - def _get_server_detail(self, server_path): - try: - return self.get(path=server_path).json - except OnlineException as exc: - self.module.fail_json(msg="A problem occurred while fetching: %s (%s)" % (server_path, exc)) - - def all_detailed_servers(self): - servers_api_path = self.get_resources() - - server_data = ( - self._get_server_detail(server_api_path) - for server_api_path in servers_api_path - ) - - return [s for s in server_data if s is not None] - - -def main(): - module = AnsibleModule( - argument_spec=online_argument_spec(), - supports_check_mode=True, - ) - - try: - servers_info = OnlineServerInfo(module).all_detailed_servers() - module.exit_json( - online_server_info=servers_info - ) - except OnlineException as exc: - module.fail_json(msg=exc.message) - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/cloud/opennebula/one_image.py b/plugins/modules/cloud/opennebula/one_image.py deleted file mode 100644 index 5a80306fd1..0000000000 --- a/plugins/modules/cloud/opennebula/one_image.py +++ /dev/null @@ -1,423 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Make coding more python3-ish -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - -""" -(c) 2018, Milan Ilic - -This file is part of Ansible - -Ansible is free software: you can redistribute it and/or modify -it under the terms of the GNU General Public License as published by -the Free Software Foundation, either version 3 of the 
License, or -(at your option) any later version. - -Ansible is distributed in the hope that it will be useful, -but WITHOUT ANY WARRANTY; without even the implied warranty of -MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -GNU General Public License for more details. - -You should have received a clone of the GNU General Public License -along with Ansible. If not, see . -""" - -DOCUMENTATION = ''' ---- -module: one_image -short_description: Manages OpenNebula images -description: - - Manages OpenNebula images -requirements: - - pyone -options: - api_url: - description: - - URL of the OpenNebula RPC server. - - It is recommended to use HTTPS so that the username/password are not - - transferred over the network unencrypted. - - If not set then the value of the C(ONE_URL) environment variable is used. - type: str - api_username: - description: - - Name of the user to login into the OpenNebula RPC server. If not set - - then the value of the C(ONE_USERNAME) environment variable is used. - type: str - api_password: - description: - - Password of the user to login into OpenNebula RPC server. If not set - - then the value of the C(ONE_PASSWORD) environment variable is used. - type: str - id: - description: - - A C(id) of the image you would like to manage. - type: int - name: - description: - - A C(name) of the image you would like to manage. - type: str - state: - description: - - C(present) - state that is used to manage the image - - C(absent) - delete the image - - C(cloned) - clone the image - - C(renamed) - rename the image to the C(new_name) - choices: ["present", "absent", "cloned", "renamed"] - default: present - type: str - enabled: - description: - - Whether the image should be enabled or disabled. - type: bool - new_name: - description: - - A name that will be assigned to the existing or new image. - - In the case of cloning, by default C(new_name) will take the name of the origin image with the prefix 'Copy of'. 
- type: str -author: - - "Milan Ilic (@ilicmilan)" -''' - -EXAMPLES = ''' -- name: Fetch the IMAGE by id - community.general.one_image: - id: 45 - register: result - -- name: Print the IMAGE properties - ansible.builtin.debug: - var: result - -- name: Rename existing IMAGE - community.general.one_image: - id: 34 - state: renamed - new_name: bar-image - -- name: Disable the IMAGE by id - community.general.one_image: - id: 37 - enabled: no - -- name: Enable the IMAGE by name - community.general.one_image: - name: bar-image - enabled: yes - -- name: Clone the IMAGE by name - community.general.one_image: - name: bar-image - state: cloned - new_name: bar-image-clone - register: result - -- name: Delete the IMAGE by id - community.general.one_image: - id: '{{ result.id }}' - state: absent -''' - -RETURN = ''' -id: - description: image id - type: int - returned: success - sample: 153 -name: - description: image name - type: str - returned: success - sample: app1 -group_id: - description: image's group id - type: int - returned: success - sample: 1 -group_name: - description: image's group name - type: str - returned: success - sample: one-users -owner_id: - description: image's owner id - type: int - returned: success - sample: 143 -owner_name: - description: image's owner name - type: str - returned: success - sample: ansible-test -state: - description: state of image instance - type: str - returned: success - sample: READY -used: - description: is image in use - type: bool - returned: success - sample: true -running_vms: - description: count of running vms that use this image - type: int - returned: success - sample: 7 -''' - -try: - import pyone - HAS_PYONE = True -except ImportError: - HAS_PYONE = False - -from ansible.module_utils.basic import AnsibleModule -import os - - -def get_image(module, client, predicate): - # Filter -2 means fetch all images user can Use - pool = client.imagepool.info(-2, -1, -1, -1) - - for image in pool.IMAGE: - if predicate(image): - 
return image - - return None - - -def get_image_by_name(module, client, image_name): - return get_image(module, client, lambda image: (image.NAME == image_name)) - - -def get_image_by_id(module, client, image_id): - return get_image(module, client, lambda image: (image.ID == image_id)) - - -def get_image_instance(module, client, requested_id, requested_name): - if requested_id: - return get_image_by_id(module, client, requested_id) - else: - return get_image_by_name(module, client, requested_name) - - -IMAGE_STATES = ['INIT', 'READY', 'USED', 'DISABLED', 'LOCKED', 'ERROR', 'CLONE', 'DELETE', 'USED_PERS', 'LOCKED_USED', 'LOCKED_USED_PERS'] - - -def get_image_info(image): - info = { - 'id': image.ID, - 'name': image.NAME, - 'state': IMAGE_STATES[image.STATE], - 'running_vms': image.RUNNING_VMS, - 'used': bool(image.RUNNING_VMS), - 'user_name': image.UNAME, - 'user_id': image.UID, - 'group_name': image.GNAME, - 'group_id': image.GID, - } - - return info - - -def wait_for_state(module, client, image_id, wait_timeout, state_predicate): - import time - start_time = time.time() - - while (time.time() - start_time) < wait_timeout: - image = client.image.info(image_id) - state = image.STATE - - if state_predicate(state): - return image - - time.sleep(1) - - module.fail_json(msg="Wait timeout has expired!") - - -def wait_for_ready(module, client, image_id, wait_timeout=60): - return wait_for_state(module, client, image_id, wait_timeout, lambda state: (state in [IMAGE_STATES.index('READY')])) - - -def wait_for_delete(module, client, image_id, wait_timeout=60): - return wait_for_state(module, client, image_id, wait_timeout, lambda state: (state in [IMAGE_STATES.index('DELETE')])) - - -def enable_image(module, client, image, enable): - image = client.image.info(image.ID) - changed = False - - state = image.STATE - - if state not in [IMAGE_STATES.index('READY'), IMAGE_STATES.index('DISABLED'), IMAGE_STATES.index('ERROR')]: - if enable: - module.fail_json(msg="Cannot enable " + 
IMAGE_STATES[state] + " image!") - else: - module.fail_json(msg="Cannot disable " + IMAGE_STATES[state] + " image!") - - if ((enable and state != IMAGE_STATES.index('READY')) or - (not enable and state != IMAGE_STATES.index('DISABLED'))): - changed = True - - if changed and not module.check_mode: - client.image.enable(image.ID, enable) - - result = get_image_info(image) - result['changed'] = changed - - return result - - -def clone_image(module, client, image, new_name): - if new_name is None: - new_name = "Copy of " + image.NAME - - tmp_image = get_image_by_name(module, client, new_name) - if tmp_image: - result = get_image_info(tmp_image) - result['changed'] = False - return result - - if image.STATE == IMAGE_STATES.index('DISABLED'): - module.fail_json(msg="Cannot clone DISABLED image") - - if not module.check_mode: - new_id = client.image.clone(image.ID, new_name) - wait_for_ready(module, client, new_id) - image = client.image.info(new_id) - - result = get_image_info(image) - result['changed'] = True - - return result - - -def rename_image(module, client, image, new_name): - if new_name is None: - module.fail_json(msg="'new_name' option has to be specified when the state is 'renamed'") - - if new_name == image.NAME: - result = get_image_info(image) - result['changed'] = False - return result - - tmp_image = get_image_by_name(module, client, new_name) - if tmp_image: - module.fail_json(msg="Name '" + new_name + "' is already taken by IMAGE with id=" + str(tmp_image.ID)) - - if not module.check_mode: - client.image.rename(image.ID, new_name) - - result = get_image_info(image) - result['changed'] = True - return result - - -def delete_image(module, client, image): - - if not image: - return {'changed': False} - - if image.RUNNING_VMS > 0: - module.fail_json(msg="Cannot delete image. 
There are " + str(image.RUNNING_VMS) + " VMs using it.") - - if not module.check_mode: - client.image.delete(image.ID) - wait_for_delete(module, client, image.ID) - - return {'changed': True} - - -def get_connection_info(module): - - url = module.params.get('api_url') - username = module.params.get('api_username') - password = module.params.get('api_password') - - if not url: - url = os.environ.get('ONE_URL') - - if not username: - username = os.environ.get('ONE_USERNAME') - - if not password: - password = os.environ.get('ONE_PASSWORD') - - if not(url and username and password): - module.fail_json(msg="One or more connection parameters (api_url, api_username, api_password) were not specified") - from collections import namedtuple - - auth_params = namedtuple('auth', ('url', 'username', 'password')) - - return auth_params(url=url, username=username, password=password) - - -def main(): - fields = { - "api_url": {"required": False, "type": "str"}, - "api_username": {"required": False, "type": "str"}, - "api_password": {"required": False, "type": "str", "no_log": True}, - "id": {"required": False, "type": "int"}, - "name": {"required": False, "type": "str"}, - "state": { - "default": "present", - "choices": ['present', 'absent', 'cloned', 'renamed'], - "type": "str" - }, - "enabled": {"required": False, "type": "bool"}, - "new_name": {"required": False, "type": "str"}, - } - - module = AnsibleModule(argument_spec=fields, - mutually_exclusive=[['id', 'name']], - supports_check_mode=True) - - if not HAS_PYONE: - module.fail_json(msg='This module requires pyone to work!') - - auth = get_connection_info(module) - params = module.params - id = params.get('id') - name = params.get('name') - state = params.get('state') - enabled = params.get('enabled') - new_name = params.get('new_name') - client = pyone.OneServer(auth.url, session=auth.username + ':' + auth.password) - - result = {} - - if not id and state == 'renamed': - module.fail_json(msg="Option 'id' is required when 
the state is 'renamed'") - - image = get_image_instance(module, client, id, name) - if not image and state != 'absent': - if id: - module.fail_json(msg="There is no image with id=" + str(id)) - else: - module.fail_json(msg="There is no image with name=" + name) - - if state == 'absent': - result = delete_image(module, client, image) - else: - result = get_image_info(image) - changed = False - result['changed'] = False - - if enabled is not None: - result = enable_image(module, client, image, enabled) - if state == "cloned": - result = clone_image(module, client, image, new_name) - elif state == "renamed": - result = rename_image(module, client, image, new_name) - - changed = changed or result['changed'] - result['changed'] = changed - - module.exit_json(**result) - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/cloud/opennebula/one_image_info.py b/plugins/modules/cloud/opennebula/one_image_info.py deleted file mode 100644 index e03b8ad724..0000000000 --- a/plugins/modules/cloud/opennebula/one_image_info.py +++ /dev/null @@ -1,289 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Make coding more python3-ish -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - -""" -(c) 2018, Milan Ilic - -This file is part of Ansible - -Ansible is free software: you can redistribute it and/or modify -it under the terms of the GNU General Public License as published by -the Free Software Foundation, either version 3 of the License, or -(at your option) any later version. - -Ansible is distributed in the hope that it will be useful, -but WITHOUT ANY WARRANTY; without even the implied warranty of -MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -GNU General Public License for more details. - -You should have received a clone of the GNU General Public License -along with Ansible. If not, see . 
-""" - -DOCUMENTATION = ''' ---- -module: one_image_info -short_description: Gather information on OpenNebula images -description: - - Gather information on OpenNebula images. - - This module was called C(one_image_facts) before Ansible 2.9. The usage did not change. -requirements: - - pyone -options: - api_url: - description: - - URL of the OpenNebula RPC server. - - It is recommended to use HTTPS so that the username/password are not - - transferred over the network unencrypted. - - If not set then the value of the C(ONE_URL) environment variable is used. - type: str - api_username: - description: - - Name of the user to login into the OpenNebula RPC server. If not set - - then the value of the C(ONE_USERNAME) environment variable is used. - type: str - api_password: - description: - - Password of the user to login into OpenNebula RPC server. If not set - - then the value of the C(ONE_PASSWORD) environment variable is used. - type: str - ids: - description: - - A list of images ids whose facts you want to gather. - aliases: ['id'] - type: list - elements: str - name: - description: - - A C(name) of the image whose facts will be gathered. - - If the C(name) begins with '~' the C(name) will be used as regex pattern - - which restricts the list of images (whose facts will be returned) whose names match specified regex. - - Also, if the C(name) begins with '~*' case-insensitive matching will be performed. - - See examples for more details. 
- type: str -author: - - "Milan Ilic (@ilicmilan)" - - "Jan Meerkamp (@meerkampdvv)" -''' - -EXAMPLES = ''' -- name: Gather facts about all images - community.general.one_image_info: - register: result - -- name: Print all images facts - ansible.builtin.debug: - msg: result - -- name: Gather facts about an image using ID - community.general.one_image_info: - ids: - - 123 - -- name: Gather facts about an image using the name - community.general.one_image_info: - name: 'foo-image' - register: foo_image - -- name: Gather facts about all IMAGEs whose name matches regex 'app-image-.*' - community.general.one_image_info: - name: '~app-image-.*' - register: app_images - -- name: Gather facts about all IMAGEs whose name matches regex 'foo-image-.*' ignoring cases - community.general.one_image_info: - name: '~*foo-image-.*' - register: foo_images -''' - -RETURN = ''' -images: - description: A list of images info - type: complex - returned: success - contains: - id: - description: image id - type: int - sample: 153 - name: - description: image name - type: str - sample: app1 - group_id: - description: image's group id - type: int - sample: 1 - group_name: - description: image's group name - type: str - sample: one-users - owner_id: - description: image's owner id - type: int - sample: 143 - owner_name: - description: image's owner name - type: str - sample: ansible-test - state: - description: state of image instance - type: str - sample: READY - used: - description: is image in use - type: bool - sample: true - running_vms: - description: count of running vms that use this image - type: int - sample: 7 -''' - -try: - import pyone - HAS_PYONE = True -except ImportError: - HAS_PYONE = False - -from ansible.module_utils.basic import AnsibleModule -import os - - -def get_all_images(client): - pool = client.imagepool.info(-2, -1, -1, -1) - # Filter -2 means fetch all images user can Use - - return pool - - -IMAGE_STATES = ['INIT', 'READY', 'USED', 'DISABLED', 'LOCKED', 'ERROR', 
'CLONE', 'DELETE', 'USED_PERS', 'LOCKED_USED', 'LOCKED_USED_PERS'] - - -def get_image_info(image): - info = { - 'id': image.ID, - 'name': image.NAME, - 'state': IMAGE_STATES[image.STATE], - 'running_vms': image.RUNNING_VMS, - 'used': bool(image.RUNNING_VMS), - 'user_name': image.UNAME, - 'user_id': image.UID, - 'group_name': image.GNAME, - 'group_id': image.GID, - } - return info - - -def get_images_by_ids(module, client, ids): - images = [] - pool = get_all_images(client) - - for image in pool.IMAGE: - if str(image.ID) in ids: - images.append(image) - ids.remove(str(image.ID)) - if len(ids) == 0: - break - - if len(ids) > 0: - module.fail_json(msg='There is no IMAGE(s) with id(s)=' + ', '.join('{id}'.format(id=str(image_id)) for image_id in ids)) - - return images - - -def get_images_by_name(module, client, name_pattern): - - images = [] - pattern = None - - pool = get_all_images(client) - - if name_pattern.startswith('~'): - import re - if name_pattern[1] == '*': - pattern = re.compile(name_pattern[2:], re.IGNORECASE) - else: - pattern = re.compile(name_pattern[1:]) - - for image in pool.IMAGE: - if pattern is not None: - if pattern.match(image.NAME): - images.append(image) - elif name_pattern == image.NAME: - images.append(image) - break - - # if the specific name is indicated - if pattern is None and len(images) == 0: - module.fail_json(msg="There is no IMAGE with name=" + name_pattern) - - return images - - -def get_connection_info(module): - - url = module.params.get('api_url') - username = module.params.get('api_username') - password = module.params.get('api_password') - - if not url: - url = os.environ.get('ONE_URL') - - if not username: - username = os.environ.get('ONE_USERNAME') - - if not password: - password = os.environ.get('ONE_PASSWORD') - - if not(url and username and password): - module.fail_json(msg="One or more connection parameters (api_url, api_username, api_password) were not specified") - from collections import namedtuple - - auth_params = 
namedtuple('auth', ('url', 'username', 'password')) - - return auth_params(url=url, username=username, password=password) - - -def main(): - fields = { - "api_url": {"required": False, "type": "str"}, - "api_username": {"required": False, "type": "str"}, - "api_password": {"required": False, "type": "str", "no_log": True}, - "ids": {"required": False, "aliases": ['id'], "type": "list", "elements": "str"}, - "name": {"required": False, "type": "str"}, - } - - module = AnsibleModule(argument_spec=fields, - mutually_exclusive=[['ids', 'name']], - supports_check_mode=True) - - if not HAS_PYONE: - module.fail_json(msg='This module requires pyone to work!') - - auth = get_connection_info(module) - params = module.params - ids = params.get('ids') - name = params.get('name') - client = pyone.OneServer(auth.url, session=auth.username + ':' + auth.password) - - if ids: - images = get_images_by_ids(module, client, ids) - elif name: - images = get_images_by_name(module, client, name) - else: - images = get_all_images(client).IMAGE - - result = { - 'images': [get_image_info(image) for image in images], - } - - module.exit_json(**result) - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/cloud/profitbricks/profitbricks.py b/plugins/modules/cloud/profitbricks/profitbricks.py deleted file mode 100644 index eccedb71f4..0000000000 --- a/plugins/modules/cloud/profitbricks/profitbricks.py +++ /dev/null @@ -1,657 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# Copyright: Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' ---- -module: profitbricks -short_description: Create, destroy, start, stop, and reboot a ProfitBricks virtual machine. -description: - - Create, destroy, update, start, stop, and reboot a ProfitBricks virtual machine. 
When the virtual machine is created it can optionally wait - for it to be 'running' before returning. This module has a dependency on profitbricks >= 1.0.0 -options: - auto_increment: - description: - - Whether or not to increment a single number in the name for created virtual machines. - type: bool - default: 'yes' - name: - description: - - The name of the virtual machine. - type: str - image: - description: - - The system image ID for creating the virtual machine, e.g. a3eae284-a2fe-11e4-b187-5f1f641608c8. - type: str - image_password: - description: - - Password set for the administrative user. - type: str - ssh_keys: - description: - - Public SSH keys allowing access to the virtual machine. - type: list - elements: str - datacenter: - description: - - The datacenter to provision this virtual machine. - type: str - cores: - description: - - The number of CPU cores to allocate to the virtual machine. - default: 2 - type: int - ram: - description: - - The amount of memory to allocate to the virtual machine. - default: 2048 - type: int - cpu_family: - description: - - The CPU family type to allocate to the virtual machine. - type: str - default: AMD_OPTERON - choices: [ "AMD_OPTERON", "INTEL_XEON" ] - volume_size: - description: - - The size in GB of the boot volume. - type: int - default: 10 - bus: - description: - - The bus type for the volume. - type: str - default: VIRTIO - choices: [ "IDE", "VIRTIO"] - instance_ids: - description: - - list of instance ids, currently only used when state='absent' to remove instances. - type: list - elements: str - count: - description: - - The number of virtual machines to create. - type: int - default: 1 - location: - description: - - The datacenter location. Use only if you want to create the Datacenter or else this value is ignored. - type: str - default: us/las - choices: [ "us/las", "de/fra", "de/fkb" ] - assign_public_ip: - description: - - This will assign the machine to the public LAN. 
If no LAN exists with public Internet access it is created. - type: bool - default: 'no' - lan: - description: - - The ID of the LAN you wish to add the servers to. - type: int - default: 1 - subscription_user: - description: - - The ProfitBricks username. Overrides the PB_SUBSCRIPTION_ID environment variable. - type: str - subscription_password: - description: - - THe ProfitBricks password. Overrides the PB_PASSWORD environment variable. - type: str - wait: - description: - - wait for the instance to be in state 'running' before returning - type: bool - default: 'yes' - wait_timeout: - description: - - how long before wait gives up, in seconds - type: int - default: 600 - remove_boot_volume: - description: - - remove the bootVolume of the virtual machine you're destroying. - type: bool - default: 'yes' - state: - description: - - create or terminate instances - - 'The choices available are: C(running), C(stopped), C(absent), C(present).' - type: str - default: 'present' - disk_type: - description: - - the type of disk to be allocated. - type: str - choices: [SSD, HDD] - default: HDD - -requirements: - - "profitbricks" - - "python >= 2.6" -author: Matt Baldwin (@baldwinSPC) -''' - -EXAMPLES = ''' - -# Note: These examples do not set authentication details, see the AWS Guide for details. 
- -# Provisioning example -- name: Create three servers and enumerate their names - community.general.profitbricks: - datacenter: Tardis One - name: web%02d.stackpointcloud.com - cores: 4 - ram: 2048 - volume_size: 50 - cpu_family: INTEL_XEON - image: a3eae284-a2fe-11e4-b187-5f1f641608c8 - location: us/las - count: 3 - assign_public_ip: true - -- name: Remove virtual machines - community.general.profitbricks: - datacenter: Tardis One - instance_ids: - - 'web001.stackpointcloud.com' - - 'web002.stackpointcloud.com' - - 'web003.stackpointcloud.com' - wait_timeout: 500 - state: absent - -- name: Start virtual machines - community.general.profitbricks: - datacenter: Tardis One - instance_ids: - - 'web001.stackpointcloud.com' - - 'web002.stackpointcloud.com' - - 'web003.stackpointcloud.com' - wait_timeout: 500 - state: running - -- name: Stop virtual machines - community.general.profitbricks: - datacenter: Tardis One - instance_ids: - - 'web001.stackpointcloud.com' - - 'web002.stackpointcloud.com' - - 'web003.stackpointcloud.com' - wait_timeout: 500 - state: stopped -''' - -import re -import uuid -import time -import traceback - -HAS_PB_SDK = True - -try: - from profitbricks.client import ProfitBricksService, Volume, Server, Datacenter, NIC, LAN -except ImportError: - HAS_PB_SDK = False - -from ansible.module_utils.basic import AnsibleModule -from ansible.module_utils.six.moves import xrange -from ansible.module_utils.common.text.converters import to_native - - -LOCATIONS = ['us/las', - 'de/fra', - 'de/fkb'] - -uuid_match = re.compile( - r'[\w]{8}-[\w]{4}-[\w]{4}-[\w]{4}-[\w]{12}', re.I) - - -def _wait_for_completion(profitbricks, promise, wait_timeout, msg): - if not promise: - return - wait_timeout = time.time() + wait_timeout - while wait_timeout > time.time(): - time.sleep(5) - operation_result = profitbricks.get_request( - request_id=promise['requestId'], - status=True) - - if operation_result['metadata']['status'] == "DONE": - return - elif 
operation_result['metadata']['status'] == "FAILED": - raise Exception( - 'Request failed to complete ' + msg + ' "' + str( - promise['requestId']) + '" to complete.') - - raise Exception( - 'Timed out waiting for async operation ' + msg + ' "' + str( - promise['requestId'] - ) + '" to complete.') - - -def _create_machine(module, profitbricks, datacenter, name): - cores = module.params.get('cores') - ram = module.params.get('ram') - cpu_family = module.params.get('cpu_family') - volume_size = module.params.get('volume_size') - disk_type = module.params.get('disk_type') - image_password = module.params.get('image_password') - ssh_keys = module.params.get('ssh_keys') - bus = module.params.get('bus') - lan = module.params.get('lan') - assign_public_ip = module.params.get('assign_public_ip') - subscription_user = module.params.get('subscription_user') - subscription_password = module.params.get('subscription_password') - location = module.params.get('location') - image = module.params.get('image') - assign_public_ip = module.boolean(module.params.get('assign_public_ip')) - wait = module.params.get('wait') - wait_timeout = module.params.get('wait_timeout') - - if assign_public_ip: - public_found = False - - lans = profitbricks.list_lans(datacenter) - for lan in lans['items']: - if lan['properties']['public']: - public_found = True - lan = lan['id'] - - if not public_found: - i = LAN( - name='public', - public=True) - - lan_response = profitbricks.create_lan(datacenter, i) - _wait_for_completion(profitbricks, lan_response, - wait_timeout, "_create_machine") - lan = lan_response['id'] - - v = Volume( - name=str(uuid.uuid4()).replace('-', '')[:10], - size=volume_size, - image=image, - image_password=image_password, - ssh_keys=ssh_keys, - disk_type=disk_type, - bus=bus) - - n = NIC( - lan=int(lan) - ) - - s = Server( - name=name, - ram=ram, - cores=cores, - cpu_family=cpu_family, - create_volumes=[v], - nics=[n], - ) - - try: - create_server_response = 
profitbricks.create_server( - datacenter_id=datacenter, server=s) - - _wait_for_completion(profitbricks, create_server_response, - wait_timeout, "create_virtual_machine") - - server_response = profitbricks.get_server( - datacenter_id=datacenter, - server_id=create_server_response['id'], - depth=3 - ) - except Exception as e: - module.fail_json(msg="failed to create the new server: %s" % str(e)) - else: - return server_response - - -def _startstop_machine(module, profitbricks, datacenter_id, server_id): - state = module.params.get('state') - - try: - if state == 'running': - profitbricks.start_server(datacenter_id, server_id) - else: - profitbricks.stop_server(datacenter_id, server_id) - - return True - except Exception as e: - module.fail_json(msg="failed to start or stop the virtual machine %s at %s: %s" % (server_id, datacenter_id, str(e))) - - -def _create_datacenter(module, profitbricks): - datacenter = module.params.get('datacenter') - location = module.params.get('location') - wait_timeout = module.params.get('wait_timeout') - - i = Datacenter( - name=datacenter, - location=location - ) - - try: - datacenter_response = profitbricks.create_datacenter(datacenter=i) - - _wait_for_completion(profitbricks, datacenter_response, - wait_timeout, "_create_datacenter") - - return datacenter_response - except Exception as e: - module.fail_json(msg="failed to create the new server(s): %s" % str(e)) - - -def create_virtual_machine(module, profitbricks): - """ - Create new virtual machine - - module : AnsibleModule object - community.general.profitbricks: authenticated profitbricks object - - Returns: - True if a new virtual machine was created, false otherwise - """ - datacenter = module.params.get('datacenter') - name = module.params.get('name') - auto_increment = module.params.get('auto_increment') - count = module.params.get('count') - lan = module.params.get('lan') - wait_timeout = module.params.get('wait_timeout') - failed = True - datacenter_found = False - - 
virtual_machines = [] - virtual_machine_ids = [] - - # Locate UUID for datacenter if referenced by name. - datacenter_list = profitbricks.list_datacenters() - datacenter_id = _get_datacenter_id(datacenter_list, datacenter) - if datacenter_id: - datacenter_found = True - - if not datacenter_found: - datacenter_response = _create_datacenter(module, profitbricks) - datacenter_id = datacenter_response['id'] - - _wait_for_completion(profitbricks, datacenter_response, - wait_timeout, "create_virtual_machine") - - if auto_increment: - numbers = set() - count_offset = 1 - - try: - name % 0 - except TypeError as e: - if e.message.startswith('not all'): - name = '%s%%d' % name - else: - module.fail_json(msg=e.message, exception=traceback.format_exc()) - - number_range = xrange(count_offset, count_offset + count + len(numbers)) - available_numbers = list(set(number_range).difference(numbers)) - names = [] - numbers_to_use = available_numbers[:count] - for number in numbers_to_use: - names.append(name % number) - else: - names = [name] - - # Prefetch a list of servers for later comparison. - server_list = profitbricks.list_servers(datacenter_id) - for name in names: - # Skip server creation if the server already exists. - if _get_server_id(server_list, name): - continue - - create_response = _create_machine(module, profitbricks, str(datacenter_id), name) - nics = profitbricks.list_nics(datacenter_id, create_response['id']) - for n in nics['items']: - if lan == n['properties']['lan']: - create_response.update({'public_ip': n['properties']['ips'][0]}) - - virtual_machines.append(create_response) - - failed = False - - results = { - 'failed': failed, - 'machines': virtual_machines, - 'action': 'create', - 'instance_ids': { - 'instances': [i['id'] for i in virtual_machines], - } - } - - return results - - -def remove_virtual_machine(module, profitbricks): - """ - Removes a virtual machine. - - This will remove the virtual machine along with the bootVolume. 
- - module : AnsibleModule object - community.general.profitbricks: authenticated profitbricks object. - - Not yet supported: handle deletion of attached data disks. - - Returns: - True if a new virtual server was deleted, false otherwise - """ - datacenter = module.params.get('datacenter') - instance_ids = module.params.get('instance_ids') - remove_boot_volume = module.params.get('remove_boot_volume') - changed = False - - if not isinstance(module.params.get('instance_ids'), list) or len(module.params.get('instance_ids')) < 1: - module.fail_json(msg='instance_ids should be a list of virtual machine ids or names, aborting') - - # Locate UUID for datacenter if referenced by name. - datacenter_list = profitbricks.list_datacenters() - datacenter_id = _get_datacenter_id(datacenter_list, datacenter) - if not datacenter_id: - module.fail_json(msg='Virtual data center \'%s\' not found.' % str(datacenter)) - - # Prefetch server list for later comparison. - server_list = profitbricks.list_servers(datacenter_id) - for instance in instance_ids: - # Locate UUID for server if referenced by name. 
- server_id = _get_server_id(server_list, instance) - if server_id: - # Remove the server's boot volume - if remove_boot_volume: - _remove_boot_volume(module, profitbricks, datacenter_id, server_id) - - # Remove the server - try: - server_response = profitbricks.delete_server(datacenter_id, server_id) - except Exception as e: - module.fail_json(msg="failed to terminate the virtual server: %s" % to_native(e), exception=traceback.format_exc()) - else: - changed = True - - return changed - - -def _remove_boot_volume(module, profitbricks, datacenter_id, server_id): - """ - Remove the boot volume from the server - """ - try: - server = profitbricks.get_server(datacenter_id, server_id) - volume_id = server['properties']['bootVolume']['id'] - volume_response = profitbricks.delete_volume(datacenter_id, volume_id) - except Exception as e: - module.fail_json(msg="failed to remove the server's boot volume: %s" % to_native(e), exception=traceback.format_exc()) - - -def startstop_machine(module, profitbricks, state): - """ - Starts or Stops a virtual machine. - - module : AnsibleModule object - community.general.profitbricks: authenticated profitbricks object. - - Returns: - True when the servers process the action successfully, false otherwise. - """ - if not isinstance(module.params.get('instance_ids'), list) or len(module.params.get('instance_ids')) < 1: - module.fail_json(msg='instance_ids should be a list of virtual machine ids or names, aborting') - - wait = module.params.get('wait') - wait_timeout = module.params.get('wait_timeout') - changed = False - - datacenter = module.params.get('datacenter') - instance_ids = module.params.get('instance_ids') - - # Locate UUID for datacenter if referenced by name. - datacenter_list = profitbricks.list_datacenters() - datacenter_id = _get_datacenter_id(datacenter_list, datacenter) - if not datacenter_id: - module.fail_json(msg='Virtual data center \'%s\' not found.' % str(datacenter)) - - # Prefetch server list for later comparison. 
- server_list = profitbricks.list_servers(datacenter_id) - for instance in instance_ids: - # Locate UUID of server if referenced by name. - server_id = _get_server_id(server_list, instance) - if server_id: - _startstop_machine(module, profitbricks, datacenter_id, server_id) - changed = True - - if wait: - wait_timeout = time.time() + wait_timeout - while wait_timeout > time.time(): - matched_instances = [] - for res in profitbricks.list_servers(datacenter_id)['items']: - if state == 'running': - if res['properties']['vmState'].lower() == state: - matched_instances.append(res) - elif state == 'stopped': - if res['properties']['vmState'].lower() == 'shutoff': - matched_instances.append(res) - - if len(matched_instances) < len(instance_ids): - time.sleep(5) - else: - break - - if wait_timeout <= time.time(): - # waiting took too long - module.fail_json(msg="wait for virtual machine state timeout on %s" % time.asctime()) - - return (changed) - - -def _get_datacenter_id(datacenters, identity): - """ - Fetch and return datacenter UUID by datacenter name if found. - """ - for datacenter in datacenters['items']: - if identity in (datacenter['properties']['name'], datacenter['id']): - return datacenter['id'] - return None - - -def _get_server_id(servers, identity): - """ - Fetch and return server UUID by server name if found. 
- """ - for server in servers['items']: - if identity in (server['properties']['name'], server['id']): - return server['id'] - return None - - -def main(): - module = AnsibleModule( - argument_spec=dict( - datacenter=dict(), - name=dict(), - image=dict(), - cores=dict(type='int', default=2), - ram=dict(type='int', default=2048), - cpu_family=dict(choices=['AMD_OPTERON', 'INTEL_XEON'], - default='AMD_OPTERON'), - volume_size=dict(type='int', default=10), - disk_type=dict(choices=['HDD', 'SSD'], default='HDD'), - image_password=dict(no_log=True), - ssh_keys=dict(type='list', elements='str', default=[], no_log=False), - bus=dict(choices=['VIRTIO', 'IDE'], default='VIRTIO'), - lan=dict(type='int', default=1), - count=dict(type='int', default=1), - auto_increment=dict(type='bool', default=True), - instance_ids=dict(type='list', elements='str', default=[]), - subscription_user=dict(), - subscription_password=dict(no_log=True), - location=dict(choices=LOCATIONS, default='us/las'), - assign_public_ip=dict(type='bool', default=False), - wait=dict(type='bool', default=True), - wait_timeout=dict(type='int', default=600), - remove_boot_volume=dict(type='bool', default=True), - state=dict(default='present'), - ) - ) - - if not HAS_PB_SDK: - module.fail_json(msg='profitbricks required for this module') - - subscription_user = module.params.get('subscription_user') - subscription_password = module.params.get('subscription_password') - - profitbricks = ProfitBricksService( - username=subscription_user, - password=subscription_password) - - state = module.params.get('state') - - if state == 'absent': - if not module.params.get('datacenter'): - module.fail_json(msg='datacenter parameter is required ' + - 'for running or stopping machines.') - - try: - (changed) = remove_virtual_machine(module, profitbricks) - module.exit_json(changed=changed) - except Exception as e: - module.fail_json(msg='failed to set instance state: %s' % to_native(e), exception=traceback.format_exc()) - - elif 
state in ('running', 'stopped'): - if not module.params.get('datacenter'): - module.fail_json(msg='datacenter parameter is required for ' + - 'running or stopping machines.') - try: - (changed) = startstop_machine(module, profitbricks, state) - module.exit_json(changed=changed) - except Exception as e: - module.fail_json(msg='failed to set instance state: %s' % to_native(e), exception=traceback.format_exc()) - - elif state == 'present': - if not module.params.get('name'): - module.fail_json(msg='name parameter is required for new instance') - if not module.params.get('image'): - module.fail_json(msg='image parameter is required for new instance') - if not module.params.get('subscription_user'): - module.fail_json(msg='subscription_user parameter is ' + - 'required for new instance') - if not module.params.get('subscription_password'): - module.fail_json(msg='subscription_password parameter is ' + - 'required for new instance') - - try: - (machine_dict_array) = create_virtual_machine(module, profitbricks) - module.exit_json(**machine_dict_array) - except Exception as e: - module.fail_json(msg='failed to set instance state: %s' % to_native(e), exception=traceback.format_exc()) - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/cloud/profitbricks/profitbricks_datacenter.py b/plugins/modules/cloud/profitbricks/profitbricks_datacenter.py deleted file mode 100644 index 7897ffdeb9..0000000000 --- a/plugins/modules/cloud/profitbricks/profitbricks_datacenter.py +++ /dev/null @@ -1,258 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# Copyright: Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' ---- -module: profitbricks_datacenter -short_description: Create or destroy a ProfitBricks Virtual Datacenter. 
-description: - - This is a simple module that supports creating or removing vDCs. A vDC is required before you can create servers. This module has a dependency - on profitbricks >= 1.0.0 -options: - name: - description: - - The name of the virtual datacenter. - type: str - description: - description: - - The description of the virtual datacenter. - type: str - required: false - location: - description: - - The datacenter location. - type: str - required: false - default: us/las - choices: [ "us/las", "de/fra", "de/fkb" ] - subscription_user: - description: - - The ProfitBricks username. Overrides the PB_SUBSCRIPTION_ID environment variable. - type: str - required: false - subscription_password: - description: - - THe ProfitBricks password. Overrides the PB_PASSWORD environment variable. - type: str - required: false - wait: - description: - - wait for the datacenter to be created before returning - required: false - default: "yes" - type: bool - wait_timeout: - description: - - how long before wait gives up, in seconds - type: int - default: 600 - state: - description: - - Create or terminate datacenters. - - "The available choices are: C(present), C(absent)." 
- type: str - required: false - default: 'present' - -requirements: [ "profitbricks" ] -author: Matt Baldwin (@baldwinSPC) -''' - -EXAMPLES = ''' -- name: Create a datacenter - community.general.profitbricks_datacenter: - datacenter: Tardis One - wait_timeout: 500 - -- name: Destroy a datacenter (remove all servers, volumes, and other objects in the datacenter) - community.general.profitbricks_datacenter: - datacenter: Tardis One - wait_timeout: 500 - state: absent -''' - -import re -import time - -HAS_PB_SDK = True -try: - from profitbricks.client import ProfitBricksService, Datacenter -except ImportError: - HAS_PB_SDK = False - -from ansible.module_utils.basic import AnsibleModule - - -LOCATIONS = ['us/las', - 'de/fra', - 'de/fkb'] - -uuid_match = re.compile( - r'[\w]{8}-[\w]{4}-[\w]{4}-[\w]{4}-[\w]{12}', re.I) - - -def _wait_for_completion(profitbricks, promise, wait_timeout, msg): - if not promise: - return - wait_timeout = time.time() + wait_timeout - while wait_timeout > time.time(): - time.sleep(5) - operation_result = profitbricks.get_request( - request_id=promise['requestId'], - status=True) - - if operation_result['metadata']['status'] == "DONE": - return - elif operation_result['metadata']['status'] == "FAILED": - raise Exception( - 'Request failed to complete ' + msg + ' "' + str( - promise['requestId']) + '" to complete.') - - raise Exception( - 'Timed out waiting for async operation ' + msg + ' "' + str( - promise['requestId'] - ) + '" to complete.') - - -def _remove_datacenter(module, profitbricks, datacenter): - try: - profitbricks.delete_datacenter(datacenter) - except Exception as e: - module.fail_json(msg="failed to remove the datacenter: %s" % str(e)) - - -def create_datacenter(module, profitbricks): - """ - Creates a Datacenter - - This will create a new Datacenter in the specified location. - - module : AnsibleModule object - profitbricks: authenticated profitbricks object. 
- - Returns: - True if a new datacenter was created, false otherwise - """ - name = module.params.get('name') - location = module.params.get('location') - description = module.params.get('description') - wait = module.params.get('wait') - wait_timeout = int(module.params.get('wait_timeout')) - - i = Datacenter( - name=name, - location=location, - description=description - ) - - try: - datacenter_response = profitbricks.create_datacenter(datacenter=i) - - if wait: - _wait_for_completion(profitbricks, datacenter_response, - wait_timeout, "_create_datacenter") - - results = { - 'datacenter_id': datacenter_response['id'] - } - - return results - - except Exception as e: - module.fail_json(msg="failed to create the new datacenter: %s" % str(e)) - - -def remove_datacenter(module, profitbricks): - """ - Removes a Datacenter. - - This will remove a datacenter. - - module : AnsibleModule object - profitbricks: authenticated profitbricks object. - - Returns: - True if the datacenter was deleted, false otherwise - """ - name = module.params.get('name') - changed = False - - if(uuid_match.match(name)): - _remove_datacenter(module, profitbricks, name) - changed = True - else: - datacenters = profitbricks.list_datacenters() - - for d in datacenters['items']: - vdc = profitbricks.get_datacenter(d['id']) - - if name == vdc['properties']['name']: - name = d['id'] - _remove_datacenter(module, profitbricks, name) - changed = True - - return changed - - -def main(): - module = AnsibleModule( - argument_spec=dict( - name=dict(), - description=dict(), - location=dict(choices=LOCATIONS, default='us/las'), - subscription_user=dict(), - subscription_password=dict(no_log=True), - wait=dict(type='bool', default=True), - wait_timeout=dict(default=600, type='int'), - state=dict(default='present'), # @TODO add choices - ) - ) - if not HAS_PB_SDK: - module.fail_json(msg='profitbricks required for this module') - - if not module.params.get('subscription_user'): - 
module.fail_json(msg='subscription_user parameter is required') - if not module.params.get('subscription_password'): - module.fail_json(msg='subscription_password parameter is required') - - subscription_user = module.params.get('subscription_user') - subscription_password = module.params.get('subscription_password') - - profitbricks = ProfitBricksService( - username=subscription_user, - password=subscription_password) - - state = module.params.get('state') - - if state == 'absent': - if not module.params.get('name'): - module.fail_json(msg='name parameter is required deleting a virtual datacenter.') - - try: - (changed) = remove_datacenter(module, profitbricks) - module.exit_json( - changed=changed) - except Exception as e: - module.fail_json(msg='failed to set datacenter state: %s' % str(e)) - - elif state == 'present': - if not module.params.get('name'): - module.fail_json(msg='name parameter is required for a new datacenter') - if not module.params.get('location'): - module.fail_json(msg='location parameter is required for a new datacenter') - - try: - (datacenter_dict_array) = create_datacenter(module, profitbricks) - module.exit_json(**datacenter_dict_array) - except Exception as e: - module.fail_json(msg='failed to set datacenter state: %s' % str(e)) - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/cloud/profitbricks/profitbricks_nic.py b/plugins/modules/cloud/profitbricks/profitbricks_nic.py deleted file mode 100644 index 5d98e05e4b..0000000000 --- a/plugins/modules/cloud/profitbricks/profitbricks_nic.py +++ /dev/null @@ -1,289 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# Copyright: Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' ---- -module: profitbricks_nic -short_description: Create or Remove a NIC. 
-description: - - This module allows you to create or restore a volume snapshot. This module has a dependency on profitbricks >= 1.0.0 -options: - datacenter: - description: - - The datacenter in which to operate. - type: str - required: true - server: - description: - - The server name or ID. - type: str - required: true - name: - description: - - The name or ID of the NIC. This is only required on deletes, but not on create. - - If not specified, it defaults to a value based on UUID4. - type: str - lan: - description: - - The LAN to place the NIC on. You can pass a LAN that doesn't exist and it will be created. Required on create. - type: str - subscription_user: - description: - - The ProfitBricks username. Overrides the PB_SUBSCRIPTION_ID environment variable. - type: str - required: true - subscription_password: - description: - - THe ProfitBricks password. Overrides the PB_PASSWORD environment variable. - type: str - required: true - wait: - description: - - wait for the operation to complete before returning - required: false - default: "yes" - type: bool - wait_timeout: - description: - - how long before wait gives up, in seconds - type: int - default: 600 - state: - description: - - Indicate desired state of the resource - - "The available choices are: C(present), C(absent)." 
- type: str - required: false - default: 'present' - -requirements: [ "profitbricks" ] -author: Matt Baldwin (@baldwinSPC) -''' - -EXAMPLES = ''' -- name: Create a NIC - community.general.profitbricks_nic: - datacenter: Tardis One - server: node002 - lan: 2 - wait_timeout: 500 - state: present - -- name: Remove a NIC - community.general.profitbricks_nic: - datacenter: Tardis One - server: node002 - name: 7341c2454f - wait_timeout: 500 - state: absent -''' - -import re -import uuid -import time - -HAS_PB_SDK = True -try: - from profitbricks.client import ProfitBricksService, NIC -except ImportError: - HAS_PB_SDK = False - -from ansible.module_utils.basic import AnsibleModule - - -uuid_match = re.compile( - r'[\w]{8}-[\w]{4}-[\w]{4}-[\w]{4}-[\w]{12}', re.I) - - -def _make_default_name(): - return str(uuid.uuid4()).replace('-', '')[:10] - - -def _wait_for_completion(profitbricks, promise, wait_timeout, msg): - if not promise: - return - wait_timeout = time.time() + wait_timeout - while wait_timeout > time.time(): - time.sleep(5) - operation_result = profitbricks.get_request( - request_id=promise['requestId'], - status=True) - - if operation_result['metadata']['status'] == "DONE": - return - elif operation_result['metadata']['status'] == "FAILED": - raise Exception( - 'Request failed to complete ' + msg + ' "' + str( - promise['requestId']) + '" to complete.') - - raise Exception( - 'Timed out waiting for async operation ' + msg + ' "' + str( - promise['requestId'] - ) + '" to complete.') - - -def create_nic(module, profitbricks): - """ - Creates a NIC. - - module : AnsibleModule object - profitbricks: authenticated profitbricks object. 
- - Returns: - True if the nic creates, false otherwise - """ - datacenter = module.params.get('datacenter') - server = module.params.get('server') - lan = module.params.get('lan') - name = module.params.get('name') - if name is None: - name = _make_default_name() - wait = module.params.get('wait') - wait_timeout = module.params.get('wait_timeout') - - # Locate UUID for Datacenter - if not (uuid_match.match(datacenter)): - datacenter_list = profitbricks.list_datacenters() - for d in datacenter_list['items']: - dc = profitbricks.get_datacenter(d['id']) - if datacenter == dc['properties']['name']: - datacenter = d['id'] - break - - # Locate UUID for Server - if not (uuid_match.match(server)): - server_list = profitbricks.list_servers(datacenter) - for s in server_list['items']: - if server == s['properties']['name']: - server = s['id'] - break - try: - n = NIC( - name=name, - lan=lan - ) - - nic_response = profitbricks.create_nic(datacenter, server, n) - - if wait: - _wait_for_completion(profitbricks, nic_response, - wait_timeout, "create_nic") - - return nic_response - - except Exception as e: - module.fail_json(msg="failed to create the NIC: %s" % str(e)) - - -def delete_nic(module, profitbricks): - """ - Removes a NIC - - module : AnsibleModule object - profitbricks: authenticated profitbricks object. 
- - Returns: - True if the NIC was removed, false otherwise - """ - datacenter = module.params.get('datacenter') - server = module.params.get('server') - name = module.params.get('name') - if name is None: - name = _make_default_name() - - # Locate UUID for Datacenter - if not (uuid_match.match(datacenter)): - datacenter_list = profitbricks.list_datacenters() - for d in datacenter_list['items']: - dc = profitbricks.get_datacenter(d['id']) - if datacenter == dc['properties']['name']: - datacenter = d['id'] - break - - # Locate UUID for Server - server_found = False - if not (uuid_match.match(server)): - server_list = profitbricks.list_servers(datacenter) - for s in server_list['items']: - if server == s['properties']['name']: - server_found = True - server = s['id'] - break - - if not server_found: - return False - - # Locate UUID for NIC - nic_found = False - if not (uuid_match.match(name)): - nic_list = profitbricks.list_nics(datacenter, server) - for n in nic_list['items']: - if name == n['properties']['name']: - nic_found = True - name = n['id'] - break - - if not nic_found: - return False - - try: - nic_response = profitbricks.delete_nic(datacenter, server, name) - return nic_response - except Exception as e: - module.fail_json(msg="failed to remove the NIC: %s" % str(e)) - - -def main(): - module = AnsibleModule( - argument_spec=dict( - datacenter=dict(required=True), - server=dict(required=True), - name=dict(), - lan=dict(), - subscription_user=dict(required=True), - subscription_password=dict(required=True, no_log=True), - wait=dict(type='bool', default=True), - wait_timeout=dict(type='int', default=600), - state=dict(default='present'), - ), - required_if=( - ('state', 'absent', ['name']), - ('state', 'present', ['lan']), - ) - ) - - if not HAS_PB_SDK: - module.fail_json(msg='profitbricks required for this module') - - subscription_user = module.params.get('subscription_user') - subscription_password = module.params.get('subscription_password') - - 
profitbricks = ProfitBricksService( - username=subscription_user, - password=subscription_password) - - state = module.params.get('state') - - if state == 'absent': - try: - (changed) = delete_nic(module, profitbricks) - module.exit_json(changed=changed) - except Exception as e: - module.fail_json(msg='failed to set nic state: %s' % str(e)) - - elif state == 'present': - try: - (nic_dict) = create_nic(module, profitbricks) - module.exit_json(nics=nic_dict) # @FIXME changed not calculated? - except Exception as e: - module.fail_json(msg='failed to set nic state: %s' % str(e)) - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/cloud/profitbricks/profitbricks_volume.py b/plugins/modules/cloud/profitbricks/profitbricks_volume.py deleted file mode 100644 index be1c18b55a..0000000000 --- a/plugins/modules/cloud/profitbricks/profitbricks_volume.py +++ /dev/null @@ -1,432 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# Copyright: Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' ---- -module: profitbricks_volume -short_description: Create or destroy a volume. -description: - - Allows you to create or remove a volume from a ProfitBricks datacenter. This module has a dependency on profitbricks >= 1.0.0 -options: - datacenter: - description: - - The datacenter in which to create the volumes. - type: str - name: - description: - - The name of the volumes. You can enumerate the names using auto_increment. - type: str - size: - description: - - The size of the volume. - type: int - required: false - default: 10 - bus: - description: - - The bus type. - type: str - required: false - default: VIRTIO - choices: [ "IDE", "VIRTIO"] - image: - description: - - The system image ID for the volume, e.g. a3eae284-a2fe-11e4-b187-5f1f641608c8. This can also be a snapshot image ID. 
- type: str - image_password: - description: - - Password set for the administrative user. - type: str - required: false - ssh_keys: - description: - - Public SSH keys allowing access to the virtual machine. - type: list - elements: str - required: false - disk_type: - description: - - The disk type of the volume. - type: str - required: false - default: HDD - choices: [ "HDD", "SSD" ] - licence_type: - description: - - The licence type for the volume. This is used when the image is non-standard. - - "The available choices are: C(LINUX), C(WINDOWS), C(UNKNOWN), C(OTHER)." - type: str - required: false - default: UNKNOWN - count: - description: - - The number of volumes you wish to create. - type: int - required: false - default: 1 - auto_increment: - description: - - Whether or not to increment a single number in the name for created virtual machines. - default: yes - type: bool - instance_ids: - description: - - list of instance ids, currently only used when state='absent' to remove instances. - type: list - elements: str - required: false - subscription_user: - description: - - The ProfitBricks username. Overrides the PB_SUBSCRIPTION_ID environment variable. - type: str - required: false - subscription_password: - description: - - THe ProfitBricks password. Overrides the PB_PASSWORD environment variable. - type: str - required: false - wait: - description: - - wait for the datacenter to be created before returning - required: false - default: "yes" - type: bool - wait_timeout: - description: - - how long before wait gives up, in seconds - type: int - default: 600 - state: - description: - - create or terminate datacenters - - "The available choices are: C(present), C(absent)." - type: str - required: false - default: 'present' - server: - description: - - Server name to attach the volume to. 
- type: str - -requirements: [ "profitbricks" ] -author: Matt Baldwin (@baldwinSPC) -''' - -EXAMPLES = ''' -- name: Create multiple volumes - community.general.profitbricks_volume: - datacenter: Tardis One - name: vol%02d - count: 5 - auto_increment: yes - wait_timeout: 500 - state: present - -- name: Remove Volumes - community.general.profitbricks_volume: - datacenter: Tardis One - instance_ids: - - 'vol01' - - 'vol02' - wait_timeout: 500 - state: absent -''' - -import re -import time -import traceback - -HAS_PB_SDK = True -try: - from profitbricks.client import ProfitBricksService, Volume -except ImportError: - HAS_PB_SDK = False - -from ansible.module_utils.basic import AnsibleModule -from ansible.module_utils.six.moves import xrange -from ansible.module_utils.common.text.converters import to_native - - -uuid_match = re.compile( - r'[\w]{8}-[\w]{4}-[\w]{4}-[\w]{4}-[\w]{12}', re.I) - - -def _wait_for_completion(profitbricks, promise, wait_timeout, msg): - if not promise: - return - wait_timeout = time.time() + wait_timeout - while wait_timeout > time.time(): - time.sleep(5) - operation_result = profitbricks.get_request( - request_id=promise['requestId'], - status=True) - - if operation_result['metadata']['status'] == "DONE": - return - elif operation_result['metadata']['status'] == "FAILED": - raise Exception( - 'Request failed to complete ' + msg + ' "' + str( - promise['requestId']) + '" to complete.') - - raise Exception( - 'Timed out waiting for async operation ' + msg + ' "' + str( - promise['requestId'] - ) + '" to complete.') - - -def _create_volume(module, profitbricks, datacenter, name): - size = module.params.get('size') - bus = module.params.get('bus') - image = module.params.get('image') - image_password = module.params.get('image_password') - ssh_keys = module.params.get('ssh_keys') - disk_type = module.params.get('disk_type') - licence_type = module.params.get('licence_type') - wait_timeout = module.params.get('wait_timeout') - wait = 
module.params.get('wait') - - try: - v = Volume( - name=name, - size=size, - bus=bus, - image=image, - image_password=image_password, - ssh_keys=ssh_keys, - disk_type=disk_type, - licence_type=licence_type - ) - - volume_response = profitbricks.create_volume(datacenter, v) - - if wait: - _wait_for_completion(profitbricks, volume_response, - wait_timeout, "_create_volume") - - except Exception as e: - module.fail_json(msg="failed to create the volume: %s" % str(e)) - - return volume_response - - -def _delete_volume(module, profitbricks, datacenter, volume): - try: - profitbricks.delete_volume(datacenter, volume) - except Exception as e: - module.fail_json(msg="failed to remove the volume: %s" % str(e)) - - -def create_volume(module, profitbricks): - """ - Creates a volume. - - This will create a volume in a datacenter. - - module : AnsibleModule object - profitbricks: authenticated profitbricks object. - - Returns: - True if the volume was created, false otherwise - """ - datacenter = module.params.get('datacenter') - name = module.params.get('name') - auto_increment = module.params.get('auto_increment') - count = module.params.get('count') - - datacenter_found = False - failed = True - volumes = [] - - # Locate UUID for Datacenter - if not (uuid_match.match(datacenter)): - datacenter_list = profitbricks.list_datacenters() - for d in datacenter_list['items']: - dc = profitbricks.get_datacenter(d['id']) - if datacenter == dc['properties']['name']: - datacenter = d['id'] - datacenter_found = True - break - - if not datacenter_found: - module.fail_json(msg='datacenter could not be found.') - - if auto_increment: - numbers = set() - count_offset = 1 - - try: - name % 0 - except TypeError as e: - if e.message.startswith('not all'): - name = '%s%%d' % name - else: - module.fail_json(msg=e.message, exception=traceback.format_exc()) - - number_range = xrange(count_offset, count_offset + count + len(numbers)) - available_numbers = list(set(number_range).difference(numbers)) 
- names = [] - numbers_to_use = available_numbers[:count] - for number in numbers_to_use: - names.append(name % number) - else: - names = [name] * count - - for name in names: - create_response = _create_volume(module, profitbricks, str(datacenter), name) - volumes.append(create_response) - _attach_volume(module, profitbricks, datacenter, create_response['id']) - failed = False - - results = { - 'failed': failed, - 'volumes': volumes, - 'action': 'create', - 'instance_ids': { - 'instances': [i['id'] for i in volumes], - } - } - - return results - - -def delete_volume(module, profitbricks): - """ - Removes a volume. - - This will create a volume in a datacenter. - - module : AnsibleModule object - profitbricks: authenticated profitbricks object. - - Returns: - True if the volume was removed, false otherwise - """ - if not isinstance(module.params.get('instance_ids'), list) or len(module.params.get('instance_ids')) < 1: - module.fail_json(msg='instance_ids should be a list of virtual machine ids or names, aborting') - - datacenter = module.params.get('datacenter') - changed = False - instance_ids = module.params.get('instance_ids') - - # Locate UUID for Datacenter - if not (uuid_match.match(datacenter)): - datacenter_list = profitbricks.list_datacenters() - for d in datacenter_list['items']: - dc = profitbricks.get_datacenter(d['id']) - if datacenter == dc['properties']['name']: - datacenter = d['id'] - break - - for n in instance_ids: - if(uuid_match.match(n)): - _delete_volume(module, profitbricks, datacenter, n) - changed = True - else: - volumes = profitbricks.list_volumes(datacenter) - for v in volumes['items']: - if n == v['properties']['name']: - volume_id = v['id'] - _delete_volume(module, profitbricks, datacenter, volume_id) - changed = True - - return changed - - -def _attach_volume(module, profitbricks, datacenter, volume): - """ - Attaches a volume. - - This will attach a volume to the server. 
- - module : AnsibleModule object - profitbricks: authenticated profitbricks object. - - Returns: - True if the volume was attached, false otherwise - """ - server = module.params.get('server') - - # Locate UUID for Server - if server: - if not (uuid_match.match(server)): - server_list = profitbricks.list_servers(datacenter) - for s in server_list['items']: - if server == s['properties']['name']: - server = s['id'] - break - - try: - return profitbricks.attach_volume(datacenter, server, volume) - except Exception as e: - module.fail_json(msg='failed to attach volume: %s' % to_native(e), exception=traceback.format_exc()) - - -def main(): - module = AnsibleModule( - argument_spec=dict( - datacenter=dict(), - server=dict(), - name=dict(), - size=dict(type='int', default=10), - bus=dict(choices=['VIRTIO', 'IDE'], default='VIRTIO'), - image=dict(), - image_password=dict(no_log=True), - ssh_keys=dict(type='list', elements='str', default=[], no_log=False), - disk_type=dict(choices=['HDD', 'SSD'], default='HDD'), - licence_type=dict(default='UNKNOWN'), - count=dict(type='int', default=1), - auto_increment=dict(type='bool', default=True), - instance_ids=dict(type='list', elements='str', default=[]), - subscription_user=dict(), - subscription_password=dict(no_log=True), - wait=dict(type='bool', default=True), - wait_timeout=dict(type='int', default=600), - state=dict(default='present'), - ) - ) - - if not module.params.get('subscription_user'): - module.fail_json(msg='subscription_user parameter is required') - if not module.params.get('subscription_password'): - module.fail_json(msg='subscription_password parameter is required') - - subscription_user = module.params.get('subscription_user') - subscription_password = module.params.get('subscription_password') - - profitbricks = ProfitBricksService( - username=subscription_user, - password=subscription_password) - - state = module.params.get('state') - - if state == 'absent': - if not module.params.get('datacenter'): - 
module.fail_json(msg='datacenter parameter is required for running or stopping machines.') - - try: - (changed) = delete_volume(module, profitbricks) - module.exit_json(changed=changed) - except Exception as e: - module.fail_json(msg='failed to set volume state: %s' % to_native(e), exception=traceback.format_exc()) - - elif state == 'present': - if not module.params.get('datacenter'): - module.fail_json(msg='datacenter parameter is required for new instance') - if not module.params.get('name'): - module.fail_json(msg='name parameter is required for new instance') - - try: - (volume_dict_array) = create_volume(module, profitbricks) - module.exit_json(**volume_dict_array) - except Exception as e: - module.fail_json(msg='failed to set volume state: %s' % to_native(e), exception=traceback.format_exc()) - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/cloud/profitbricks/profitbricks_volume_attachments.py b/plugins/modules/cloud/profitbricks/profitbricks_volume_attachments.py deleted file mode 100644 index 1fb3f3c0e2..0000000000 --- a/plugins/modules/cloud/profitbricks/profitbricks_volume_attachments.py +++ /dev/null @@ -1,259 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# Copyright: Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' ---- -module: profitbricks_volume_attachments -short_description: Attach or detach a volume. -description: - - Allows you to attach or detach a volume from a ProfitBricks server. This module has a dependency on profitbricks >= 1.0.0 -options: - datacenter: - description: - - The datacenter in which to operate. - type: str - server: - description: - - The name of the server you wish to detach or attach the volume. - type: str - volume: - description: - - The volume name or ID. 
- type: str - subscription_user: - description: - - The ProfitBricks username. Overrides the PB_SUBSCRIPTION_ID environment variable. - type: str - required: false - subscription_password: - description: - - THe ProfitBricks password. Overrides the PB_PASSWORD environment variable. - type: str - required: false - wait: - description: - - wait for the operation to complete before returning - required: false - default: "yes" - type: bool - wait_timeout: - description: - - how long before wait gives up, in seconds - type: int - default: 600 - state: - description: - - Indicate desired state of the resource - - "The available choices are: C(present), C(absent)." - type: str - required: false - default: 'present' - -requirements: [ "profitbricks" ] -author: Matt Baldwin (@baldwinSPC) -''' - -EXAMPLES = ''' -- name: Attach a volume - community.general.profitbricks_volume_attachments: - datacenter: Tardis One - server: node002 - volume: vol01 - wait_timeout: 500 - state: present - -- name: Detach a volume - community.general.profitbricks_volume_attachments: - datacenter: Tardis One - server: node002 - volume: vol01 - wait_timeout: 500 - state: absent -''' - -import re -import time - -HAS_PB_SDK = True -try: - from profitbricks.client import ProfitBricksService -except ImportError: - HAS_PB_SDK = False - -from ansible.module_utils.basic import AnsibleModule - - -uuid_match = re.compile( - r'[\w]{8}-[\w]{4}-[\w]{4}-[\w]{4}-[\w]{12}', re.I) - - -def _wait_for_completion(profitbricks, promise, wait_timeout, msg): - if not promise: - return - wait_timeout = time.time() + wait_timeout - while wait_timeout > time.time(): - time.sleep(5) - operation_result = profitbricks.get_request( - request_id=promise['requestId'], - status=True) - - if operation_result['metadata']['status'] == "DONE": - return - elif operation_result['metadata']['status'] == "FAILED": - raise Exception( - 'Request failed to complete ' + msg + ' "' + str( - promise['requestId']) + '" to complete.') - - raise 
Exception( - 'Timed out waiting for async operation ' + msg + ' "' + str( - promise['requestId'] - ) + '" to complete.') - - -def attach_volume(module, profitbricks): - """ - Attaches a volume. - - This will attach a volume to the server. - - module : AnsibleModule object - profitbricks: authenticated profitbricks object. - - Returns: - True if the volume was attached, false otherwise - """ - datacenter = module.params.get('datacenter') - server = module.params.get('server') - volume = module.params.get('volume') - - # Locate UUID for Datacenter - if not (uuid_match.match(datacenter)): - datacenter_list = profitbricks.list_datacenters() - for d in datacenter_list['items']: - dc = profitbricks.get_datacenter(d['id']) - if datacenter == dc['properties']['name']: - datacenter = d['id'] - break - - # Locate UUID for Server - if not (uuid_match.match(server)): - server_list = profitbricks.list_servers(datacenter) - for s in server_list['items']: - if server == s['properties']['name']: - server = s['id'] - break - - # Locate UUID for Volume - if not (uuid_match.match(volume)): - volume_list = profitbricks.list_volumes(datacenter) - for v in volume_list['items']: - if volume == v['properties']['name']: - volume = v['id'] - break - - return profitbricks.attach_volume(datacenter, server, volume) - - -def detach_volume(module, profitbricks): - """ - Detaches a volume. - - This will remove a volume from the server. - - module : AnsibleModule object - profitbricks: authenticated profitbricks object. 
- - Returns: - True if the volume was detached, false otherwise - """ - datacenter = module.params.get('datacenter') - server = module.params.get('server') - volume = module.params.get('volume') - - # Locate UUID for Datacenter - if not (uuid_match.match(datacenter)): - datacenter_list = profitbricks.list_datacenters() - for d in datacenter_list['items']: - dc = profitbricks.get_datacenter(d['id']) - if datacenter == dc['properties']['name']: - datacenter = d['id'] - break - - # Locate UUID for Server - if not (uuid_match.match(server)): - server_list = profitbricks.list_servers(datacenter) - for s in server_list['items']: - if server == s['properties']['name']: - server = s['id'] - break - - # Locate UUID for Volume - if not (uuid_match.match(volume)): - volume_list = profitbricks.list_volumes(datacenter) - for v in volume_list['items']: - if volume == v['properties']['name']: - volume = v['id'] - break - - return profitbricks.detach_volume(datacenter, server, volume) - - -def main(): - module = AnsibleModule( - argument_spec=dict( - datacenter=dict(), - server=dict(), - volume=dict(), - subscription_user=dict(), - subscription_password=dict(no_log=True), - wait=dict(type='bool', default=True), - wait_timeout=dict(type='int', default=600), - state=dict(default='present'), - ) - ) - - if not HAS_PB_SDK: - module.fail_json(msg='profitbricks required for this module') - - if not module.params.get('subscription_user'): - module.fail_json(msg='subscription_user parameter is required') - if not module.params.get('subscription_password'): - module.fail_json(msg='subscription_password parameter is required') - if not module.params.get('datacenter'): - module.fail_json(msg='datacenter parameter is required') - if not module.params.get('server'): - module.fail_json(msg='server parameter is required') - if not module.params.get('volume'): - module.fail_json(msg='volume parameter is required') - - subscription_user = module.params.get('subscription_user') - 
subscription_password = module.params.get('subscription_password') - - profitbricks = ProfitBricksService( - username=subscription_user, - password=subscription_password) - - state = module.params.get('state') - - if state == 'absent': - try: - (changed) = detach_volume(module, profitbricks) - module.exit_json(changed=changed) - except Exception as e: - module.fail_json(msg='failed to set volume_attach state: %s' % str(e)) - elif state == 'present': - try: - attach_volume(module, profitbricks) - module.exit_json() - except Exception as e: - module.fail_json(msg='failed to set volume_attach state: %s' % str(e)) - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/cloud/rackspace/rax.py b/plugins/modules/cloud/rackspace/rax.py deleted file mode 100644 index 8c452d9d72..0000000000 --- a/plugins/modules/cloud/rackspace/rax.py +++ /dev/null @@ -1,892 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# Copyright: Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' ---- -module: rax -short_description: create / delete an instance in Rackspace Public Cloud -description: - - creates / deletes a Rackspace Public Cloud instance and optionally - waits for it to be 'running'. -options: - auto_increment: - description: - - Whether or not to increment a single number with the name of the - created servers. Only applicable when used with the I(group) attribute - or meta key. - type: bool - default: 'yes' - boot_from_volume: - description: - - Whether or not to boot the instance from a Cloud Block Storage volume. - If C(yes) and I(image) is specified a new volume will be created at - boot time. I(boot_volume_size) is required with I(image) to create a - new volume at boot time. 
- type: bool - default: 'no' - boot_volume: - type: str - description: - - Cloud Block Storage ID or Name to use as the boot volume of the - instance - boot_volume_size: - type: int - description: - - Size of the volume to create in Gigabytes. This is only required with - I(image) and I(boot_from_volume). - default: 100 - boot_volume_terminate: - description: - - Whether the I(boot_volume) or newly created volume from I(image) will - be terminated when the server is terminated - type: bool - default: 'no' - config_drive: - description: - - Attach read-only configuration drive to server as label config-2 - type: bool - default: 'no' - count: - type: int - description: - - number of instances to launch - default: 1 - count_offset: - type: int - description: - - number count to start at - default: 1 - disk_config: - type: str - description: - - Disk partitioning strategy - - If not specified it will assume the value C(auto). - choices: - - auto - - manual - exact_count: - description: - - Explicitly ensure an exact count of instances, used with - state=active/present. If specified as C(yes) and I(count) is less than - the servers matched, servers will be deleted to match the count. If - the number of matched servers is fewer than specified in I(count) - additional servers will be added. - type: bool - default: 'no' - extra_client_args: - type: dict - description: - - A hash of key/value pairs to be used when creating the cloudservers - client. This is considered an advanced option, use it wisely and - with caution. - extra_create_args: - type: dict - description: - - A hash of key/value pairs to be used when creating a new server. - This is considered an advanced option, use it wisely and with caution. - files: - type: dict - description: - - Files to insert into the instance. 
remotefilename:localcontent - flavor: - type: str - description: - - flavor to use for the instance - group: - type: str - description: - - host group to assign to server, is also used for idempotent operations - to ensure a specific number of instances - image: - type: str - description: - - image to use for the instance. Can be an C(id), C(human_id) or C(name). - With I(boot_from_volume), a Cloud Block Storage volume will be created - with this image - instance_ids: - type: list - elements: str - description: - - list of instance ids, currently only used when state='absent' to - remove instances - key_name: - type: str - description: - - key pair to use on the instance - aliases: - - keypair - meta: - type: dict - description: - - A hash of metadata to associate with the instance - name: - type: str - description: - - Name to give the instance - networks: - type: list - elements: str - description: - - The network to attach to the instances. If specified, you must include - ALL networks including the public and private interfaces. Can be C(id) - or C(label). - default: - - public - - private - state: - type: str - description: - - Indicate desired state of the resource - choices: - - present - - absent - default: present - user_data: - type: str - description: - - Data to be uploaded to the servers config drive. This option implies - I(config_drive). Can be a file path or a string - wait: - description: - - wait for the instance to be in state 'running' before returning - type: bool - default: 'no' - wait_timeout: - type: int - description: - - how long before wait gives up, in seconds - default: 300 -author: - - "Jesse Keating (@omgjlk)" - - "Matt Martz (@sivel)" -notes: - - I(exact_count) can be "destructive" if the number of running servers in - the I(group) is larger than that specified in I(count). In such a case, the - I(state) is effectively set to C(absent) and the extra servers are deleted. 
- In the case of deletion, the returned data structure will have C(action) - set to C(delete), and the oldest servers in the group will be deleted. -extends_documentation_fragment: -- community.general.rackspace.openstack - -''' - -EXAMPLES = ''' -- name: Build a Cloud Server - gather_facts: False - tasks: - - name: Server build request - local_action: - module: rax - credentials: ~/.raxpub - name: rax-test1 - flavor: 5 - image: b11d9567-e412-4255-96b9-bd63ab23bcfe - key_name: my_rackspace_key - files: - /root/test.txt: /home/localuser/test.txt - wait: yes - state: present - networks: - - private - - public - register: rax - -- name: Build an exact count of cloud servers with incremented names - hosts: local - gather_facts: False - tasks: - - name: Server build requests - local_action: - module: rax - credentials: ~/.raxpub - name: test%03d.example.org - flavor: performance1-1 - image: ubuntu-1204-lts-precise-pangolin - state: present - count: 10 - count_offset: 10 - exact_count: yes - group: test - wait: yes - register: rax -''' - -import json -import os -import re -import time - -try: - import pyrax - HAS_PYRAX = True -except ImportError: - HAS_PYRAX = False - -from ansible.module_utils.basic import AnsibleModule -from ansible_collections.community.general.plugins.module_utils.rax import (FINAL_STATUSES, rax_argument_spec, rax_find_bootable_volume, - rax_find_image, rax_find_network, rax_find_volume, - rax_required_together, rax_to_dict, setup_rax_module) -from ansible.module_utils.six.moves import xrange -from ansible.module_utils.six import string_types - - -def rax_find_server_image(module, server, image, boot_volume): - if not image and boot_volume: - vol = rax_find_bootable_volume(module, pyrax, server, - exit=False) - if not vol: - return None - volume_image_metadata = vol.volume_image_metadata - vol_image_id = volume_image_metadata.get('image_id') - if vol_image_id: - server_image = rax_find_image(module, pyrax, - vol_image_id, exit=False) - if 
server_image: - server.image = dict(id=server_image) - - # Match image IDs taking care of boot from volume - if image and not server.image: - vol = rax_find_bootable_volume(module, pyrax, server) - volume_image_metadata = vol.volume_image_metadata - vol_image_id = volume_image_metadata.get('image_id') - if not vol_image_id: - return None - server_image = rax_find_image(module, pyrax, - vol_image_id, exit=False) - if image != server_image: - return None - - server.image = dict(id=server_image) - elif image and server.image['id'] != image: - return None - - return server.image - - -def create(module, names=None, flavor=None, image=None, meta=None, key_name=None, - files=None, wait=True, wait_timeout=300, disk_config=None, - group=None, nics=None, extra_create_args=None, user_data=None, - config_drive=False, existing=None, block_device_mapping_v2=None): - names = [] if names is None else names - meta = {} if meta is None else meta - files = {} if files is None else files - nics = [] if nics is None else nics - extra_create_args = {} if extra_create_args is None else extra_create_args - existing = [] if existing is None else existing - block_device_mapping_v2 = [] if block_device_mapping_v2 is None else block_device_mapping_v2 - - cs = pyrax.cloudservers - changed = False - - if user_data: - config_drive = True - - if user_data and os.path.isfile(os.path.expanduser(user_data)): - try: - user_data = os.path.expanduser(user_data) - f = open(user_data) - user_data = f.read() - f.close() - except Exception as e: - module.fail_json(msg='Failed to load %s' % user_data) - - # Handle the file contents - for rpath in files.keys(): - lpath = os.path.expanduser(files[rpath]) - try: - fileobj = open(lpath, 'r') - files[rpath] = fileobj.read() - fileobj.close() - except Exception as e: - module.fail_json(msg='Failed to load %s' % lpath) - try: - servers = [] - bdmv2 = block_device_mapping_v2 - for name in names: - servers.append(cs.servers.create(name=name, image=image, - 
flavor=flavor, meta=meta, - key_name=key_name, - files=files, nics=nics, - disk_config=disk_config, - config_drive=config_drive, - userdata=user_data, - block_device_mapping_v2=bdmv2, - **extra_create_args)) - except Exception as e: - if e.message: - msg = str(e.message) - else: - msg = repr(e) - module.fail_json(msg=msg) - else: - changed = True - - if wait: - end_time = time.time() + wait_timeout - infinite = wait_timeout == 0 - while infinite or time.time() < end_time: - for server in servers: - try: - server.get() - except Exception: - server.status = 'ERROR' - - if not filter(lambda s: s.status not in FINAL_STATUSES, - servers): - break - time.sleep(5) - - success = [] - error = [] - timeout = [] - for server in servers: - try: - server.get() - except Exception: - server.status = 'ERROR' - instance = rax_to_dict(server, 'server') - if server.status == 'ACTIVE' or not wait: - success.append(instance) - elif server.status == 'ERROR': - error.append(instance) - elif wait: - timeout.append(instance) - - untouched = [rax_to_dict(s, 'server') for s in existing] - instances = success + untouched - - results = { - 'changed': changed, - 'action': 'create', - 'instances': instances, - 'success': success, - 'error': error, - 'timeout': timeout, - 'instance_ids': { - 'instances': [i['id'] for i in instances], - 'success': [i['id'] for i in success], - 'error': [i['id'] for i in error], - 'timeout': [i['id'] for i in timeout] - } - } - - if timeout: - results['msg'] = 'Timeout waiting for all servers to build' - elif error: - results['msg'] = 'Failed to build all servers' - - if 'msg' in results: - module.fail_json(**results) - else: - module.exit_json(**results) - - -def delete(module, instance_ids=None, wait=True, wait_timeout=300, kept=None): - instance_ids = [] if instance_ids is None else instance_ids - kept = [] if kept is None else kept - - cs = pyrax.cloudservers - - changed = False - instances = {} - servers = [] - - for instance_id in instance_ids: - 
servers.append(cs.servers.get(instance_id)) - - for server in servers: - try: - server.delete() - except Exception as e: - module.fail_json(msg=e.message) - else: - changed = True - - instance = rax_to_dict(server, 'server') - instances[instance['id']] = instance - - # If requested, wait for server deletion - if wait: - end_time = time.time() + wait_timeout - infinite = wait_timeout == 0 - while infinite or time.time() < end_time: - for server in servers: - instance_id = server.id - try: - server.get() - except Exception: - instances[instance_id]['status'] = 'DELETED' - instances[instance_id]['rax_status'] = 'DELETED' - - if not filter(lambda s: s['status'] not in ('', 'DELETED', - 'ERROR'), - instances.values()): - break - - time.sleep(5) - - timeout = filter(lambda s: s['status'] not in ('', 'DELETED', 'ERROR'), - instances.values()) - error = filter(lambda s: s['status'] in ('ERROR'), - instances.values()) - success = filter(lambda s: s['status'] in ('', 'DELETED'), - instances.values()) - - instances = [rax_to_dict(s, 'server') for s in kept] - - results = { - 'changed': changed, - 'action': 'delete', - 'instances': instances, - 'success': success, - 'error': error, - 'timeout': timeout, - 'instance_ids': { - 'instances': [i['id'] for i in instances], - 'success': [i['id'] for i in success], - 'error': [i['id'] for i in error], - 'timeout': [i['id'] for i in timeout] - } - } - - if timeout: - results['msg'] = 'Timeout waiting for all servers to delete' - elif error: - results['msg'] = 'Failed to delete all servers' - - if 'msg' in results: - module.fail_json(**results) - else: - module.exit_json(**results) - - -def cloudservers(module, state=None, name=None, flavor=None, image=None, - meta=None, key_name=None, files=None, wait=True, wait_timeout=300, - disk_config=None, count=1, group=None, instance_ids=None, - exact_count=False, networks=None, count_offset=0, - auto_increment=False, extra_create_args=None, user_data=None, - config_drive=False, 
boot_from_volume=False, - boot_volume=None, boot_volume_size=None, - boot_volume_terminate=False): - meta = {} if meta is None else meta - files = {} if files is None else files - instance_ids = [] if instance_ids is None else instance_ids - networks = [] if networks is None else networks - extra_create_args = {} if extra_create_args is None else extra_create_args - - cs = pyrax.cloudservers - cnw = pyrax.cloud_networks - if not cnw: - module.fail_json(msg='Failed to instantiate client. This ' - 'typically indicates an invalid region or an ' - 'incorrectly capitalized region name.') - - if state == 'present' or (state == 'absent' and instance_ids is None): - if not boot_from_volume and not boot_volume and not image: - module.fail_json(msg='image is required for the "rax" module') - - for arg, value in dict(name=name, flavor=flavor).items(): - if not value: - module.fail_json(msg='%s is required for the "rax" module' % - arg) - - if boot_from_volume and not image and not boot_volume: - module.fail_json(msg='image or boot_volume are required for the ' - '"rax" with boot_from_volume') - - if boot_from_volume and image and not boot_volume_size: - module.fail_json(msg='boot_volume_size is required for the "rax" ' - 'module with boot_from_volume and image') - - if boot_from_volume and image and boot_volume: - image = None - - servers = [] - - # Add the group meta key - if group and 'group' not in meta: - meta['group'] = group - elif 'group' in meta and group is None: - group = meta['group'] - - # Normalize and ensure all metadata values are strings - for k, v in meta.items(): - if isinstance(v, list): - meta[k] = ','.join(['%s' % i for i in v]) - elif isinstance(v, dict): - meta[k] = json.dumps(v) - elif not isinstance(v, string_types): - meta[k] = '%s' % v - - # When using state=absent with group, the absent block won't match the - # names properly. 
Use the exact_count functionality to decrease the count - # to the desired level - was_absent = False - if group is not None and state == 'absent': - exact_count = True - state = 'present' - was_absent = True - - if image: - image = rax_find_image(module, pyrax, image) - - nics = [] - if networks: - for network in networks: - nics.extend(rax_find_network(module, pyrax, network)) - - # act on the state - if state == 'present': - # Idempotent ensurance of a specific count of servers - if exact_count is not False: - # See if we can find servers that match our options - if group is None: - module.fail_json(msg='"group" must be provided when using ' - '"exact_count"') - - if auto_increment: - numbers = set() - - # See if the name is a printf like string, if not append - # %d to the end - try: - name % 0 - except TypeError as e: - if e.message.startswith('not all'): - name = '%s%%d' % name - else: - module.fail_json(msg=e.message) - - # regex pattern to match printf formatting - pattern = re.sub(r'%\d*[sd]', r'(\d+)', name) - for server in cs.servers.list(): - # Ignore DELETED servers - if server.status == 'DELETED': - continue - if server.metadata.get('group') == group: - servers.append(server) - match = re.search(pattern, server.name) - if match: - number = int(match.group(1)) - numbers.add(number) - - number_range = xrange(count_offset, count_offset + count) - available_numbers = list(set(number_range) - .difference(numbers)) - else: # Not auto incrementing - for server in cs.servers.list(): - # Ignore DELETED servers - if server.status == 'DELETED': - continue - if server.metadata.get('group') == group: - servers.append(server) - # available_numbers not needed here, we inspect auto_increment - # again later - - # If state was absent but the count was changed, - # assume we only wanted to remove that number of instances - if was_absent: - diff = len(servers) - count - if diff < 0: - count = 0 - else: - count = diff - - if len(servers) > count: - # We have more servers 
than we need, set state='absent' - # and delete the extras, this should delete the oldest - state = 'absent' - kept = servers[:count] - del servers[:count] - instance_ids = [] - for server in servers: - instance_ids.append(server.id) - delete(module, instance_ids=instance_ids, wait=wait, - wait_timeout=wait_timeout, kept=kept) - elif len(servers) < count: - # we have fewer servers than we need - if auto_increment: - # auto incrementing server numbers - names = [] - name_slice = count - len(servers) - numbers_to_use = available_numbers[:name_slice] - for number in numbers_to_use: - names.append(name % number) - else: - # We are not auto incrementing server numbers, - # create a list of 'name' that matches how many we need - names = [name] * (count - len(servers)) - else: - # we have the right number of servers, just return info - # about all of the matched servers - instances = [] - instance_ids = [] - for server in servers: - instances.append(rax_to_dict(server, 'server')) - instance_ids.append(server.id) - module.exit_json(changed=False, action=None, - instances=instances, - success=[], error=[], timeout=[], - instance_ids={'instances': instance_ids, - 'success': [], 'error': [], - 'timeout': []}) - else: # not called with exact_count=True - if group is not None: - if auto_increment: - # we are auto incrementing server numbers, but not with - # exact_count - numbers = set() - - # See if the name is a printf like string, if not append - # %d to the end - try: - name % 0 - except TypeError as e: - if e.message.startswith('not all'): - name = '%s%%d' % name - else: - module.fail_json(msg=e.message) - - # regex pattern to match printf formatting - pattern = re.sub(r'%\d*[sd]', r'(\d+)', name) - for server in cs.servers.list(): - # Ignore DELETED servers - if server.status == 'DELETED': - continue - if server.metadata.get('group') == group: - servers.append(server) - match = re.search(pattern, server.name) - if match: - number = int(match.group(1)) - 
numbers.add(number) - - number_range = xrange(count_offset, - count_offset + count + len(numbers)) - available_numbers = list(set(number_range) - .difference(numbers)) - names = [] - numbers_to_use = available_numbers[:count] - for number in numbers_to_use: - names.append(name % number) - else: - # Not auto incrementing - names = [name] * count - else: - # No group was specified, and not using exact_count - # Perform more simplistic matching - search_opts = { - 'name': '^%s$' % name, - 'flavor': flavor - } - servers = [] - for server in cs.servers.list(search_opts=search_opts): - # Ignore DELETED servers - if server.status == 'DELETED': - continue - - if not rax_find_server_image(module, server, image, - boot_volume): - continue - - # Ignore servers with non matching metadata - if server.metadata != meta: - continue - servers.append(server) - - if len(servers) >= count: - # We have more servers than were requested, don't do - # anything. Not running with exact_count=True, so we assume - # more is OK - instances = [] - for server in servers: - instances.append(rax_to_dict(server, 'server')) - - instance_ids = [i['id'] for i in instances] - module.exit_json(changed=False, action=None, - instances=instances, success=[], error=[], - timeout=[], - instance_ids={'instances': instance_ids, - 'success': [], 'error': [], - 'timeout': []}) - - # We need more servers to reach out target, create names for - # them, we aren't performing auto_increment here - names = [name] * (count - len(servers)) - - block_device_mapping_v2 = [] - if boot_from_volume: - mapping = { - 'boot_index': '0', - 'delete_on_termination': boot_volume_terminate, - 'destination_type': 'volume', - } - if image: - mapping.update({ - 'uuid': image, - 'source_type': 'image', - 'volume_size': boot_volume_size, - }) - image = None - elif boot_volume: - volume = rax_find_volume(module, pyrax, boot_volume) - mapping.update({ - 'uuid': pyrax.utils.get_id(volume), - 'source_type': 'volume', - }) - 
block_device_mapping_v2.append(mapping) - - create(module, names=names, flavor=flavor, image=image, - meta=meta, key_name=key_name, files=files, wait=wait, - wait_timeout=wait_timeout, disk_config=disk_config, group=group, - nics=nics, extra_create_args=extra_create_args, - user_data=user_data, config_drive=config_drive, - existing=servers, - block_device_mapping_v2=block_device_mapping_v2) - - elif state == 'absent': - if instance_ids is None: - # We weren't given an explicit list of server IDs to delete - # Let's match instead - search_opts = { - 'name': '^%s$' % name, - 'flavor': flavor - } - for server in cs.servers.list(search_opts=search_opts): - # Ignore DELETED servers - if server.status == 'DELETED': - continue - - if not rax_find_server_image(module, server, image, - boot_volume): - continue - - # Ignore servers with non matching metadata - if meta != server.metadata: - continue - - servers.append(server) - - # Build a list of server IDs to delete - instance_ids = [] - for server in servers: - if len(instance_ids) < count: - instance_ids.append(server.id) - else: - break - - if not instance_ids: - # No server IDs were matched for deletion, or no IDs were - # explicitly provided, just exit and don't do anything - module.exit_json(changed=False, action=None, instances=[], - success=[], error=[], timeout=[], - instance_ids={'instances': [], - 'success': [], 'error': [], - 'timeout': []}) - - delete(module, instance_ids=instance_ids, wait=wait, - wait_timeout=wait_timeout) - - -def main(): - argument_spec = rax_argument_spec() - argument_spec.update( - dict( - auto_increment=dict(default=True, type='bool'), - boot_from_volume=dict(default=False, type='bool'), - boot_volume=dict(type='str'), - boot_volume_size=dict(type='int', default=100), - boot_volume_terminate=dict(type='bool', default=False), - config_drive=dict(default=False, type='bool'), - count=dict(default=1, type='int'), - count_offset=dict(default=1, type='int'), - disk_config=dict(choices=['auto', 
'manual']), - exact_count=dict(default=False, type='bool'), - extra_client_args=dict(type='dict', default={}), - extra_create_args=dict(type='dict', default={}), - files=dict(type='dict', default={}), - flavor=dict(), - group=dict(), - image=dict(), - instance_ids=dict(type='list', elements='str'), - key_name=dict(aliases=['keypair']), - meta=dict(type='dict', default={}), - name=dict(), - networks=dict(type='list', elements='str', default=['public', 'private']), - state=dict(default='present', choices=['present', 'absent']), - user_data=dict(no_log=True), - wait=dict(default=False, type='bool'), - wait_timeout=dict(default=300, type='int'), - ) - ) - - module = AnsibleModule( - argument_spec=argument_spec, - required_together=rax_required_together(), - ) - - if not HAS_PYRAX: - module.fail_json(msg='pyrax is required for this module') - - auto_increment = module.params.get('auto_increment') - boot_from_volume = module.params.get('boot_from_volume') - boot_volume = module.params.get('boot_volume') - boot_volume_size = module.params.get('boot_volume_size') - boot_volume_terminate = module.params.get('boot_volume_terminate') - config_drive = module.params.get('config_drive') - count = module.params.get('count') - count_offset = module.params.get('count_offset') - disk_config = module.params.get('disk_config') - if disk_config: - disk_config = disk_config.upper() - exact_count = module.params.get('exact_count', False) - extra_client_args = module.params.get('extra_client_args') - extra_create_args = module.params.get('extra_create_args') - files = module.params.get('files') - flavor = module.params.get('flavor') - group = module.params.get('group') - image = module.params.get('image') - instance_ids = module.params.get('instance_ids') - key_name = module.params.get('key_name') - meta = module.params.get('meta') - name = module.params.get('name') - networks = module.params.get('networks') - state = module.params.get('state') - user_data = module.params.get('user_data') 
- wait = module.params.get('wait') - wait_timeout = int(module.params.get('wait_timeout')) - - setup_rax_module(module, pyrax) - - if extra_client_args: - pyrax.cloudservers = pyrax.connect_to_cloudservers( - region=pyrax.cloudservers.client.region_name, - **extra_client_args) - client = pyrax.cloudservers.client - if 'bypass_url' in extra_client_args: - client.management_url = extra_client_args['bypass_url'] - - if pyrax.cloudservers is None: - module.fail_json(msg='Failed to instantiate client. This ' - 'typically indicates an invalid region or an ' - 'incorrectly capitalized region name.') - - cloudservers(module, state=state, name=name, flavor=flavor, - image=image, meta=meta, key_name=key_name, files=files, - wait=wait, wait_timeout=wait_timeout, disk_config=disk_config, - count=count, group=group, instance_ids=instance_ids, - exact_count=exact_count, networks=networks, - count_offset=count_offset, auto_increment=auto_increment, - extra_create_args=extra_create_args, user_data=user_data, - config_drive=config_drive, boot_from_volume=boot_from_volume, - boot_volume=boot_volume, boot_volume_size=boot_volume_size, - boot_volume_terminate=boot_volume_terminate) - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/cloud/rackspace/rax_cbs.py b/plugins/modules/cloud/rackspace/rax_cbs.py deleted file mode 100644 index abfda419ed..0000000000 --- a/plugins/modules/cloud/rackspace/rax_cbs.py +++ /dev/null @@ -1,227 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# Copyright: Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' ---- -module: rax_cbs -short_description: Manipulate Rackspace Cloud Block Storage Volumes -description: - - Manipulate Rackspace Cloud Block Storage Volumes -options: - description: - type: str - description: - - Description to give the volume being created 
- image: - type: str - description: - - image to use for bootable volumes. Can be an C(id), C(human_id) or - C(name). This option requires C(pyrax>=1.9.3) - meta: - type: dict - description: - - A hash of metadata to associate with the volume - name: - type: str - description: - - Name to give the volume being created - required: true - size: - type: int - description: - - Size of the volume to create in Gigabytes - default: 100 - snapshot_id: - type: str - description: - - The id of the snapshot to create the volume from - state: - type: str - description: - - Indicate desired state of the resource - choices: - - present - - absent - default: present - volume_type: - type: str - description: - - Type of the volume being created - choices: - - SATA - - SSD - default: SATA - wait: - description: - - wait for the volume to be in state 'available' before returning - type: bool - default: 'no' - wait_timeout: - type: int - description: - - how long before wait gives up, in seconds - default: 300 -author: - - "Christopher H. 
Laco (@claco)" - - "Matt Martz (@sivel)" -extends_documentation_fragment: -- community.general.rackspace.openstack - -''' - -EXAMPLES = ''' -- name: Build a Block Storage Volume - gather_facts: False - hosts: local - connection: local - tasks: - - name: Storage volume create request - local_action: - module: rax_cbs - credentials: ~/.raxpub - name: my-volume - description: My Volume - volume_type: SSD - size: 150 - region: DFW - wait: yes - state: present - meta: - app: my-cool-app - register: my_volume -''' - -from ansible_collections.community.general.plugins.module_utils.version import LooseVersion - -try: - import pyrax - HAS_PYRAX = True -except ImportError: - HAS_PYRAX = False - -from ansible.module_utils.basic import AnsibleModule -from ansible_collections.community.general.plugins.module_utils.rax import (VOLUME_STATUS, rax_argument_spec, rax_find_image, rax_find_volume, - rax_required_together, rax_to_dict, setup_rax_module) - - -def cloud_block_storage(module, state, name, description, meta, size, - snapshot_id, volume_type, wait, wait_timeout, - image): - changed = False - volume = None - instance = {} - - cbs = pyrax.cloud_blockstorage - - if cbs is None: - module.fail_json(msg='Failed to instantiate client. 
This ' - 'typically indicates an invalid region or an ' - 'incorrectly capitalized region name.') - - if image: - # pyrax<1.9.3 did not have support for specifying an image when - # creating a volume which is required for bootable volumes - if LooseVersion(pyrax.version.version) < LooseVersion('1.9.3'): - module.fail_json(msg='Creating a bootable volume requires ' - 'pyrax>=1.9.3') - image = rax_find_image(module, pyrax, image) - - volume = rax_find_volume(module, pyrax, name) - - if state == 'present': - if not volume: - kwargs = dict() - if image: - kwargs['image'] = image - try: - volume = cbs.create(name, size=size, volume_type=volume_type, - description=description, - metadata=meta, - snapshot_id=snapshot_id, **kwargs) - changed = True - except Exception as e: - module.fail_json(msg='%s' % e.message) - else: - if wait: - attempts = wait_timeout // 5 - pyrax.utils.wait_for_build(volume, interval=5, - attempts=attempts) - - volume.get() - instance = rax_to_dict(volume) - - result = dict(changed=changed, volume=instance) - - if volume.status == 'error': - result['msg'] = '%s failed to build' % volume.id - elif wait and volume.status not in VOLUME_STATUS: - result['msg'] = 'Timeout waiting on %s' % volume.id - - if 'msg' in result: - module.fail_json(**result) - else: - module.exit_json(**result) - - elif state == 'absent': - if volume: - instance = rax_to_dict(volume) - try: - volume.delete() - changed = True - except Exception as e: - module.fail_json(msg='%s' % e.message) - - module.exit_json(changed=changed, volume=instance) - - -def main(): - argument_spec = rax_argument_spec() - argument_spec.update( - dict( - description=dict(type='str'), - image=dict(type='str'), - meta=dict(type='dict', default={}), - name=dict(required=True), - size=dict(type='int', default=100), - snapshot_id=dict(), - state=dict(default='present', choices=['present', 'absent']), - volume_type=dict(choices=['SSD', 'SATA'], default='SATA'), - wait=dict(type='bool', default=False), - 
wait_timeout=dict(type='int', default=300) - ) - ) - - module = AnsibleModule( - argument_spec=argument_spec, - required_together=rax_required_together() - ) - - if not HAS_PYRAX: - module.fail_json(msg='pyrax is required for this module') - - description = module.params.get('description') - image = module.params.get('image') - meta = module.params.get('meta') - name = module.params.get('name') - size = module.params.get('size') - snapshot_id = module.params.get('snapshot_id') - state = module.params.get('state') - volume_type = module.params.get('volume_type') - wait = module.params.get('wait') - wait_timeout = module.params.get('wait_timeout') - - setup_rax_module(module, pyrax) - - cloud_block_storage(module, state, name, description, meta, size, - snapshot_id, volume_type, wait, wait_timeout, - image) - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/cloud/rackspace/rax_cbs_attachments.py b/plugins/modules/cloud/rackspace/rax_cbs_attachments.py deleted file mode 100644 index fd21081475..0000000000 --- a/plugins/modules/cloud/rackspace/rax_cbs_attachments.py +++ /dev/null @@ -1,219 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# Copyright: Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' ---- -module: rax_cbs_attachments -short_description: Manipulate Rackspace Cloud Block Storage Volume Attachments -description: - - Manipulate Rackspace Cloud Block Storage Volume Attachments -options: - device: - type: str - description: - - The device path to attach the volume to, e.g. /dev/xvde. - - Before 2.4 this was a required field. Now it can be left to null to auto assign the device name. 
- volume: - type: str - description: - - Name or id of the volume to attach/detach - required: true - server: - type: str - description: - - Name or id of the server to attach/detach - required: true - state: - type: str - description: - - Indicate desired state of the resource - choices: - - present - - absent - default: present - wait: - description: - - wait for the volume to be in 'in-use'/'available' state before returning - type: bool - default: 'no' - wait_timeout: - type: int - description: - - how long before wait gives up, in seconds - default: 300 -author: - - "Christopher H. Laco (@claco)" - - "Matt Martz (@sivel)" -extends_documentation_fragment: -- community.general.rackspace.openstack - -''' - -EXAMPLES = ''' -- name: Attach a Block Storage Volume - gather_facts: False - hosts: local - connection: local - tasks: - - name: Storage volume attach request - local_action: - module: rax_cbs_attachments - credentials: ~/.raxpub - volume: my-volume - server: my-server - device: /dev/xvdd - region: DFW - wait: yes - state: present - register: my_volume -''' - -try: - import pyrax - HAS_PYRAX = True -except ImportError: - HAS_PYRAX = False - -from ansible.module_utils.basic import AnsibleModule -from ansible_collections.community.general.plugins.module_utils.rax import (NON_CALLABLES, - rax_argument_spec, - rax_find_server, - rax_find_volume, - rax_required_together, - rax_to_dict, - setup_rax_module, - ) - - -def cloud_block_storage_attachments(module, state, volume, server, device, - wait, wait_timeout): - cbs = pyrax.cloud_blockstorage - cs = pyrax.cloudservers - - if cbs is None or cs is None: - module.fail_json(msg='Failed to instantiate client. 
This ' - 'typically indicates an invalid region or an ' - 'incorrectly capitalized region name.') - - changed = False - instance = {} - - volume = rax_find_volume(module, pyrax, volume) - - if not volume: - module.fail_json(msg='No matching storage volumes were found') - - if state == 'present': - server = rax_find_server(module, pyrax, server) - - if (volume.attachments and - volume.attachments[0]['server_id'] == server.id): - changed = False - elif volume.attachments: - module.fail_json(msg='Volume is attached to another server') - else: - try: - volume.attach_to_instance(server, mountpoint=device) - changed = True - except Exception as e: - module.fail_json(msg='%s' % e.message) - - volume.get() - - for key, value in vars(volume).items(): - if (isinstance(value, NON_CALLABLES) and - not key.startswith('_')): - instance[key] = value - - result = dict(changed=changed) - - if volume.status == 'error': - result['msg'] = '%s failed to build' % volume.id - elif wait: - attempts = wait_timeout // 5 - pyrax.utils.wait_until(volume, 'status', 'in-use', - interval=5, attempts=attempts) - - volume.get() - result['volume'] = rax_to_dict(volume) - - if 'msg' in result: - module.fail_json(**result) - else: - module.exit_json(**result) - - elif state == 'absent': - server = rax_find_server(module, pyrax, server) - - if (volume.attachments and - volume.attachments[0]['server_id'] == server.id): - try: - volume.detach() - if wait: - pyrax.utils.wait_until(volume, 'status', 'available', - interval=3, attempts=0, - verbose=False) - changed = True - except Exception as e: - module.fail_json(msg='%s' % e.message) - - volume.get() - changed = True - elif volume.attachments: - module.fail_json(msg='Volume is attached to another server') - - result = dict(changed=changed, volume=rax_to_dict(volume)) - - if volume.status == 'error': - result['msg'] = '%s failed to build' % volume.id - - if 'msg' in result: - module.fail_json(**result) - else: - module.exit_json(**result) - - 
module.exit_json(changed=changed, volume=instance) - - -def main(): - argument_spec = rax_argument_spec() - argument_spec.update( - dict( - device=dict(required=False), - volume=dict(required=True), - server=dict(required=True), - state=dict(default='present', choices=['present', 'absent']), - wait=dict(type='bool', default=False), - wait_timeout=dict(type='int', default=300) - ) - ) - - module = AnsibleModule( - argument_spec=argument_spec, - required_together=rax_required_together() - ) - - if not HAS_PYRAX: - module.fail_json(msg='pyrax is required for this module') - - device = module.params.get('device') - volume = module.params.get('volume') - server = module.params.get('server') - state = module.params.get('state') - wait = module.params.get('wait') - wait_timeout = module.params.get('wait_timeout') - - setup_rax_module(module, pyrax) - - cloud_block_storage_attachments(module, state, volume, server, device, - wait, wait_timeout) - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/cloud/rackspace/rax_cdb.py b/plugins/modules/cloud/rackspace/rax_cdb.py deleted file mode 100644 index a9c3243281..0000000000 --- a/plugins/modules/cloud/rackspace/rax_cdb.py +++ /dev/null @@ -1,259 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# Copyright: Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' ---- -module: rax_cdb -short_description: create/delete or resize a Rackspace Cloud Databases instance -description: - - creates / deletes or resize a Rackspace Cloud Databases instance - and optionally waits for it to be 'running'. The name option needs to be - unique since it's used to identify the instance. 
-options: - name: - type: str - description: - - Name of the databases server instance - required: yes - flavor: - type: int - description: - - flavor to use for the instance 1 to 6 (i.e. 512MB to 16GB) - default: 1 - volume: - type: int - description: - - Volume size of the database 1-150GB - default: 2 - cdb_type: - type: str - description: - - type of instance (i.e. MySQL, MariaDB, Percona) - default: MySQL - aliases: ['type'] - cdb_version: - type: str - description: - - version of database (MySQL supports 5.1 and 5.6, MariaDB supports 10, Percona supports 5.6) - - "The available choices are: C(5.1), C(5.6) and C(10)." - default: '5.6' - aliases: ['version'] - state: - type: str - description: - - Indicate desired state of the resource - choices: ['present', 'absent'] - default: present - wait: - description: - - wait for the instance to be in state 'running' before returning - type: bool - default: 'no' - wait_timeout: - type: int - description: - - how long before wait gives up, in seconds - default: 300 -author: "Simon JAILLET (@jails)" -extends_documentation_fragment: -- community.general.rackspace -- community.general.rackspace.openstack - -''' - -EXAMPLES = ''' -- name: Build a Cloud Databases - gather_facts: False - tasks: - - name: Server build request - local_action: - module: rax_cdb - credentials: ~/.raxpub - region: IAD - name: db-server1 - flavor: 1 - volume: 2 - cdb_type: MySQL - cdb_version: 5.6 - wait: yes - state: present - register: rax_db_server -''' - -try: - import pyrax - HAS_PYRAX = True -except ImportError: - HAS_PYRAX = False - -from ansible.module_utils.basic import AnsibleModule -from ansible_collections.community.general.plugins.module_utils.rax import rax_argument_spec, rax_required_together, rax_to_dict, setup_rax_module - - -def find_instance(name): - - cdb = pyrax.cloud_databases - instances = cdb.list() - if instances: - for instance in instances: - if instance.name == name: - return instance - return False - - -def 
save_instance(module, name, flavor, volume, cdb_type, cdb_version, wait, - wait_timeout): - - for arg, value in dict(name=name, flavor=flavor, - volume=volume, type=cdb_type, version=cdb_version - ).items(): - if not value: - module.fail_json(msg='%s is required for the "rax_cdb"' - ' module' % arg) - - if not (volume >= 1 and volume <= 150): - module.fail_json(msg='volume is required to be between 1 and 150') - - cdb = pyrax.cloud_databases - - flavors = [] - for item in cdb.list_flavors(): - flavors.append(item.id) - - if not (flavor in flavors): - module.fail_json(msg='unexisting flavor reference "%s"' % str(flavor)) - - changed = False - - instance = find_instance(name) - - if not instance: - action = 'create' - try: - instance = cdb.create(name=name, flavor=flavor, volume=volume, - type=cdb_type, version=cdb_version) - except Exception as e: - module.fail_json(msg='%s' % e.message) - else: - changed = True - - else: - action = None - - if instance.volume.size != volume: - action = 'resize' - if instance.volume.size > volume: - module.fail_json(changed=False, action=action, - msg='The new volume size must be larger than ' - 'the current volume size', - cdb=rax_to_dict(instance)) - instance.resize_volume(volume) - changed = True - - if int(instance.flavor.id) != flavor: - action = 'resize' - pyrax.utils.wait_until(instance, 'status', 'ACTIVE', - attempts=wait_timeout) - instance.resize(flavor) - changed = True - - if wait: - pyrax.utils.wait_until(instance, 'status', 'ACTIVE', - attempts=wait_timeout) - - if wait and instance.status != 'ACTIVE': - module.fail_json(changed=changed, action=action, - cdb=rax_to_dict(instance), - msg='Timeout waiting for "%s" databases instance to ' - 'be created' % name) - - module.exit_json(changed=changed, action=action, cdb=rax_to_dict(instance)) - - -def delete_instance(module, name, wait, wait_timeout): - - if not name: - module.fail_json(msg='name is required for the "rax_cdb" module') - - changed = False - - instance = 
find_instance(name) - if not instance: - module.exit_json(changed=False, action='delete') - - try: - instance.delete() - except Exception as e: - module.fail_json(msg='%s' % e.message) - else: - changed = True - - if wait: - pyrax.utils.wait_until(instance, 'status', 'SHUTDOWN', - attempts=wait_timeout) - - if wait and instance.status != 'SHUTDOWN': - module.fail_json(changed=changed, action='delete', - cdb=rax_to_dict(instance), - msg='Timeout waiting for "%s" databases instance to ' - 'be deleted' % name) - - module.exit_json(changed=changed, action='delete', - cdb=rax_to_dict(instance)) - - -def rax_cdb(module, state, name, flavor, volume, cdb_type, cdb_version, wait, - wait_timeout): - - # act on the state - if state == 'present': - save_instance(module, name, flavor, volume, cdb_type, cdb_version, wait, - wait_timeout) - elif state == 'absent': - delete_instance(module, name, wait, wait_timeout) - - -def main(): - argument_spec = rax_argument_spec() - argument_spec.update( - dict( - name=dict(type='str', required=True), - flavor=dict(type='int', default=1), - volume=dict(type='int', default=2), - cdb_type=dict(type='str', default='MySQL', aliases=['type']), - cdb_version=dict(type='str', default='5.6', aliases=['version']), - state=dict(default='present', choices=['present', 'absent']), - wait=dict(type='bool', default=False), - wait_timeout=dict(type='int', default=300), - ) - ) - - module = AnsibleModule( - argument_spec=argument_spec, - required_together=rax_required_together(), - ) - - if not HAS_PYRAX: - module.fail_json(msg='pyrax is required for this module') - - name = module.params.get('name') - flavor = module.params.get('flavor') - volume = module.params.get('volume') - cdb_type = module.params.get('cdb_type') - cdb_version = module.params.get('cdb_version') - state = module.params.get('state') - wait = module.params.get('wait') - wait_timeout = module.params.get('wait_timeout') - - setup_rax_module(module, pyrax) - rax_cdb(module, state, name, 
flavor, volume, cdb_type, cdb_version, wait, wait_timeout) - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/cloud/rackspace/rax_cdb_database.py b/plugins/modules/cloud/rackspace/rax_cdb_database.py deleted file mode 100644 index 86cd1aac40..0000000000 --- a/plugins/modules/cloud/rackspace/rax_cdb_database.py +++ /dev/null @@ -1,172 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# Copyright: Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' -module: rax_cdb_database -short_description: 'create / delete a database in the Cloud Databases' -description: - - create / delete a database in the Cloud Databases. -options: - cdb_id: - type: str - description: - - The databases server UUID - required: yes - name: - type: str - description: - - Name to give to the database - required: yes - character_set: - type: str - description: - - Set of symbols and encodings - default: 'utf8' - collate: - type: str - description: - - Set of rules for comparing characters in a character set - default: 'utf8_general_ci' - state: - type: str - description: - - Indicate desired state of the resource - choices: ['present', 'absent'] - default: present -author: "Simon JAILLET (@jails)" -extends_documentation_fragment: -- community.general.rackspace -- community.general.rackspace.openstack - -''' - -EXAMPLES = ''' -- name: Build a database in Cloud Databases - tasks: - - name: Database build request - local_action: - module: rax_cdb_database - credentials: ~/.raxpub - region: IAD - cdb_id: 323e7ce0-9cb0-11e3-a5e2-0800200c9a66 - name: db1 - state: present - register: rax_db_database -''' - -try: - import pyrax - HAS_PYRAX = True -except ImportError: - HAS_PYRAX = False - -from ansible.module_utils.basic import AnsibleModule -from ansible_collections.community.general.plugins.module_utils.rax 
import rax_argument_spec, rax_required_together, rax_to_dict, setup_rax_module - - -def find_database(instance, name): - try: - database = instance.get_database(name) - except Exception: - return False - - return database - - -def save_database(module, cdb_id, name, character_set, collate): - cdb = pyrax.cloud_databases - - try: - instance = cdb.get(cdb_id) - except Exception as e: - module.fail_json(msg='%s' % e.message) - - changed = False - - database = find_database(instance, name) - - if not database: - try: - database = instance.create_database(name=name, - character_set=character_set, - collate=collate) - except Exception as e: - module.fail_json(msg='%s' % e.message) - else: - changed = True - - module.exit_json(changed=changed, action='create', - database=rax_to_dict(database)) - - -def delete_database(module, cdb_id, name): - cdb = pyrax.cloud_databases - - try: - instance = cdb.get(cdb_id) - except Exception as e: - module.fail_json(msg='%s' % e.message) - - changed = False - - database = find_database(instance, name) - - if database: - try: - database.delete() - except Exception as e: - module.fail_json(msg='%s' % e.message) - else: - changed = True - - module.exit_json(changed=changed, action='delete', - database=rax_to_dict(database)) - - -def rax_cdb_database(module, state, cdb_id, name, character_set, collate): - - # act on the state - if state == 'present': - save_database(module, cdb_id, name, character_set, collate) - elif state == 'absent': - delete_database(module, cdb_id, name) - - -def main(): - argument_spec = rax_argument_spec() - argument_spec.update( - dict( - cdb_id=dict(type='str', required=True), - name=dict(type='str', required=True), - character_set=dict(type='str', default='utf8'), - collate=dict(type='str', default='utf8_general_ci'), - state=dict(default='present', choices=['present', 'absent']) - ) - ) - - module = AnsibleModule( - argument_spec=argument_spec, - required_together=rax_required_together(), - ) - - if not HAS_PYRAX: 
- module.fail_json(msg='pyrax is required for this module') - - cdb_id = module.params.get('cdb_id') - name = module.params.get('name') - character_set = module.params.get('character_set') - collate = module.params.get('collate') - state = module.params.get('state') - - setup_rax_module(module, pyrax) - rax_cdb_database(module, state, cdb_id, name, character_set, collate) - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/cloud/rackspace/rax_cdb_user.py b/plugins/modules/cloud/rackspace/rax_cdb_user.py deleted file mode 100644 index 674f17c070..0000000000 --- a/plugins/modules/cloud/rackspace/rax_cdb_user.py +++ /dev/null @@ -1,220 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# Copyright: Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' ---- -module: rax_cdb_user -short_description: create / delete a Rackspace Cloud Database -description: - - create / delete a database in the Cloud Databases. -options: - cdb_id: - type: str - description: - - The databases server UUID - required: yes - db_username: - type: str - description: - - Name of the database user - required: yes - db_password: - type: str - description: - - Database user password - required: yes - databases: - type: list - elements: str - description: - - Name of the databases that the user can access - default: [] - host: - type: str - description: - - Specifies the host from which a user is allowed to connect to - the database. 
Possible values are a string containing an IPv4 address - or "%" to allow connecting from any host - default: '%' - state: - type: str - description: - - Indicate desired state of the resource - choices: ['present', 'absent'] - default: present -author: "Simon JAILLET (@jails)" -extends_documentation_fragment: -- community.general.rackspace -- community.general.rackspace.openstack - -''' - -EXAMPLES = ''' -- name: Build a user in Cloud Databases - tasks: - - name: User build request - local_action: - module: rax_cdb_user - credentials: ~/.raxpub - region: IAD - cdb_id: 323e7ce0-9cb0-11e3-a5e2-0800200c9a66 - db_username: user1 - db_password: user1 - databases: ['db1'] - state: present - register: rax_db_user -''' - -try: - import pyrax - HAS_PYRAX = True -except ImportError: - HAS_PYRAX = False - -from ansible.module_utils.basic import AnsibleModule -from ansible.module_utils.common.text.converters import to_text -from ansible_collections.community.general.plugins.module_utils.rax import rax_argument_spec, rax_required_together, rax_to_dict, setup_rax_module - - -def find_user(instance, name): - try: - user = instance.get_user(name) - except Exception: - return False - - return user - - -def save_user(module, cdb_id, name, password, databases, host): - - for arg, value in dict(cdb_id=cdb_id, name=name).items(): - if not value: - module.fail_json(msg='%s is required for the "rax_cdb_user" ' - 'module' % arg) - - cdb = pyrax.cloud_databases - - try: - instance = cdb.get(cdb_id) - except Exception as e: - module.fail_json(msg='%s' % e.message) - - changed = False - - user = find_user(instance, name) - - if not user: - action = 'create' - try: - user = instance.create_user(name=name, - password=password, - database_names=databases, - host=host) - except Exception as e: - module.fail_json(msg='%s' % e.message) - else: - changed = True - else: - action = 'update' - - if user.host != host: - changed = True - - user.update(password=password, host=host) - - former_dbs = 
set([item.name for item in user.list_user_access()]) - databases = set(databases) - - if databases != former_dbs: - try: - revoke_dbs = [db for db in former_dbs if db not in databases] - user.revoke_user_access(db_names=revoke_dbs) - - new_dbs = [db for db in databases if db not in former_dbs] - user.grant_user_access(db_names=new_dbs) - except Exception as e: - module.fail_json(msg='%s' % e.message) - else: - changed = True - - module.exit_json(changed=changed, action=action, user=rax_to_dict(user)) - - -def delete_user(module, cdb_id, name): - - for arg, value in dict(cdb_id=cdb_id, name=name).items(): - if not value: - module.fail_json(msg='%s is required for the "rax_cdb_user"' - ' module' % arg) - - cdb = pyrax.cloud_databases - - try: - instance = cdb.get(cdb_id) - except Exception as e: - module.fail_json(msg='%s' % e.message) - - changed = False - - user = find_user(instance, name) - - if user: - try: - user.delete() - except Exception as e: - module.fail_json(msg='%s' % e.message) - else: - changed = True - - module.exit_json(changed=changed, action='delete') - - -def rax_cdb_user(module, state, cdb_id, name, password, databases, host): - - # act on the state - if state == 'present': - save_user(module, cdb_id, name, password, databases, host) - elif state == 'absent': - delete_user(module, cdb_id, name) - - -def main(): - argument_spec = rax_argument_spec() - argument_spec.update( - dict( - cdb_id=dict(type='str', required=True), - db_username=dict(type='str', required=True), - db_password=dict(type='str', required=True, no_log=True), - databases=dict(type='list', elements='str', default=[]), - host=dict(type='str', default='%'), - state=dict(default='present', choices=['present', 'absent']) - ) - ) - - module = AnsibleModule( - argument_spec=argument_spec, - required_together=rax_required_together(), - ) - - if not HAS_PYRAX: - module.fail_json(msg='pyrax is required for this module') - - cdb_id = module.params.get('cdb_id') - name = 
module.params.get('db_username') - password = module.params.get('db_password') - databases = module.params.get('databases') - host = to_text(module.params.get('host'), errors='surrogate_or_strict') - state = module.params.get('state') - - setup_rax_module(module, pyrax) - rax_cdb_user(module, state, cdb_id, name, password, databases, host) - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/cloud/rackspace/rax_clb.py b/plugins/modules/cloud/rackspace/rax_clb.py deleted file mode 100644 index 9160133e21..0000000000 --- a/plugins/modules/cloud/rackspace/rax_clb.py +++ /dev/null @@ -1,312 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# Copyright: Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' ---- -module: rax_clb -short_description: create / delete a load balancer in Rackspace Public Cloud -description: - - creates / deletes a Rackspace Public Cloud load balancer. 
-options: - algorithm: - type: str - description: - - algorithm for the balancer being created - choices: - - RANDOM - - LEAST_CONNECTIONS - - ROUND_ROBIN - - WEIGHTED_LEAST_CONNECTIONS - - WEIGHTED_ROUND_ROBIN - default: LEAST_CONNECTIONS - meta: - type: dict - description: - - A hash of metadata to associate with the instance - name: - type: str - description: - - Name to give the load balancer - required: yes - port: - type: int - description: - - Port for the balancer being created - default: 80 - protocol: - type: str - description: - - Protocol for the balancer being created - choices: - - DNS_TCP - - DNS_UDP - - FTP - - HTTP - - HTTPS - - IMAPS - - IMAPv4 - - LDAP - - LDAPS - - MYSQL - - POP3 - - POP3S - - SMTP - - TCP - - TCP_CLIENT_FIRST - - UDP - - UDP_STREAM - - SFTP - default: HTTP - state: - type: str - description: - - Indicate desired state of the resource - choices: - - present - - absent - default: present - timeout: - type: int - description: - - timeout for communication between the balancer and the node - default: 30 - type: - type: str - description: - - type of interface for the balancer being created - choices: - - PUBLIC - - SERVICENET - default: PUBLIC - vip_id: - type: str - description: - - Virtual IP ID to use when creating the load balancer for purposes of - sharing an IP with another load balancer of another protocol - wait: - description: - - wait for the balancer to be in state 'running' before returning - type: bool - default: 'no' - wait_timeout: - type: int - description: - - how long before wait gives up, in seconds - default: 300 -author: - - "Christopher H. 
Laco (@claco)" - - "Matt Martz (@sivel)" -extends_documentation_fragment: -- community.general.rackspace -- community.general.rackspace.openstack - -''' - -EXAMPLES = ''' -- name: Build a Load Balancer - gather_facts: False - hosts: local - connection: local - tasks: - - name: Load Balancer create request - local_action: - module: rax_clb - credentials: ~/.raxpub - name: my-lb - port: 8080 - protocol: HTTP - type: SERVICENET - timeout: 30 - region: DFW - wait: yes - state: present - meta: - app: my-cool-app - register: my_lb -''' - - -try: - import pyrax - HAS_PYRAX = True -except ImportError: - HAS_PYRAX = False - -from ansible.module_utils.basic import AnsibleModule -from ansible_collections.community.general.plugins.module_utils.rax import (CLB_ALGORITHMS, - CLB_PROTOCOLS, - rax_argument_spec, - rax_required_together, - rax_to_dict, - setup_rax_module, - ) - - -def cloud_load_balancer(module, state, name, meta, algorithm, port, protocol, - vip_type, timeout, wait, wait_timeout, vip_id): - if int(timeout) < 30: - module.fail_json(msg='"timeout" must be greater than or equal to 30') - - changed = False - balancers = [] - - clb = pyrax.cloud_loadbalancers - if not clb: - module.fail_json(msg='Failed to instantiate client. 
This ' - 'typically indicates an invalid region or an ' - 'incorrectly capitalized region name.') - - balancer_list = clb.list() - while balancer_list: - retrieved = clb.list(marker=balancer_list.pop().id) - balancer_list.extend(retrieved) - if len(retrieved) < 2: - break - - for balancer in balancer_list: - if name != balancer.name and name != balancer.id: - continue - - balancers.append(balancer) - - if len(balancers) > 1: - module.fail_json(msg='Multiple Load Balancers were matched by name, ' - 'try using the Load Balancer ID instead') - - if state == 'present': - if isinstance(meta, dict): - metadata = [dict(key=k, value=v) for k, v in meta.items()] - - if not balancers: - try: - virtual_ips = [clb.VirtualIP(type=vip_type, id=vip_id)] - balancer = clb.create(name, metadata=metadata, port=port, - algorithm=algorithm, protocol=protocol, - timeout=timeout, virtual_ips=virtual_ips) - changed = True - except Exception as e: - module.fail_json(msg='%s' % e.message) - else: - balancer = balancers[0] - setattr(balancer, 'metadata', - [dict(key=k, value=v) for k, v in - balancer.get_metadata().items()]) - atts = { - 'name': name, - 'algorithm': algorithm, - 'port': port, - 'protocol': protocol, - 'timeout': timeout - } - for att, value in atts.items(): - current = getattr(balancer, att) - if current != value: - changed = True - - if changed: - balancer.update(**atts) - - if balancer.metadata != metadata: - balancer.set_metadata(meta) - changed = True - - virtual_ips = [clb.VirtualIP(type=vip_type)] - current_vip_types = set([v.type for v in balancer.virtual_ips]) - vip_types = set([v.type for v in virtual_ips]) - if current_vip_types != vip_types: - module.fail_json(msg='Load balancer Virtual IP type cannot ' - 'be changed') - - if wait: - attempts = wait_timeout // 5 - pyrax.utils.wait_for_build(balancer, interval=5, attempts=attempts) - - balancer.get() - instance = rax_to_dict(balancer, 'clb') - - result = dict(changed=changed, balancer=instance) - - if 
balancer.status == 'ERROR': - result['msg'] = '%s failed to build' % balancer.id - elif wait and balancer.status not in ('ACTIVE', 'ERROR'): - result['msg'] = 'Timeout waiting on %s' % balancer.id - - if 'msg' in result: - module.fail_json(**result) - else: - module.exit_json(**result) - - elif state == 'absent': - if balancers: - balancer = balancers[0] - try: - balancer.delete() - changed = True - except Exception as e: - module.fail_json(msg='%s' % e.message) - - instance = rax_to_dict(balancer, 'clb') - - if wait: - attempts = wait_timeout // 5 - pyrax.utils.wait_until(balancer, 'status', ('DELETED'), - interval=5, attempts=attempts) - else: - instance = {} - - module.exit_json(changed=changed, balancer=instance) - - -def main(): - argument_spec = rax_argument_spec() - argument_spec.update( - dict( - algorithm=dict(choices=CLB_ALGORITHMS, - default='LEAST_CONNECTIONS'), - meta=dict(type='dict', default={}), - name=dict(required=True), - port=dict(type='int', default=80), - protocol=dict(choices=CLB_PROTOCOLS, default='HTTP'), - state=dict(default='present', choices=['present', 'absent']), - timeout=dict(type='int', default=30), - type=dict(choices=['PUBLIC', 'SERVICENET'], default='PUBLIC'), - vip_id=dict(), - wait=dict(type='bool', default=False), - wait_timeout=dict(type='int', default=300), - ) - ) - - module = AnsibleModule( - argument_spec=argument_spec, - required_together=rax_required_together(), - ) - - if not HAS_PYRAX: - module.fail_json(msg='pyrax is required for this module') - - algorithm = module.params.get('algorithm') - meta = module.params.get('meta') - name = module.params.get('name') - port = module.params.get('port') - protocol = module.params.get('protocol') - state = module.params.get('state') - timeout = int(module.params.get('timeout')) - vip_id = module.params.get('vip_id') - vip_type = module.params.get('type') - wait = module.params.get('wait') - wait_timeout = int(module.params.get('wait_timeout')) - - setup_rax_module(module, pyrax) 
- - cloud_load_balancer(module, state, name, meta, algorithm, port, protocol, - vip_type, timeout, wait, wait_timeout, vip_id) - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/cloud/rackspace/rax_clb_nodes.py b/plugins/modules/cloud/rackspace/rax_clb_nodes.py deleted file mode 100644 index 4adcc66fb7..0000000000 --- a/plugins/modules/cloud/rackspace/rax_clb_nodes.py +++ /dev/null @@ -1,283 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# Copyright: Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' ---- -module: rax_clb_nodes -short_description: add, modify and remove nodes from a Rackspace Cloud Load Balancer -description: - - Adds, modifies and removes nodes from a Rackspace Cloud Load Balancer -options: - address: - type: str - required: false - description: - - IP address or domain name of the node - condition: - type: str - required: false - choices: - - enabled - - disabled - - draining - description: - - Condition for the node, which determines its role within the load - balancer - load_balancer_id: - type: int - required: true - description: - - Load balancer id - node_id: - type: int - required: false - description: - - Node id - port: - type: int - required: false - description: - - Port number of the load balanced service on the node - state: - type: str - required: false - default: "present" - choices: - - present - - absent - description: - - Indicate desired state of the node - type: - type: str - required: false - choices: - - primary - - secondary - description: - - Type of node - wait: - required: false - default: "no" - type: bool - description: - - Wait for the load balancer to become active before returning - wait_timeout: - type: int - required: false - default: 30 - description: - - How long to wait before giving up and returning an error - 
weight: - type: int - required: false - description: - - Weight of node - virtualenv: - type: path - description: - - Virtualenv to execute this module in -author: "Lukasz Kawczynski (@neuroid)" -extends_documentation_fragment: -- community.general.rackspace -- community.general.rackspace.openstack - -''' - -EXAMPLES = ''' -- name: Add a new node to the load balancer - local_action: - module: rax_clb_nodes - load_balancer_id: 71 - address: 10.2.2.3 - port: 80 - condition: enabled - type: primary - wait: yes - credentials: /path/to/credentials - -- name: Drain connections from a node - local_action: - module: rax_clb_nodes - load_balancer_id: 71 - node_id: 410 - condition: draining - wait: yes - credentials: /path/to/credentials - -- name: Remove a node from the load balancer - local_action: - module: rax_clb_nodes - load_balancer_id: 71 - node_id: 410 - state: absent - wait: yes - credentials: /path/to/credentials -''' - -import os - -try: - import pyrax - HAS_PYRAX = True -except ImportError: - HAS_PYRAX = False - -from ansible.module_utils.basic import AnsibleModule -from ansible_collections.community.general.plugins.module_utils.rax import rax_argument_spec, rax_clb_node_to_dict, rax_required_together, setup_rax_module - - -def _activate_virtualenv(path): - activate_this = os.path.join(path, 'bin', 'activate_this.py') - with open(activate_this) as f: - code = compile(f.read(), activate_this, 'exec') - exec(code) - - -def _get_node(lb, node_id=None, address=None, port=None): - """Return a matching node""" - for node in getattr(lb, 'nodes', []): - match_list = [] - if node_id is not None: - match_list.append(getattr(node, 'id', None) == node_id) - if address is not None: - match_list.append(getattr(node, 'address', None) == address) - if port is not None: - match_list.append(getattr(node, 'port', None) == port) - - if match_list and all(match_list): - return node - - return None - - -def main(): - argument_spec = rax_argument_spec() - argument_spec.update( - dict( 
- address=dict(), - condition=dict(choices=['enabled', 'disabled', 'draining']), - load_balancer_id=dict(required=True, type='int'), - node_id=dict(type='int'), - port=dict(type='int'), - state=dict(default='present', choices=['present', 'absent']), - type=dict(choices=['primary', 'secondary']), - virtualenv=dict(type='path'), - wait=dict(default=False, type='bool'), - wait_timeout=dict(default=30, type='int'), - weight=dict(type='int'), - ) - ) - - module = AnsibleModule( - argument_spec=argument_spec, - required_together=rax_required_together(), - ) - - if not HAS_PYRAX: - module.fail_json(msg='pyrax is required for this module') - - address = module.params['address'] - condition = (module.params['condition'] and - module.params['condition'].upper()) - load_balancer_id = module.params['load_balancer_id'] - node_id = module.params['node_id'] - port = module.params['port'] - state = module.params['state'] - typ = module.params['type'] and module.params['type'].upper() - virtualenv = module.params['virtualenv'] - wait = module.params['wait'] - wait_timeout = module.params['wait_timeout'] or 1 - weight = module.params['weight'] - - if virtualenv: - try: - _activate_virtualenv(virtualenv) - except IOError as e: - module.fail_json(msg='Failed to activate virtualenv %s (%s)' % ( - virtualenv, e)) - - setup_rax_module(module, pyrax) - - if not pyrax.cloud_loadbalancers: - module.fail_json(msg='Failed to instantiate client. 
This ' - 'typically indicates an invalid region or an ' - 'incorrectly capitalized region name.') - - try: - lb = pyrax.cloud_loadbalancers.get(load_balancer_id) - except pyrax.exc.PyraxException as e: - module.fail_json(msg='%s' % e.message) - - node = _get_node(lb, node_id, address, port) - - result = rax_clb_node_to_dict(node) - - if state == 'absent': - if not node: # Removing a non-existent node - module.exit_json(changed=False, state=state) - try: - lb.delete_node(node) - result = {} - except pyrax.exc.NotFound: - module.exit_json(changed=False, state=state) - except pyrax.exc.PyraxException as e: - module.fail_json(msg='%s' % e.message) - else: # present - if not node: - if node_id: # Updating a non-existent node - msg = 'Node %d not found' % node_id - if lb.nodes: - msg += (' (available nodes: %s)' % - ', '.join([str(x.id) for x in lb.nodes])) - module.fail_json(msg=msg) - else: # Creating a new node - try: - node = pyrax.cloudloadbalancers.Node( - address=address, port=port, condition=condition, - weight=weight, type=typ) - resp, body = lb.add_nodes([node]) - result.update(body['nodes'][0]) - except pyrax.exc.PyraxException as e: - module.fail_json(msg='%s' % e.message) - else: # Updating an existing node - mutable = { - 'condition': condition, - 'type': typ, - 'weight': weight, - } - - for name, value in mutable.items(): - if value is None or value == getattr(node, name): - mutable.pop(name) - - if not mutable: - module.exit_json(changed=False, state=state, node=result) - - try: - # The diff has to be set explicitly to update node's weight and - # type; this should probably be fixed in pyrax - lb.update_node(node, diff=mutable) - result.update(mutable) - except pyrax.exc.PyraxException as e: - module.fail_json(msg='%s' % e.message) - - if wait: - pyrax.utils.wait_until(lb, "status", "ACTIVE", interval=1, - attempts=wait_timeout) - if lb.status != 'ACTIVE': - module.fail_json( - msg='Load balancer not active after %ds (current status: %s)' % - 
(wait_timeout, lb.status.lower())) - - kwargs = {'node': result} if result else {} - module.exit_json(changed=True, state=state, **kwargs) - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/cloud/rackspace/rax_clb_ssl.py b/plugins/modules/cloud/rackspace/rax_clb_ssl.py deleted file mode 100644 index adf375124d..0000000000 --- a/plugins/modules/cloud/rackspace/rax_clb_ssl.py +++ /dev/null @@ -1,282 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# Copyright: Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' -module: rax_clb_ssl -short_description: Manage SSL termination for a Rackspace Cloud Load Balancer. -description: -- Set up, reconfigure, or remove SSL termination for an existing load balancer. -options: - loadbalancer: - type: str - description: - - Name or ID of the load balancer on which to manage SSL termination. - required: true - state: - type: str - description: - - If set to "present", SSL termination will be added to this load balancer. - - If "absent", SSL termination will be removed instead. - choices: - - present - - absent - default: present - enabled: - description: - - If set to "false", temporarily disable SSL termination without discarding - - existing credentials. - default: true - type: bool - private_key: - type: str - description: - - The private SSL key as a string in PEM format. - certificate: - type: str - description: - - The public SSL certificates as a string in PEM format. - intermediate_certificate: - type: str - description: - - One or more intermediate certificate authorities as a string in PEM - - format, concatenated into a single string. - secure_port: - type: int - description: - - The port to listen for secure traffic. 
- default: 443 - secure_traffic_only: - description: - - If "true", the load balancer will *only* accept secure traffic. - default: false - type: bool - https_redirect: - description: - - If "true", the load balancer will redirect HTTP traffic to HTTPS. - - Requires "secure_traffic_only" to be true. Incurs an implicit wait if SSL - - termination is also applied or removed. - type: bool - wait: - description: - - Wait for the balancer to be in state "running" before turning. - default: false - type: bool - wait_timeout: - type: int - description: - - How long before "wait" gives up, in seconds. - default: 300 -author: Ash Wilson (@smashwilson) -extends_documentation_fragment: -- community.general.rackspace -- community.general.rackspace.openstack - -''' - -EXAMPLES = ''' -- name: Enable SSL termination on a load balancer - community.general.rax_clb_ssl: - loadbalancer: the_loadbalancer - state: present - private_key: "{{ lookup('file', 'credentials/server.key' ) }}" - certificate: "{{ lookup('file', 'credentials/server.crt' ) }}" - intermediate_certificate: "{{ lookup('file', 'credentials/trust-chain.crt') }}" - secure_traffic_only: true - wait: true - -- name: Disable SSL termination - community.general.rax_clb_ssl: - loadbalancer: "{{ registered_lb.balancer.id }}" - state: absent - wait: true -''' - -try: - import pyrax - HAS_PYRAX = True -except ImportError: - HAS_PYRAX = False - -from ansible.module_utils.basic import AnsibleModule -from ansible_collections.community.general.plugins.module_utils.rax import (rax_argument_spec, - rax_find_loadbalancer, - rax_required_together, - rax_to_dict, - setup_rax_module, - ) - - -def cloud_load_balancer_ssl(module, loadbalancer, state, enabled, private_key, - certificate, intermediate_certificate, secure_port, - secure_traffic_only, https_redirect, - wait, wait_timeout): - # Validate arguments. 
- - if state == 'present': - if not private_key: - module.fail_json(msg="private_key must be provided.") - else: - private_key = private_key.strip() - - if not certificate: - module.fail_json(msg="certificate must be provided.") - else: - certificate = certificate.strip() - - attempts = wait_timeout // 5 - - # Locate the load balancer. - - balancer = rax_find_loadbalancer(module, pyrax, loadbalancer) - existing_ssl = balancer.get_ssl_termination() - - changed = False - - if state == 'present': - # Apply or reconfigure SSL termination on the load balancer. - ssl_attrs = dict( - securePort=secure_port, - privatekey=private_key, - certificate=certificate, - intermediateCertificate=intermediate_certificate, - enabled=enabled, - secureTrafficOnly=secure_traffic_only - ) - - needs_change = False - - if existing_ssl: - for ssl_attr, value in ssl_attrs.items(): - if ssl_attr == 'privatekey': - # The private key is not included in get_ssl_termination's - # output (as it shouldn't be). Also, if you're changing the - # private key, you'll also be changing the certificate, - # so we don't lose anything by not checking it. - continue - - if value is not None and existing_ssl.get(ssl_attr) != value: - # module.fail_json(msg='Unnecessary change', attr=ssl_attr, value=value, existing=existing_ssl.get(ssl_attr)) - needs_change = True - else: - needs_change = True - - if needs_change: - try: - balancer.add_ssl_termination(**ssl_attrs) - except pyrax.exceptions.PyraxException as e: - module.fail_json(msg='%s' % e.message) - changed = True - elif state == 'absent': - # Remove SSL termination if it's already configured. 
- if existing_ssl: - try: - balancer.delete_ssl_termination() - except pyrax.exceptions.PyraxException as e: - module.fail_json(msg='%s' % e.message) - changed = True - - if https_redirect is not None and balancer.httpsRedirect != https_redirect: - if changed: - # This wait is unavoidable because load balancers are immutable - # while the SSL termination changes above are being applied. - pyrax.utils.wait_for_build(balancer, interval=5, attempts=attempts) - - try: - balancer.update(httpsRedirect=https_redirect) - except pyrax.exceptions.PyraxException as e: - module.fail_json(msg='%s' % e.message) - changed = True - - if changed and wait: - pyrax.utils.wait_for_build(balancer, interval=5, attempts=attempts) - - balancer.get() - new_ssl_termination = balancer.get_ssl_termination() - - # Intentionally omit the private key from the module output, so you don't - # accidentally echo it with `ansible-playbook -v` or `debug`, and the - # certificate, which is just long. Convert other attributes to snake_case - # and include https_redirect at the top-level. 
- if new_ssl_termination: - new_ssl = dict( - enabled=new_ssl_termination['enabled'], - secure_port=new_ssl_termination['securePort'], - secure_traffic_only=new_ssl_termination['secureTrafficOnly'] - ) - else: - new_ssl = None - - result = dict( - changed=changed, - https_redirect=balancer.httpsRedirect, - ssl_termination=new_ssl, - balancer=rax_to_dict(balancer, 'clb') - ) - success = True - - if balancer.status == 'ERROR': - result['msg'] = '%s failed to build' % balancer.id - success = False - elif wait and balancer.status not in ('ACTIVE', 'ERROR'): - result['msg'] = 'Timeout waiting on %s' % balancer.id - success = False - - if success: - module.exit_json(**result) - else: - module.fail_json(**result) - - -def main(): - argument_spec = rax_argument_spec() - argument_spec.update(dict( - loadbalancer=dict(required=True), - state=dict(default='present', choices=['present', 'absent']), - enabled=dict(type='bool', default=True), - private_key=dict(no_log=True), - certificate=dict(), - intermediate_certificate=dict(), - secure_port=dict(type='int', default=443), - secure_traffic_only=dict(type='bool', default=False), - https_redirect=dict(type='bool'), - wait=dict(type='bool', default=False), - wait_timeout=dict(type='int', default=300) - )) - - module = AnsibleModule( - argument_spec=argument_spec, - required_together=rax_required_together(), - ) - - if not HAS_PYRAX: - module.fail_json(msg='pyrax is required for this module.') - - loadbalancer = module.params.get('loadbalancer') - state = module.params.get('state') - enabled = module.boolean(module.params.get('enabled')) - private_key = module.params.get('private_key') - certificate = module.params.get('certificate') - intermediate_certificate = module.params.get('intermediate_certificate') - secure_port = module.params.get('secure_port') - secure_traffic_only = module.boolean(module.params.get('secure_traffic_only')) - https_redirect = module.boolean(module.params.get('https_redirect')) - wait = 
module.boolean(module.params.get('wait')) - wait_timeout = module.params.get('wait_timeout') - - setup_rax_module(module, pyrax) - - cloud_load_balancer_ssl( - module, loadbalancer, state, enabled, private_key, certificate, - intermediate_certificate, secure_port, secure_traffic_only, - https_redirect, wait, wait_timeout - ) - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/cloud/rackspace/rax_dns.py b/plugins/modules/cloud/rackspace/rax_dns.py deleted file mode 100644 index 915e13a9a6..0000000000 --- a/plugins/modules/cloud/rackspace/rax_dns.py +++ /dev/null @@ -1,173 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# Copyright: Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' ---- -module: rax_dns -short_description: Manage domains on Rackspace Cloud DNS -description: - - Manage domains on Rackspace Cloud DNS -options: - comment: - type: str - description: - - Brief description of the domain. 
Maximum length of 160 characters - email: - type: str - description: - - Email address of the domain administrator - name: - type: str - description: - - Domain name to create - state: - type: str - description: - - Indicate desired state of the resource - choices: - - present - - absent - default: present - ttl: - type: int - description: - - Time to live of domain in seconds - default: 3600 -notes: - - "It is recommended that plays utilizing this module be run with - C(serial: 1) to avoid exceeding the API request limit imposed by - the Rackspace CloudDNS API" -author: "Matt Martz (@sivel)" -extends_documentation_fragment: -- community.general.rackspace -- community.general.rackspace.openstack - -''' - -EXAMPLES = ''' -- name: Create domain - hosts: all - gather_facts: False - tasks: - - name: Domain create request - local_action: - module: rax_dns - credentials: ~/.raxpub - name: example.org - email: admin@example.org - register: rax_dns -''' - -try: - import pyrax - HAS_PYRAX = True -except ImportError: - HAS_PYRAX = False - -from ansible.module_utils.basic import AnsibleModule -from ansible_collections.community.general.plugins.module_utils.rax import (rax_argument_spec, - rax_required_together, - rax_to_dict, - setup_rax_module, - ) - - -def rax_dns(module, comment, email, name, state, ttl): - changed = False - - dns = pyrax.cloud_dns - if not dns: - module.fail_json(msg='Failed to instantiate client. 
This ' - 'typically indicates an invalid region or an ' - 'incorrectly capitalized region name.') - - if state == 'present': - if not email: - module.fail_json(msg='An "email" attribute is required for ' - 'creating a domain') - - try: - domain = dns.find(name=name) - except pyrax.exceptions.NoUniqueMatch as e: - module.fail_json(msg='%s' % e.message) - except pyrax.exceptions.NotFound: - try: - domain = dns.create(name=name, emailAddress=email, ttl=ttl, - comment=comment) - changed = True - except Exception as e: - module.fail_json(msg='%s' % e.message) - - update = {} - if comment != getattr(domain, 'comment', None): - update['comment'] = comment - if ttl != getattr(domain, 'ttl', None): - update['ttl'] = ttl - if email != getattr(domain, 'emailAddress', None): - update['emailAddress'] = email - - if update: - try: - domain.update(**update) - changed = True - domain.get() - except Exception as e: - module.fail_json(msg='%s' % e.message) - - elif state == 'absent': - try: - domain = dns.find(name=name) - except pyrax.exceptions.NotFound: - domain = {} - except Exception as e: - module.fail_json(msg='%s' % e.message) - - if domain: - try: - domain.delete() - changed = True - except Exception as e: - module.fail_json(msg='%s' % e.message) - - module.exit_json(changed=changed, domain=rax_to_dict(domain)) - - -def main(): - argument_spec = rax_argument_spec() - argument_spec.update( - dict( - comment=dict(), - email=dict(), - name=dict(), - state=dict(default='present', choices=['present', 'absent']), - ttl=dict(type='int', default=3600), - ) - ) - - module = AnsibleModule( - argument_spec=argument_spec, - required_together=rax_required_together(), - ) - - if not HAS_PYRAX: - module.fail_json(msg='pyrax is required for this module') - - comment = module.params.get('comment') - email = module.params.get('email') - name = module.params.get('name') - state = module.params.get('state') - ttl = module.params.get('ttl') - - setup_rax_module(module, pyrax, False) - - 
rax_dns(module, comment, email, name, state, ttl) - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/cloud/rackspace/rax_dns_record.py b/plugins/modules/cloud/rackspace/rax_dns_record.py deleted file mode 100644 index 1a6986dea7..0000000000 --- a/plugins/modules/cloud/rackspace/rax_dns_record.py +++ /dev/null @@ -1,353 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# Copyright: Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' ---- -module: rax_dns_record -short_description: Manage DNS records on Rackspace Cloud DNS -description: - - Manage DNS records on Rackspace Cloud DNS -options: - comment: - type: str - description: - - Brief description of the domain. Maximum length of 160 characters - data: - type: str - description: - - IP address for A/AAAA record, FQDN for CNAME/MX/NS, or text data for - SRV/TXT - required: True - domain: - type: str - description: - - Domain name to create the record in. This is an invalid option when - type=PTR - loadbalancer: - type: str - description: - - Load Balancer ID to create a PTR record for. Only used with type=PTR - name: - type: str - description: - - FQDN record name to create - required: True - overwrite: - description: - - Add new records if data doesn't match, instead of updating existing - record with matching name. If there are already multiple records with - matching name and overwrite=true, this module will fail. - default: true - type: bool - priority: - type: int - description: - - Required for MX and SRV records, but forbidden for other record types. - If specified, must be an integer from 0 to 65535. - server: - type: str - description: - - Server ID to create a PTR record for. 
Only used with type=PTR - state: - type: str - description: - - Indicate desired state of the resource - choices: - - present - - absent - default: present - ttl: - type: int - description: - - Time to live of record in seconds - default: 3600 - type: - type: str - description: - - DNS record type - choices: - - A - - AAAA - - CNAME - - MX - - NS - - SRV - - TXT - - PTR - required: true -notes: - - "It is recommended that plays utilizing this module be run with - C(serial: 1) to avoid exceeding the API request limit imposed by - the Rackspace CloudDNS API" - - To manipulate a C(PTR) record either C(loadbalancer) or C(server) must be - supplied - - As of version 1.7, the C(type) field is required and no longer defaults to an C(A) record. - - C(PTR) record support was added in version 1.7 -author: "Matt Martz (@sivel)" -extends_documentation_fragment: -- community.general.rackspace -- community.general.rackspace.openstack - -''' - -EXAMPLES = ''' -- name: Create DNS Records - hosts: all - gather_facts: False - tasks: - - name: Create A record - local_action: - module: rax_dns_record - credentials: ~/.raxpub - domain: example.org - name: www.example.org - data: "{{ rax_accessipv4 }}" - type: A - register: a_record - - - name: Create PTR record - local_action: - module: rax_dns_record - credentials: ~/.raxpub - server: "{{ rax_id }}" - name: "{{ inventory_hostname }}" - region: DFW - register: ptr_record -''' - -try: - import pyrax - HAS_PYRAX = True -except ImportError: - HAS_PYRAX = False - -from ansible.module_utils.basic import AnsibleModule -from ansible_collections.community.general.plugins.module_utils.rax import (rax_argument_spec, - rax_find_loadbalancer, - rax_find_server, - rax_required_together, - rax_to_dict, - setup_rax_module, - ) - - -def rax_dns_record_ptr(module, data=None, comment=None, loadbalancer=None, - name=None, server=None, state='present', ttl=7200): - changed = False - results = [] - - dns = pyrax.cloud_dns - - if not dns: - 
module.fail_json(msg='Failed to instantiate client. This ' - 'typically indicates an invalid region or an ' - 'incorrectly capitalized region name.') - - if loadbalancer: - item = rax_find_loadbalancer(module, pyrax, loadbalancer) - elif server: - item = rax_find_server(module, pyrax, server) - - if state == 'present': - current = dns.list_ptr_records(item) - for record in current: - if record.data == data: - if record.ttl != ttl or record.name != name: - try: - dns.update_ptr_record(item, record, name, data, ttl) - changed = True - except Exception as e: - module.fail_json(msg='%s' % e.message) - record.ttl = ttl - record.name = name - results.append(rax_to_dict(record)) - break - else: - results.append(rax_to_dict(record)) - break - - if not results: - record = dict(name=name, type='PTR', data=data, ttl=ttl, - comment=comment) - try: - results = dns.add_ptr_records(item, [record]) - changed = True - except Exception as e: - module.fail_json(msg='%s' % e.message) - - module.exit_json(changed=changed, records=results) - - elif state == 'absent': - current = dns.list_ptr_records(item) - for record in current: - if record.data == data: - results.append(rax_to_dict(record)) - break - - if results: - try: - dns.delete_ptr_records(item, data) - changed = True - except Exception as e: - module.fail_json(msg='%s' % e.message) - - module.exit_json(changed=changed, records=results) - - -def rax_dns_record(module, comment=None, data=None, domain=None, name=None, - overwrite=True, priority=None, record_type='A', - state='present', ttl=7200): - """Function for manipulating record types other than PTR""" - - changed = False - - dns = pyrax.cloud_dns - if not dns: - module.fail_json(msg='Failed to instantiate client. 
This ' - 'typically indicates an invalid region or an ' - 'incorrectly capitalized region name.') - - if state == 'present': - if not priority and record_type in ['MX', 'SRV']: - module.fail_json(msg='A "priority" attribute is required for ' - 'creating a MX or SRV record') - - try: - domain = dns.find(name=domain) - except Exception as e: - module.fail_json(msg='%s' % e.message) - - try: - if overwrite: - record = domain.find_record(record_type, name=name) - else: - record = domain.find_record(record_type, name=name, data=data) - except pyrax.exceptions.DomainRecordNotUnique as e: - module.fail_json(msg='overwrite=true and there are multiple matching records') - except pyrax.exceptions.DomainRecordNotFound as e: - try: - record_data = { - 'type': record_type, - 'name': name, - 'data': data, - 'ttl': ttl - } - if comment: - record_data.update(dict(comment=comment)) - if priority and record_type.upper() in ['MX', 'SRV']: - record_data.update(dict(priority=priority)) - - record = domain.add_records([record_data])[0] - changed = True - except Exception as e: - module.fail_json(msg='%s' % e.message) - - update = {} - if comment != getattr(record, 'comment', None): - update['comment'] = comment - if ttl != getattr(record, 'ttl', None): - update['ttl'] = ttl - if priority != getattr(record, 'priority', None): - update['priority'] = priority - if data != getattr(record, 'data', None): - update['data'] = data - - if update: - try: - record.update(**update) - changed = True - record.get() - except Exception as e: - module.fail_json(msg='%s' % e.message) - - elif state == 'absent': - try: - domain = dns.find(name=domain) - except Exception as e: - module.fail_json(msg='%s' % e.message) - - try: - record = domain.find_record(record_type, name=name, data=data) - except pyrax.exceptions.DomainRecordNotFound as e: - record = {} - except pyrax.exceptions.DomainRecordNotUnique as e: - module.fail_json(msg='%s' % e.message) - - if record: - try: - record.delete() - changed = True - 
except Exception as e: - module.fail_json(msg='%s' % e.message) - - module.exit_json(changed=changed, record=rax_to_dict(record)) - - -def main(): - argument_spec = rax_argument_spec() - argument_spec.update( - dict( - comment=dict(), - data=dict(required=True), - domain=dict(), - loadbalancer=dict(), - name=dict(required=True), - overwrite=dict(type='bool', default=True), - priority=dict(type='int'), - server=dict(), - state=dict(default='present', choices=['present', 'absent']), - ttl=dict(type='int', default=3600), - type=dict(required=True, choices=['A', 'AAAA', 'CNAME', 'MX', 'NS', - 'SRV', 'TXT', 'PTR']) - ) - ) - - module = AnsibleModule( - argument_spec=argument_spec, - required_together=rax_required_together(), - mutually_exclusive=[ - ['server', 'loadbalancer', 'domain'], - ], - required_one_of=[ - ['server', 'loadbalancer', 'domain'], - ], - ) - - if not HAS_PYRAX: - module.fail_json(msg='pyrax is required for this module') - - comment = module.params.get('comment') - data = module.params.get('data') - domain = module.params.get('domain') - loadbalancer = module.params.get('loadbalancer') - name = module.params.get('name') - overwrite = module.params.get('overwrite') - priority = module.params.get('priority') - server = module.params.get('server') - state = module.params.get('state') - ttl = module.params.get('ttl') - record_type = module.params.get('type') - - setup_rax_module(module, pyrax, False) - - if record_type.upper() == 'PTR': - if not server and not loadbalancer: - module.fail_json(msg='one of the following is required: ' - 'server,loadbalancer') - rax_dns_record_ptr(module, data=data, comment=comment, - loadbalancer=loadbalancer, name=name, server=server, - state=state, ttl=ttl) - else: - rax_dns_record(module, comment=comment, data=data, domain=domain, - name=name, overwrite=overwrite, priority=priority, - record_type=record_type, state=state, ttl=ttl) - - -if __name__ == '__main__': - main() diff --git 
a/plugins/modules/cloud/rackspace/rax_facts.py b/plugins/modules/cloud/rackspace/rax_facts.py deleted file mode 100644 index 0288a5e35b..0000000000 --- a/plugins/modules/cloud/rackspace/rax_facts.py +++ /dev/null @@ -1,144 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# Copyright: Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' ---- -module: rax_facts -short_description: Gather facts for Rackspace Cloud Servers -description: - - Gather facts for Rackspace Cloud Servers. -options: - address: - type: str - description: - - Server IP address to retrieve facts for, will match any IP assigned to - the server - id: - type: str - description: - - Server ID to retrieve facts for - name: - type: str - description: - - Server name to retrieve facts for -author: "Matt Martz (@sivel)" -extends_documentation_fragment: -- community.general.rackspace.openstack - -''' - -EXAMPLES = ''' -- name: Gather info about servers - hosts: all - gather_facts: False - tasks: - - name: Get facts about servers - local_action: - module: rax_facts - credentials: ~/.raxpub - name: "{{ inventory_hostname }}" - region: DFW - - name: Map some facts - ansible.builtin.set_fact: - ansible_ssh_host: "{{ rax_accessipv4 }}" -''' - -try: - import pyrax - HAS_PYRAX = True -except ImportError: - HAS_PYRAX = False - -from ansible.module_utils.basic import AnsibleModule -from ansible_collections.community.general.plugins.module_utils.rax import (rax_argument_spec, - rax_required_together, - rax_to_dict, - setup_rax_module, - ) - - -def rax_facts(module, address, name, server_id): - changed = False - - cs = pyrax.cloudservers - - if cs is None: - module.fail_json(msg='Failed to instantiate client. 
This ' - 'typically indicates an invalid region or an ' - 'incorrectly capitalized region name.') - - ansible_facts = {} - - search_opts = {} - if name: - search_opts = dict(name='^%s$' % name) - try: - servers = cs.servers.list(search_opts=search_opts) - except Exception as e: - module.fail_json(msg='%s' % e.message) - elif address: - servers = [] - try: - for server in cs.servers.list(): - for addresses in server.networks.values(): - if address in addresses: - servers.append(server) - break - except Exception as e: - module.fail_json(msg='%s' % e.message) - elif server_id: - servers = [] - try: - servers.append(cs.servers.get(server_id)) - except Exception as e: - pass - - servers[:] = [server for server in servers if server.status != "DELETED"] - - if len(servers) > 1: - module.fail_json(msg='Multiple servers found matching provided ' - 'search parameters') - elif len(servers) == 1: - ansible_facts = rax_to_dict(servers[0], 'server') - - module.exit_json(changed=changed, ansible_facts=ansible_facts) - - -def main(): - argument_spec = rax_argument_spec() - argument_spec.update( - dict( - address=dict(), - id=dict(), - name=dict(), - ) - ) - - module = AnsibleModule( - argument_spec=argument_spec, - required_together=rax_required_together(), - mutually_exclusive=[['address', 'id', 'name']], - required_one_of=[['address', 'id', 'name']], - supports_check_mode=True, - ) - - if not HAS_PYRAX: - module.fail_json(msg='pyrax is required for this module') - - address = module.params.get('address') - server_id = module.params.get('id') - name = module.params.get('name') - - setup_rax_module(module, pyrax) - - rax_facts(module, address, name, server_id) - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/cloud/rackspace/rax_files.py b/plugins/modules/cloud/rackspace/rax_files.py deleted file mode 100644 index 1e1f82c85d..0000000000 --- a/plugins/modules/cloud/rackspace/rax_files.py +++ /dev/null @@ -1,393 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 
-*- - -# (c) 2013, Paul Durivage -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' ---- -module: rax_files -short_description: Manipulate Rackspace Cloud Files Containers -description: - - Manipulate Rackspace Cloud Files Containers -options: - clear_meta: - description: - - Optionally clear existing metadata when applying metadata to existing containers. - Selecting this option is only appropriate when setting type=meta - type: bool - default: "no" - container: - type: str - description: - - The container to use for container or metadata operations. - meta: - type: dict - description: - - A hash of items to set as metadata values on a container - private: - description: - - Used to set a container as private, removing it from the CDN. B(Warning!) - Private containers, if previously made public, can have live objects - available until the TTL on cached objects expires - type: bool - default: false - public: - description: - - Used to set a container as public, available via the Cloud Files CDN - type: bool - default: false - region: - type: str - description: - - Region to create an instance in - state: - type: str - description: - - Indicate desired state of the resource - choices: ['present', 'absent', 'list'] - default: present - ttl: - type: int - description: - - In seconds, set a container-wide TTL for all objects cached on CDN edge nodes. - Setting a TTL is only appropriate for containers that are public - type: - type: str - description: - - Type of object to do work on, i.e. 
metadata object or a container object - choices: - - container - - meta - default: container - web_error: - type: str - description: - - Sets an object to be presented as the HTTP error page when accessed by the CDN URL - web_index: - type: str - description: - - Sets an object to be presented as the HTTP index page when accessed by the CDN URL -author: "Paul Durivage (@angstwad)" -extends_documentation_fragment: -- community.general.rackspace -- community.general.rackspace.openstack - -''' - -EXAMPLES = ''' -- name: "Test Cloud Files Containers" - hosts: local - gather_facts: no - tasks: - - name: "List all containers" - community.general.rax_files: - state: list - - - name: "Create container called 'mycontainer'" - community.general.rax_files: - container: mycontainer - - - name: "Create container 'mycontainer2' with metadata" - community.general.rax_files: - container: mycontainer2 - meta: - key: value - file_for: someuser@example.com - - - name: "Set a container's web index page" - community.general.rax_files: - container: mycontainer - web_index: index.html - - - name: "Set a container's web error page" - community.general.rax_files: - container: mycontainer - web_error: error.html - - - name: "Make container public" - community.general.rax_files: - container: mycontainer - public: yes - - - name: "Make container public with a 24 hour TTL" - community.general.rax_files: - container: mycontainer - public: yes - ttl: 86400 - - - name: "Make container private" - community.general.rax_files: - container: mycontainer - private: yes - -- name: "Test Cloud Files Containers Metadata Storage" - hosts: local - gather_facts: no - tasks: - - name: "Get mycontainer2 metadata" - community.general.rax_files: - container: mycontainer2 - type: meta - - - name: "Set mycontainer2 metadata" - community.general.rax_files: - container: mycontainer2 - type: meta - meta: - uploaded_by: someuser@example.com - - - name: "Remove mycontainer2 metadata" - community.general.rax_files: - 
container: "mycontainer2" - type: meta - state: absent - meta: - key: "" - file_for: "" -''' - -try: - import pyrax - HAS_PYRAX = True -except ImportError as e: - HAS_PYRAX = False - -from ansible.module_utils.basic import AnsibleModule -from ansible_collections.community.general.plugins.module_utils.rax import rax_argument_spec, rax_required_together, setup_rax_module - - -EXIT_DICT = dict(success=True) -META_PREFIX = 'x-container-meta-' - - -def _get_container(module, cf, container): - try: - return cf.get_container(container) - except pyrax.exc.NoSuchContainer as e: - module.fail_json(msg=e.message) - - -def _fetch_meta(module, container): - EXIT_DICT['meta'] = dict() - try: - for k, v in container.get_metadata().items(): - split_key = k.split(META_PREFIX)[-1] - EXIT_DICT['meta'][split_key] = v - except Exception as e: - module.fail_json(msg=e.message) - - -def meta(cf, module, container_, state, meta_, clear_meta): - c = _get_container(module, cf, container_) - - if meta_ and state == 'present': - try: - meta_set = c.set_metadata(meta_, clear=clear_meta) - except Exception as e: - module.fail_json(msg=e.message) - elif meta_ and state == 'absent': - remove_results = [] - for k, v in meta_.items(): - c.remove_metadata_key(k) - remove_results.append(k) - EXIT_DICT['deleted_meta_keys'] = remove_results - elif state == 'absent': - remove_results = [] - for k, v in c.get_metadata().items(): - c.remove_metadata_key(k) - remove_results.append(k) - EXIT_DICT['deleted_meta_keys'] = remove_results - - _fetch_meta(module, c) - _locals = locals().keys() - - EXIT_DICT['container'] = c.name - if 'meta_set' in _locals or 'remove_results' in _locals: - EXIT_DICT['changed'] = True - - module.exit_json(**EXIT_DICT) - - -def container(cf, module, container_, state, meta_, clear_meta, ttl, public, - private, web_index, web_error): - if public and private: - module.fail_json(msg='container cannot be simultaneously ' - 'set to public and private') - - if state == 'absent' and (meta_ 
or clear_meta or public or private or web_index or web_error): - module.fail_json(msg='state cannot be omitted when setting/removing ' - 'attributes on a container') - - if state == 'list': - # We don't care if attributes are specified, let's list containers - EXIT_DICT['containers'] = cf.list_containers() - module.exit_json(**EXIT_DICT) - - try: - c = cf.get_container(container_) - except pyrax.exc.NoSuchContainer as e: - # Make the container if state=present, otherwise bomb out - if state == 'present': - try: - c = cf.create_container(container_) - except Exception as e: - module.fail_json(msg=e.message) - else: - EXIT_DICT['changed'] = True - EXIT_DICT['created'] = True - else: - module.fail_json(msg=e.message) - else: - # Successfully grabbed a container object - # Delete if state is absent - if state == 'absent': - try: - cont_deleted = c.delete() - except Exception as e: - module.fail_json(msg=e.message) - else: - EXIT_DICT['deleted'] = True - - if meta_: - try: - meta_set = c.set_metadata(meta_, clear=clear_meta) - except Exception as e: - module.fail_json(msg=e.message) - finally: - _fetch_meta(module, c) - - if ttl: - try: - c.cdn_ttl = ttl - except Exception as e: - module.fail_json(msg=e.message) - else: - EXIT_DICT['ttl'] = c.cdn_ttl - - if public: - try: - cont_public = c.make_public() - except Exception as e: - module.fail_json(msg=e.message) - else: - EXIT_DICT['container_urls'] = dict(url=c.cdn_uri, - ssl_url=c.cdn_ssl_uri, - streaming_url=c.cdn_streaming_uri, - ios_uri=c.cdn_ios_uri) - - if private: - try: - cont_private = c.make_private() - except Exception as e: - module.fail_json(msg=e.message) - else: - EXIT_DICT['set_private'] = True - - if web_index: - try: - cont_web_index = c.set_web_index_page(web_index) - except Exception as e: - module.fail_json(msg=e.message) - else: - EXIT_DICT['set_index'] = True - finally: - _fetch_meta(module, c) - - if web_error: - try: - cont_err_index = c.set_web_error_page(web_error) - except Exception as e: - 
module.fail_json(msg=e.message) - else: - EXIT_DICT['set_error'] = True - finally: - _fetch_meta(module, c) - - EXIT_DICT['container'] = c.name - EXIT_DICT['objs_in_container'] = c.object_count - EXIT_DICT['total_bytes'] = c.total_bytes - - _locals = locals().keys() - if ('cont_deleted' in _locals - or 'meta_set' in _locals - or 'cont_public' in _locals - or 'cont_private' in _locals - or 'cont_web_index' in _locals - or 'cont_err_index' in _locals): - EXIT_DICT['changed'] = True - - module.exit_json(**EXIT_DICT) - - -def cloudfiles(module, container_, state, meta_, clear_meta, typ, ttl, public, - private, web_index, web_error): - """ Dispatch from here to work with metadata or file objects """ - cf = pyrax.cloudfiles - - if cf is None: - module.fail_json(msg='Failed to instantiate client. This ' - 'typically indicates an invalid region or an ' - 'incorrectly capitalized region name.') - - if typ == "container": - container(cf, module, container_, state, meta_, clear_meta, ttl, - public, private, web_index, web_error) - else: - meta(cf, module, container_, state, meta_, clear_meta) - - -def main(): - argument_spec = rax_argument_spec() - argument_spec.update( - dict( - container=dict(), - state=dict(choices=['present', 'absent', 'list'], - default='present'), - meta=dict(type='dict', default=dict()), - clear_meta=dict(default=False, type='bool'), - type=dict(choices=['container', 'meta'], default='container'), - ttl=dict(type='int'), - public=dict(default=False, type='bool'), - private=dict(default=False, type='bool'), - web_index=dict(), - web_error=dict() - ) - ) - - module = AnsibleModule( - argument_spec=argument_spec, - required_together=rax_required_together() - ) - - if not HAS_PYRAX: - module.fail_json(msg='pyrax is required for this module') - - container_ = module.params.get('container') - state = module.params.get('state') - meta_ = module.params.get('meta') - clear_meta = module.params.get('clear_meta') - typ = module.params.get('type') - ttl = 
module.params.get('ttl') - public = module.params.get('public') - private = module.params.get('private') - web_index = module.params.get('web_index') - web_error = module.params.get('web_error') - - if state in ['present', 'absent'] and not container_: - module.fail_json(msg='please specify a container name') - if clear_meta and not typ == 'meta': - module.fail_json(msg='clear_meta can only be used when setting ' - 'metadata') - - setup_rax_module(module, pyrax) - cloudfiles(module, container_, state, meta_, clear_meta, typ, ttl, public, - private, web_index, web_error) - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/cloud/rackspace/rax_files_objects.py b/plugins/modules/cloud/rackspace/rax_files_objects.py deleted file mode 100644 index 8f8793e375..0000000000 --- a/plugins/modules/cloud/rackspace/rax_files_objects.py +++ /dev/null @@ -1,549 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# (c) 2013, Paul Durivage -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' ---- -module: rax_files_objects -short_description: Upload, download, and delete objects in Rackspace Cloud Files -description: - - Upload, download, and delete objects in Rackspace Cloud Files. -options: - clear_meta: - description: - - Optionally clear existing metadata when applying metadata to existing objects. - Selecting this option is only appropriate when setting I(type=meta). - type: bool - default: false - container: - type: str - description: - - The container to use for file object operations. - required: true - dest: - type: str - description: - - The destination of a C(get) operation; i.e. a local directory, C(/home/user/myfolder). - Used to specify the destination of an operation on a remote object; i.e. a file name, - C(file1), or a comma-separated list of remote objects, C(file1,file2,file17). 
- expires: - type: int - description: - - Used to set an expiration in seconds on an uploaded file or folder. - meta: - type: dict - description: - - Items to set as metadata values on an uploaded file or folder. - method: - type: str - description: - - > - The method of operation to be performed: C(put) to upload files, C(get) to download files or - C(delete) to remove remote objects in Cloud Files. - choices: - - get - - put - - delete - default: get - src: - type: str - description: - - Source from which to upload files. Used to specify a remote object as a source for - an operation, i.e. a file name, C(file1), or a comma-separated list of remote objects, - C(file1,file2,file17). Parameters I(src) and I(dest) are mutually exclusive on remote-only object operations - structure: - description: - - Used to specify whether to maintain nested directory structure when downloading objects - from Cloud Files. Setting to false downloads the contents of a container to a single, - flat directory - type: bool - default: 'yes' - type: - type: str - description: - - Type of object to do work on - - Metadata object or a file object - choices: - - file - - meta - default: file -author: "Paul Durivage (@angstwad)" -extends_documentation_fragment: -- community.general.rackspace -- community.general.rackspace.openstack - -''' - -EXAMPLES = ''' -- name: "Test Cloud Files Objects" - hosts: local - gather_facts: False - tasks: - - name: "Get objects from test container" - community.general.rax_files_objects: - container: testcont - dest: ~/Downloads/testcont - - - name: "Get single object from test container" - community.general.rax_files_objects: - container: testcont - src: file1 - dest: ~/Downloads/testcont - - - name: "Get several objects from test container" - community.general.rax_files_objects: - container: testcont - src: file1,file2,file3 - dest: ~/Downloads/testcont - - - name: "Delete one object in test container" - community.general.rax_files_objects: - container: 
testcont - method: delete - dest: file1 - - - name: "Delete several objects in test container" - community.general.rax_files_objects: - container: testcont - method: delete - dest: file2,file3,file4 - - - name: "Delete all objects in test container" - community.general.rax_files_objects: - container: testcont - method: delete - - - name: "Upload all files to test container" - community.general.rax_files_objects: - container: testcont - method: put - src: ~/Downloads/onehundred - - - name: "Upload one file to test container" - community.general.rax_files_objects: - container: testcont - method: put - src: ~/Downloads/testcont/file1 - - - name: "Upload one file to test container with metadata" - community.general.rax_files_objects: - container: testcont - src: ~/Downloads/testcont/file2 - method: put - meta: - testkey: testdata - who_uploaded_this: someuser@example.com - - - name: "Upload one file to test container with TTL of 60 seconds" - community.general.rax_files_objects: - container: testcont - method: put - src: ~/Downloads/testcont/file3 - expires: 60 - - - name: "Attempt to get remote object that does not exist" - community.general.rax_files_objects: - container: testcont - method: get - src: FileThatDoesNotExist.jpg - dest: ~/Downloads/testcont - ignore_errors: yes - - - name: "Attempt to delete remote object that does not exist" - community.general.rax_files_objects: - container: testcont - method: delete - dest: FileThatDoesNotExist.jpg - ignore_errors: yes - -- name: "Test Cloud Files Objects Metadata" - hosts: local - gather_facts: false - tasks: - - name: "Get metadata on one object" - community.general.rax_files_objects: - container: testcont - type: meta - dest: file2 - - - name: "Get metadata on several objects" - community.general.rax_files_objects: - container: testcont - type: meta - src: file2,file1 - - - name: "Set metadata on an object" - community.general.rax_files_objects: - container: testcont - type: meta - dest: file17 - method: put - 
meta: - key1: value1 - key2: value2 - clear_meta: true - - - name: "Verify metadata is set" - community.general.rax_files_objects: - container: testcont - type: meta - src: file17 - - - name: "Delete metadata" - community.general.rax_files_objects: - container: testcont - type: meta - dest: file17 - method: delete - meta: - key1: '' - key2: '' - - - name: "Get metadata on all objects" - community.general.rax_files_objects: - container: testcont - type: meta -''' - -import os - -try: - import pyrax - HAS_PYRAX = True -except ImportError: - HAS_PYRAX = False - -from ansible.module_utils.basic import AnsibleModule -from ansible_collections.community.general.plugins.module_utils.rax import rax_argument_spec, rax_required_together, setup_rax_module - - -EXIT_DICT = dict(success=False) -META_PREFIX = 'x-object-meta-' - - -def _get_container(module, cf, container): - try: - return cf.get_container(container) - except pyrax.exc.NoSuchContainer as e: - module.fail_json(msg=e.message) - - -def _upload_folder(cf, folder, container, ttl=None, headers=None): - """ Uploads a folder to Cloud Files. - """ - total_bytes = 0 - for root, dummy, files in os.walk(folder): - for fname in files: - full_path = os.path.join(root, fname) - obj_name = os.path.relpath(full_path, folder) - obj_size = os.path.getsize(full_path) - cf.upload_file(container, full_path, obj_name=obj_name, return_none=True, ttl=ttl, headers=headers) - total_bytes += obj_size - return total_bytes - - -def upload(module, cf, container, src, dest, meta, expires): - """ Uploads a single object or a folder to Cloud Files Optionally sets an - metadata, TTL value (expires), or Content-Disposition and Content-Encoding - headers. 
- """ - if not src: - module.fail_json(msg='src must be specified when uploading') - - c = _get_container(module, cf, container) - src = os.path.abspath(os.path.expanduser(src)) - is_dir = os.path.isdir(src) - - if not is_dir and not os.path.isfile(src) or not os.path.exists(src): - module.fail_json(msg='src must be a file or a directory') - if dest and is_dir: - module.fail_json(msg='dest cannot be set when whole ' - 'directories are uploaded') - - cont_obj = None - total_bytes = 0 - try: - if dest and not is_dir: - cont_obj = c.upload_file(src, obj_name=dest, ttl=expires, headers=meta) - elif is_dir: - total_bytes = _upload_folder(cf, src, c, ttl=expires, headers=meta) - else: - cont_obj = c.upload_file(src, ttl=expires, headers=meta) - except Exception as e: - module.fail_json(msg=e.message) - - EXIT_DICT['success'] = True - EXIT_DICT['container'] = c.name - EXIT_DICT['msg'] = "Uploaded %s to container: %s" % (src, c.name) - if cont_obj or total_bytes > 0: - EXIT_DICT['changed'] = True - if meta: - EXIT_DICT['meta'] = dict(updated=True) - - if cont_obj: - EXIT_DICT['bytes'] = cont_obj.total_bytes - EXIT_DICT['etag'] = cont_obj.etag - else: - EXIT_DICT['bytes'] = total_bytes - - module.exit_json(**EXIT_DICT) - - -def download(module, cf, container, src, dest, structure): - """ Download objects from Cloud Files to a local path specified by "dest". - Optionally disable maintaining a directory structure by by passing a - false value to "structure". 
- """ - # Looking for an explicit destination - if not dest: - module.fail_json(msg='dest is a required argument when ' - 'downloading from Cloud Files') - - # Attempt to fetch the container by name - c = _get_container(module, cf, container) - - # Accept a single object name or a comma-separated list of objs - # If not specified, get the entire container - if src: - objs = map(str.strip, src.split(',')) - else: - objs = c.get_object_names() - - dest = os.path.abspath(os.path.expanduser(dest)) - is_dir = os.path.isdir(dest) - - if not is_dir: - module.fail_json(msg='dest must be a directory') - - try: - results = [c.download_object(obj, dest, structure=structure) for obj in objs] - except Exception as e: - module.fail_json(msg=e.message) - - len_results = len(results) - len_objs = len(objs) - - EXIT_DICT['container'] = c.name - EXIT_DICT['requested_downloaded'] = results - if results: - EXIT_DICT['changed'] = True - if len_results == len_objs: - EXIT_DICT['success'] = True - EXIT_DICT['msg'] = "%s objects downloaded to %s" % (len_results, dest) - else: - EXIT_DICT['msg'] = "Error: only %s of %s objects were " \ - "downloaded" % (len_results, len_objs) - module.exit_json(**EXIT_DICT) - - -def delete(module, cf, container, src, dest): - """ Delete specific objects by proving a single file name or a - comma-separated list to src OR dest (but not both). Omitting file name(s) - assumes the entire container is to be deleted. 
- """ - if src and dest: - module.fail_json(msg="Error: ambiguous instructions; files to be deleted " - "have been specified on both src and dest args") - - c = _get_container(module, cf, container) - - objs = dest or src - if objs: - objs = map(str.strip, objs.split(',')) - else: - objs = c.get_object_names() - - num_objs = len(objs) - - try: - results = [c.delete_object(obj) for obj in objs] - except Exception as e: - module.fail_json(msg=e.message) - - num_deleted = results.count(True) - - EXIT_DICT['container'] = c.name - EXIT_DICT['deleted'] = num_deleted - EXIT_DICT['requested_deleted'] = objs - - if num_deleted: - EXIT_DICT['changed'] = True - - if num_objs == num_deleted: - EXIT_DICT['success'] = True - EXIT_DICT['msg'] = "%s objects deleted" % num_deleted - else: - EXIT_DICT['msg'] = ("Error: only %s of %s objects " - "deleted" % (num_deleted, num_objs)) - module.exit_json(**EXIT_DICT) - - -def get_meta(module, cf, container, src, dest): - """ Get metadata for a single file, comma-separated list, or entire - container - """ - if src and dest: - module.fail_json(msg="Error: ambiguous instructions; files to be deleted " - "have been specified on both src and dest args") - - c = _get_container(module, cf, container) - - objs = dest or src - if objs: - objs = map(str.strip, objs.split(',')) - else: - objs = c.get_object_names() - - try: - results = dict() - for obj in objs: - meta = c.get_object(obj).get_metadata() - results[obj] = dict((k.split(META_PREFIX)[-1], v) for k, v in meta.items()) - except Exception as e: - module.fail_json(msg=e.message) - - EXIT_DICT['container'] = c.name - if results: - EXIT_DICT['meta_results'] = results - EXIT_DICT['success'] = True - module.exit_json(**EXIT_DICT) - - -def put_meta(module, cf, container, src, dest, meta, clear_meta): - """ Set metadata on a container, single file, or comma-separated list. 
- Passing a true value to clear_meta clears the metadata stored in Cloud - Files before setting the new metadata to the value of "meta". - """ - if src and dest: - module.fail_json(msg="Error: ambiguous instructions; files to set meta" - " have been specified on both src and dest args") - objs = dest or src - objs = map(str.strip, objs.split(',')) - - c = _get_container(module, cf, container) - - try: - results = [c.get_object(obj).set_metadata(meta, clear=clear_meta) for obj in objs] - except Exception as e: - module.fail_json(msg=e.message) - - EXIT_DICT['container'] = c.name - EXIT_DICT['success'] = True - if results: - EXIT_DICT['changed'] = True - EXIT_DICT['num_changed'] = True - module.exit_json(**EXIT_DICT) - - -def delete_meta(module, cf, container, src, dest, meta): - """ Removes metadata keys and values specified in meta, if any. Deletes on - all objects specified by src or dest (but not both), if any; otherwise it - deletes keys on all objects in the container - """ - if src and dest: - module.fail_json(msg="Error: ambiguous instructions; meta keys to be " - "deleted have been specified on both src and dest" - " args") - objs = dest or src - objs = map(str.strip, objs.split(',')) - - c = _get_container(module, cf, container) - - try: - for obj in objs: - o = c.get_object(obj) - results = [ - o.remove_metadata_key(k) - for k in (meta or o.get_metadata()) - ] - except Exception as e: - module.fail_json(msg=e.message) - - EXIT_DICT['container'] = c.name - EXIT_DICT['success'] = True - if results: - EXIT_DICT['changed'] = True - EXIT_DICT['num_deleted'] = len(results) - module.exit_json(**EXIT_DICT) - - -def cloudfiles(module, container, src, dest, method, typ, meta, clear_meta, - structure, expires): - """ Dispatch from here to work with metadata or file objects """ - cf = pyrax.cloudfiles - - if cf is None: - module.fail_json(msg='Failed to instantiate client. 
This ' - 'typically indicates an invalid region or an ' - 'incorrectly capitalized region name.') - - if typ == "file": - if method == 'get': - download(module, cf, container, src, dest, structure) - - if method == 'put': - upload(module, cf, container, src, dest, meta, expires) - - if method == 'delete': - delete(module, cf, container, src, dest) - - else: - if method == 'get': - get_meta(module, cf, container, src, dest) - - if method == 'put': - put_meta(module, cf, container, src, dest, meta, clear_meta) - - if method == 'delete': - delete_meta(module, cf, container, src, dest, meta) - - -def main(): - argument_spec = rax_argument_spec() - argument_spec.update( - dict( - container=dict(required=True), - src=dict(), - dest=dict(), - method=dict(default='get', choices=['put', 'get', 'delete']), - type=dict(default='file', choices=['file', 'meta']), - meta=dict(type='dict', default=dict()), - clear_meta=dict(default=False, type='bool'), - structure=dict(default=True, type='bool'), - expires=dict(type='int'), - ) - ) - - module = AnsibleModule( - argument_spec=argument_spec, - required_together=rax_required_together(), - ) - - if not HAS_PYRAX: - module.fail_json(msg='pyrax is required for this module') - - container = module.params.get('container') - src = module.params.get('src') - dest = module.params.get('dest') - method = module.params.get('method') - typ = module.params.get('type') - meta = module.params.get('meta') - clear_meta = module.params.get('clear_meta') - structure = module.params.get('structure') - expires = module.params.get('expires') - - if clear_meta and not typ == 'meta': - module.fail_json(msg='clear_meta can only be used when setting metadata') - - setup_rax_module(module, pyrax) - cloudfiles(module, container, src, dest, method, typ, meta, clear_meta, structure, expires) - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/cloud/rackspace/rax_identity.py b/plugins/modules/cloud/rackspace/rax_identity.py deleted file mode 
100644 index 2021052faa..0000000000 --- a/plugins/modules/cloud/rackspace/rax_identity.py +++ /dev/null @@ -1,103 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# Copyright: Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' ---- -module: rax_identity -short_description: Load Rackspace Cloud Identity -description: - - Verifies Rackspace Cloud credentials and returns identity information -options: - state: - type: str - description: - - Indicate desired state of the resource - choices: ['present'] - default: present - required: false -author: - - "Christopher H. Laco (@claco)" - - "Matt Martz (@sivel)" -extends_documentation_fragment: -- community.general.rackspace.openstack - -''' - -EXAMPLES = ''' -- name: Load Rackspace Cloud Identity - gather_facts: False - hosts: local - connection: local - tasks: - - name: Load Identity - local_action: - module: rax_identity - credentials: ~/.raxpub - region: DFW - register: rackspace_identity -''' - -try: - import pyrax - HAS_PYRAX = True -except ImportError: - HAS_PYRAX = False - -from ansible.module_utils.basic import AnsibleModule -from ansible_collections.community.general.plugins.module_utils.rax import (rax_argument_spec, rax_required_together, rax_to_dict, - setup_rax_module) - - -def cloud_identity(module, state, identity): - instance = dict( - authenticated=identity.authenticated, - credentials=identity._creds_file - ) - changed = False - - instance.update(rax_to_dict(identity)) - instance['services'] = instance.get('services', {}).keys() - - if state == 'present': - if not identity.authenticated: - module.fail_json(msg='Credentials could not be verified!') - - module.exit_json(changed=changed, identity=instance) - - -def main(): - argument_spec = rax_argument_spec() - argument_spec.update( - dict( - state=dict(default='present', 
choices=['present']) - ) - ) - - module = AnsibleModule( - argument_spec=argument_spec, - required_together=rax_required_together() - ) - - if not HAS_PYRAX: - module.fail_json(msg='pyrax is required for this module') - - state = module.params.get('state') - - setup_rax_module(module, pyrax) - - if not pyrax.identity: - module.fail_json(msg='Failed to instantiate client. This ' - 'typically indicates an invalid region or an ' - 'incorrectly capitalized region name.') - - cloud_identity(module, state, pyrax.identity) - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/cloud/rackspace/rax_keypair.py b/plugins/modules/cloud/rackspace/rax_keypair.py deleted file mode 100644 index 90b0183e50..0000000000 --- a/plugins/modules/cloud/rackspace/rax_keypair.py +++ /dev/null @@ -1,172 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# Copyright: Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' ---- -module: rax_keypair -short_description: Create a keypair for use with Rackspace Cloud Servers -description: - - Create a keypair for use with Rackspace Cloud Servers -options: - name: - type: str - description: - - Name of keypair - required: true - public_key: - type: str - description: - - Public Key string to upload. Can be a file path or string - state: - type: str - description: - - Indicate desired state of the resource - choices: - - present - - absent - default: present -author: "Matt Martz (@sivel)" -notes: - - Keypairs cannot be manipulated, only created and deleted. To "update" a - keypair you must first delete and then recreate. 
- - The ability to specify a file path for the public key was added in 1.7 -extends_documentation_fragment: -- community.general.rackspace.openstack - -''' - -EXAMPLES = ''' -- name: Create a keypair - hosts: localhost - gather_facts: False - tasks: - - name: Keypair request - local_action: - module: rax_keypair - credentials: ~/.raxpub - name: my_keypair - region: DFW - register: keypair - - name: Create local public key - local_action: - module: copy - content: "{{ keypair.keypair.public_key }}" - dest: "{{ inventory_dir }}/{{ keypair.keypair.name }}.pub" - - name: Create local private key - local_action: - module: copy - content: "{{ keypair.keypair.private_key }}" - dest: "{{ inventory_dir }}/{{ keypair.keypair.name }}" - -- name: Create a keypair - hosts: localhost - gather_facts: False - tasks: - - name: Keypair request - local_action: - module: rax_keypair - credentials: ~/.raxpub - name: my_keypair - public_key: "{{ lookup('file', 'authorized_keys/id_rsa.pub') }}" - region: DFW - register: keypair -''' -import os - -try: - import pyrax - HAS_PYRAX = True -except ImportError: - HAS_PYRAX = False - -from ansible.module_utils.basic import AnsibleModule -from ansible_collections.community.general.plugins.module_utils.rax import (rax_argument_spec, - rax_required_together, - rax_to_dict, - setup_rax_module, - ) - - -def rax_keypair(module, name, public_key, state): - changed = False - - cs = pyrax.cloudservers - - if cs is None: - module.fail_json(msg='Failed to instantiate client. 
This ' - 'typically indicates an invalid region or an ' - 'incorrectly capitalized region name.') - - keypair = {} - - if state == 'present': - if public_key and os.path.isfile(public_key): - try: - f = open(public_key) - public_key = f.read() - f.close() - except Exception as e: - module.fail_json(msg='Failed to load %s' % public_key) - - try: - keypair = cs.keypairs.find(name=name) - except cs.exceptions.NotFound: - try: - keypair = cs.keypairs.create(name, public_key) - changed = True - except Exception as e: - module.fail_json(msg='%s' % e.message) - except Exception as e: - module.fail_json(msg='%s' % e.message) - - elif state == 'absent': - try: - keypair = cs.keypairs.find(name=name) - except Exception: - pass - - if keypair: - try: - keypair.delete() - changed = True - except Exception as e: - module.fail_json(msg='%s' % e.message) - - module.exit_json(changed=changed, keypair=rax_to_dict(keypair)) - - -def main(): - argument_spec = rax_argument_spec() - argument_spec.update( - dict( - name=dict(required=True), - public_key=dict(), - state=dict(default='present', choices=['absent', 'present']), - ) - ) - - module = AnsibleModule( - argument_spec=argument_spec, - required_together=rax_required_together(), - ) - - if not HAS_PYRAX: - module.fail_json(msg='pyrax is required for this module') - - name = module.params.get('name') - public_key = module.params.get('public_key') - state = module.params.get('state') - - setup_rax_module(module, pyrax) - - rax_keypair(module, name, public_key, state) - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/cloud/rackspace/rax_meta.py b/plugins/modules/cloud/rackspace/rax_meta.py deleted file mode 100644 index 3504181f19..0000000000 --- a/plugins/modules/cloud/rackspace/rax_meta.py +++ /dev/null @@ -1,174 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# Copyright: Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import 
absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' ---- -module: rax_meta -short_description: Manipulate metadata for Rackspace Cloud Servers -description: - - Manipulate metadata for Rackspace Cloud Servers -options: - address: - type: str - description: - - Server IP address to modify metadata for, will match any IP assigned to - the server - id: - type: str - description: - - Server ID to modify metadata for - name: - type: str - description: - - Server name to modify metadata for - meta: - type: dict - description: - - A hash of metadata to associate with the instance -author: "Matt Martz (@sivel)" -extends_documentation_fragment: -- community.general.rackspace.openstack - -''' - -EXAMPLES = ''' -- name: Set metadata for a server - hosts: all - gather_facts: False - tasks: - - name: Set metadata - local_action: - module: rax_meta - credentials: ~/.raxpub - name: "{{ inventory_hostname }}" - region: DFW - meta: - group: primary_group - groups: - - group_two - - group_three - app: my_app - - - name: Clear metadata - local_action: - module: rax_meta - credentials: ~/.raxpub - name: "{{ inventory_hostname }}" - region: DFW -''' - -import json - -try: - import pyrax - HAS_PYRAX = True -except ImportError: - HAS_PYRAX = False - -from ansible.module_utils.basic import AnsibleModule -from ansible_collections.community.general.plugins.module_utils.rax import rax_argument_spec, rax_required_together, setup_rax_module -from ansible.module_utils.six import string_types - - -def rax_meta(module, address, name, server_id, meta): - changed = False - - cs = pyrax.cloudservers - - if cs is None: - module.fail_json(msg='Failed to instantiate client. 
This ' - 'typically indicates an invalid region or an ' - 'incorrectly capitalized region name.') - - search_opts = {} - if name: - search_opts = dict(name='^%s$' % name) - try: - servers = cs.servers.list(search_opts=search_opts) - except Exception as e: - module.fail_json(msg='%s' % e.message) - elif address: - servers = [] - try: - for server in cs.servers.list(): - for addresses in server.networks.values(): - if address in addresses: - servers.append(server) - break - except Exception as e: - module.fail_json(msg='%s' % e.message) - elif server_id: - servers = [] - try: - servers.append(cs.servers.get(server_id)) - except Exception as e: - pass - - if len(servers) > 1: - module.fail_json(msg='Multiple servers found matching provided ' - 'search parameters') - elif not servers: - module.fail_json(msg='Failed to find a server matching provided ' - 'search parameters') - - # Normalize and ensure all metadata values are strings - for k, v in meta.items(): - if isinstance(v, list): - meta[k] = ','.join(['%s' % i for i in v]) - elif isinstance(v, dict): - meta[k] = json.dumps(v) - elif not isinstance(v, string_types): - meta[k] = '%s' % v - - server = servers[0] - if server.metadata == meta: - changed = False - else: - changed = True - removed = set(server.metadata.keys()).difference(meta.keys()) - cs.servers.delete_meta(server, list(removed)) - cs.servers.set_meta(server, meta) - server.get() - - module.exit_json(changed=changed, meta=server.metadata) - - -def main(): - argument_spec = rax_argument_spec() - argument_spec.update( - dict( - address=dict(), - id=dict(), - name=dict(), - meta=dict(type='dict', default=dict()), - ) - ) - - module = AnsibleModule( - argument_spec=argument_spec, - required_together=rax_required_together(), - mutually_exclusive=[['address', 'id', 'name']], - required_one_of=[['address', 'id', 'name']], - ) - - if not HAS_PYRAX: - module.fail_json(msg='pyrax is required for this module') - - address = module.params.get('address') - server_id 
= module.params.get('id') - name = module.params.get('name') - meta = module.params.get('meta') - - setup_rax_module(module, pyrax) - - rax_meta(module, address, name, server_id, meta) - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/cloud/rackspace/rax_mon_alarm.py b/plugins/modules/cloud/rackspace/rax_mon_alarm.py deleted file mode 100644 index 7e99db3fa8..0000000000 --- a/plugins/modules/cloud/rackspace/rax_mon_alarm.py +++ /dev/null @@ -1,228 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# Copyright: Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' ---- -module: rax_mon_alarm -short_description: Create or delete a Rackspace Cloud Monitoring alarm. -description: -- Create or delete a Rackspace Cloud Monitoring alarm that associates an - existing rax_mon_entity, rax_mon_check, and rax_mon_notification_plan with - criteria that specify what conditions will trigger which levels of - notifications. Rackspace monitoring module flow | rax_mon_entity -> - rax_mon_check -> rax_mon_notification -> rax_mon_notification_plan -> - *rax_mon_alarm* -options: - state: - type: str - description: - - Ensure that the alarm with this C(label) exists or does not exist. - choices: [ "present", "absent" ] - required: false - default: present - label: - type: str - description: - - Friendly name for this alarm, used to achieve idempotence. Must be a String - between 1 and 255 characters long. - required: true - entity_id: - type: str - description: - - ID of the entity this alarm is attached to. May be acquired by registering - the value of a rax_mon_entity task. - required: true - check_id: - type: str - description: - - ID of the check that should be alerted on. May be acquired by registering - the value of a rax_mon_check task. 
- required: true - notification_plan_id: - type: str - description: - - ID of the notification plan to trigger if this alarm fires. May be acquired - by registering the value of a rax_mon_notification_plan task. - required: true - criteria: - type: str - description: - - Alarm DSL that describes alerting conditions and their output states. Must - be between 1 and 16384 characters long. See - http://docs.rackspace.com/cm/api/v1.0/cm-devguide/content/alerts-language.html - for a reference on the alerting language. - disabled: - description: - - If yes, create this alarm, but leave it in an inactive state. Defaults to - no. - type: bool - default: false - metadata: - type: dict - description: - - Arbitrary key/value pairs to accompany the alarm. Must be a hash of String - keys and values between 1 and 255 characters long. -author: Ash Wilson (@smashwilson) -extends_documentation_fragment: -- community.general.rackspace.openstack - -''' - -EXAMPLES = ''' -- name: Alarm example - gather_facts: False - hosts: local - connection: local - tasks: - - name: Ensure that a specific alarm exists. 
- community.general.rax_mon_alarm: - credentials: ~/.rax_pub - state: present - label: uhoh - entity_id: "{{ the_entity['entity']['id'] }}" - check_id: "{{ the_check['check']['id'] }}" - notification_plan_id: "{{ defcon1['notification_plan']['id'] }}" - criteria: > - if (rate(metric['average']) > 10) { - return new AlarmStatus(WARNING); - } - return new AlarmStatus(OK); - register: the_alarm -''' - -try: - import pyrax - HAS_PYRAX = True -except ImportError: - HAS_PYRAX = False - -from ansible.module_utils.basic import AnsibleModule -from ansible_collections.community.general.plugins.module_utils.rax import rax_argument_spec, rax_required_together, setup_rax_module - - -def alarm(module, state, label, entity_id, check_id, notification_plan_id, criteria, - disabled, metadata): - - if len(label) < 1 or len(label) > 255: - module.fail_json(msg='label must be between 1 and 255 characters long') - - if criteria and len(criteria) < 1 or len(criteria) > 16384: - module.fail_json(msg='criteria must be between 1 and 16384 characters long') - - # Coerce attributes. - - changed = False - alarm = None - - cm = pyrax.cloud_monitoring - if not cm: - module.fail_json(msg='Failed to instantiate client. This typically ' - 'indicates an invalid region or an incorrectly ' - 'capitalized region name.') - - existing = [a for a in cm.list_alarms(entity_id) if a.label == label] - - if existing: - alarm = existing[0] - - if state == 'present': - should_create = False - should_update = False - should_delete = False - - if len(existing) > 1: - module.fail_json(msg='%s existing alarms have the label %s.' 
% - (len(existing), label)) - - if alarm: - if check_id != alarm.check_id or notification_plan_id != alarm.notification_plan_id: - should_delete = should_create = True - - should_update = (disabled and disabled != alarm.disabled) or \ - (metadata and metadata != alarm.metadata) or \ - (criteria and criteria != alarm.criteria) - - if should_update and not should_delete: - cm.update_alarm(entity=entity_id, alarm=alarm, - criteria=criteria, disabled=disabled, - label=label, metadata=metadata) - changed = True - - if should_delete: - alarm.delete() - changed = True - else: - should_create = True - - if should_create: - alarm = cm.create_alarm(entity=entity_id, check=check_id, - notification_plan=notification_plan_id, - criteria=criteria, disabled=disabled, label=label, - metadata=metadata) - changed = True - else: - for a in existing: - a.delete() - changed = True - - if alarm: - alarm_dict = { - "id": alarm.id, - "label": alarm.label, - "check_id": alarm.check_id, - "notification_plan_id": alarm.notification_plan_id, - "criteria": alarm.criteria, - "disabled": alarm.disabled, - "metadata": alarm.metadata - } - module.exit_json(changed=changed, alarm=alarm_dict) - else: - module.exit_json(changed=changed) - - -def main(): - argument_spec = rax_argument_spec() - argument_spec.update( - dict( - state=dict(default='present', choices=['present', 'absent']), - label=dict(required=True), - entity_id=dict(required=True), - check_id=dict(required=True), - notification_plan_id=dict(required=True), - criteria=dict(), - disabled=dict(type='bool', default=False), - metadata=dict(type='dict') - ) - ) - - module = AnsibleModule( - argument_spec=argument_spec, - required_together=rax_required_together() - ) - - if not HAS_PYRAX: - module.fail_json(msg='pyrax is required for this module') - - state = module.params.get('state') - label = module.params.get('label') - entity_id = module.params.get('entity_id') - check_id = module.params.get('check_id') - notification_plan_id = 
module.params.get('notification_plan_id') - criteria = module.params.get('criteria') - disabled = module.boolean(module.params.get('disabled')) - metadata = module.params.get('metadata') - - setup_rax_module(module, pyrax) - - alarm(module, state, label, entity_id, check_id, notification_plan_id, - criteria, disabled, metadata) - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/cloud/rackspace/rax_mon_check.py b/plugins/modules/cloud/rackspace/rax_mon_check.py deleted file mode 100644 index 17a3932f6e..0000000000 --- a/plugins/modules/cloud/rackspace/rax_mon_check.py +++ /dev/null @@ -1,320 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# Copyright: Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' ---- -module: rax_mon_check -short_description: Create or delete a Rackspace Cloud Monitoring check for an - existing entity. -description: -- Create or delete a Rackspace Cloud Monitoring check associated with an - existing rax_mon_entity. A check is a specific test or measurement that is - performed, possibly from different monitoring zones, on the systems you - monitor. Rackspace monitoring module flow | rax_mon_entity -> - *rax_mon_check* -> rax_mon_notification -> rax_mon_notification_plan -> - rax_mon_alarm -options: - state: - type: str - description: - - Ensure that a check with this C(label) exists or does not exist. - choices: ["present", "absent"] - default: present - entity_id: - type: str - description: - - ID of the rax_mon_entity to target with this check. - required: true - label: - type: str - description: - - Defines a label for this check, between 1 and 64 characters long. - required: true - check_type: - type: str - description: - - The type of check to create. C(remote.) checks may be created on any - rax_mon_entity. C(agent.) 
checks may only be created on rax_mon_entities - that have a non-null C(agent_id). - - | - Choices for this option are: - - C(remote.dns) - - C(remote.ftp-banner) - - C(remote.http) - - C(remote.imap-banner) - - C(remote.mssql-banner) - - C(remote.mysql-banner) - - C(remote.ping) - - C(remote.pop3-banner) - - C(remote.postgresql-banner) - - C(remote.smtp-banner) - - C(remote.smtp) - - C(remote.ssh) - - C(remote.tcp) - - C(remote.telnet-banner) - - C(agent.filesystem) - - C(agent.memory) - - C(agent.load_average) - - C(agent.cpu) - - C(agent.disk) - - C(agent.network) - - C(agent.plugin) - required: true - monitoring_zones_poll: - type: str - description: - - Comma-separated list of the names of the monitoring zones the check should - run from. Available monitoring zones include mzdfw, mzhkg, mziad, mzlon, - mzord and mzsyd. Required for remote.* checks; prohibited for agent.* checks. - target_hostname: - type: str - description: - - One of `target_hostname` and `target_alias` is required for remote.* checks, - but prohibited for agent.* checks. The hostname this check should target. - Must be a valid IPv4, IPv6, or FQDN. - target_alias: - type: str - description: - - One of `target_alias` and `target_hostname` is required for remote.* checks, - but prohibited for agent.* checks. Use the corresponding key in the entity's - `ip_addresses` hash to resolve an IP address to target. - details: - type: dict - description: - - Additional details specific to the check type. Must be a hash of strings - between 1 and 255 characters long, or an array or object containing 0 to - 256 items. - disabled: - description: - - If "yes", ensure the check is created, but don't actually use it yet. - type: bool - default: false - metadata: - type: dict - description: - - Hash of arbitrary key-value pairs to accompany this check if it fires. - Keys and values must be strings between 1 and 255 characters long. 
- period: - type: int - description: - - The number of seconds between each time the check is performed. Must be - greater than the minimum period set on your account. - timeout: - type: int - description: - - The number of seconds this check will wait when attempting to collect - results. Must be less than the period. -author: Ash Wilson (@smashwilson) -extends_documentation_fragment: -- community.general.rackspace.openstack - -''' - -EXAMPLES = ''' -- name: Create a monitoring check - gather_facts: False - hosts: local - connection: local - tasks: - - name: Associate a check with an existing entity. - community.general.rax_mon_check: - credentials: ~/.rax_pub - state: present - entity_id: "{{ the_entity['entity']['id'] }}" - label: the_check - check_type: remote.ping - monitoring_zones_poll: mziad,mzord,mzdfw - details: - count: 10 - meta: - hurf: durf - register: the_check -''' - -try: - import pyrax - HAS_PYRAX = True -except ImportError: - HAS_PYRAX = False - -from ansible.module_utils.basic import AnsibleModule -from ansible_collections.community.general.plugins.module_utils.rax import rax_argument_spec, rax_required_together, setup_rax_module - - -def cloud_check(module, state, entity_id, label, check_type, - monitoring_zones_poll, target_hostname, target_alias, details, - disabled, metadata, period, timeout): - - # Coerce attributes. - - if monitoring_zones_poll and not isinstance(monitoring_zones_poll, list): - monitoring_zones_poll = [monitoring_zones_poll] - - if period: - period = int(period) - - if timeout: - timeout = int(timeout) - - changed = False - check = None - - cm = pyrax.cloud_monitoring - if not cm: - module.fail_json(msg='Failed to instantiate client. This typically ' - 'indicates an invalid region or an incorrectly ' - 'capitalized region name.') - - entity = cm.get_entity(entity_id) - if not entity: - module.fail_json(msg='Failed to instantiate entity. "%s" may not be' - ' a valid entity id.' 
% entity_id) - - existing = [e for e in entity.list_checks() if e.label == label] - - if existing: - check = existing[0] - - if state == 'present': - if len(existing) > 1: - module.fail_json(msg='%s existing checks have a label of %s.' % - (len(existing), label)) - - should_delete = False - should_create = False - should_update = False - - if check: - # Details may include keys set to default values that are not - # included in the initial creation. - # - # Only force a recreation of the check if one of the *specified* - # keys is missing or has a different value. - if details: - for (key, value) in details.items(): - if key not in check.details: - should_delete = should_create = True - elif value != check.details[key]: - should_delete = should_create = True - - should_update = label != check.label or \ - (target_hostname and target_hostname != check.target_hostname) or \ - (target_alias and target_alias != check.target_alias) or \ - (disabled != check.disabled) or \ - (metadata and metadata != check.metadata) or \ - (period and period != check.period) or \ - (timeout and timeout != check.timeout) or \ - (monitoring_zones_poll and monitoring_zones_poll != check.monitoring_zones_poll) - - if should_update and not should_delete: - check.update(label=label, - disabled=disabled, - metadata=metadata, - monitoring_zones_poll=monitoring_zones_poll, - timeout=timeout, - period=period, - target_alias=target_alias, - target_hostname=target_hostname) - changed = True - else: - # The check doesn't exist yet. 
- should_create = True - - if should_delete: - check.delete() - - if should_create: - check = cm.create_check(entity, - label=label, - check_type=check_type, - target_hostname=target_hostname, - target_alias=target_alias, - monitoring_zones_poll=monitoring_zones_poll, - details=details, - disabled=disabled, - metadata=metadata, - period=period, - timeout=timeout) - changed = True - elif state == 'absent': - if check: - check.delete() - changed = True - else: - module.fail_json(msg='state must be either present or absent.') - - if check: - check_dict = { - "id": check.id, - "label": check.label, - "type": check.type, - "target_hostname": check.target_hostname, - "target_alias": check.target_alias, - "monitoring_zones_poll": check.monitoring_zones_poll, - "details": check.details, - "disabled": check.disabled, - "metadata": check.metadata, - "period": check.period, - "timeout": check.timeout - } - module.exit_json(changed=changed, check=check_dict) - else: - module.exit_json(changed=changed) - - -def main(): - argument_spec = rax_argument_spec() - argument_spec.update( - dict( - entity_id=dict(required=True), - label=dict(required=True), - check_type=dict(required=True), - monitoring_zones_poll=dict(), - target_hostname=dict(), - target_alias=dict(), - details=dict(type='dict', default={}), - disabled=dict(type='bool', default=False), - metadata=dict(type='dict', default={}), - period=dict(type='int'), - timeout=dict(type='int'), - state=dict(default='present', choices=['present', 'absent']) - ) - ) - - module = AnsibleModule( - argument_spec=argument_spec, - required_together=rax_required_together() - ) - - if not HAS_PYRAX: - module.fail_json(msg='pyrax is required for this module') - - entity_id = module.params.get('entity_id') - label = module.params.get('label') - check_type = module.params.get('check_type') - monitoring_zones_poll = module.params.get('monitoring_zones_poll') - target_hostname = module.params.get('target_hostname') - target_alias = 
module.params.get('target_alias') - details = module.params.get('details') - disabled = module.boolean(module.params.get('disabled')) - metadata = module.params.get('metadata') - period = module.params.get('period') - timeout = module.params.get('timeout') - - state = module.params.get('state') - - setup_rax_module(module, pyrax) - - cloud_check(module, state, entity_id, label, check_type, - monitoring_zones_poll, target_hostname, target_alias, details, - disabled, metadata, period, timeout) - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/cloud/rackspace/rax_mon_entity.py b/plugins/modules/cloud/rackspace/rax_mon_entity.py deleted file mode 100644 index 2f8cdeefd8..0000000000 --- a/plugins/modules/cloud/rackspace/rax_mon_entity.py +++ /dev/null @@ -1,192 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# Copyright: Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' ---- -module: rax_mon_entity -short_description: Create or delete a Rackspace Cloud Monitoring entity -description: -- Create or delete a Rackspace Cloud Monitoring entity, which represents a device - to monitor. Entities associate checks and alarms with a target system and - provide a convenient, centralized place to store IP addresses. Rackspace - monitoring module flow | *rax_mon_entity* -> rax_mon_check -> - rax_mon_notification -> rax_mon_notification_plan -> rax_mon_alarm -options: - label: - type: str - description: - - Defines a name for this entity. Must be a non-empty string between 1 and - 255 characters long. - required: true - state: - type: str - description: - - Ensure that an entity with this C(name) exists or does not exist. 
- choices: ["present", "absent"] - default: present - agent_id: - type: str - description: - - Rackspace monitoring agent on the target device to which this entity is - bound. Necessary to collect C(agent.) rax_mon_checks against this entity. - named_ip_addresses: - type: dict - description: - - Hash of IP addresses that may be referenced by name by rax_mon_checks - added to this entity. Must be a dictionary of with keys that are names - between 1 and 64 characters long, and values that are valid IPv4 or IPv6 - addresses. - metadata: - type: dict - description: - - Hash of arbitrary C(name), C(value) pairs that are passed to associated - rax_mon_alarms. Names and values must all be between 1 and 255 characters - long. -author: Ash Wilson (@smashwilson) -extends_documentation_fragment: -- community.general.rackspace.openstack - -''' - -EXAMPLES = ''' -- name: Entity example - gather_facts: False - hosts: local - connection: local - tasks: - - name: Ensure an entity exists - community.general.rax_mon_entity: - credentials: ~/.rax_pub - state: present - label: my_entity - named_ip_addresses: - web_box: 192.0.2.4 - db_box: 192.0.2.5 - meta: - hurf: durf - register: the_entity -''' - -try: - import pyrax - HAS_PYRAX = True -except ImportError: - HAS_PYRAX = False - -from ansible.module_utils.basic import AnsibleModule -from ansible_collections.community.general.plugins.module_utils.rax import rax_argument_spec, rax_required_together, setup_rax_module - - -def cloud_monitoring(module, state, label, agent_id, named_ip_addresses, - metadata): - - if len(label) < 1 or len(label) > 255: - module.fail_json(msg='label must be between 1 and 255 characters long') - - changed = False - - cm = pyrax.cloud_monitoring - if not cm: - module.fail_json(msg='Failed to instantiate client. 
This typically ' - 'indicates an invalid region or an incorrectly ' - 'capitalized region name.') - - existing = [] - for entity in cm.list_entities(): - if label == entity.label: - existing.append(entity) - - entity = None - - if existing: - entity = existing[0] - - if state == 'present': - should_update = False - should_delete = False - should_create = False - - if len(existing) > 1: - module.fail_json(msg='%s existing entities have the label %s.' % - (len(existing), label)) - - if entity: - if named_ip_addresses and named_ip_addresses != entity.ip_addresses: - should_delete = should_create = True - - # Change an existing Entity, unless there's nothing to do. - should_update = agent_id and agent_id != entity.agent_id or \ - (metadata and metadata != entity.metadata) - - if should_update and not should_delete: - entity.update(agent_id, metadata) - changed = True - - if should_delete: - entity.delete() - else: - should_create = True - - if should_create: - # Create a new Entity. - entity = cm.create_entity(label=label, agent=agent_id, - ip_addresses=named_ip_addresses, - metadata=metadata) - changed = True - else: - # Delete the existing Entities. 
- for e in existing: - e.delete() - changed = True - - if entity: - entity_dict = { - "id": entity.id, - "name": entity.name, - "agent_id": entity.agent_id, - } - module.exit_json(changed=changed, entity=entity_dict) - else: - module.exit_json(changed=changed) - - -def main(): - argument_spec = rax_argument_spec() - argument_spec.update( - dict( - state=dict(default='present', choices=['present', 'absent']), - label=dict(required=True), - agent_id=dict(), - named_ip_addresses=dict(type='dict', default={}), - metadata=dict(type='dict', default={}) - ) - ) - - module = AnsibleModule( - argument_spec=argument_spec, - required_together=rax_required_together() - ) - - if not HAS_PYRAX: - module.fail_json(msg='pyrax is required for this module') - - state = module.params.get('state') - - label = module.params.get('label') - agent_id = module.params.get('agent_id') - named_ip_addresses = module.params.get('named_ip_addresses') - metadata = module.params.get('metadata') - - setup_rax_module(module, pyrax) - - cloud_monitoring(module, state, label, agent_id, named_ip_addresses, metadata) - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/cloud/rackspace/rax_mon_notification.py b/plugins/modules/cloud/rackspace/rax_mon_notification.py deleted file mode 100644 index fb645c3036..0000000000 --- a/plugins/modules/cloud/rackspace/rax_mon_notification.py +++ /dev/null @@ -1,175 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# Copyright: Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' ---- -module: rax_mon_notification -short_description: Create or delete a Rackspace Cloud Monitoring notification. -description: -- Create or delete a Rackspace Cloud Monitoring notification that specifies a - channel that can be used to communicate alarms, such as email, webhooks, or - PagerDuty. 
Rackspace monitoring module flow | rax_mon_entity -> rax_mon_check -> - *rax_mon_notification* -> rax_mon_notification_plan -> rax_mon_alarm -options: - state: - type: str - description: - - Ensure that the notification with this C(label) exists or does not exist. - choices: ['present', 'absent'] - default: present - label: - type: str - description: - - Defines a friendly name for this notification. String between 1 and 255 - characters long. - required: true - notification_type: - type: str - description: - - A supported notification type. - choices: ["webhook", "email", "pagerduty"] - required: true - details: - type: dict - description: - - Dictionary of key-value pairs used to initialize the notification. - Required keys and meanings vary with notification type. See - http://docs.rackspace.com/cm/api/v1.0/cm-devguide/content/ - service-notification-types-crud.html for details. - required: true -author: Ash Wilson (@smashwilson) -extends_documentation_fragment: -- community.general.rackspace.openstack - -''' - -EXAMPLES = ''' -- name: Monitoring notification example - gather_facts: False - hosts: local - connection: local - tasks: - - name: Email me when something goes wrong. - rax_mon_entity: - credentials: ~/.rax_pub - label: omg - type: email - details: - address: me@mailhost.com - register: the_notification -''' - -try: - import pyrax - HAS_PYRAX = True -except ImportError: - HAS_PYRAX = False - -from ansible.module_utils.basic import AnsibleModule -from ansible_collections.community.general.plugins.module_utils.rax import rax_argument_spec, rax_required_together, setup_rax_module - - -def notification(module, state, label, notification_type, details): - - if len(label) < 1 or len(label) > 255: - module.fail_json(msg='label must be between 1 and 255 characters long') - - changed = False - notification = None - - cm = pyrax.cloud_monitoring - if not cm: - module.fail_json(msg='Failed to instantiate client. 
This typically ' - 'indicates an invalid region or an incorrectly ' - 'capitalized region name.') - - existing = [] - for n in cm.list_notifications(): - if n.label == label: - existing.append(n) - - if existing: - notification = existing[0] - - if state == 'present': - should_update = False - should_delete = False - should_create = False - - if len(existing) > 1: - module.fail_json(msg='%s existing notifications are labelled %s.' % - (len(existing), label)) - - if notification: - should_delete = (notification_type != notification.type) - - should_update = (details != notification.details) - - if should_update and not should_delete: - notification.update(details=notification.details) - changed = True - - if should_delete: - notification.delete() - else: - should_create = True - - if should_create: - notification = cm.create_notification(notification_type, - label=label, details=details) - changed = True - else: - for n in existing: - n.delete() - changed = True - - if notification: - notification_dict = { - "id": notification.id, - "type": notification.type, - "label": notification.label, - "details": notification.details - } - module.exit_json(changed=changed, notification=notification_dict) - else: - module.exit_json(changed=changed) - - -def main(): - argument_spec = rax_argument_spec() - argument_spec.update( - dict( - state=dict(default='present', choices=['present', 'absent']), - label=dict(required=True), - notification_type=dict(required=True, choices=['webhook', 'email', 'pagerduty']), - details=dict(required=True, type='dict') - ) - ) - - module = AnsibleModule( - argument_spec=argument_spec, - required_together=rax_required_together() - ) - - if not HAS_PYRAX: - module.fail_json(msg='pyrax is required for this module') - - state = module.params.get('state') - - label = module.params.get('label') - notification_type = module.params.get('notification_type') - details = module.params.get('details') - - setup_rax_module(module, pyrax) - - 
notification(module, state, label, notification_type, details) - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/cloud/rackspace/rax_mon_notification_plan.py b/plugins/modules/cloud/rackspace/rax_mon_notification_plan.py deleted file mode 100644 index 25e506829f..0000000000 --- a/plugins/modules/cloud/rackspace/rax_mon_notification_plan.py +++ /dev/null @@ -1,184 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# Copyright: Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' ---- -module: rax_mon_notification_plan -short_description: Create or delete a Rackspace Cloud Monitoring notification - plan. -description: -- Create or delete a Rackspace Cloud Monitoring notification plan by - associating existing rax_mon_notifications with severity levels. Rackspace - monitoring module flow | rax_mon_entity -> rax_mon_check -> - rax_mon_notification -> *rax_mon_notification_plan* -> rax_mon_alarm -options: - state: - type: str - description: - - Ensure that the notification plan with this C(label) exists or does not - exist. - choices: ['present', 'absent'] - default: present - label: - type: str - description: - - Defines a friendly name for this notification plan. String between 1 and - 255 characters long. - required: true - critical_state: - type: list - elements: str - description: - - Notification list to use when the alarm state is CRITICAL. Must be an - array of valid rax_mon_notification ids. - warning_state: - type: list - elements: str - description: - - Notification list to use when the alarm state is WARNING. Must be an array - of valid rax_mon_notification ids. - ok_state: - type: list - elements: str - description: - - Notification list to use when the alarm state is OK. Must be an array of - valid rax_mon_notification ids. 
-author: Ash Wilson (@smashwilson) -extends_documentation_fragment: -- community.general.rackspace.openstack - -''' - -EXAMPLES = ''' -- name: Example notification plan - gather_facts: False - hosts: local - connection: local - tasks: - - name: Establish who gets called when. - community.general.rax_mon_notification_plan: - credentials: ~/.rax_pub - state: present - label: defcon1 - critical_state: - - "{{ everyone['notification']['id'] }}" - warning_state: - - "{{ opsfloor['notification']['id'] }}" - register: defcon1 -''' - -try: - import pyrax - HAS_PYRAX = True -except ImportError: - HAS_PYRAX = False - -from ansible.module_utils.basic import AnsibleModule -from ansible_collections.community.general.plugins.module_utils.rax import rax_argument_spec, rax_required_together, setup_rax_module - - -def notification_plan(module, state, label, critical_state, warning_state, ok_state): - - if len(label) < 1 or len(label) > 255: - module.fail_json(msg='label must be between 1 and 255 characters long') - - changed = False - notification_plan = None - - cm = pyrax.cloud_monitoring - if not cm: - module.fail_json(msg='Failed to instantiate client. This typically ' - 'indicates an invalid region or an incorrectly ' - 'capitalized region name.') - - existing = [] - for n in cm.list_notification_plans(): - if n.label == label: - existing.append(n) - - if existing: - notification_plan = existing[0] - - if state == 'present': - should_create = False - should_delete = False - - if len(existing) > 1: - module.fail_json(msg='%s notification plans are labelled %s.' 
% - (len(existing), label)) - - if notification_plan: - should_delete = (critical_state and critical_state != notification_plan.critical_state) or \ - (warning_state and warning_state != notification_plan.warning_state) or \ - (ok_state and ok_state != notification_plan.ok_state) - - if should_delete: - notification_plan.delete() - should_create = True - else: - should_create = True - - if should_create: - notification_plan = cm.create_notification_plan(label=label, - critical_state=critical_state, - warning_state=warning_state, - ok_state=ok_state) - changed = True - else: - for np in existing: - np.delete() - changed = True - - if notification_plan: - notification_plan_dict = { - "id": notification_plan.id, - "critical_state": notification_plan.critical_state, - "warning_state": notification_plan.warning_state, - "ok_state": notification_plan.ok_state, - "metadata": notification_plan.metadata - } - module.exit_json(changed=changed, notification_plan=notification_plan_dict) - else: - module.exit_json(changed=changed) - - -def main(): - argument_spec = rax_argument_spec() - argument_spec.update( - dict( - state=dict(default='present', choices=['present', 'absent']), - label=dict(required=True), - critical_state=dict(type='list', elements='str'), - warning_state=dict(type='list', elements='str'), - ok_state=dict(type='list', elements='str'), - ) - ) - - module = AnsibleModule( - argument_spec=argument_spec, - required_together=rax_required_together() - ) - - if not HAS_PYRAX: - module.fail_json(msg='pyrax is required for this module') - - state = module.params.get('state') - - label = module.params.get('label') - critical_state = module.params.get('critical_state') - warning_state = module.params.get('warning_state') - ok_state = module.params.get('ok_state') - - setup_rax_module(module, pyrax) - - notification_plan(module, state, label, critical_state, warning_state, ok_state) - - -if __name__ == '__main__': - main() diff --git 
a/plugins/modules/cloud/rackspace/rax_network.py b/plugins/modules/cloud/rackspace/rax_network.py deleted file mode 100644 index 146c08c8e1..0000000000 --- a/plugins/modules/cloud/rackspace/rax_network.py +++ /dev/null @@ -1,139 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# Copyright: Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' ---- -module: rax_network -short_description: create / delete an isolated network in Rackspace Public Cloud -description: - - creates / deletes a Rackspace Public Cloud isolated network. -options: - state: - type: str - description: - - Indicate desired state of the resource - choices: - - present - - absent - default: present - label: - type: str - description: - - Label (name) to give the network - required: yes - cidr: - type: str - description: - - cidr of the network being created -author: - - "Christopher H. Laco (@claco)" - - "Jesse Keating (@omgjlk)" -extends_documentation_fragment: -- community.general.rackspace.openstack - -''' - -EXAMPLES = ''' -- name: Build an Isolated Network - gather_facts: False - - tasks: - - name: Network create request - local_action: - module: rax_network - credentials: ~/.raxpub - label: my-net - cidr: 192.168.3.0/24 - state: present -''' - -try: - import pyrax - HAS_PYRAX = True -except ImportError: - HAS_PYRAX = False - -from ansible.module_utils.basic import AnsibleModule -from ansible_collections.community.general.plugins.module_utils.rax import rax_argument_spec, rax_required_together, setup_rax_module - - -def cloud_network(module, state, label, cidr): - changed = False - network = None - networks = [] - - if not pyrax.cloud_networks: - module.fail_json(msg='Failed to instantiate client. 
This ' - 'typically indicates an invalid region or an ' - 'incorrectly capitalized region name.') - - if state == 'present': - if not cidr: - module.fail_json(msg='missing required arguments: cidr') - - try: - network = pyrax.cloud_networks.find_network_by_label(label) - except pyrax.exceptions.NetworkNotFound: - try: - network = pyrax.cloud_networks.create(label, cidr=cidr) - changed = True - except Exception as e: - module.fail_json(msg='%s' % e.message) - except Exception as e: - module.fail_json(msg='%s' % e.message) - - elif state == 'absent': - try: - network = pyrax.cloud_networks.find_network_by_label(label) - network.delete() - changed = True - except pyrax.exceptions.NetworkNotFound: - pass - except Exception as e: - module.fail_json(msg='%s' % e.message) - - if network: - instance = dict(id=network.id, - label=network.label, - cidr=network.cidr) - networks.append(instance) - - module.exit_json(changed=changed, networks=networks) - - -def main(): - argument_spec = rax_argument_spec() - argument_spec.update( - dict( - state=dict(default='present', - choices=['present', 'absent']), - label=dict(required=True), - cidr=dict() - ) - ) - - module = AnsibleModule( - argument_spec=argument_spec, - required_together=rax_required_together(), - ) - - if not HAS_PYRAX: - module.fail_json(msg='pyrax is required for this module') - - state = module.params.get('state') - label = module.params.get('label') - cidr = module.params.get('cidr') - - setup_rax_module(module, pyrax) - - cloud_network(module, state, label, cidr) - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/cloud/rackspace/rax_queue.py b/plugins/modules/cloud/rackspace/rax_queue.py deleted file mode 100644 index 46c942c70d..0000000000 --- a/plugins/modules/cloud/rackspace/rax_queue.py +++ /dev/null @@ -1,140 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# Copyright: Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from 
__future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' ---- -module: rax_queue -short_description: create / delete a queue in Rackspace Public Cloud -description: - - creates / deletes a Rackspace Public Cloud queue. -options: - name: - type: str - description: - - Name to give the queue - state: - type: str - description: - - Indicate desired state of the resource - choices: - - present - - absent - default: present -author: - - "Christopher H. Laco (@claco)" - - "Matt Martz (@sivel)" -extends_documentation_fragment: -- community.general.rackspace -- community.general.rackspace.openstack - -''' - -EXAMPLES = ''' -- name: Build a Queue - gather_facts: False - hosts: local - connection: local - tasks: - - name: Queue create request - local_action: - module: rax_queue - credentials: ~/.raxpub - name: my-queue - region: DFW - state: present - register: my_queue -''' - -try: - import pyrax - HAS_PYRAX = True -except ImportError: - HAS_PYRAX = False - -from ansible.module_utils.basic import AnsibleModule -from ansible_collections.community.general.plugins.module_utils.rax import rax_argument_spec, rax_required_together, setup_rax_module - - -def cloud_queue(module, state, name): - for arg in (state, name): - if not arg: - module.fail_json(msg='%s is required for rax_queue' % arg) - - changed = False - queues = [] - instance = {} - - cq = pyrax.queues - if not cq: - module.fail_json(msg='Failed to instantiate client. 
This ' - 'typically indicates an invalid region or an ' - 'incorrectly capitalized region name.') - - for queue in cq.list(): - if name != queue.name: - continue - - queues.append(queue) - - if len(queues) > 1: - module.fail_json(msg='Multiple Queues were matched by name') - - if state == 'present': - if not queues: - try: - queue = cq.create(name) - changed = True - except Exception as e: - module.fail_json(msg='%s' % e.message) - else: - queue = queues[0] - - instance = dict(name=queue.name) - result = dict(changed=changed, queue=instance) - module.exit_json(**result) - - elif state == 'absent': - if queues: - queue = queues[0] - try: - queue.delete() - changed = True - except Exception as e: - module.fail_json(msg='%s' % e.message) - - module.exit_json(changed=changed, queue=instance) - - -def main(): - argument_spec = rax_argument_spec() - argument_spec.update( - dict( - name=dict(), - state=dict(default='present', choices=['present', 'absent']), - ) - ) - - module = AnsibleModule( - argument_spec=argument_spec, - required_together=rax_required_together() - ) - - if not HAS_PYRAX: - module.fail_json(msg='pyrax is required for this module') - - name = module.params.get('name') - state = module.params.get('state') - - setup_rax_module(module, pyrax) - - cloud_queue(module, state, name) - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/cloud/rackspace/rax_scaling_group.py b/plugins/modules/cloud/rackspace/rax_scaling_group.py deleted file mode 100644 index 4080e4c6a4..0000000000 --- a/plugins/modules/cloud/rackspace/rax_scaling_group.py +++ /dev/null @@ -1,441 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# Copyright: Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' ---- -module: rax_scaling_group -short_description: Manipulate Rackspace Cloud Autoscale Groups 
-description: - - Manipulate Rackspace Cloud Autoscale Groups -options: - config_drive: - description: - - Attach read-only configuration drive to server as label config-2 - type: bool - default: 'no' - cooldown: - type: int - description: - - The period of time, in seconds, that must pass before any scaling can - occur after the previous scaling. Must be an integer between 0 and - 86400 (24 hrs). - default: 300 - disk_config: - type: str - description: - - Disk partitioning strategy - - If not specified, it will fallback to C(auto). - choices: - - auto - - manual - files: - type: dict - description: - - 'Files to insert into the instance. Hash of C(remotepath: localpath)' - flavor: - type: str - description: - - flavor to use for the instance - required: true - image: - type: str - description: - - image to use for the instance. Can be an C(id), C(human_id) or C(name) - required: true - key_name: - type: str - description: - - key pair to use on the instance - loadbalancers: - type: list - elements: dict - description: - - List of load balancer C(id) and C(port) hashes - max_entities: - type: int - description: - - The maximum number of entities that are allowed in the scaling group. - Must be an integer between 0 and 1000. - required: true - meta: - type: dict - description: - - A hash of metadata to associate with the instance - min_entities: - type: int - description: - - The minimum number of entities that are allowed in the scaling group. - Must be an integer between 0 and 1000. - required: true - name: - type: str - description: - - Name to give the scaling group - required: true - networks: - type: list - elements: str - description: - - The network to attach to the instances. If specified, you must include - ALL networks including the public and private interfaces. Can be C(id) - or C(label). 
- default: - - public - - private - server_name: - type: str - description: - - The base name for servers created by Autoscale - required: true - state: - type: str - description: - - Indicate desired state of the resource - choices: - - present - - absent - default: present - user_data: - type: str - description: - - Data to be uploaded to the servers config drive. This option implies - I(config_drive). Can be a file path or a string - wait: - description: - - wait for the scaling group to finish provisioning the minimum amount of - servers - type: bool - default: 'no' - wait_timeout: - type: int - description: - - how long before wait gives up, in seconds - default: 300 -author: "Matt Martz (@sivel)" -extends_documentation_fragment: -- community.general.rackspace -- community.general.rackspace.openstack - -''' - -EXAMPLES = ''' ---- -- hosts: localhost - gather_facts: false - connection: local - tasks: - - community.general.rax_scaling_group: - credentials: ~/.raxpub - region: ORD - cooldown: 300 - flavor: performance1-1 - image: bb02b1a3-bc77-4d17-ab5b-421d89850fca - min_entities: 5 - max_entities: 10 - name: ASG Test - server_name: asgtest - loadbalancers: - - id: 228385 - port: 80 - register: asg -''' - -import base64 -import json -import os -import time - -try: - import pyrax - HAS_PYRAX = True -except ImportError: - HAS_PYRAX = False - -from ansible.module_utils.basic import AnsibleModule -from ansible_collections.community.general.plugins.module_utils.rax import (rax_argument_spec, rax_find_image, rax_find_network, - rax_required_together, rax_to_dict, setup_rax_module) -from ansible.module_utils.six import string_types - - -def rax_asg(module, cooldown=300, disk_config=None, files=None, flavor=None, - image=None, key_name=None, loadbalancers=None, meta=None, - min_entities=0, max_entities=0, name=None, networks=None, - server_name=None, state='present', user_data=None, - config_drive=False, wait=True, wait_timeout=300): - files = {} if files is None else 
files - loadbalancers = [] if loadbalancers is None else loadbalancers - meta = {} if meta is None else meta - networks = [] if networks is None else networks - - changed = False - - au = pyrax.autoscale - if not au: - module.fail_json(msg='Failed to instantiate clients. This ' - 'typically indicates an invalid region or an ' - 'incorrectly capitalized region name.') - - if user_data: - config_drive = True - - if user_data and os.path.isfile(user_data): - try: - f = open(user_data) - user_data = f.read() - f.close() - except Exception as e: - module.fail_json(msg='Failed to load %s' % user_data) - - if state == 'present': - # Normalize and ensure all metadata values are strings - if meta: - for k, v in meta.items(): - if isinstance(v, list): - meta[k] = ','.join(['%s' % i for i in v]) - elif isinstance(v, dict): - meta[k] = json.dumps(v) - elif not isinstance(v, string_types): - meta[k] = '%s' % v - - if image: - image = rax_find_image(module, pyrax, image) - - nics = [] - if networks: - for network in networks: - nics.extend(rax_find_network(module, pyrax, network)) - - for nic in nics: - # pyrax is currently returning net-id, but we need uuid - # this check makes this forward compatible for a time when - # pyrax uses uuid instead - if nic.get('net-id'): - nic.update(uuid=nic['net-id']) - del nic['net-id'] - - # Handle the file contents - personality = [] - if files: - for rpath in files.keys(): - lpath = os.path.expanduser(files[rpath]) - try: - f = open(lpath, 'r') - personality.append({ - 'path': rpath, - 'contents': f.read() - }) - f.close() - except Exception as e: - module.fail_json(msg='Failed to load %s' % lpath) - - lbs = [] - if loadbalancers: - for lb in loadbalancers: - try: - lb_id = int(lb.get('id')) - except (ValueError, TypeError): - module.fail_json(msg='Load balancer ID is not an integer: ' - '%s' % lb.get('id')) - try: - port = int(lb.get('port')) - except (ValueError, TypeError): - module.fail_json(msg='Load balancer port is not an ' - 
'integer: %s' % lb.get('port')) - if not lb_id or not port: - continue - lbs.append((lb_id, port)) - - try: - sg = au.find(name=name) - except pyrax.exceptions.NoUniqueMatch as e: - module.fail_json(msg='%s' % e.message) - except pyrax.exceptions.NotFound: - try: - sg = au.create(name, cooldown=cooldown, - min_entities=min_entities, - max_entities=max_entities, - launch_config_type='launch_server', - server_name=server_name, image=image, - flavor=flavor, disk_config=disk_config, - metadata=meta, personality=personality, - networks=nics, load_balancers=lbs, - key_name=key_name, config_drive=config_drive, - user_data=user_data) - changed = True - except Exception as e: - module.fail_json(msg='%s' % e.message) - - if not changed: - # Scaling Group Updates - group_args = {} - if cooldown != sg.cooldown: - group_args['cooldown'] = cooldown - - if min_entities != sg.min_entities: - group_args['min_entities'] = min_entities - - if max_entities != sg.max_entities: - group_args['max_entities'] = max_entities - - if group_args: - changed = True - sg.update(**group_args) - - # Launch Configuration Updates - lc = sg.get_launch_config() - lc_args = {} - if server_name != lc.get('name'): - lc_args['server_name'] = server_name - - if image != lc.get('image'): - lc_args['image'] = image - - if flavor != lc.get('flavor'): - lc_args['flavor'] = flavor - - disk_config = disk_config or 'AUTO' - if ((disk_config or lc.get('disk_config')) and - disk_config != lc.get('disk_config', 'AUTO')): - lc_args['disk_config'] = disk_config - - if (meta or lc.get('meta')) and meta != lc.get('metadata'): - lc_args['metadata'] = meta - - test_personality = [] - for p in personality: - test_personality.append({ - 'path': p['path'], - 'contents': base64.b64encode(p['contents']) - }) - if ((test_personality or lc.get('personality')) and - test_personality != lc.get('personality')): - lc_args['personality'] = personality - - if nics != lc.get('networks'): - lc_args['networks'] = nics - - if lbs != 
lc.get('load_balancers'): - # Work around for https://github.com/rackspace/pyrax/pull/393 - lc_args['load_balancers'] = sg.manager._resolve_lbs(lbs) - - if key_name != lc.get('key_name'): - lc_args['key_name'] = key_name - - if config_drive != lc.get('config_drive', False): - lc_args['config_drive'] = config_drive - - if (user_data and - base64.b64encode(user_data) != lc.get('user_data')): - lc_args['user_data'] = user_data - - if lc_args: - # Work around for https://github.com/rackspace/pyrax/pull/389 - if 'flavor' not in lc_args: - lc_args['flavor'] = lc.get('flavor') - changed = True - sg.update_launch_config(**lc_args) - - sg.get() - - if wait: - end_time = time.time() + wait_timeout - infinite = wait_timeout == 0 - while infinite or time.time() < end_time: - state = sg.get_state() - if state["pending_capacity"] == 0: - break - - time.sleep(5) - - module.exit_json(changed=changed, autoscale_group=rax_to_dict(sg)) - - else: - try: - sg = au.find(name=name) - sg.delete() - changed = True - except pyrax.exceptions.NotFound as e: - sg = {} - except Exception as e: - module.fail_json(msg='%s' % e.message) - - module.exit_json(changed=changed, autoscale_group=rax_to_dict(sg)) - - -def main(): - argument_spec = rax_argument_spec() - argument_spec.update( - dict( - config_drive=dict(default=False, type='bool'), - cooldown=dict(type='int', default=300), - disk_config=dict(choices=['auto', 'manual']), - files=dict(type='dict', default={}), - flavor=dict(required=True), - image=dict(required=True), - key_name=dict(), - loadbalancers=dict(type='list', elements='dict'), - meta=dict(type='dict', default={}), - min_entities=dict(type='int', required=True), - max_entities=dict(type='int', required=True), - name=dict(required=True), - networks=dict(type='list', elements='str', default=['public', 'private']), - server_name=dict(required=True), - state=dict(default='present', choices=['present', 'absent']), - user_data=dict(no_log=True), - wait=dict(default=False, type='bool'), - 
wait_timeout=dict(default=300, type='int'), - ) - ) - - module = AnsibleModule( - argument_spec=argument_spec, - required_together=rax_required_together(), - ) - - if not HAS_PYRAX: - module.fail_json(msg='pyrax is required for this module') - - config_drive = module.params.get('config_drive') - cooldown = module.params.get('cooldown') - disk_config = module.params.get('disk_config') - if disk_config: - disk_config = disk_config.upper() - files = module.params.get('files') - flavor = module.params.get('flavor') - image = module.params.get('image') - key_name = module.params.get('key_name') - loadbalancers = module.params.get('loadbalancers') - meta = module.params.get('meta') - min_entities = module.params.get('min_entities') - max_entities = module.params.get('max_entities') - name = module.params.get('name') - networks = module.params.get('networks') - server_name = module.params.get('server_name') - state = module.params.get('state') - user_data = module.params.get('user_data') - - if not 0 <= min_entities <= 1000 or not 0 <= max_entities <= 1000: - module.fail_json(msg='min_entities and max_entities must be an ' - 'integer between 0 and 1000') - - if not 0 <= cooldown <= 86400: - module.fail_json(msg='cooldown must be an integer between 0 and 86400') - - setup_rax_module(module, pyrax) - - rax_asg(module, cooldown=cooldown, disk_config=disk_config, - files=files, flavor=flavor, image=image, meta=meta, - key_name=key_name, loadbalancers=loadbalancers, - min_entities=min_entities, max_entities=max_entities, - name=name, networks=networks, server_name=server_name, - state=state, config_drive=config_drive, user_data=user_data) - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/cloud/rackspace/rax_scaling_policy.py b/plugins/modules/cloud/rackspace/rax_scaling_policy.py deleted file mode 100644 index be46bd62a6..0000000000 --- a/plugins/modules/cloud/rackspace/rax_scaling_policy.py +++ /dev/null @@ -1,287 +0,0 @@ -#!/usr/bin/python -# -*- coding: 
utf-8 -*- -# Copyright: Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' ---- -module: rax_scaling_policy -short_description: Manipulate Rackspace Cloud Autoscale Scaling Policy -description: - - Manipulate Rackspace Cloud Autoscale Scaling Policy -options: - at: - type: str - description: - - The UTC time when this policy will be executed. The time must be - formatted according to C(yyyy-MM-dd'T'HH:mm:ss.SSS) such as - C(2013-05-19T08:07:08Z) - change: - type: int - description: - - The change, either as a number of servers or as a percentage, to make - in the scaling group. If this is a percentage, you must set - I(is_percent) to C(true) also. - cron: - type: str - description: - - The time when the policy will be executed, as a cron entry. For - example, if this is parameter is set to C(1 0 * * *) - cooldown: - type: int - description: - - The period of time, in seconds, that must pass before any scaling can - occur after the previous scaling. Must be an integer between 0 and - 86400 (24 hrs). - default: 300 - desired_capacity: - type: int - description: - - The desired server capacity of the scaling the group; that is, how - many servers should be in the scaling group. - is_percent: - description: - - Whether the value in I(change) is a percent value - default: false - type: bool - name: - type: str - description: - - Name to give the policy - required: true - policy_type: - type: str - description: - - The type of policy that will be executed for the current release. 
- choices: - - webhook - - schedule - required: true - scaling_group: - type: str - description: - - Name of the scaling group that this policy will be added to - required: true - state: - type: str - description: - - Indicate desired state of the resource - choices: - - present - - absent - default: present -author: "Matt Martz (@sivel)" -extends_documentation_fragment: -- community.general.rackspace -- community.general.rackspace.openstack - -''' - -EXAMPLES = ''' ---- -- hosts: localhost - gather_facts: false - connection: local - tasks: - - community.general.rax_scaling_policy: - credentials: ~/.raxpub - region: ORD - at: '2013-05-19T08:07:08Z' - change: 25 - cooldown: 300 - is_percent: true - name: ASG Test Policy - at - policy_type: schedule - scaling_group: ASG Test - register: asps_at - - - community.general.rax_scaling_policy: - credentials: ~/.raxpub - region: ORD - cron: '1 0 * * *' - change: 25 - cooldown: 300 - is_percent: true - name: ASG Test Policy - cron - policy_type: schedule - scaling_group: ASG Test - register: asp_cron - - - community.general.rax_scaling_policy: - credentials: ~/.raxpub - region: ORD - cooldown: 300 - desired_capacity: 5 - name: ASG Test Policy - webhook - policy_type: webhook - scaling_group: ASG Test - register: asp_webhook -''' - -try: - import pyrax - HAS_PYRAX = True -except ImportError: - HAS_PYRAX = False - -from ansible.module_utils.basic import AnsibleModule -from ansible_collections.community.general.plugins.module_utils.rax import (UUID, rax_argument_spec, rax_required_together, rax_to_dict, - setup_rax_module) - - -def rax_asp(module, at=None, change=0, cron=None, cooldown=300, - desired_capacity=0, is_percent=False, name=None, - policy_type=None, scaling_group=None, state='present'): - changed = False - - au = pyrax.autoscale - if not au: - module.fail_json(msg='Failed to instantiate client. 
This ' - 'typically indicates an invalid region or an ' - 'incorrectly capitalized region name.') - - try: - UUID(scaling_group) - except ValueError: - try: - sg = au.find(name=scaling_group) - except Exception as e: - module.fail_json(msg='%s' % e.message) - else: - try: - sg = au.get(scaling_group) - except Exception as e: - module.fail_json(msg='%s' % e.message) - - if state == 'present': - policies = filter(lambda p: name == p.name, sg.list_policies()) - if len(policies) > 1: - module.fail_json(msg='No unique policy match found by name') - if at: - args = dict(at=at) - elif cron: - args = dict(cron=cron) - else: - args = None - - if not policies: - try: - policy = sg.add_policy(name, policy_type=policy_type, - cooldown=cooldown, change=change, - is_percent=is_percent, - desired_capacity=desired_capacity, - args=args) - changed = True - except Exception as e: - module.fail_json(msg='%s' % e.message) - - else: - policy = policies[0] - kwargs = {} - if policy_type != policy.type: - kwargs['policy_type'] = policy_type - - if cooldown != policy.cooldown: - kwargs['cooldown'] = cooldown - - if hasattr(policy, 'change') and change != policy.change: - kwargs['change'] = change - - if hasattr(policy, 'changePercent') and is_percent is False: - kwargs['change'] = change - kwargs['is_percent'] = False - elif hasattr(policy, 'change') and is_percent is True: - kwargs['change'] = change - kwargs['is_percent'] = True - - if hasattr(policy, 'desiredCapacity') and change: - kwargs['change'] = change - elif ((hasattr(policy, 'change') or - hasattr(policy, 'changePercent')) and desired_capacity): - kwargs['desired_capacity'] = desired_capacity - - if hasattr(policy, 'args') and args != policy.args: - kwargs['args'] = args - - if kwargs: - policy.update(**kwargs) - changed = True - - policy.get() - - module.exit_json(changed=changed, autoscale_policy=rax_to_dict(policy)) - - else: - try: - policies = filter(lambda p: name == p.name, sg.list_policies()) - if len(policies) > 1: - 
module.fail_json(msg='No unique policy match found by name') - elif not policies: - policy = {} - else: - policy.delete() - changed = True - except Exception as e: - module.fail_json(msg='%s' % e.message) - - module.exit_json(changed=changed, autoscale_policy=rax_to_dict(policy)) - - -def main(): - argument_spec = rax_argument_spec() - argument_spec.update( - dict( - at=dict(), - change=dict(type='int'), - cron=dict(), - cooldown=dict(type='int', default=300), - desired_capacity=dict(type='int'), - is_percent=dict(type='bool', default=False), - name=dict(required=True), - policy_type=dict(required=True, choices=['webhook', 'schedule']), - scaling_group=dict(required=True), - state=dict(default='present', choices=['present', 'absent']), - ) - ) - - module = AnsibleModule( - argument_spec=argument_spec, - required_together=rax_required_together(), - mutually_exclusive=[ - ['cron', 'at'], - ['change', 'desired_capacity'], - ] - ) - - if not HAS_PYRAX: - module.fail_json(msg='pyrax is required for this module') - - at = module.params.get('at') - change = module.params.get('change') - cron = module.params.get('cron') - cooldown = module.params.get('cooldown') - desired_capacity = module.params.get('desired_capacity') - is_percent = module.params.get('is_percent') - name = module.params.get('name') - policy_type = module.params.get('policy_type') - scaling_group = module.params.get('scaling_group') - state = module.params.get('state') - - if (at or cron) and policy_type == 'webhook': - module.fail_json(msg='policy_type=schedule is required for a time ' - 'based policy') - - setup_rax_module(module, pyrax) - - rax_asp(module, at=at, change=change, cron=cron, cooldown=cooldown, - desired_capacity=desired_capacity, is_percent=is_percent, - name=name, policy_type=policy_type, scaling_group=scaling_group, - state=state) - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/cloud/scaleway/scaleway_image_info.py 
b/plugins/modules/cloud/scaleway/scaleway_image_info.py deleted file mode 100644 index 98aa453f3c..0000000000 --- a/plugins/modules/cloud/scaleway/scaleway_image_info.py +++ /dev/null @@ -1,127 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# -# (c) 2018, Yanis Guenane -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - -DOCUMENTATION = r''' ---- -module: scaleway_image_info -short_description: Gather information about the Scaleway images available. -description: - - Gather information about the Scaleway images available. -author: - - "Yanis Guenane (@Spredzy)" - - "Remy Leone (@remyleone)" -extends_documentation_fragment: -- community.general.scaleway - -options: - region: - type: str - description: - - Scaleway compute zone - required: true - choices: - - ams1 - - EMEA-NL-EVS - - par1 - - EMEA-FR-PAR1 - - par2 - - EMEA-FR-PAR2 - - waw1 - - EMEA-PL-WAW1 -''' - -EXAMPLES = r''' -- name: Gather Scaleway images information - community.general.scaleway_image_info: - region: par1 - register: result - -- ansible.builtin.debug: - msg: "{{ result.scaleway_image_info }}" -''' - -RETURN = r''' ---- -scaleway_image_info: - description: - - Response from Scaleway API. - - "For more details please refer to: U(https://developers.scaleway.com/en/products/instance/api/)." 
- returned: success - type: list - elements: dict - sample: - "scaleway_image_info": [ - { - "arch": "x86_64", - "creation_date": "2018-07-17T16:18:49.276456+00:00", - "default_bootscript": { - "architecture": "x86_64", - "bootcmdargs": "LINUX_COMMON scaleway boot=local nbd.max_part=16", - "default": false, - "dtb": "", - "id": "15fbd2f7-a0f9-412b-8502-6a44da8d98b8", - "initrd": "http://169.254.42.24/initrd/initrd-Linux-x86_64-v3.14.5.gz", - "kernel": "http://169.254.42.24/kernel/x86_64-mainline-lts-4.9-4.9.93-rev1/vmlinuz-4.9.93", - "organization": "11111111-1111-4111-8111-111111111111", - "public": true, - "title": "x86_64 mainline 4.9.93 rev1" - }, - "extra_volumes": [], - "from_server": null, - "id": "00ae4a88-3252-4eda-9feb-5f6b56bf5ef0", - "modification_date": "2018-07-17T16:42:06.319315+00:00", - "name": "Debian Stretch", - "organization": "51b656e3-4865-41e8-adbc-0c45bdd780db", - "public": true, - "root_volume": { - "id": "da32dfbb-c5ff-476d-ae2d-c297dd09b7dd", - "name": "snapshot-2a7229dc-d431-4dc5-b66e-95db08b773af-2018-07-17_16:18", - "size": 25000000000, - "volume_type": "l_ssd" - }, - "state": "available" - } - ] -''' - -from ansible.module_utils.basic import AnsibleModule -from ansible_collections.community.general.plugins.module_utils.scaleway import ( - Scaleway, ScalewayException, scaleway_argument_spec, SCALEWAY_LOCATION) - - -class ScalewayImageInfo(Scaleway): - - def __init__(self, module): - super(ScalewayImageInfo, self).__init__(module) - self.name = 'images' - - region = module.params["region"] - self.module.params['api_url'] = SCALEWAY_LOCATION[region]["api_endpoint"] - - -def main(): - argument_spec = scaleway_argument_spec() - argument_spec.update(dict( - region=dict(required=True, choices=list(SCALEWAY_LOCATION.keys())), - )) - module = AnsibleModule( - argument_spec=argument_spec, - supports_check_mode=True, - ) - - try: - module.exit_json( - scaleway_image_info=ScalewayImageInfo(module).get_resources() - ) - except ScalewayException as 
exc: - module.fail_json(msg=exc.message) - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/cloud/scaleway/scaleway_server_info.py b/plugins/modules/cloud/scaleway/scaleway_server_info.py deleted file mode 100644 index 2b9d91b47c..0000000000 --- a/plugins/modules/cloud/scaleway/scaleway_server_info.py +++ /dev/null @@ -1,198 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# -# (c) 2018, Yanis Guenane -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - -DOCUMENTATION = r''' ---- -module: scaleway_server_info -short_description: Gather information about the Scaleway servers available. -description: - - Gather information about the Scaleway servers available. -author: - - "Yanis Guenane (@Spredzy)" - - "Remy Leone (@remyleone)" -extends_documentation_fragment: -- community.general.scaleway - -options: - region: - type: str - description: - - Scaleway region to use (for example C(par1)). - required: true - choices: - - ams1 - - EMEA-NL-EVS - - par1 - - EMEA-FR-PAR1 - - par2 - - EMEA-FR-PAR2 - - waw1 - - EMEA-PL-WAW1 -''' - -EXAMPLES = r''' -- name: Gather Scaleway servers information - community.general.scaleway_server_info: - region: par1 - register: result - -- ansible.builtin.debug: - msg: "{{ result.scaleway_server_info }}" -''' - -RETURN = r''' ---- -scaleway_server_info: - description: - - Response from Scaleway API. - - "For more details please refer to: U(https://developers.scaleway.com/en/products/instance/api/)." 
- returned: success - type: list - elements: dict - sample: - "scaleway_server_info": [ - { - "arch": "x86_64", - "boot_type": "local", - "bootscript": { - "architecture": "x86_64", - "bootcmdargs": "LINUX_COMMON scaleway boot=local nbd.max_part=16", - "default": true, - "dtb": "", - "id": "b1e68c26-a19c-4eac-9222-498b22bd7ad9", - "initrd": "http://169.254.42.24/initrd/initrd-Linux-x86_64-v3.14.5.gz", - "kernel": "http://169.254.42.24/kernel/x86_64-mainline-lts-4.4-4.4.127-rev1/vmlinuz-4.4.127", - "organization": "11111111-1111-4111-8111-111111111111", - "public": true, - "title": "x86_64 mainline 4.4.127 rev1" - }, - "commercial_type": "START1-XS", - "creation_date": "2018-08-14T21:36:56.271545+00:00", - "dynamic_ip_required": false, - "enable_ipv6": false, - "extra_networks": [], - "hostname": "scw-e0d256", - "id": "12f19bc7-108c-4517-954c-e6b3d0311363", - "image": { - "arch": "x86_64", - "creation_date": "2018-04-26T12:42:21.619844+00:00", - "default_bootscript": { - "architecture": "x86_64", - "bootcmdargs": "LINUX_COMMON scaleway boot=local nbd.max_part=16", - "default": true, - "dtb": "", - "id": "b1e68c26-a19c-4eac-9222-498b22bd7ad9", - "initrd": "http://169.254.42.24/initrd/initrd-Linux-x86_64-v3.14.5.gz", - "kernel": "http://169.254.42.24/kernel/x86_64-mainline-lts-4.4-4.4.127-rev1/vmlinuz-4.4.127", - "organization": "11111111-1111-4111-8111-111111111111", - "public": true, - "title": "x86_64 mainline 4.4.127 rev1" - }, - "extra_volumes": [], - "from_server": null, - "id": "67375eb1-f14d-4f02-bb42-6119cecbde51", - "modification_date": "2018-04-26T12:49:07.573004+00:00", - "name": "Ubuntu Xenial", - "organization": "51b656e3-4865-41e8-adbc-0c45bdd780db", - "public": true, - "root_volume": { - "id": "020b8d61-3867-4a0e-84a4-445c5393e05d", - "name": "snapshot-87fc282d-f252-4262-adad-86979d9074cf-2018-04-26_12:42", - "size": 25000000000, - "volume_type": "l_ssd" - }, - "state": "available" - }, - "ipv6": null, - "location": { - "cluster_id": "5", - 
"hypervisor_id": "412", - "node_id": "2", - "platform_id": "13", - "zone_id": "par1" - }, - "maintenances": [], - "modification_date": "2018-08-14T21:37:28.630882+00:00", - "name": "scw-e0d256", - "organization": "3f709602-5e6c-4619-b80c-e841c89734af", - "private_ip": "10.14.222.131", - "protected": false, - "public_ip": { - "address": "163.172.170.197", - "dynamic": false, - "id": "ea081794-a581-4495-8451-386ddaf0a451" - }, - "security_group": { - "id": "a37379d2-d8b0-4668-9cfb-1233fc436f7e", - "name": "Default security group" - }, - "state": "running", - "state_detail": "booted", - "tags": [], - "volumes": { - "0": { - "creation_date": "2018-08-14T21:36:56.271545+00:00", - "export_uri": "device://dev/vda", - "id": "68386fae-4f55-4fbf-aabb-953036a85872", - "modification_date": "2018-08-14T21:36:56.271545+00:00", - "name": "snapshot-87fc282d-f252-4262-adad-86979d9074cf-2018-04-26_12:42", - "organization": "3f709602-5e6c-4619-b80c-e841c89734af", - "server": { - "id": "12f19bc7-108c-4517-954c-e6b3d0311363", - "name": "scw-e0d256" - }, - "size": 25000000000, - "state": "available", - "volume_type": "l_ssd" - } - } - } - ] -''' - -from ansible.module_utils.basic import AnsibleModule -from ansible_collections.community.general.plugins.module_utils.scaleway import ( - Scaleway, - ScalewayException, - scaleway_argument_spec, - SCALEWAY_LOCATION, -) - - -class ScalewayServerInfo(Scaleway): - - def __init__(self, module): - super(ScalewayServerInfo, self).__init__(module) - self.name = 'servers' - - region = module.params["region"] - self.module.params['api_url'] = SCALEWAY_LOCATION[region]["api_endpoint"] - - -def main(): - argument_spec = scaleway_argument_spec() - argument_spec.update(dict( - region=dict(required=True, choices=list(SCALEWAY_LOCATION.keys())), - )) - - module = AnsibleModule( - argument_spec=argument_spec, - supports_check_mode=True, - ) - - try: - module.exit_json( - scaleway_server_info=ScalewayServerInfo(module).get_resources() - ) - except 
ScalewayException as exc: - module.fail_json(msg=exc.message) - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/cloud/webfaction/webfaction_app.py b/plugins/modules/cloud/webfaction/webfaction_app.py deleted file mode 100644 index cd779b035a..0000000000 --- a/plugins/modules/cloud/webfaction/webfaction_app.py +++ /dev/null @@ -1,197 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Copyright: (c) 2015, Quentin Stafford-Fraser, with contributions gratefully acknowledged from: -# * Andy Baker -# * Federico Tarantini -# -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -# Create a Webfaction application using Ansible and the Webfaction API -# -# Valid application types can be found by looking here: -# https://docs.webfaction.com/xmlrpc-api/apps.html#application-types - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' ---- -module: webfaction_app -short_description: Add or remove applications on a Webfaction host -description: - - Add or remove applications on a Webfaction host. Further documentation at U(https://github.com/quentinsf/ansible-webfaction). -author: Quentin Stafford-Fraser (@quentinsf) -notes: - - > - You can run playbooks that use this on a local machine, or on a Webfaction host, or elsewhere, since the scripts use the remote webfaction API. - The location is not important. However, running them on multiple hosts I(simultaneously) is best avoided. If you don't specify I(localhost) as - your host, you may want to add C(serial: 1) to the plays. - - See `the webfaction API `_ for more info. - -options: - name: - description: - - The name of the application - required: true - type: str - - state: - description: - - Whether the application should exist - choices: ['present', 'absent'] - default: "present" - type: str - - type: - description: - - The type of application to create. 
See the Webfaction docs at U(https://docs.webfaction.com/xmlrpc-api/apps.html) for a list. - required: true - type: str - - autostart: - description: - - Whether the app should restart with an C(autostart.cgi) script - type: bool - default: 'no' - - extra_info: - description: - - Any extra parameters required by the app - default: '' - type: str - - port_open: - description: - - IF the port should be opened - type: bool - default: 'no' - - login_name: - description: - - The webfaction account to use - required: true - type: str - - login_password: - description: - - The webfaction password to use - required: true - type: str - - machine: - description: - - The machine name to use (optional for accounts with only one machine) - type: str - -''' - -EXAMPLES = ''' - - name: Create a test app - community.general.webfaction_app: - name: "my_wsgi_app1" - state: present - type: mod_wsgi35-python27 - login_name: "{{webfaction_user}}" - login_password: "{{webfaction_passwd}}" - machine: "{{webfaction_machine}}" -''' - -from ansible.module_utils.basic import AnsibleModule -from ansible.module_utils.six.moves import xmlrpc_client - - -webfaction = xmlrpc_client.ServerProxy('https://api.webfaction.com/') - - -def main(): - - module = AnsibleModule( - argument_spec=dict( - name=dict(required=True), - state=dict(choices=['present', 'absent'], default='present'), - type=dict(required=True), - autostart=dict(type='bool', default=False), - extra_info=dict(default=""), - port_open=dict(type='bool', default=False), - login_name=dict(required=True), - login_password=dict(required=True, no_log=True), - machine=dict(), - ), - supports_check_mode=True - ) - app_name = module.params['name'] - app_type = module.params['type'] - app_state = module.params['state'] - - if module.params['machine']: - session_id, account = webfaction.login( - module.params['login_name'], - module.params['login_password'], - module.params['machine'] - ) - else: - session_id, account = webfaction.login( - 
module.params['login_name'], - module.params['login_password'] - ) - - app_list = webfaction.list_apps(session_id) - app_map = dict([(i['name'], i) for i in app_list]) - existing_app = app_map.get(app_name) - - result = {} - - # Here's where the real stuff happens - - if app_state == 'present': - - # Does an app with this name already exist? - if existing_app: - if existing_app['type'] != app_type: - module.fail_json(msg="App already exists with different type. Please fix by hand.") - - # If it exists with the right type, we don't change it - # Should check other parameters. - module.exit_json( - changed=False, - result=existing_app, - ) - - if not module.check_mode: - # If this isn't a dry run, create the app - result.update( - webfaction.create_app( - session_id, app_name, app_type, - module.boolean(module.params['autostart']), - module.params['extra_info'], - module.boolean(module.params['port_open']) - ) - ) - - elif app_state == 'absent': - - # If the app's already not there, nothing changed. 
- if not existing_app: - module.exit_json( - changed=False, - ) - - if not module.check_mode: - # If this isn't a dry run, delete the app - result.update( - webfaction.delete_app(session_id, app_name) - ) - - else: - module.fail_json(msg="Unknown state specified: {0}".format(app_state)) - - module.exit_json( - changed=True, - result=result - ) - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/cloud/webfaction/webfaction_db.py b/plugins/modules/cloud/webfaction/webfaction_db.py deleted file mode 100644 index 8708c7743b..0000000000 --- a/plugins/modules/cloud/webfaction/webfaction_db.py +++ /dev/null @@ -1,195 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Copyright: (c) 2015, Quentin Stafford-Fraser, with contributions gratefully acknowledged from: -# * Andy Baker -# * Federico Tarantini -# -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -# Create a webfaction database using Ansible and the Webfaction API - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' ---- -module: webfaction_db -short_description: Add or remove a database on Webfaction -description: - - Add or remove a database on a Webfaction host. Further documentation at https://github.com/quentinsf/ansible-webfaction. -author: Quentin Stafford-Fraser (@quentinsf) -notes: - - > - You can run playbooks that use this on a local machine, or on a Webfaction host, or elsewhere, since the scripts use the remote webfaction API. - The location is not important. However, running them on multiple hosts I(simultaneously) is best avoided. If you don't specify I(localhost) as - your host, you may want to add C(serial: 1) to the plays. - - See `the webfaction API `_ for more info. 
-options: - - name: - description: - - The name of the database - required: true - type: str - - state: - description: - - Whether the database should exist - choices: ['present', 'absent'] - default: "present" - type: str - - type: - description: - - The type of database to create. - required: true - choices: ['mysql', 'postgresql'] - type: str - - password: - description: - - The password for the new database user. - type: str - - login_name: - description: - - The webfaction account to use - required: true - type: str - - login_password: - description: - - The webfaction password to use - required: true - type: str - - machine: - description: - - The machine name to use (optional for accounts with only one machine) - type: str -''' - -EXAMPLES = ''' - # This will also create a default DB user with the same - # name as the database, and the specified password. - - - name: Create a database - community.general.webfaction_db: - name: "{{webfaction_user}}_db1" - password: mytestsql - type: mysql - login_name: "{{webfaction_user}}" - login_password: "{{webfaction_passwd}}" - machine: "{{webfaction_machine}}" - - # Note that, for symmetry's sake, deleting a database using - # 'state: absent' will also delete the matching user. - -''' - -from ansible.module_utils.basic import AnsibleModule -from ansible.module_utils.six.moves import xmlrpc_client - - -webfaction = xmlrpc_client.ServerProxy('https://api.webfaction.com/') - - -def main(): - - module = AnsibleModule( - argument_spec=dict( - name=dict(required=True), - state=dict(choices=['present', 'absent'], default='present'), - # You can specify an IP address or hostname. 
- type=dict(required=True, choices=['mysql', 'postgresql']), - password=dict(no_log=True), - login_name=dict(required=True), - login_password=dict(required=True, no_log=True), - machine=dict(), - ), - supports_check_mode=True - ) - db_name = module.params['name'] - db_state = module.params['state'] - db_type = module.params['type'] - db_passwd = module.params['password'] - - if module.params['machine']: - session_id, account = webfaction.login( - module.params['login_name'], - module.params['login_password'], - module.params['machine'] - ) - else: - session_id, account = webfaction.login( - module.params['login_name'], - module.params['login_password'] - ) - - db_list = webfaction.list_dbs(session_id) - db_map = dict([(i['name'], i) for i in db_list]) - existing_db = db_map.get(db_name) - - user_list = webfaction.list_db_users(session_id) - user_map = dict([(i['username'], i) for i in user_list]) - existing_user = user_map.get(db_name) - - result = {} - - # Here's where the real stuff happens - - if db_state == 'present': - - # Does a database with this name already exist? - if existing_db: - # Yes, but of a different type - fail - if existing_db['db_type'] != db_type: - module.fail_json(msg="Database already exists but is a different type. Please fix by hand.") - - # If it exists with the right type, we don't change anything. - module.exit_json( - changed=False, - ) - - if not module.check_mode: - # If this isn't a dry run, create the db - # and default user. - result.update( - webfaction.create_db( - session_id, db_name, db_type, db_passwd - ) - ) - - elif db_state == 'absent': - - # If this isn't a dry run... 
- if not module.check_mode: - - if not (existing_db or existing_user): - module.exit_json(changed=False,) - - if existing_db: - # Delete the db if it exists - result.update( - webfaction.delete_db(session_id, db_name, db_type) - ) - - if existing_user: - # Delete the default db user if it exists - result.update( - webfaction.delete_db_user(session_id, db_name, db_type) - ) - - else: - module.fail_json(msg="Unknown state specified: {0}".format(db_state)) - - module.exit_json( - changed=True, - result=result - ) - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/cloud/webfaction/webfaction_domain.py b/plugins/modules/cloud/webfaction/webfaction_domain.py deleted file mode 100644 index f9c3b7db7a..0000000000 --- a/plugins/modules/cloud/webfaction/webfaction_domain.py +++ /dev/null @@ -1,168 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Copyright: (c) 2015, Quentin Stafford-Fraser -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -# Create Webfaction domains and subdomains using Ansible and the Webfaction API - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' ---- -module: webfaction_domain -short_description: Add or remove domains and subdomains on Webfaction -description: - - Add or remove domains or subdomains on a Webfaction host. Further documentation at https://github.com/quentinsf/ansible-webfaction. -author: Quentin Stafford-Fraser (@quentinsf) -notes: - - If you are I(deleting) domains by using C(state=absent), then note that if you specify subdomains, just those particular subdomains will be deleted. - If you don't specify subdomains, the domain will be deleted. - - > - You can run playbooks that use this on a local machine, or on a Webfaction host, or elsewhere, since the scripts use the remote webfaction API. - The location is not important. However, running them on multiple hosts I(simultaneously) is best avoided. 
If you don't specify I(localhost) as - your host, you may want to add C(serial: 1) to the plays. - - See `the webfaction API `_ for more info. - -options: - - name: - description: - - The name of the domain - required: true - type: str - - state: - description: - - Whether the domain should exist - choices: ['present', 'absent'] - default: "present" - type: str - - subdomains: - description: - - Any subdomains to create. - default: [] - type: list - elements: str - - login_name: - description: - - The webfaction account to use - required: true - type: str - - login_password: - description: - - The webfaction password to use - required: true - type: str -''' - -EXAMPLES = ''' - - name: Create a test domain - community.general.webfaction_domain: - name: mydomain.com - state: present - subdomains: - - www - - blog - login_name: "{{webfaction_user}}" - login_password: "{{webfaction_passwd}}" - - - name: Delete test domain and any subdomains - community.general.webfaction_domain: - name: mydomain.com - state: absent - login_name: "{{webfaction_user}}" - login_password: "{{webfaction_passwd}}" - -''' - -from ansible.module_utils.basic import AnsibleModule -from ansible.module_utils.six.moves import xmlrpc_client - - -webfaction = xmlrpc_client.ServerProxy('https://api.webfaction.com/') - - -def main(): - - module = AnsibleModule( - argument_spec=dict( - name=dict(required=True), - state=dict(choices=['present', 'absent'], default='present'), - subdomains=dict(default=[], type='list', elements='str'), - login_name=dict(required=True), - login_password=dict(required=True, no_log=True), - ), - supports_check_mode=True - ) - domain_name = module.params['name'] - domain_state = module.params['state'] - domain_subdomains = module.params['subdomains'] - - session_id, account = webfaction.login( - module.params['login_name'], - module.params['login_password'] - ) - - domain_list = webfaction.list_domains(session_id) - domain_map = dict([(i['domain'], i) for i in domain_list]) - 
existing_domain = domain_map.get(domain_name) - - result = {} - - # Here's where the real stuff happens - - if domain_state == 'present': - - # Does an app with this name already exist? - if existing_domain: - - if set(existing_domain['subdomains']) >= set(domain_subdomains): - # If it exists with the right subdomains, we don't change anything. - module.exit_json( - changed=False, - ) - - positional_args = [session_id, domain_name] + domain_subdomains - - if not module.check_mode: - # If this isn't a dry run, create the app - # print positional_args - result.update( - webfaction.create_domain( - *positional_args - ) - ) - - elif domain_state == 'absent': - - # If the app's already not there, nothing changed. - if not existing_domain: - module.exit_json( - changed=False, - ) - - positional_args = [session_id, domain_name] + domain_subdomains - - if not module.check_mode: - # If this isn't a dry run, delete the app - result.update( - webfaction.delete_domain(*positional_args) - ) - - else: - module.fail_json(msg="Unknown state specified: {0}".format(domain_state)) - - module.exit_json( - changed=True, - result=result - ) - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/cloud/webfaction/webfaction_mailbox.py b/plugins/modules/cloud/webfaction/webfaction_mailbox.py deleted file mode 100644 index 37755763a2..0000000000 --- a/plugins/modules/cloud/webfaction/webfaction_mailbox.py +++ /dev/null @@ -1,135 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Copyright: (c) 2015, Quentin Stafford-Fraser and Andy Baker -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -# Create webfaction mailbox using Ansible and the Webfaction API - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' ---- -module: webfaction_mailbox -short_description: Add or remove mailboxes on Webfaction -description: - - Add or remove mailboxes on a Webfaction account. 
Further documentation at https://github.com/quentinsf/ansible-webfaction. -author: Quentin Stafford-Fraser (@quentinsf) -notes: - - > - You can run playbooks that use this on a local machine, or on a Webfaction host, or elsewhere, since the scripts use the remote webfaction API. - The location is not important. However, running them on multiple hosts I(simultaneously) is best avoided. If you don't specify I(localhost) as - your host, you may want to add C(serial: 1) to the plays. - - See `the webfaction API `_ for more info. -options: - - mailbox_name: - description: - - The name of the mailbox - required: true - type: str - - mailbox_password: - description: - - The password for the mailbox - required: true - type: str - - state: - description: - - Whether the mailbox should exist - choices: ['present', 'absent'] - default: "present" - type: str - - login_name: - description: - - The webfaction account to use - required: true - type: str - - login_password: - description: - - The webfaction password to use - required: true - type: str -''' - -EXAMPLES = ''' - - name: Create a mailbox - community.general.webfaction_mailbox: - mailbox_name="mybox" - mailbox_password="myboxpw" - state=present - login_name={{webfaction_user}} - login_password={{webfaction_passwd}} -''' - - -from ansible.module_utils.basic import AnsibleModule -from ansible.module_utils.six.moves import xmlrpc_client - - -webfaction = xmlrpc_client.ServerProxy('https://api.webfaction.com/') - - -def main(): - - module = AnsibleModule( - argument_spec=dict( - mailbox_name=dict(required=True), - mailbox_password=dict(required=True, no_log=True), - state=dict(required=False, choices=['present', 'absent'], default='present'), - login_name=dict(required=True), - login_password=dict(required=True, no_log=True), - ), - supports_check_mode=True - ) - - mailbox_name = module.params['mailbox_name'] - site_state = module.params['state'] - - session_id, account = webfaction.login( - module.params['login_name'], - 
module.params['login_password'] - ) - - mailbox_list = [x['mailbox'] for x in webfaction.list_mailboxes(session_id)] - existing_mailbox = mailbox_name in mailbox_list - - result = {} - - # Here's where the real stuff happens - - if site_state == 'present': - - # Does a mailbox with this name already exist? - if existing_mailbox: - module.exit_json(changed=False,) - - positional_args = [session_id, mailbox_name] - - if not module.check_mode: - # If this isn't a dry run, create the mailbox - result.update(webfaction.create_mailbox(*positional_args)) - - elif site_state == 'absent': - - # If the mailbox is already not there, nothing changed. - if not existing_mailbox: - module.exit_json(changed=False) - - if not module.check_mode: - # If this isn't a dry run, delete the mailbox - result.update(webfaction.delete_mailbox(session_id, mailbox_name)) - - else: - module.fail_json(msg="Unknown state specified: {0}".format(site_state)) - - module.exit_json(changed=True, result=result) - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/cloud/webfaction/webfaction_site.py b/plugins/modules/cloud/webfaction/webfaction_site.py deleted file mode 100644 index 87faade3e2..0000000000 --- a/plugins/modules/cloud/webfaction/webfaction_site.py +++ /dev/null @@ -1,207 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Copyright: (c) 2015, Quentin Stafford-Fraser -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -# Create Webfaction website using Ansible and the Webfaction API - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' ---- -module: webfaction_site -short_description: Add or remove a website on a Webfaction host -description: - - Add or remove a website on a Webfaction host. Further documentation at https://github.com/quentinsf/ansible-webfaction. 
-author: Quentin Stafford-Fraser (@quentinsf) -notes: - - Sadly, you I(do) need to know your webfaction hostname for the C(host) parameter. But at least, unlike the API, you don't need to know the IP - address. You can use a DNS name. - - If a site of the same name exists in the account but on a different host, the operation will exit. - - > - You can run playbooks that use this on a local machine, or on a Webfaction host, or elsewhere, since the scripts use the remote webfaction API. - The location is not important. However, running them on multiple hosts I(simultaneously) is best avoided. If you don't specify I(localhost) as - your host, you may want to add C(serial: 1) to the plays. - - See `the webfaction API `_ for more info. - -options: - - name: - description: - - The name of the website - required: true - type: str - - state: - description: - - Whether the website should exist - choices: ['present', 'absent'] - default: "present" - type: str - - host: - description: - - The webfaction host on which the site should be created. - required: true - type: str - - https: - description: - - Whether or not to use HTTPS - type: bool - default: 'no' - - site_apps: - description: - - A mapping of URLs to apps - default: [] - type: list - elements: list - - subdomains: - description: - - A list of subdomains associated with this site. 
- default: [] - type: list - elements: str - - login_name: - description: - - The webfaction account to use - required: true - type: str - - login_password: - description: - - The webfaction password to use - required: true - type: str -''' - -EXAMPLES = ''' - - name: Create website - community.general.webfaction_site: - name: testsite1 - state: present - host: myhost.webfaction.com - subdomains: - - 'testsite1.my_domain.org' - site_apps: - - ['testapp1', '/'] - https: no - login_name: "{{webfaction_user}}" - login_password: "{{webfaction_passwd}}" -''' - -import socket - -from ansible.module_utils.basic import AnsibleModule -from ansible.module_utils.six.moves import xmlrpc_client - - -webfaction = xmlrpc_client.ServerProxy('https://api.webfaction.com/') - - -def main(): - - module = AnsibleModule( - argument_spec=dict( - name=dict(required=True), - state=dict(choices=['present', 'absent'], default='present'), - # You can specify an IP address or hostname. - host=dict(required=True), - https=dict(required=False, type='bool', default=False), - subdomains=dict(type='list', elements='str', default=[]), - site_apps=dict(type='list', elements='list', default=[]), - login_name=dict(required=True), - login_password=dict(required=True, no_log=True), - ), - supports_check_mode=True - ) - site_name = module.params['name'] - site_state = module.params['state'] - site_host = module.params['host'] - site_ip = socket.gethostbyname(site_host) - - session_id, account = webfaction.login( - module.params['login_name'], - module.params['login_password'] - ) - - site_list = webfaction.list_websites(session_id) - site_map = dict([(i['name'], i) for i in site_list]) - existing_site = site_map.get(site_name) - - result = {} - - # Here's where the real stuff happens - - if site_state == 'present': - - # Does a site with this name already exist? - if existing_site: - - # If yes, but it's on a different IP address, then fail. 
- # If we wanted to allow relocation, we could add a 'relocate=true' option - # which would get the existing IP address, delete the site there, and create it - # at the new address. A bit dangerous, perhaps, so for now we'll require manual - # deletion if it's on another host. - - if existing_site['ip'] != site_ip: - module.fail_json(msg="Website already exists with a different IP address. Please fix by hand.") - - # If it's on this host and the key parameters are the same, nothing needs to be done. - - if (existing_site['https'] == module.boolean(module.params['https'])) and \ - (set(existing_site['subdomains']) == set(module.params['subdomains'])) and \ - (dict(existing_site['website_apps']) == dict(module.params['site_apps'])): - module.exit_json( - changed=False - ) - - positional_args = [ - session_id, site_name, site_ip, - module.boolean(module.params['https']), - module.params['subdomains'], - ] - for a in module.params['site_apps']: - positional_args.append((a[0], a[1])) - - if not module.check_mode: - # If this isn't a dry run, create or modify the site - result.update( - webfaction.create_website( - *positional_args - ) if not existing_site else webfaction.update_website( - *positional_args - ) - ) - - elif site_state == 'absent': - - # If the site's already not there, nothing changed. 
- if not existing_site: - module.exit_json( - changed=False, - ) - - if not module.check_mode: - # If this isn't a dry run, delete the site - result.update( - webfaction.delete_website(session_id, site_name, site_ip) - ) - - else: - module.fail_json(msg="Unknown state specified: {0}".format(site_state)) - - module.exit_json( - changed=True, - result=result - ) - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/cloud/xenserver/xenserver_guest_info.py b/plugins/modules/cloud/xenserver/xenserver_guest_info.py deleted file mode 100644 index a2e777253e..0000000000 --- a/plugins/modules/cloud/xenserver/xenserver_guest_info.py +++ /dev/null @@ -1,222 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# -# Copyright: (c) 2018, Bojan Vitnik -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - -DOCUMENTATION = r''' ---- -module: xenserver_guest_info -short_description: Gathers information for virtual machines running on Citrix Hypervisor/XenServer host or pool -description: > - This module can be used to gather essential VM facts. -author: -- Bojan Vitnik (@bvitnik) -notes: -- Minimal supported version of XenServer is 5.6. -- Module was tested with XenServer 6.5, 7.1, 7.2, 7.6, Citrix Hypervisor 8.0, XCP-ng 7.6 and 8.0. -- 'To acquire XenAPI Python library, just run C(pip install XenAPI) on your Ansible Control Node. The library can also be found inside - Citrix Hypervisor/XenServer SDK (downloadable from Citrix website). Copy the XenAPI.py file from the SDK to your Python site-packages on your - Ansible Control Node to use it. Latest version of the library can also be acquired from GitHub: - U(https://raw.githubusercontent.com/xapi-project/xen-api/master/scripts/examples/python/XenAPI/XenAPI.py)' -- 'If no scheme is specified in C(hostname), module defaults to C(http://) because C(https://) is problematic in most setups. 
Make sure you are - accessing XenServer host in trusted environment or use C(https://) scheme explicitly.' -- 'To use C(https://) scheme for C(hostname) you have to either import host certificate to your OS certificate store or use C(validate_certs: no) - which requires XenAPI library from XenServer 7.2 SDK or newer and Python 2.7.9 or newer.' -- This module was called C(xenserver_guest_facts) before Ansible 2.9. The usage did not change. -requirements: -- python >= 2.6 -- XenAPI -options: - name: - description: - - Name of the VM to gather facts from. - - VMs running on XenServer do not necessarily have unique names. The module will fail if multiple VMs with same name are found. - - In case of multiple VMs with same name, use C(uuid) to uniquely specify VM to manage. - - This parameter is case sensitive. - type: str - aliases: [ name_label ] - uuid: - description: - - UUID of the VM to gather fact of. This is XenServer's unique identifier. - - It is required if name is not unique. - type: str -extends_documentation_fragment: -- community.general.xenserver.documentation - -''' - -EXAMPLES = r''' -- name: Gather facts - community.general.xenserver_guest_info: - hostname: "{{ xenserver_hostname }}" - username: "{{ xenserver_username }}" - password: "{{ xenserver_password }}" - name: testvm_11 - delegate_to: localhost - register: facts -''' - -RETURN = r''' -instance: - description: Metadata about the VM - returned: always - type: dict - sample: { - "cdrom": { - "type": "none" - }, - "customization_agent": "native", - "disks": [ - { - "name": "testvm_11-0", - "name_desc": "", - "os_device": "xvda", - "size": 42949672960, - "sr": "Local storage", - "sr_uuid": "0af1245e-bdb0-ba33-1446-57a962ec4075", - "vbd_userdevice": "0" - }, - { - "name": "testvm_11-1", - "name_desc": "", - "os_device": "xvdb", - "size": 42949672960, - "sr": "Local storage", - "sr_uuid": "0af1245e-bdb0-ba33-1446-57a962ec4075", - "vbd_userdevice": "1" - } - ], - "domid": "56", - "folder": "", - 
"hardware": { - "memory_mb": 8192, - "num_cpu_cores_per_socket": 2, - "num_cpus": 4 - }, - "home_server": "", - "is_template": false, - "name": "testvm_11", - "name_desc": "", - "networks": [ - { - "gateway": "192.168.0.254", - "gateway6": "fc00::fffe", - "ip": "192.168.0.200", - "ip6": [ - "fe80:0000:0000:0000:e9cb:625a:32c5:c291", - "fc00:0000:0000:0000:0000:0000:0000:0001" - ], - "mac": "ba:91:3a:48:20:76", - "mtu": "1500", - "name": "Pool-wide network associated with eth1", - "netmask": "255.255.255.128", - "prefix": "25", - "prefix6": "64", - "vif_device": "0" - } - ], - "other_config": { - "base_template_name": "Windows Server 2016 (64-bit)", - "import_task": "OpaqueRef:e43eb71c-45d6-5351-09ff-96e4fb7d0fa5", - "install-methods": "cdrom", - "instant": "true", - "mac_seed": "f83e8d8a-cfdc-b105-b054-ef5cb416b77e" - }, - "platform": { - "acpi": "1", - "apic": "true", - "cores-per-socket": "2", - "device_id": "0002", - "hpet": "true", - "nx": "true", - "pae": "true", - "timeoffset": "-25200", - "vga": "std", - "videoram": "8", - "viridian": "true", - "viridian_reference_tsc": "true", - "viridian_time_ref_count": "true" - }, - "state": "poweredon", - "uuid": "e3c0b2d5-5f05-424e-479c-d3df8b3e7cda", - "xenstore_data": { - "vm-data": "" - } - } -''' - -HAS_XENAPI = False -try: - import XenAPI - HAS_XENAPI = True -except ImportError: - pass - -from ansible.module_utils.basic import AnsibleModule -from ansible_collections.community.general.plugins.module_utils.xenserver import (xenserver_common_argument_spec, XAPI, XenServerObject, get_object_ref, - gather_vm_params, gather_vm_facts) - - -class XenServerVM(XenServerObject): - """Class for managing XenServer VM. - - Attributes: - vm_ref (str): XAPI reference to VM. - vm_params (dict): A dictionary with VM parameters as returned - by gather_vm_params() function. - """ - - def __init__(self, module): - """Inits XenServerVM using module parameters. - - Args: - module: Reference to AnsibleModule object. 
- """ - super(XenServerVM, self).__init__(module) - - self.vm_ref = get_object_ref(self.module, self.module.params['name'], self.module.params['uuid'], obj_type="VM", fail=True, msg_prefix="VM search: ") - self.gather_params() - - def gather_params(self): - """Gathers all VM parameters available in XAPI database.""" - self.vm_params = gather_vm_params(self.module, self.vm_ref) - - def gather_facts(self): - """Gathers and returns VM facts.""" - return gather_vm_facts(self.module, self.vm_params) - - -def main(): - argument_spec = xenserver_common_argument_spec() - argument_spec.update( - name=dict(type='str', aliases=['name_label']), - uuid=dict(type='str'), - ) - - module = AnsibleModule(argument_spec=argument_spec, - supports_check_mode=True, - required_one_of=[ - ['name', 'uuid'], - ], - ) - - result = {'failed': False, 'changed': False} - - # Module will exit with an error message if no VM is found. - vm = XenServerVM(module) - - # Gather facts. - result['instance'] = vm.gather_facts() - - if result['failed']: - module.fail_json(**result) - else: - module.exit_json(**result) - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/cloud/xenserver/xenserver_guest_powerstate.py b/plugins/modules/cloud/xenserver/xenserver_guest_powerstate.py deleted file mode 100644 index 4a195ff50a..0000000000 --- a/plugins/modules/cloud/xenserver/xenserver_guest_powerstate.py +++ /dev/null @@ -1,270 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# -# Copyright: (c) 2018, Bojan Vitnik -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - -DOCUMENTATION = r''' ---- -module: xenserver_guest_powerstate -short_description: Manages power states of virtual machines running on Citrix Hypervisor/XenServer host or pool -description: > - This module can be used to power on, power off, restart or suspend virtual machine and gracefully reboot 
or shutdown guest OS of virtual machine. -author: -- Bojan Vitnik (@bvitnik) -notes: -- Minimal supported version of XenServer is 5.6. -- Module was tested with XenServer 6.5, 7.1, 7.2, 7.6, Citrix Hypervisor 8.0, XCP-ng 7.6 and 8.0. -- 'To acquire XenAPI Python library, just run C(pip install XenAPI) on your Ansible Control Node. The library can also be found inside - Citrix Hypervisor/XenServer SDK (downloadable from Citrix website). Copy the XenAPI.py file from the SDK to your Python site-packages on your - Ansible Control Node to use it. Latest version of the library can also be acquired from GitHub: - U(https://raw.githubusercontent.com/xapi-project/xen-api/master/scripts/examples/python/XenAPI/XenAPI.py)' -- 'If no scheme is specified in C(hostname), module defaults to C(http://) because C(https://) is problematic in most setups. Make sure you are - accessing XenServer host in trusted environment or use C(https://) scheme explicitly.' -- 'To use C(https://) scheme for C(hostname) you have to either import host certificate to your OS certificate store or use C(validate_certs: no) - which requires XenAPI library from XenServer 7.2 SDK or newer and Python 2.7.9 or newer.' -requirements: -- python >= 2.6 -- XenAPI -options: - state: - description: - - Specify the state VM should be in. - - If C(state) is set to value other than C(present), then VM is transitioned into required state and facts are returned. - - If C(state) is set to C(present), then VM is just checked for existence and facts are returned. - type: str - default: present - choices: [ powered-on, powered-off, restarted, shutdown-guest, reboot-guest, suspended, present ] - name: - description: - - Name of the VM to manage. - - VMs running on XenServer do not necessarily have unique names. The module will fail if multiple VMs with same name are found. - - In case of multiple VMs with same name, use C(uuid) to uniquely specify VM to manage. - - This parameter is case sensitive. 
- type: str - aliases: [ name_label ] - uuid: - description: - - UUID of the VM to manage if known. This is XenServer's unique identifier. - - It is required if name is not unique. - type: str - wait_for_ip_address: - description: - - Wait until XenServer detects an IP address for the VM. - - This requires XenServer Tools to be preinstalled on the VM to work properly. - type: bool - default: no - state_change_timeout: - description: - - 'By default, module will wait indefinitely for VM to change state or acquire an IP address if C(wait_for_ip_address: yes).' - - If this parameter is set to positive value, the module will instead wait specified number of seconds for the state change. - - In case of timeout, module will generate an error message. - type: int - default: 0 -extends_documentation_fragment: -- community.general.xenserver.documentation - -''' - -EXAMPLES = r''' -- name: Power on VM - community.general.xenserver_guest_powerstate: - hostname: "{{ xenserver_hostname }}" - username: "{{ xenserver_username }}" - password: "{{ xenserver_password }}" - name: testvm_11 - state: powered-on - delegate_to: localhost - register: facts -''' - -RETURN = r''' -instance: - description: Metadata about the VM - returned: always - type: dict - sample: { - "cdrom": { - "type": "none" - }, - "customization_agent": "native", - "disks": [ - { - "name": "windows-template-testing-0", - "name_desc": "", - "os_device": "xvda", - "size": 42949672960, - "sr": "Local storage", - "sr_uuid": "0af1245e-bdb0-ba33-1446-57a962ec4075", - "vbd_userdevice": "0" - }, - { - "name": "windows-template-testing-1", - "name_desc": "", - "os_device": "xvdb", - "size": 42949672960, - "sr": "Local storage", - "sr_uuid": "0af1245e-bdb0-ba33-1446-57a962ec4075", - "vbd_userdevice": "1" - } - ], - "domid": "56", - "folder": "", - "hardware": { - "memory_mb": 8192, - "num_cpu_cores_per_socket": 2, - "num_cpus": 4 - }, - "home_server": "", - "is_template": false, - "name": "windows-template-testing", - 
"name_desc": "", - "networks": [ - { - "gateway": "192.168.0.254", - "gateway6": "fc00::fffe", - "ip": "192.168.0.200", - "ip6": [ - "fe80:0000:0000:0000:e9cb:625a:32c5:c291", - "fc00:0000:0000:0000:0000:0000:0000:0001" - ], - "mac": "ba:91:3a:48:20:76", - "mtu": "1500", - "name": "Pool-wide network associated with eth1", - "netmask": "255.255.255.128", - "prefix": "25", - "prefix6": "64", - "vif_device": "0" - } - ], - "other_config": { - "base_template_name": "Windows Server 2016 (64-bit)", - "import_task": "OpaqueRef:e43eb71c-45d6-5351-09ff-96e4fb7d0fa5", - "install-methods": "cdrom", - "instant": "true", - "mac_seed": "f83e8d8a-cfdc-b105-b054-ef5cb416b77e" - }, - "platform": { - "acpi": "1", - "apic": "true", - "cores-per-socket": "2", - "device_id": "0002", - "hpet": "true", - "nx": "true", - "pae": "true", - "timeoffset": "-25200", - "vga": "std", - "videoram": "8", - "viridian": "true", - "viridian_reference_tsc": "true", - "viridian_time_ref_count": "true" - }, - "state": "poweredon", - "uuid": "e3c0b2d5-5f05-424e-479c-d3df8b3e7cda", - "xenstore_data": { - "vm-data": "" - } - } -''' - -import re - -HAS_XENAPI = False -try: - import XenAPI - HAS_XENAPI = True -except ImportError: - pass - -from ansible.module_utils.basic import AnsibleModule -from ansible_collections.community.general.plugins.module_utils.xenserver import (xenserver_common_argument_spec, XAPI, XenServerObject, get_object_ref, - gather_vm_params, gather_vm_facts, set_vm_power_state, - wait_for_vm_ip_address) - - -class XenServerVM(XenServerObject): - """Class for managing XenServer VM. - - Attributes: - vm_ref (str): XAPI reference to VM. - vm_params (dict): A dictionary with VM parameters as returned - by gather_vm_params() function. - """ - - def __init__(self, module): - """Inits XenServerVM using module parameters. - - Args: - module: Reference to Ansible module object. 
- """ - super(XenServerVM, self).__init__(module) - - self.vm_ref = get_object_ref(self.module, self.module.params['name'], self.module.params['uuid'], obj_type="VM", fail=True, msg_prefix="VM search: ") - self.gather_params() - - def gather_params(self): - """Gathers all VM parameters available in XAPI database.""" - self.vm_params = gather_vm_params(self.module, self.vm_ref) - - def gather_facts(self): - """Gathers and returns VM facts.""" - return gather_vm_facts(self.module, self.vm_params) - - def set_power_state(self, power_state): - """Controls VM power state.""" - state_changed, current_state = set_vm_power_state(self.module, self.vm_ref, power_state, self.module.params['state_change_timeout']) - - # If state has changed, update vm_params. - if state_changed: - self.vm_params['power_state'] = current_state.capitalize() - - return state_changed - - def wait_for_ip_address(self): - """Waits for VM to acquire an IP address.""" - self.vm_params['guest_metrics'] = wait_for_vm_ip_address(self.module, self.vm_ref, self.module.params['state_change_timeout']) - - -def main(): - argument_spec = xenserver_common_argument_spec() - argument_spec.update( - state=dict(type='str', default='present', - choices=['powered-on', 'powered-off', 'restarted', 'shutdown-guest', 'reboot-guest', 'suspended', 'present']), - name=dict(type='str', aliases=['name_label']), - uuid=dict(type='str'), - wait_for_ip_address=dict(type='bool', default=False), - state_change_timeout=dict(type='int', default=0), - ) - - module = AnsibleModule(argument_spec=argument_spec, - supports_check_mode=True, - required_one_of=[ - ['name', 'uuid'], - ], - ) - - result = {'failed': False, 'changed': False} - - # Module will exit with an error message if no VM is found. - vm = XenServerVM(module) - - # Set VM power state. 
- if module.params['state'] != "present": - result['changed'] = vm.set_power_state(module.params['state']) - - if module.params['wait_for_ip_address']: - vm.wait_for_ip_address() - - result['instance'] = vm.gather_facts() - - if result['failed']: - module.fail_json(**result) - else: - module.exit_json(**result) - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/cloud/misc/cloud_init_data_facts.py b/plugins/modules/cloud_init_data_facts.py similarity index 53% rename from plugins/modules/cloud/misc/cloud_init_data_facts.py rename to plugins/modules/cloud_init_data_facts.py index 1b44c50cbe..8da427fa2e 100644 --- a/plugins/modules/cloud/misc/cloud_init_data_facts.py +++ b/plugins/modules/cloud_init_data_facts.py @@ -1,31 +1,32 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- -# -# (c) 2018, René Moser -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# Copyright (c) 2018, René Moser +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: cloud_init_data_facts -short_description: Retrieve facts of cloud-init. +short_description: Retrieve facts of cloud-init description: - - Gathers facts by reading the status.json and result.json of cloud-init. + - Gathers facts by reading the C(status.json) and C(result.json) of cloud-init. author: René Moser (@resmo) +extends_documentation_fragment: + - community.general.attributes + - community.general.attributes.facts + - community.general.attributes.facts_module options: filter: description: - - Filter facts + - Filter facts. type: str - choices: [ status, result ] + choices: [status, result] notes: - See http://cloudinit.readthedocs.io/ for more information about cloud-init. 
-''' +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: Gather all facts of cloud init community.general.cloud_init_data_facts: register: result @@ -40,47 +41,49 @@ EXAMPLES = ''' until: "res.cloud_init_data_facts.status.v1.stage is defined and not res.cloud_init_data_facts.status.v1.stage" retries: 50 delay: 5 -''' +""" -RETURN = ''' ---- +RETURN = r""" cloud_init_data_facts: description: Facts of result and status. returned: success type: dict - sample: '{ - "status": { + sample: + { + "status": { "v1": { - "datasource": "DataSourceCloudStack", - "errors": [] - }, - "result": { - "v1": { - "datasource": "DataSourceCloudStack", - "init": { - "errors": [], - "finished": 1522066377.0185432, - "start": 1522066375.2648022 - }, - "init-local": { - "errors": [], - "finished": 1522066373.70919, - "start": 1522066373.4726632 - }, - "modules-config": { - "errors": [], - "finished": 1522066380.9097016, - "start": 1522066379.0011985 - }, - "modules-final": { - "errors": [], - "finished": 1522066383.56594, - "start": 1522066382.3449218 - }, - "stage": null + "datasource": "DataSourceCloudStack", + "errors": [] } - }' -''' + }, + "result": { + "v1": { + "datasource": "DataSourceCloudStack", + "init": { + "errors": [], + "finished": 1522066377.0185432, + "start": 1522066375.2648022 + }, + "init-local": { + "errors": [], + "finished": 1522066373.70919, + "start": 1522066373.4726632 + }, + "modules-config": { + "errors": [], + "finished": 1522066380.9097016, + "start": 1522066379.0011985 + }, + "modules-final": { + "errors": [], + "finished": 1522066383.56594, + "start": 1522066382.3449218 + }, + "stage": null + } + } + } +""" import os @@ -103,9 +106,8 @@ def gather_cloud_init_data_facts(module): json_file = os.path.join(CLOUD_INIT_PATH, i + '.json') if os.path.exists(json_file): - f = open(json_file, 'rb') - contents = to_text(f.read(), errors='surrogate_or_strict') - f.close() + with open(json_file, 'rb') as f: + contents = to_text(f.read(), errors='surrogate_or_strict') if 
contents: res['cloud_init_data_facts'][i] = module.from_json(contents) diff --git a/plugins/modules/net_tools/cloudflare_dns.py b/plugins/modules/cloudflare_dns.py similarity index 55% rename from plugins/modules/net_tools/cloudflare_dns.py rename to plugins/modules/cloudflare_dns.py index 4e82e0af36..1398d5873a 100644 --- a/plugins/modules/net_tools/cloudflare_dns.py +++ b/plugins/modules/cloudflare_dns.py @@ -1,159 +1,185 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- -# Copyright: (c) 2016 Michael Gruener -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# Copyright (c) 2016 Michael Gruener +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = r''' ---- +DOCUMENTATION = r""" module: cloudflare_dns author: -- Michael Gruener (@mgruener) -requirements: - - python >= 2.6 + - Michael Gruener (@mgruener) short_description: Manage Cloudflare DNS records description: - - "Manages dns records via the Cloudflare API, see the docs: U(https://api.cloudflare.com/)." + - 'Manages DNS records using the Cloudflare API, see the docs: U(https://api.cloudflare.com/).' +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: none options: api_token: description: - - API token. - - Required for api token authentication. - - "You can obtain your API token from the bottom of the Cloudflare 'My Account' page, found here: U(https://dash.cloudflare.com/)." - - Can be specified in C(CLOUDFLARE_TOKEN) environment variable since community.general 2.0.0. + - API token. + - Required for API token authentication. 
+ - "You can obtain your API token from the bottom of the Cloudflare 'My Account' page, found here: U(https://dash.cloudflare.com/)." + - Can be specified in E(CLOUDFLARE_TOKEN) environment variable since community.general 2.0.0. type: str - required: false version_added: '0.2.0' account_api_key: description: - - Account API key. - - Required for api keys authentication. - - "You can obtain your API key from the bottom of the Cloudflare 'My Account' page, found here: U(https://dash.cloudflare.com/)." + - Account API key. + - Required for API keys authentication. + - "You can obtain your API key from the bottom of the Cloudflare 'My Account' page, found here: U(https://dash.cloudflare.com/)." type: str - required: false - aliases: [ account_api_token ] + aliases: [account_api_token] account_email: description: - - Account email. Required for API keys authentication. + - Account email. Required for API keys authentication. type: str - required: false algorithm: description: - - Algorithm number. - - Required for C(type=DS) and C(type=SSHFP) when C(state=present). + - Algorithm number. + - Required for O(type=DS) and O(type=SSHFP) when O(state=present). type: int cert_usage: description: - - Certificate usage number. - - Required for C(type=TLSA) when C(state=present). + - Certificate usage number. + - Required for O(type=TLSA) when O(state=present). type: int - choices: [ 0, 1, 2, 3 ] + choices: [0, 1, 2, 3] + comment: + description: + - Comments or notes about the DNS record. + type: str + version_added: 10.1.0 + flag: + description: + - Issuer Critical Flag. + - Required for O(type=CAA) when O(state=present). + type: int + choices: [0, 1] + version_added: 8.0.0 + tag: + description: + - CAA issue restriction. + - Required for O(type=CAA) when O(state=present). + type: str + choices: [issue, issuewild, iodef] + version_added: 8.0.0 hash_type: description: - - Hash type number. - - Required for C(type=DS), C(type=SSHFP) and C(type=TLSA) when C(state=present). 
+ - Hash type number. + - Required for O(type=DS), O(type=SSHFP) and O(type=TLSA) when O(state=present). type: int - choices: [ 1, 2 ] + choices: [1, 2] key_tag: description: - - DNSSEC key tag. - - Needed for C(type=DS) when C(state=present). + - DNSSEC key tag. + - Needed for O(type=DS) when O(state=present). type: int port: description: - - Service port. - - Required for C(type=SRV) and C(type=TLSA). + - Service port. + - Required for O(type=SRV) and O(type=TLSA). type: int priority: description: - - Record priority. - - Required for C(type=MX) and C(type=SRV) + - Record priority. + - Required for O(type=MX) and O(type=SRV). default: 1 type: int proto: description: - - Service protocol. Required for C(type=SRV) and C(type=TLSA). - - Common values are TCP and UDP. - - Before Ansible 2.6 only TCP and UDP were available. + - Service protocol. Required for O(type=SRV) and O(type=TLSA). + - Common values are TCP and UDP. type: str proxied: description: - - Proxy through Cloudflare network or just use DNS. + - Proxy through Cloudflare network or just use DNS. type: bool - default: no + default: false record: description: - - Record to add. - - Required if C(state=present). - - Default is C(@) (e.g. the zone name). + - Record to add. + - Required if O(state=present). + - Default is V(@) (that is, the zone name). type: str default: '@' - aliases: [ name ] + aliases: [name] selector: description: - - Selector number. - - Required for C(type=TLSA) when C(state=present). - choices: [ 0, 1 ] + - Selector number. + - Required for O(type=TLSA) when O(state=present). + choices: [0, 1] type: int service: description: - - Record service. - - Required for I(type=SRV). + - Record service. + - Required for O(type=SRV). type: str solo: description: - - Whether the record should be the only one for that record type and record name. - - Only use with C(state=present). - - This will delete all other records with the same record name and type. 
+ - Whether the record should be the only one for that record type and record name. + - Only use with O(state=present). + - This deletes all other records with the same record name and type. type: bool state: description: - - Whether the record(s) should exist or not. + - Whether the record(s) should exist or not. type: str - choices: [ absent, present ] + choices: [absent, present] default: present + tags: + description: + - Custom tags for the DNS record. + type: list + elements: str + version_added: 10.1.0 timeout: description: - - Timeout for Cloudflare API calls. + - Timeout for Cloudflare API calls. type: int default: 30 ttl: description: - - The TTL to give the new record. - - Must be between 120 and 2,147,483,647 seconds, or 1 for automatic. + - The TTL to give the new record. + - Must be between V(120) and V(2,147,483,647) seconds, or V(1) for automatic. type: int default: 1 type: description: - - The type of DNS record to create. Required if C(state=present). - - C(type=DS), C(type=SSHFP) and C(type=TLSA) added in Ansible 2.7. + - The type of DNS record to create. Required if O(state=present). + - Support for V(SPF) has been removed from community.general 9.0.0 since that record type is no longer supported by + CloudFlare. + - Support for V(PTR) has been added in community.general 11.1.0. type: str - choices: [ A, AAAA, CNAME, DS, MX, NS, SPF, SRV, SSHFP, TLSA, TXT ] + choices: [A, AAAA, CNAME, DS, MX, NS, SRV, SSHFP, TLSA, CAA, TXT, PTR] value: description: - - The record value. - - Required for C(state=present). + - The record value. + - Required for O(state=present). type: str - aliases: [ content ] + aliases: [content] weight: description: - - Service weight. - - Required for C(type=SRV). + - Service weight. + - Required for O(type=SRV). type: int default: 1 zone: description: - - The name of the Zone to work with (e.g. "example.com"). - - The Zone must already exist. + - The name of the Zone to work with (for example V(example.com)). 
+ - The Zone must already exist. type: str required: true - aliases: [ domain ] -''' + aliases: [domain] +""" -EXAMPLES = r''' +EXAMPLES = r""" - name: Create a test.example.net A record to point to 127.0.0.1 community.general.cloudflare_dns: zone: example.net @@ -172,6 +198,18 @@ EXAMPLES = r''' value: 127.0.0.1 api_token: dummyapitoken +- name: Create a record with comment and tags + community.general.cloudflare_dns: + zone: example.net + record: test + type: A + value: 127.0.0.1 + comment: Local test website + tags: + - test + - local + api_token: dummyapitoken + - name: Create a example.net CNAME record to example.com community.general.cloudflare_dns: zone: example.net @@ -205,7 +243,7 @@ EXAMPLES = r''' zone: example.net type: CNAME value: example.com - proxied: yes + proxied: true account_email: test@example.com account_api_key: dummyapitoken state: present @@ -254,6 +292,15 @@ EXAMPLES = r''' hash_type: 1 value: 6b76d034492b493e15a7376fccd08e63befdad0edab8e442562f532338364bf3 +- name: Create a CAA record subdomain.example.com + community.general.cloudflare_dns: + zone: example.com + record: subdomain + type: CAA + flag: 0 + tag: issue + value: ca.example.com + - name: Create a DS record for subdomain.example.com community.general.cloudflare_dns: zone: example.com @@ -263,111 +310,147 @@ EXAMPLES = r''' algorithm: 8 hash_type: 2 value: B4EB5AC4467D2DFB3BAF9FB9961DC1B6FED54A58CDFAA3E465081EC86F89BFAB -''' -RETURN = r''' +- name: Create PTR record "1.2.0.192.in-addr.arpa" with value "test.example.com" + community.general.cloudflare_dns: + zone: 2.0.192.in-addr.arpa + record: 1 + type: PTR + value: test.example.com + state: present +""" + +RETURN = r""" record: - description: A dictionary containing the record data. - returned: success, except on record deletion - type: complex - contains: - content: - description: The record content (details depend on record type). 
- returned: success - type: str - sample: 192.0.2.91 - created_on: - description: The record creation date. - returned: success - type: str - sample: "2016-03-25T19:09:42.516553Z" - data: - description: Additional record data. - returned: success, if type is SRV, DS, SSHFP or TLSA - type: dict - sample: { - name: "jabber", - port: 8080, - priority: 10, - proto: "_tcp", - service: "_xmpp", - target: "jabberhost.sample.com", - weight: 5, - } - id: - description: The record ID. - returned: success - type: str - sample: f9efb0549e96abcb750de63b38c9576e - locked: - description: No documentation available. - returned: success - type: bool - sample: False - meta: - description: No documentation available. - returned: success - type: dict - sample: { auto_added: false } - modified_on: - description: Record modification date. - returned: success - type: str - sample: "2016-03-25T19:09:42.516553Z" - name: - description: The record name as FQDN (including _service and _proto for SRV). - returned: success - type: str - sample: www.sample.com - priority: - description: Priority of the MX record. - returned: success, if type is MX - type: int - sample: 10 - proxiable: - description: Whether this record can be proxied through Cloudflare. - returned: success - type: bool - sample: False - proxied: - description: Whether the record is proxied through Cloudflare. - returned: success - type: bool - sample: False - ttl: - description: The time-to-live for the record. - returned: success - type: int - sample: 300 - type: - description: The record type. - returned: success - type: str - sample: A - zone_id: - description: The ID of the zone containing the record. - returned: success - type: str - sample: abcede0bf9f0066f94029d2e6b73856a - zone_name: - description: The name of the zone containing the record. - returned: success - type: str - sample: sample.com -''' + description: A dictionary containing the record data. 
+ returned: success, except on record deletion + type: complex + contains: + comment: + description: Comments or notes about the DNS record. + returned: success + type: str + sample: Domain verification record + version_added: 10.1.0 + comment_modified_on: + description: When the record comment was last modified. Omitted if there is no comment. + returned: success + type: str + sample: "2024-01-01T05:20:00.12345Z" + version_added: 10.1.0 + content: + description: The record content (details depend on record type). + returned: success + type: str + sample: 192.0.2.91 + created_on: + description: The record creation date. + returned: success + type: str + sample: "2016-03-25T19:09:42.516553Z" + data: + description: Additional record data. + returned: success, if type is SRV, DS, SSHFP TLSA or CAA + type: dict + sample: + { + "name": "jabber", + "port": 8080, + "priority": 10, + "proto": "_tcp", + "service": "_xmpp", + "target": "jabberhost.sample.com", + "weight": 5 + } + id: + description: The record ID. + returned: success + type: str + sample: f9efb0549e96abcb750de63b38c9576e + locked: + description: No documentation available. + returned: success + type: bool + sample: false + meta: + description: Extra Cloudflare-specific information about the record. + returned: success + type: dict + sample: {"auto_added": false} + modified_on: + description: Record modification date. + returned: success + type: str + sample: "2016-03-25T19:09:42.516553Z" + name: + description: The record name as FQDN (including _service and _proto for SRV). + returned: success + type: str + sample: www.sample.com + priority: + description: Priority of the MX record. + returned: success, if type is MX + type: int + sample: 10 + proxiable: + description: Whether this record can be proxied through Cloudflare. + returned: success + type: bool + sample: false + proxied: + description: Whether the record is proxied through Cloudflare. 
+ returned: success + type: bool + sample: false + tags: + description: Custom tags for the DNS record. + returned: success + type: list + elements: str + sample: ["production", "app"] + version_added: 10.1.0 + tags_modified_on: + description: When the record tags were last modified. Omitted if there are no tags. + returned: success + type: str + sample: "2025-01-01T05:20:00.12345Z" + version_added: 10.1.0 + ttl: + description: The time-to-live for the record. + returned: success + type: int + sample: 300 + type: + description: The record type. + returned: success + type: str + sample: A + zone_id: + description: The ID of the zone containing the record. + returned: success + type: str + sample: abcede0bf9f0066f94029d2e6b73856a + zone_name: + description: The name of the zone containing the record. + returned: success + type: str + sample: sample.com +""" import json +from urllib.parse import urlencode from ansible.module_utils.basic import AnsibleModule, env_fallback -from ansible.module_utils.six.moves.urllib.parse import urlencode from ansible.module_utils.common.text.converters import to_native, to_text from ansible.module_utils.urls import fetch_url def lowercase_string(param): - if not isinstance(param, str): - return param - return param.lower() + return param.lower() if isinstance(param, str) else param + + +def join_str(sep, *args): + return sep.join([str(arg) for arg in args]) class CloudflareAPI(object): @@ -382,7 +465,11 @@ class CloudflareAPI(object): self.account_email = module.params['account_email'] self.algorithm = module.params['algorithm'] self.cert_usage = module.params['cert_usage'] + self.comment = module.params['comment'] self.hash_type = module.params['hash_type'] + self.flag = module.params['flag'] + self.tag = module.params['tag'] + self.tags = module.params['tags'] self.key_tag = module.params['key_tag'] self.port = module.params['port'] self.priority = module.params['priority'] @@ -409,29 +496,29 @@ class CloudflareAPI(object): if 
(self.type == 'AAAA') and (self.value is not None): self.value = self.value.lower() - if (self.type == 'SRV'): + if self.type == 'SRV': if (self.proto is not None) and (not self.proto.startswith('_')): - self.proto = '_' + self.proto + self.proto = '_{0}'.format(self.proto) if (self.service is not None) and (not self.service.startswith('_')): - self.service = '_' + self.service + self.service = '_{0}'.format(self.service) - if (self.type == 'TLSA'): + if self.type == 'TLSA': if (self.proto is not None) and (not self.proto.startswith('_')): - self.proto = '_' + self.proto + self.proto = '_{0}'.format(self.proto) if (self.port is not None): - self.port = '_' + str(self.port) + self.port = '_{0}'.format(self.port) if not self.record.endswith(self.zone): - self.record = self.record + '.' + self.zone + self.record = join_str('.', self.record, self.zone) - if (self.type == 'DS'): + if self.type == 'DS': if self.record == self.zone: self.module.fail_json(msg="DS records only apply to subdomains.") def _cf_simple_api_call(self, api_call, method='GET', payload=None): if self.api_token: headers = { - 'Authorization': 'Bearer ' + self.api_token, + 'Authorization': 'Bearer {0}'.format(self.api_token), 'Content-Type': 'application/json', } else: @@ -481,6 +568,9 @@ class CloudflareAPI(object): try: content = resp.read() except AttributeError: + content = None + + if not content: if info['body']: content = info['body'] else: @@ -528,7 +618,7 @@ class CloudflareAPI(object): else: raw_api_call = api_call while next_page <= pagination['total_pages']: - raw_api_call += '?' + '&'.join(parameters) + raw_api_call += '?{0}'.format('&'.join(parameters)) result, status = self._cf_simple_api_call(raw_api_call, method, payload) data += result['result'] next_page += 1 @@ -553,8 +643,8 @@ class CloudflareAPI(object): name = self.zone param = '' if name: - param = '?' 
+ urlencode({'name': name}) - zones, status = self._cf_api_call('/zones' + param) + param = '?{0}'.format(urlencode({'name': name})) + zones, status = self._cf_api_call('/zones{0}'.format(param)) return zones def get_dns_records(self, zone_name=None, type=None, record=None, value=''): @@ -579,195 +669,212 @@ class CloudflareAPI(object): if value: query['content'] = value if query: - api_call += '?' + urlencode(query) + api_call += '?{0}'.format(urlencode(query)) records, status = self._cf_api_call(api_call) return records - def delete_dns_records(self, **kwargs): - params = {} - for param in ['port', 'proto', 'service', 'solo', 'type', 'record', 'value', 'weight', 'zone', - 'algorithm', 'cert_usage', 'hash_type', 'selector', 'key_tag']: - if param in kwargs: - params[param] = kwargs[param] - else: - params[param] = getattr(self, param) - + def delete_dns_records(self, solo): records = [] - content = params['value'] - search_record = params['record'] - if params['type'] == 'SRV': - if not (params['value'] is None or params['value'] == ''): - content = str(params['weight']) + '\t' + str(params['port']) + '\t' + params['value'] - search_record = params['service'] + '.' + params['proto'] + '.' + params['record'] - elif params['type'] == 'DS': - if not (params['value'] is None or params['value'] == ''): - content = str(params['key_tag']) + '\t' + str(params['algorithm']) + '\t' + str(params['hash_type']) + '\t' + params['value'] - elif params['type'] == 'SSHFP': - if not (params['value'] is None or params['value'] == ''): - content = str(params['algorithm']) + '\t' + str(params['hash_type']) + '\t' + params['value'] - elif params['type'] == 'TLSA': - if not (params['value'] is None or params['value'] == ''): - content = str(params['cert_usage']) + '\t' + str(params['selector']) + '\t' + str(params['hash_type']) + '\t' + params['value'] - search_record = params['port'] + '.' + params['proto'] + '.' 
+ params['record'] - if params['solo']: + content = self.value + search_record = self.record + if self.type == 'SRV': + if not (self.value is None or self.value == ''): + content = join_str('\t', self.weight, self.port, self.value) + search_record = join_str('.', self.service, self.proto, self.record) + elif self.type == 'DS': + if not (self.value is None or self.value == ''): + content = join_str('\t', self.key_tag, self.algorithm, self.hash_type, self.value) + elif self.type == 'SSHFP': + if not (self.value is None or self.value == ''): + content = join_str(' ', self.algorithm, self.hash_type, self.value.upper()) + elif self.type == 'TLSA': + if not (self.value is None or self.value == ''): + content = join_str('\t', self.cert_usage, self.selector, self.hash_type, self.value) + search_record = join_str('.', self.port, self.proto, self.record) + if solo: search_value = None else: search_value = content - records = self.get_dns_records(params['zone'], params['type'], search_record, search_value) + zone_id = self._get_zone_id(self.zone) + records = self.get_dns_records(self.zone, self.type, search_record, search_value) for rr in records: - if params['solo']: - if not ((rr['type'] == params['type']) and (rr['name'] == search_record) and (rr['content'] == content)): + if solo: + if not ((rr['type'] == self.type) and (rr['name'] == search_record) and (rr['content'] == content)): self.changed = True if not self.module.check_mode: - result, info = self._cf_api_call('/zones/{0}/dns_records/{1}'.format(rr['zone_id'], rr['id']), 'DELETE') + result, info = self._cf_api_call('/zones/{0}/dns_records/{1}'.format(zone_id, rr['id']), 'DELETE') else: self.changed = True if not self.module.check_mode: - result, info = self._cf_api_call('/zones/{0}/dns_records/{1}'.format(rr['zone_id'], rr['id']), 'DELETE') + result, info = self._cf_api_call('/zones/{0}/dns_records/{1}'.format(zone_id, rr['id']), 'DELETE') return self.changed - def ensure_dns_record(self, **kwargs): - params = {} - 
for param in ['port', 'priority', 'proto', 'proxied', 'service', 'ttl', 'type', 'record', 'value', 'weight', 'zone', - 'algorithm', 'cert_usage', 'hash_type', 'selector', 'key_tag']: - if param in kwargs: - params[param] = kwargs[param] - else: - params[param] = getattr(self, param) - - search_value = params['value'] - search_record = params['record'] + def ensure_dns_record(self): + search_value = self.value + search_record = self.record new_record = None - if (params['type'] is None) or (params['record'] is None): - self.module.fail_json(msg="You must provide a type and a record to create a new record") - if (params['type'] in ['A', 'AAAA', 'CNAME', 'TXT', 'MX', 'NS', 'SPF']): - if not params['value']: + if self.type in ['A', 'AAAA', 'CNAME', 'TXT', 'MX', 'NS', 'PTR']: + if not self.value: self.module.fail_json(msg="You must provide a non-empty value to create this record type") # there can only be one CNAME per record # ignoring the value when searching for existing # CNAME records allows us to update the value if it # changes - if params['type'] == 'CNAME': + if self.type == 'CNAME': search_value = None new_record = { - "type": params['type'], - "name": params['record'], - "content": params['value'], - "ttl": params['ttl'] + "type": self.type, + "name": self.record, + "content": self.value, + "ttl": self.ttl } - if (params['type'] in ['A', 'AAAA', 'CNAME']): - new_record["proxied"] = params["proxied"] + if self.type in ['A', 'AAAA', 'CNAME']: + new_record["proxied"] = self.proxied - if params['type'] == 'MX': - for attr in [params['priority'], params['value']]: + if self.type == 'MX': + for attr in [self.priority, self.value]: if (attr is None) or (attr == ''): self.module.fail_json(msg="You must provide priority and a value to create this record type") new_record = { - "type": params['type'], - "name": params['record'], - "content": params['value'], - "priority": params['priority'], - "ttl": params['ttl'] + "type": self.type, + "name": self.record, + 
"content": self.value, + "priority": self.priority, + "ttl": self.ttl } - if params['type'] == 'SRV': - for attr in [params['port'], params['priority'], params['proto'], params['service'], params['weight'], params['value']]: + if self.type == 'SRV': + for attr in [self.port, self.priority, self.proto, self.service, self.weight, self.value]: if (attr is None) or (attr == ''): self.module.fail_json(msg="You must provide port, priority, proto, service, weight and a value to create this record type") srv_data = { - "target": params['value'], - "port": params['port'], - "weight": params['weight'], - "priority": params['priority'], - "name": params['record'][:-len('.' + params['zone'])], - "proto": params['proto'], - "service": params['service'] + "target": self.value, + "port": self.port, + "weight": self.weight, + "priority": self.priority, } - new_record = {"type": params['type'], "ttl": params['ttl'], 'data': srv_data} - search_value = str(params['weight']) + '\t' + str(params['port']) + '\t' + params['value'] - search_record = params['service'] + '.' + params['proto'] + '.' 
+ params['record'] - if params['type'] == 'DS': - for attr in [params['key_tag'], params['algorithm'], params['hash_type'], params['value']]: + new_record = { + "type": self.type, + "name": join_str('.', self.service, self.proto, self.record), + "ttl": self.ttl, + 'data': srv_data, + } + search_value = join_str('\t', self.weight, self.port, self.value) + search_record = join_str('.', self.service, self.proto, self.record) + + if self.type == 'DS': + for attr in [self.key_tag, self.algorithm, self.hash_type, self.value]: if (attr is None) or (attr == ''): self.module.fail_json(msg="You must provide key_tag, algorithm, hash_type and a value to create this record type") ds_data = { - "key_tag": params['key_tag'], - "algorithm": params['algorithm'], - "digest_type": params['hash_type'], - "digest": params['value'], + "key_tag": self.key_tag, + "algorithm": self.algorithm, + "digest_type": self.hash_type, + "digest": self.value, } new_record = { - "type": params['type'], - "name": params['record'], + "type": self.type, + "name": self.record, 'data': ds_data, - "ttl": params['ttl'], + "ttl": self.ttl, } - search_value = str(params['key_tag']) + '\t' + str(params['algorithm']) + '\t' + str(params['hash_type']) + '\t' + params['value'] + search_value = join_str('\t', self.key_tag, self.algorithm, self.hash_type, self.value) - if params['type'] == 'SSHFP': - for attr in [params['algorithm'], params['hash_type'], params['value']]: + if self.type == 'SSHFP': + for attr in [self.algorithm, self.hash_type, self.value]: if (attr is None) or (attr == ''): self.module.fail_json(msg="You must provide algorithm, hash_type and a value to create this record type") sshfp_data = { - "fingerprint": params['value'], - "type": params['hash_type'], - "algorithm": params['algorithm'], + "fingerprint": self.value.upper(), + "type": self.hash_type, + "algorithm": self.algorithm, } new_record = { - "type": params['type'], - "name": params['record'], + "type": self.type, + "name": self.record, 
'data': sshfp_data, - "ttl": params['ttl'], + "ttl": self.ttl, } - search_value = str(params['algorithm']) + '\t' + str(params['hash_type']) + '\t' + params['value'] + search_value = join_str(' ', self.algorithm, self.hash_type, self.value) - if params['type'] == 'TLSA': - for attr in [params['port'], params['proto'], params['cert_usage'], params['selector'], params['hash_type'], params['value']]: + if self.type == 'TLSA': + for attr in [self.port, self.proto, self.cert_usage, self.selector, self.hash_type, self.value]: if (attr is None) or (attr == ''): self.module.fail_json(msg="You must provide port, proto, cert_usage, selector, hash_type and a value to create this record type") - search_record = params['port'] + '.' + params['proto'] + '.' + params['record'] + search_record = join_str('.', self.port, self.proto, self.record) tlsa_data = { - "usage": params['cert_usage'], - "selector": params['selector'], - "matching_type": params['hash_type'], - "certificate": params['value'], + "usage": self.cert_usage, + "selector": self.selector, + "matching_type": self.hash_type, + "certificate": self.value, } new_record = { - "type": params['type'], + "type": self.type, "name": search_record, 'data': tlsa_data, - "ttl": params['ttl'], + "ttl": self.ttl, } - search_value = str(params['cert_usage']) + '\t' + str(params['selector']) + '\t' + str(params['hash_type']) + '\t' + params['value'] + search_value = join_str('\t', self.cert_usage, self.selector, self.hash_type, self.value) - zone_id = self._get_zone_id(params['zone']) - records = self.get_dns_records(params['zone'], params['type'], search_record, search_value) + if self.type == 'CAA': + for attr in [self.flag, self.tag, self.value]: + if attr == '': + self.module.fail_json(msg="You must provide flag, tag and a value to create this record type") + caa_data = { + "flags": self.flag, + "tag": self.tag, + "value": self.value, + } + new_record = { + "type": self.type, + "name": self.record, + 'data': caa_data, + "ttl": 
self.ttl, + } + search_value = None + + new_record['comment'] = self.comment or None + new_record['tags'] = self.tags or [] + + zone_id = self._get_zone_id(self.zone) + records = self.get_dns_records(self.zone, self.type, search_record, search_value) # in theory this should be impossible as cloudflare does not allow # the creation of duplicate records but lets cover it anyways if len(records) > 1: - self.module.fail_json(msg="More than one record already exists for the given attributes. That should be impossible, please open an issue!") + # As Cloudflare API cannot filter record containing quotes + # CAA records must be compared locally + if self.type == 'CAA': + for rr in records: + if rr['data']['flags'] == caa_data['flags'] and rr['data']['tag'] == caa_data['tag'] and rr['data']['value'] == caa_data['value']: + return rr, self.changed + else: + self.module.fail_json(msg="More than one record already exists for the given attributes. That should be impossible, please open an issue!") # record already exists, check if it must be updated if len(records) == 1: cur_record = records[0] do_update = False - if (params['ttl'] is not None) and (cur_record['ttl'] != params['ttl']): + if (self.ttl is not None) and (cur_record['ttl'] != self.ttl): do_update = True - if (params['priority'] is not None) and ('priority' in cur_record) and (cur_record['priority'] != params['priority']): + if (self.priority is not None) and ('priority' in cur_record) and (cur_record['priority'] != self.priority): do_update = True - if ('proxied' in new_record) and ('proxied' in cur_record) and (cur_record['proxied'] != params['proxied']): + if ('proxied' in new_record) and ('proxied' in cur_record) and (cur_record['proxied'] != self.proxied): do_update = True if ('data' in new_record) and ('data' in cur_record): - if (cur_record['data'] != new_record['data']): + if cur_record['data'] != new_record['data']: do_update = True - if (params['type'] == 'CNAME') and (cur_record['content'] != 
new_record['content']): + if (self.type == 'CNAME') and (cur_record['content'] != new_record['content']): + do_update = True + if cur_record['comment'] != new_record['comment']: + do_update = True + if sorted(cur_record['tags']) != sorted(new_record['tags']): do_update = True if do_update: if self.module.check_mode: @@ -789,19 +896,18 @@ class CloudflareAPI(object): def main(): module = AnsibleModule( argument_spec=dict( - api_token=dict( - type="str", - required=False, - no_log=True, - fallback=(env_fallback, ["CLOUDFLARE_TOKEN"]), - ), - account_api_key=dict(type='str', required=False, no_log=True, aliases=['account_api_token']), - account_email=dict(type='str', required=False), + api_token=dict(type="str", no_log=True, fallback=(env_fallback, ["CLOUDFLARE_TOKEN"])), + account_api_key=dict(type='str', no_log=True, aliases=['account_api_token']), + account_email=dict(type='str'), algorithm=dict(type='int'), cert_usage=dict(type='int', choices=[0, 1, 2, 3]), + comment=dict(type='str'), hash_type=dict(type='int', choices=[1, 2]), key_tag=dict(type='int', no_log=False), port=dict(type='int'), + flag=dict(type='int', choices=[0, 1]), + tag=dict(type='str', choices=['issue', 'issuewild', 'iodef']), + tags=dict(type='list', elements='str'), priority=dict(type='int', default=1), proto=dict(type='str'), proxied=dict(type='bool', default=False), @@ -812,7 +918,7 @@ def main(): state=dict(type='str', default='present', choices=['absent', 'present']), timeout=dict(type='int', default=30), ttl=dict(type='int', default=1), - type=dict(type='str', choices=['A', 'AAAA', 'CNAME', 'DS', 'MX', 'NS', 'SPF', 'SRV', 'SSHFP', 'TLSA', 'TXT']), + type=dict(type='str', choices=['A', 'AAAA', 'CNAME', 'DS', 'MX', 'NS', 'SRV', 'SSHFP', 'TLSA', 'CAA', 'TXT', 'PTR']), value=dict(type='str', aliases=['content']), weight=dict(type='int', default=1), zone=dict(type='str', required=True, aliases=['domain']), @@ -823,11 +929,16 @@ def main(): ('state', 'absent', ['record']), ('type', 'SRV', 
['proto', 'service']), ('type', 'TLSA', ['proto', 'port']), + ('type', 'CAA', ['flag', 'tag']), + ], + required_together=[ + ('account_api_key', 'account_email'), + ], + required_one_of=[ + ['api_token', 'account_api_key'], ], ) - if not module.params['api_token'] and not (module.params['account_api_key'] and module.params['account_email']): - module.fail_json(msg="Either api_token or account_api_key and account_email params are required.") if module.params['type'] == 'SRV': if not ((module.params['weight'] is not None and module.params['port'] is not None and not (module.params['value'] is None or module.params['value'] == '')) @@ -849,6 +960,13 @@ def main(): and (module.params['value'] is None or module.params['value'] == ''))): module.fail_json(msg="For TLSA records the params cert_usage, selector, hash_type and value all need to be defined, or not at all.") + if module.params['type'] == 'CAA': + if not ((module.params['flag'] is not None and module.params['tag'] is not None + and not (module.params['value'] is None or module.params['value'] == '')) + or (module.params['flag'] is None and module.params['tag'] is None + and (module.params['value'] is None or module.params['value'] == ''))): + module.fail_json(msg="For CAA records the params flag, tag and value all need to be defined, or not at all.") + if module.params['type'] == 'DS': if not ((module.params['key_tag'] is not None and module.params['algorithm'] is not None and module.params['hash_type'] is not None and not (module.params['value'] is None or module.params['value'] == '')) diff --git a/plugins/modules/clustering/consul/consul_acl.py b/plugins/modules/clustering/consul/consul_acl.py deleted file mode 100644 index 1e01e58af5..0000000000 --- a/plugins/modules/clustering/consul/consul_acl.py +++ /dev/null @@ -1,683 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# -# (c) 2015, Steve Gargan -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from 
__future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' -module: consul_acl -short_description: Manipulate Consul ACL keys and rules -description: - - Allows the addition, modification and deletion of ACL keys and associated - rules in a consul cluster via the agent. For more details on using and - configuring ACLs, see https://www.consul.io/docs/guides/acl.html. -author: - - Steve Gargan (@sgargan) - - Colin Nolan (@colin-nolan) -options: - mgmt_token: - description: - - a management token is required to manipulate the acl lists - required: true - type: str - state: - description: - - whether the ACL pair should be present or absent - required: false - choices: ['present', 'absent'] - default: present - type: str - token_type: - description: - - the type of token that should be created - choices: ['client', 'management'] - default: client - type: str - name: - description: - - the name that should be associated with the acl key, this is opaque - to Consul - required: false - type: str - token: - description: - - the token key identifying an ACL rule set. 
If generated by consul - this will be a UUID - required: false - type: str - rules: - type: list - elements: dict - description: - - rules that should be associated with a given token - required: false - host: - description: - - host of the consul agent defaults to localhost - required: false - default: localhost - type: str - port: - type: int - description: - - the port on which the consul agent is running - required: false - default: 8500 - scheme: - description: - - the protocol scheme on which the consul agent is running - required: false - default: http - type: str - validate_certs: - type: bool - description: - - whether to verify the tls certificate of the consul agent - required: false - default: True -requirements: - - python-consul - - pyhcl - - requests -''' - -EXAMPLES = """ -- name: Create an ACL with rules - community.general.consul_acl: - host: consul1.example.com - mgmt_token: some_management_acl - name: Foo access - rules: - - key: "foo" - policy: read - - key: "private/foo" - policy: deny - -- name: Create an ACL with a specific token - community.general.consul_acl: - host: consul1.example.com - mgmt_token: some_management_acl - name: Foo access - token: my-token - rules: - - key: "foo" - policy: read - -- name: Update the rules associated to an ACL token - community.general.consul_acl: - host: consul1.example.com - mgmt_token: some_management_acl - name: Foo access - token: some_client_token - rules: - - event: "bbq" - policy: write - - key: "foo" - policy: read - - key: "private" - policy: deny - - keyring: write - - node: "hgs4" - policy: write - - operator: read - - query: "" - policy: write - - service: "consul" - policy: write - - session: "standup" - policy: write - -- name: Remove a token - community.general.consul_acl: - host: consul1.example.com - mgmt_token: some_management_acl - token: 172bd5c8-9fe9-11e4-b1b0-3c15c2c9fd5e - state: absent -""" - -RETURN = """ -token: - description: the token associated to the ACL (the ACL's ID) - 
returned: success - type: str - sample: a2ec332f-04cf-6fba-e8b8-acf62444d3da -rules: - description: the HCL JSON representation of the rules associated to the ACL, in the format described in the - Consul documentation (https://www.consul.io/docs/guides/acl.html#rule-specification). - returned: I(status) == "present" - type: str - sample: { - "key": { - "foo": { - "policy": "write" - }, - "bar": { - "policy": "deny" - } - } - } -operation: - description: the operation performed on the ACL - returned: changed - type: str - sample: update -""" - - -try: - import consul - python_consul_installed = True -except ImportError: - python_consul_installed = False - -try: - import hcl - pyhcl_installed = True -except ImportError: - pyhcl_installed = False - -try: - from requests.exceptions import ConnectionError - has_requests = True -except ImportError: - has_requests = False - -from collections import defaultdict -from ansible.module_utils.basic import to_text, AnsibleModule - - -RULE_SCOPES = [ - "agent", - "agent_prefix", - "event", - "event_prefix", - "key", - "key_prefix", - "keyring", - "node", - "node_prefix", - "operator", - "query", - "query_prefix", - "service", - "service_prefix", - "session", - "session_prefix", -] - -MANAGEMENT_PARAMETER_NAME = "mgmt_token" -HOST_PARAMETER_NAME = "host" -SCHEME_PARAMETER_NAME = "scheme" -VALIDATE_CERTS_PARAMETER_NAME = "validate_certs" -NAME_PARAMETER_NAME = "name" -PORT_PARAMETER_NAME = "port" -RULES_PARAMETER_NAME = "rules" -STATE_PARAMETER_NAME = "state" -TOKEN_PARAMETER_NAME = "token" -TOKEN_TYPE_PARAMETER_NAME = "token_type" - -PRESENT_STATE_VALUE = "present" -ABSENT_STATE_VALUE = "absent" - -CLIENT_TOKEN_TYPE_VALUE = "client" -MANAGEMENT_TOKEN_TYPE_VALUE = "management" - -REMOVE_OPERATION = "remove" -UPDATE_OPERATION = "update" -CREATE_OPERATION = "create" - -_POLICY_JSON_PROPERTY = "policy" -_RULES_JSON_PROPERTY = "Rules" -_TOKEN_JSON_PROPERTY = "ID" -_TOKEN_TYPE_JSON_PROPERTY = "Type" -_NAME_JSON_PROPERTY = "Name" 
-_POLICY_YML_PROPERTY = "policy" -_POLICY_HCL_PROPERTY = "policy" - -_ARGUMENT_SPEC = { - MANAGEMENT_PARAMETER_NAME: dict(required=True, no_log=True), - HOST_PARAMETER_NAME: dict(default='localhost'), - SCHEME_PARAMETER_NAME: dict(default='http'), - VALIDATE_CERTS_PARAMETER_NAME: dict(type='bool', default=True), - NAME_PARAMETER_NAME: dict(), - PORT_PARAMETER_NAME: dict(default=8500, type='int'), - RULES_PARAMETER_NAME: dict(type='list', elements='dict'), - STATE_PARAMETER_NAME: dict(default=PRESENT_STATE_VALUE, choices=[PRESENT_STATE_VALUE, ABSENT_STATE_VALUE]), - TOKEN_PARAMETER_NAME: dict(no_log=False), - TOKEN_TYPE_PARAMETER_NAME: dict(choices=[CLIENT_TOKEN_TYPE_VALUE, MANAGEMENT_TOKEN_TYPE_VALUE], - default=CLIENT_TOKEN_TYPE_VALUE) -} - - -def set_acl(consul_client, configuration): - """ - Sets an ACL based on the given configuration. - :param consul_client: the consul client - :param configuration: the run configuration - :return: the output of setting the ACL - """ - acls_as_json = decode_acls_as_json(consul_client.acl.list()) - existing_acls_mapped_by_name = dict((acl.name, acl) for acl in acls_as_json if acl.name is not None) - existing_acls_mapped_by_token = dict((acl.token, acl) for acl in acls_as_json) - if None in existing_acls_mapped_by_token: - raise AssertionError("expecting ACL list to be associated to a token: %s" % - existing_acls_mapped_by_token[None]) - - if configuration.token is None and configuration.name and configuration.name in existing_acls_mapped_by_name: - # No token but name given so can get token from name - configuration.token = existing_acls_mapped_by_name[configuration.name].token - - if configuration.token and configuration.token in existing_acls_mapped_by_token: - return update_acl(consul_client, configuration) - else: - if configuration.token in existing_acls_mapped_by_token: - raise AssertionError() - if configuration.name in existing_acls_mapped_by_name: - raise AssertionError() - return create_acl(consul_client, 
configuration) - - -def update_acl(consul_client, configuration): - """ - Updates an ACL. - :param consul_client: the consul client - :param configuration: the run configuration - :return: the output of the update - """ - existing_acl = load_acl_with_token(consul_client, configuration.token) - changed = existing_acl.rules != configuration.rules - - if changed: - name = configuration.name if configuration.name is not None else existing_acl.name - rules_as_hcl = encode_rules_as_hcl_string(configuration.rules) - updated_token = consul_client.acl.update( - configuration.token, name=name, type=configuration.token_type, rules=rules_as_hcl) - if updated_token != configuration.token: - raise AssertionError() - - return Output(changed=changed, token=configuration.token, rules=configuration.rules, operation=UPDATE_OPERATION) - - -def create_acl(consul_client, configuration): - """ - Creates an ACL. - :param consul_client: the consul client - :param configuration: the run configuration - :return: the output of the creation - """ - rules_as_hcl = encode_rules_as_hcl_string(configuration.rules) if len(configuration.rules) > 0 else None - token = consul_client.acl.create( - name=configuration.name, type=configuration.token_type, rules=rules_as_hcl, acl_id=configuration.token) - rules = configuration.rules - return Output(changed=True, token=token, rules=rules, operation=CREATE_OPERATION) - - -def remove_acl(consul, configuration): - """ - Removes an ACL. - :param consul: the consul client - :param configuration: the run configuration - :return: the output of the removal - """ - token = configuration.token - changed = consul.acl.info(token) is not None - if changed: - consul.acl.destroy(token) - return Output(changed=changed, token=token, operation=REMOVE_OPERATION) - - -def load_acl_with_token(consul, token): - """ - Loads the ACL with the given token (token == rule ID). 
- :param consul: the consul client - :param token: the ACL "token"/ID (not name) - :return: the ACL associated to the given token - :exception ConsulACLTokenNotFoundException: raised if the given token does not exist - """ - acl_as_json = consul.acl.info(token) - if acl_as_json is None: - raise ConsulACLNotFoundException(token) - return decode_acl_as_json(acl_as_json) - - -def encode_rules_as_hcl_string(rules): - """ - Converts the given rules into the equivalent HCL (string) representation. - :param rules: the rules - :return: the equivalent HCL (string) representation of the rules. Will be None if there is no rules (see internal - note for justification) - """ - if len(rules) == 0: - # Note: empty string is not valid HCL according to `hcl.load` however, the ACL `Rule` property will be an empty - # string if there is no rules... - return None - rules_as_hcl = "" - for rule in rules: - rules_as_hcl += encode_rule_as_hcl_string(rule) - return rules_as_hcl - - -def encode_rule_as_hcl_string(rule): - """ - Converts the given rule into the equivalent HCL (string) representation. - :param rule: the rule - :return: the equivalent HCL (string) representation of the rule - """ - if rule.pattern is not None: - return '%s "%s" {\n %s = "%s"\n}\n' % (rule.scope, rule.pattern, _POLICY_HCL_PROPERTY, rule.policy) - else: - return '%s = "%s"\n' % (rule.scope, rule.policy) - - -def decode_rules_as_hcl_string(rules_as_hcl): - """ - Converts the given HCL (string) representation of rules into a list of rule domain models. - :param rules_as_hcl: the HCL (string) representation of a collection of rules - :return: the equivalent domain model to the given rules - """ - rules_as_hcl = to_text(rules_as_hcl) - rules_as_json = hcl.loads(rules_as_hcl) - return decode_rules_as_json(rules_as_json) - - -def decode_rules_as_json(rules_as_json): - """ - Converts the given JSON representation of rules into a list of rule domain models. 
- :param rules_as_json: the JSON representation of a collection of rules - :return: the equivalent domain model to the given rules - """ - rules = RuleCollection() - for scope in rules_as_json: - if not isinstance(rules_as_json[scope], dict): - rules.add(Rule(scope, rules_as_json[scope])) - else: - for pattern, policy in rules_as_json[scope].items(): - rules.add(Rule(scope, policy[_POLICY_JSON_PROPERTY], pattern)) - return rules - - -def encode_rules_as_json(rules): - """ - Converts the given rules into the equivalent JSON representation according to the documentation: - https://www.consul.io/docs/guides/acl.html#rule-specification. - :param rules: the rules - :return: JSON representation of the given rules - """ - rules_as_json = defaultdict(dict) - for rule in rules: - if rule.pattern is not None: - if rule.pattern in rules_as_json[rule.scope]: - raise AssertionError() - rules_as_json[rule.scope][rule.pattern] = { - _POLICY_JSON_PROPERTY: rule.policy - } - else: - if rule.scope in rules_as_json: - raise AssertionError() - rules_as_json[rule.scope] = rule.policy - return rules_as_json - - -def decode_rules_as_yml(rules_as_yml): - """ - Converts the given YAML representation of rules into a list of rule domain models. - :param rules_as_yml: the YAML representation of a collection of rules - :return: the equivalent domain model to the given rules - """ - rules = RuleCollection() - if rules_as_yml: - for rule_as_yml in rules_as_yml: - rule_added = False - for scope in RULE_SCOPES: - if scope in rule_as_yml: - if rule_as_yml[scope] is None: - raise ValueError("Rule for '%s' does not have a value associated to the scope" % scope) - policy = rule_as_yml[_POLICY_YML_PROPERTY] if _POLICY_YML_PROPERTY in rule_as_yml \ - else rule_as_yml[scope] - pattern = rule_as_yml[scope] if _POLICY_YML_PROPERTY in rule_as_yml else None - rules.add(Rule(scope, policy, pattern)) - rule_added = True - break - if not rule_added: - raise ValueError("A rule requires one of %s and a policy." 
% ('/'.join(RULE_SCOPES))) - return rules - - -def decode_acl_as_json(acl_as_json): - """ - Converts the given JSON representation of an ACL into the equivalent domain model. - :param acl_as_json: the JSON representation of an ACL - :return: the equivalent domain model to the given ACL - """ - rules_as_hcl = acl_as_json[_RULES_JSON_PROPERTY] - rules = decode_rules_as_hcl_string(acl_as_json[_RULES_JSON_PROPERTY]) if rules_as_hcl.strip() != "" \ - else RuleCollection() - return ACL( - rules=rules, - token_type=acl_as_json[_TOKEN_TYPE_JSON_PROPERTY], - token=acl_as_json[_TOKEN_JSON_PROPERTY], - name=acl_as_json[_NAME_JSON_PROPERTY] - ) - - -def decode_acls_as_json(acls_as_json): - """ - Converts the given JSON representation of ACLs into a list of ACL domain models. - :param acls_as_json: the JSON representation of a collection of ACLs - :return: list of equivalent domain models for the given ACLs (order not guaranteed to be the same) - """ - return [decode_acl_as_json(acl_as_json) for acl_as_json in acls_as_json] - - -class ConsulACLNotFoundException(Exception): - """ - Exception raised if an ACL with is not found. - """ - - -class Configuration: - """ - Configuration for this module. - """ - - def __init__(self, management_token=None, host=None, scheme=None, validate_certs=None, name=None, port=None, - rules=None, state=None, token=None, token_type=None): - self.management_token = management_token # type: str - self.host = host # type: str - self.scheme = scheme # type: str - self.validate_certs = validate_certs # type: bool - self.name = name # type: str - self.port = port # type: int - self.rules = rules # type: RuleCollection - self.state = state # type: str - self.token = token # type: str - self.token_type = token_type # type: str - - -class Output: - """ - Output of an action of this module. 
- """ - - def __init__(self, changed=None, token=None, rules=None, operation=None): - self.changed = changed # type: bool - self.token = token # type: str - self.rules = rules # type: RuleCollection - self.operation = operation # type: str - - -class ACL: - """ - Consul ACL. See: https://www.consul.io/docs/guides/acl.html. - """ - - def __init__(self, rules, token_type, token, name): - self.rules = rules - self.token_type = token_type - self.token = token - self.name = name - - def __eq__(self, other): - return other \ - and isinstance(other, self.__class__) \ - and self.rules == other.rules \ - and self.token_type == other.token_type \ - and self.token == other.token \ - and self.name == other.name - - def __hash__(self): - return hash(self.rules) ^ hash(self.token_type) ^ hash(self.token) ^ hash(self.name) - - -class Rule: - """ - ACL rule. See: https://www.consul.io/docs/guides/acl.html#acl-rules-and-scope. - """ - - def __init__(self, scope, policy, pattern=None): - self.scope = scope - self.policy = policy - self.pattern = pattern - - def __eq__(self, other): - return isinstance(other, self.__class__) \ - and self.scope == other.scope \ - and self.policy == other.policy \ - and self.pattern == other.pattern - - def __ne__(self, other): - return not self.__eq__(other) - - def __hash__(self): - return (hash(self.scope) ^ hash(self.policy)) ^ hash(self.pattern) - - def __str__(self): - return encode_rule_as_hcl_string(self) - - -class RuleCollection: - """ - Collection of ACL rules, which are part of a Consul ACL. 
- """ - - def __init__(self): - self._rules = {} - for scope in RULE_SCOPES: - self._rules[scope] = {} - - def __iter__(self): - all_rules = [] - for scope, pattern_keyed_rules in self._rules.items(): - for pattern, rule in pattern_keyed_rules.items(): - all_rules.append(rule) - return iter(all_rules) - - def __len__(self): - count = 0 - for scope in RULE_SCOPES: - count += len(self._rules[scope]) - return count - - def __eq__(self, other): - return isinstance(other, self.__class__) \ - and set(self) == set(other) - - def __ne__(self, other): - return not self.__eq__(other) - - def __str__(self): - return encode_rules_as_hcl_string(self) - - def add(self, rule): - """ - Adds the given rule to this collection. - :param rule: model of a rule - :raises ValueError: raised if there already exists a rule for a given scope and pattern - """ - if rule.pattern in self._rules[rule.scope]: - patten_info = " and pattern '%s'" % rule.pattern if rule.pattern is not None else "" - raise ValueError("Duplicate rule for scope '%s'%s" % (rule.scope, patten_info)) - self._rules[rule.scope][rule.pattern] = rule - - -def get_consul_client(configuration): - """ - Gets a Consul client for the given configuration. - - Does not check if the Consul client can connect. - :param configuration: the run configuration - :return: Consul client - """ - token = configuration.management_token - if token is None: - token = configuration.token - if token is None: - raise AssertionError("Expecting the management token to always be set") - return consul.Consul(host=configuration.host, port=configuration.port, scheme=configuration.scheme, - verify=configuration.validate_certs, token=token) - - -def check_dependencies(): - """ - Checks that the required dependencies have been imported. - :exception ImportError: if it is detected that any of the required dependencies have not been imported - """ - if not python_consul_installed: - raise ImportError("python-consul required for this module. 
" - "See: https://python-consul.readthedocs.io/en/latest/#installation") - - if not pyhcl_installed: - raise ImportError("pyhcl required for this module. " - "See: https://pypi.org/project/pyhcl/") - - if not has_requests: - raise ImportError("requests required for this module. See https://pypi.org/project/requests/") - - -def main(): - """ - Main method. - """ - module = AnsibleModule(_ARGUMENT_SPEC, supports_check_mode=False) - - try: - check_dependencies() - except ImportError as e: - module.fail_json(msg=str(e)) - - configuration = Configuration( - management_token=module.params.get(MANAGEMENT_PARAMETER_NAME), - host=module.params.get(HOST_PARAMETER_NAME), - scheme=module.params.get(SCHEME_PARAMETER_NAME), - validate_certs=module.params.get(VALIDATE_CERTS_PARAMETER_NAME), - name=module.params.get(NAME_PARAMETER_NAME), - port=module.params.get(PORT_PARAMETER_NAME), - rules=decode_rules_as_yml(module.params.get(RULES_PARAMETER_NAME)), - state=module.params.get(STATE_PARAMETER_NAME), - token=module.params.get(TOKEN_PARAMETER_NAME), - token_type=module.params.get(TOKEN_TYPE_PARAMETER_NAME) - ) - consul_client = get_consul_client(configuration) - - try: - if configuration.state == PRESENT_STATE_VALUE: - output = set_acl(consul_client, configuration) - else: - output = remove_acl(consul_client, configuration) - except ConnectionError as e: - module.fail_json(msg='Could not connect to consul agent at %s:%s, error was %s' % ( - configuration.host, configuration.port, str(e))) - raise - - return_values = dict(changed=output.changed, token=output.token, operation=output.operation) - if output.rules is not None: - return_values["rules"] = encode_rules_as_json(output.rules) - module.exit_json(**return_values) - - -if __name__ == "__main__": - main() diff --git a/plugins/modules/clustering/consul/consul_session.py b/plugins/modules/clustering/consul/consul_session.py deleted file mode 100644 index 7ace1f89a8..0000000000 --- 
a/plugins/modules/clustering/consul/consul_session.py +++ /dev/null @@ -1,277 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Copyright: (c) 2015, Steve Gargan -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - -DOCUMENTATION = ''' -module: consul_session -short_description: Manipulate consul sessions -description: - - Allows the addition, modification and deletion of sessions in a consul - cluster. These sessions can then be used in conjunction with key value pairs - to implement distributed locks. In depth documentation for working with - sessions can be found at http://www.consul.io/docs/internals/sessions.html -requirements: - - python-consul - - requests -author: -- Steve Gargan (@sgargan) -options: - id: - description: - - ID of the session, required when I(state) is either C(info) or - C(remove). - type: str - state: - description: - - Whether the session should be present i.e. created if it doesn't - exist, or absent, removed if present. If created, the I(id) for the - session is returned in the output. If C(absent), I(id) is - required to remove the session. Info for a single session, all the - sessions for a node or all available sessions can be retrieved by - specifying C(info), C(node) or C(list) for the I(state); for C(node) - or C(info), the node I(name) or session I(id) is required as parameter. - choices: [ absent, info, list, node, present ] - type: str - default: present - name: - description: - - The name that should be associated with the session. Required when - I(state=node) is used. - type: str - delay: - description: - - The optional lock delay that can be attached to the session when it - is created. Locks for invalidated sessions ar blocked from being - acquired until this delay has expired. Durations are in seconds. 
- type: int - default: 15 - node: - description: - - The name of the node that with which the session will be associated. - by default this is the name of the agent. - type: str - datacenter: - description: - - The name of the datacenter in which the session exists or should be - created. - type: str - checks: - description: - - Checks that will be used to verify the session health. If - all the checks fail, the session will be invalidated and any locks - associated with the session will be release and can be acquired once - the associated lock delay has expired. - type: list - elements: str - host: - description: - - The host of the consul agent defaults to localhost. - type: str - default: localhost - port: - description: - - The port on which the consul agent is running. - type: int - default: 8500 - scheme: - description: - - The protocol scheme on which the consul agent is running. - type: str - default: http - validate_certs: - description: - - Whether to verify the TLS certificate of the consul agent. - type: bool - default: True - behavior: - description: - - The optional behavior that can be attached to the session when it - is created. This controls the behavior when a session is invalidated. 
- choices: [ delete, release ] - type: str - default: release -''' - -EXAMPLES = ''' -- name: Register basic session with consul - community.general.consul_session: - name: session1 - -- name: Register a session with an existing check - community.general.consul_session: - name: session_with_check - checks: - - existing_check_name - -- name: Register a session with lock_delay - community.general.consul_session: - name: session_with_delay - delay: 20s - -- name: Retrieve info about session by id - community.general.consul_session: - id: session_id - state: info - -- name: Retrieve active sessions - community.general.consul_session: - state: list -''' - -try: - import consul - from requests.exceptions import ConnectionError - python_consul_installed = True -except ImportError: - python_consul_installed = False - -from ansible.module_utils.basic import AnsibleModule - - -def execute(module): - - state = module.params.get('state') - - if state in ['info', 'list', 'node']: - lookup_sessions(module) - elif state == 'present': - update_session(module) - else: - remove_session(module) - - -def lookup_sessions(module): - - datacenter = module.params.get('datacenter') - - state = module.params.get('state') - consul_client = get_consul_api(module) - try: - if state == 'list': - sessions_list = consul_client.session.list(dc=datacenter) - # Ditch the index, this can be grabbed from the results - if sessions_list and len(sessions_list) >= 2: - sessions_list = sessions_list[1] - module.exit_json(changed=True, - sessions=sessions_list) - elif state == 'node': - node = module.params.get('node') - sessions = consul_client.session.node(node, dc=datacenter) - module.exit_json(changed=True, - node=node, - sessions=sessions) - elif state == 'info': - session_id = module.params.get('id') - - session_by_id = consul_client.session.info(session_id, dc=datacenter) - module.exit_json(changed=True, - session_id=session_id, - sessions=session_by_id) - - except Exception as e: - 
module.fail_json(msg="Could not retrieve session info %s" % e) - - -def update_session(module): - - name = module.params.get('name') - delay = module.params.get('delay') - checks = module.params.get('checks') - datacenter = module.params.get('datacenter') - node = module.params.get('node') - behavior = module.params.get('behavior') - - consul_client = get_consul_api(module) - - try: - session = consul_client.session.create( - name=name, - behavior=behavior, - node=node, - lock_delay=delay, - dc=datacenter, - checks=checks - ) - module.exit_json(changed=True, - session_id=session, - name=name, - behavior=behavior, - delay=delay, - checks=checks, - node=node) - except Exception as e: - module.fail_json(msg="Could not create/update session %s" % e) - - -def remove_session(module): - session_id = module.params.get('id') - - consul_client = get_consul_api(module) - - try: - consul_client.session.destroy(session_id) - - module.exit_json(changed=True, - session_id=session_id) - except Exception as e: - module.fail_json(msg="Could not remove session with id '%s' %s" % ( - session_id, e)) - - -def get_consul_api(module): - return consul.Consul(host=module.params.get('host'), - port=module.params.get('port'), - scheme=module.params.get('scheme'), - verify=module.params.get('validate_certs')) - - -def test_dependencies(module): - if not python_consul_installed: - module.fail_json(msg="python-consul required for this module. 
" - "see https://python-consul.readthedocs.io/en/latest/#installation") - - -def main(): - argument_spec = dict( - checks=dict(type='list', elements='str'), - delay=dict(type='int', default='15'), - behavior=dict(type='str', default='release', choices=['release', 'delete']), - host=dict(type='str', default='localhost'), - port=dict(type='int', default=8500), - scheme=dict(type='str', default='http'), - validate_certs=dict(type='bool', default=True), - id=dict(type='str'), - name=dict(type='str'), - node=dict(type='str'), - state=dict(type='str', default='present', choices=['absent', 'info', 'list', 'node', 'present']), - datacenter=dict(type='str'), - ) - - module = AnsibleModule( - argument_spec=argument_spec, - required_if=[ - ('state', 'node', ['name']), - ('state', 'info', ['id']), - ('state', 'remove', ['id']), - ], - supports_check_mode=False - ) - - test_dependencies(module) - - try: - execute(module) - except ConnectionError as e: - module.fail_json(msg='Could not connect to consul agent at %s:%s, error was %s' % ( - module.params.get('host'), module.params.get('port'), e)) - except Exception as e: - module.fail_json(msg=str(e)) - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/clustering/nomad/nomad_job_info.py b/plugins/modules/clustering/nomad/nomad_job_info.py deleted file mode 100644 index 3d15712fda..0000000000 --- a/plugins/modules/clustering/nomad/nomad_job_info.py +++ /dev/null @@ -1,344 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# (c) 2020, FERREIRA Christophe -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - -DOCUMENTATION = ''' ---- -module: nomad_job_info -author: FERREIRA Christophe (@chris93111) -version_added: "1.3.0" -short_description: Get Nomad Jobs info -description: - - Get info for one Nomad job. - - List Nomad jobs. 
-requirements: - - python-nomad -extends_documentation_fragment: - - community.general.nomad -options: - name: - description: - - Name of job for Get info. - - If not specified, lists all jobs. - type: str -notes: - - C(check_mode) is supported. -seealso: - - name: Nomad jobs documentation - description: Complete documentation for Nomad API jobs. - link: https://www.nomadproject.io/api-docs/jobs/ -''' - -EXAMPLES = ''' -- name: Get info for job awx - community.general.nomad_job_info: - host: localhost - name: awx - register: result - -- name: List Nomad jobs - community.general.nomad_job_info: - host: localhost - register: result - -''' - -RETURN = ''' -result: - description: List with dictionary contains jobs info - returned: success - type: list - sample: [ - { - "Affinities": null, - "AllAtOnce": false, - "Constraints": null, - "ConsulToken": "", - "CreateIndex": 13, - "Datacenters": [ - "dc1" - ], - "Dispatched": false, - "ID": "example", - "JobModifyIndex": 13, - "Meta": null, - "ModifyIndex": 13, - "Multiregion": null, - "Name": "example", - "Namespace": "default", - "NomadTokenID": "", - "ParameterizedJob": null, - "ParentID": "", - "Payload": null, - "Periodic": null, - "Priority": 50, - "Region": "global", - "Spreads": null, - "Stable": false, - "Status": "pending", - "StatusDescription": "", - "Stop": false, - "SubmitTime": 1602244370615307000, - "TaskGroups": [ - { - "Affinities": null, - "Constraints": null, - "Count": 1, - "EphemeralDisk": { - "Migrate": false, - "SizeMB": 300, - "Sticky": false - }, - "Meta": null, - "Migrate": { - "HealthCheck": "checks", - "HealthyDeadline": 300000000000, - "MaxParallel": 1, - "MinHealthyTime": 10000000000 - }, - "Name": "cache", - "Networks": null, - "ReschedulePolicy": { - "Attempts": 0, - "Delay": 30000000000, - "DelayFunction": "exponential", - "Interval": 0, - "MaxDelay": 3600000000000, - "Unlimited": true - }, - "RestartPolicy": { - "Attempts": 3, - "Delay": 15000000000, - "Interval": 1800000000000, - "Mode": 
"fail" - }, - "Scaling": null, - "Services": null, - "ShutdownDelay": null, - "Spreads": null, - "StopAfterClientDisconnect": null, - "Tasks": [ - { - "Affinities": null, - "Artifacts": null, - "CSIPluginConfig": null, - "Config": { - "image": "redis:3.2", - "port_map": [ - { - "db": 6379.0 - } - ] - }, - "Constraints": null, - "DispatchPayload": null, - "Driver": "docker", - "Env": null, - "KillSignal": "", - "KillTimeout": 5000000000, - "Kind": "", - "Leader": false, - "Lifecycle": null, - "LogConfig": { - "MaxFileSizeMB": 10, - "MaxFiles": 10 - }, - "Meta": null, - "Name": "redis", - "Resources": { - "CPU": 500, - "Devices": null, - "DiskMB": 0, - "IOPS": 0, - "MemoryMB": 256, - "Networks": [ - { - "CIDR": "", - "DNS": null, - "Device": "", - "DynamicPorts": [ - { - "HostNetwork": "default", - "Label": "db", - "To": 0, - "Value": 0 - } - ], - "IP": "", - "MBits": 10, - "Mode": "", - "ReservedPorts": null - } - ] - }, - "RestartPolicy": { - "Attempts": 3, - "Delay": 15000000000, - "Interval": 1800000000000, - "Mode": "fail" - }, - "Services": [ - { - "AddressMode": "auto", - "CanaryMeta": null, - "CanaryTags": null, - "Checks": [ - { - "AddressMode": "", - "Args": null, - "CheckRestart": null, - "Command": "", - "Expose": false, - "FailuresBeforeCritical": 0, - "GRPCService": "", - "GRPCUseTLS": false, - "Header": null, - "InitialStatus": "", - "Interval": 10000000000, - "Method": "", - "Name": "alive", - "Path": "", - "PortLabel": "", - "Protocol": "", - "SuccessBeforePassing": 0, - "TLSSkipVerify": false, - "TaskName": "", - "Timeout": 2000000000, - "Type": "tcp" - } - ], - "Connect": null, - "EnableTagOverride": false, - "Meta": null, - "Name": "redis-cache", - "PortLabel": "db", - "Tags": [ - "global", - "cache" - ], - "TaskName": "" - } - ], - "ShutdownDelay": 0, - "Templates": null, - "User": "", - "Vault": null, - "VolumeMounts": null - } - ], - "Update": { - "AutoPromote": false, - "AutoRevert": false, - "Canary": 0, - "HealthCheck": "checks", - 
"HealthyDeadline": 180000000000, - "MaxParallel": 1, - "MinHealthyTime": 10000000000, - "ProgressDeadline": 600000000000, - "Stagger": 30000000000 - }, - "Volumes": null - } - ], - "Type": "service", - "Update": { - "AutoPromote": false, - "AutoRevert": false, - "Canary": 0, - "HealthCheck": "", - "HealthyDeadline": 0, - "MaxParallel": 1, - "MinHealthyTime": 0, - "ProgressDeadline": 0, - "Stagger": 30000000000 - }, - "VaultNamespace": "", - "VaultToken": "", - "Version": 0 - } - ] - -''' - - -import os -import json - -from ansible.module_utils.basic import AnsibleModule, missing_required_lib -from ansible.module_utils.common.text.converters import to_native - -import_nomad = None -try: - import nomad - import_nomad = True -except ImportError: - import_nomad = False - - -def run(): - module = AnsibleModule( - argument_spec=dict( - host=dict(required=True, type='str'), - use_ssl=dict(type='bool', default=True), - timeout=dict(type='int', default=5), - validate_certs=dict(type='bool', default=True), - client_cert=dict(type='path'), - client_key=dict(type='path'), - namespace=dict(type='str'), - name=dict(type='str'), - token=dict(type='str', no_log=True) - ), - supports_check_mode=True - ) - - if not import_nomad: - module.fail_json(msg=missing_required_lib("python-nomad")) - - certificate_ssl = (module.params.get('client_cert'), module.params.get('client_key')) - - nomad_client = nomad.Nomad( - host=module.params.get('host'), - secure=module.params.get('use_ssl'), - timeout=module.params.get('timeout'), - verify=module.params.get('validate_certs'), - cert=certificate_ssl, - namespace=module.params.get('namespace'), - token=module.params.get('token') - ) - - changed = False - result = list() - try: - job_list = nomad_client.jobs.get_jobs() - for job in job_list: - result.append(nomad_client.job.get_job(job.get('ID'))) - except Exception as e: - module.fail_json(msg=to_native(e)) - - if module.params.get('name'): - filter = list() - try: - for job in result: - if 
job.get('ID') == module.params.get('name'): - filter.append(job) - result = filter - if not filter: - module.fail_json(msg="Couldn't find Job with id " + str(module.params.get('name'))) - except Exception as e: - module.fail_json(msg=to_native(e)) - - module.exit_json(changed=changed, result=result) - - -def main(): - - run() - - -if __name__ == "__main__": - main() diff --git a/plugins/modules/clustering/pacemaker_cluster.py b/plugins/modules/clustering/pacemaker_cluster.py deleted file mode 100644 index 4ec6010f53..0000000000 --- a/plugins/modules/clustering/pacemaker_cluster.py +++ /dev/null @@ -1,222 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Copyright: (c) 2016, Mathieu Bultel -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - -DOCUMENTATION = ''' ---- -module: pacemaker_cluster -short_description: Manage pacemaker clusters -author: -- Mathieu Bultel (@matbu) -description: - - This module can manage a pacemaker cluster and nodes from Ansible using - the pacemaker cli. -options: - state: - description: - - Indicate desired state of the cluster - choices: [ cleanup, offline, online, restart ] - type: str - node: - description: - - Specify which node of the cluster you want to manage. None == the - cluster status itself, 'all' == check the status of all nodes. 
- type: str - timeout: - description: - - Timeout when the module should considered that the action has failed - default: 300 - type: int - force: - description: - - Force the change of the cluster state - type: bool - default: 'yes' -''' -EXAMPLES = ''' ---- -- name: Set cluster Online - hosts: localhost - gather_facts: no - tasks: - - name: Get cluster state - community.general.pacemaker_cluster: - state: online -''' - -RETURN = ''' -changed: - description: True if the cluster state has changed - type: bool - returned: always -out: - description: The output of the current state of the cluster. It return a - list of the nodes state. - type: str - sample: 'out: [[" overcloud-controller-0", " Online"]]}' - returned: always -rc: - description: exit code of the module - type: bool - returned: always -''' - -import time - -from ansible.module_utils.basic import AnsibleModule - - -_PCS_CLUSTER_DOWN = "Error: cluster is not currently running on this node" - - -def get_cluster_status(module): - cmd = "pcs cluster status" - rc, out, err = module.run_command(cmd) - if out in _PCS_CLUSTER_DOWN: - return 'offline' - else: - return 'online' - - -def get_node_status(module, node='all'): - if node == 'all': - cmd = "pcs cluster pcsd-status %s" % node - else: - cmd = "pcs cluster pcsd-status" - rc, out, err = module.run_command(cmd) - if rc == 1: - module.fail_json(msg="Command execution failed.\nCommand: `%s`\nError: %s" % (cmd, err)) - status = [] - for o in out.splitlines(): - status.append(o.split(':')) - return status - - -def clean_cluster(module, timeout): - cmd = "pcs resource cleanup" - rc, out, err = module.run_command(cmd) - if rc == 1: - module.fail_json(msg="Command execution failed.\nCommand: `%s`\nError: %s" % (cmd, err)) - - -def set_cluster(module, state, timeout, force): - if state == 'online': - cmd = "pcs cluster start" - if state == 'offline': - cmd = "pcs cluster stop" - if force: - cmd = "%s --force" % cmd - rc, out, err = module.run_command(cmd) - if rc == 
1: - module.fail_json(msg="Command execution failed.\nCommand: `%s`\nError: %s" % (cmd, err)) - - t = time.time() - ready = False - while time.time() < t + timeout: - cluster_state = get_cluster_status(module) - if cluster_state == state: - ready = True - break - if not ready: - module.fail_json(msg="Failed to set the state `%s` on the cluster\n" % (state)) - - -def set_node(module, state, timeout, force, node='all'): - # map states - if state == 'online': - cmd = "pcs cluster start" - if state == 'offline': - cmd = "pcs cluster stop" - if force: - cmd = "%s --force" % cmd - - nodes_state = get_node_status(module, node) - for node in nodes_state: - if node[1].strip().lower() != state: - cmd = "%s %s" % (cmd, node[0].strip()) - rc, out, err = module.run_command(cmd) - if rc == 1: - module.fail_json(msg="Command execution failed.\nCommand: `%s`\nError: %s" % (cmd, err)) - - t = time.time() - ready = False - while time.time() < t + timeout: - nodes_state = get_node_status(module) - for node in nodes_state: - if node[1].strip().lower() == state: - ready = True - break - if not ready: - module.fail_json(msg="Failed to set the state `%s` on the cluster\n" % (state)) - - -def main(): - argument_spec = dict( - state=dict(type='str', choices=['online', 'offline', 'restart', 'cleanup']), - node=dict(type='str'), - timeout=dict(type='int', default=300), - force=dict(type='bool', default=True), - ) - - module = AnsibleModule( - argument_spec, - supports_check_mode=True, - ) - changed = False - state = module.params['state'] - node = module.params['node'] - force = module.params['force'] - timeout = module.params['timeout'] - - if state in ['online', 'offline']: - # Get cluster status - if node is None: - cluster_state = get_cluster_status(module) - if cluster_state == state: - module.exit_json(changed=changed, out=cluster_state) - else: - set_cluster(module, state, timeout, force) - cluster_state = get_cluster_status(module) - if cluster_state == state: - 
module.exit_json(changed=True, out=cluster_state) - else: - module.fail_json(msg="Fail to bring the cluster %s" % state) - else: - cluster_state = get_node_status(module, node) - # Check cluster state - for node_state in cluster_state: - if node_state[1].strip().lower() == state: - module.exit_json(changed=changed, out=cluster_state) - else: - # Set cluster status if needed - set_cluster(module, state, timeout, force) - cluster_state = get_node_status(module, node) - module.exit_json(changed=True, out=cluster_state) - - if state in ['restart']: - set_cluster(module, 'offline', timeout, force) - cluster_state = get_cluster_status(module) - if cluster_state == 'offline': - set_cluster(module, 'online', timeout, force) - cluster_state = get_cluster_status(module) - if cluster_state == 'online': - module.exit_json(changed=True, out=cluster_state) - else: - module.fail_json(msg="Failed during the restart of the cluster, the cluster can't be started") - else: - module.fail_json(msg="Failed during the restart of the cluster, the cluster can't be stopped") - - if state in ['cleanup']: - clean_cluster(module, timeout) - cluster_state = get_cluster_status(module) - module.exit_json(changed=True, - out=cluster_state) - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/remote_management/cobbler/cobbler_sync.py b/plugins/modules/cobbler_sync.py similarity index 67% rename from plugins/modules/remote_management/cobbler/cobbler_sync.py rename to plugins/modules/cobbler_sync.py index 157208216b..158f6ee3d6 100644 --- a/plugins/modules/remote_management/cobbler/cobbler_sync.py +++ b/plugins/modules/cobbler_sync.py @@ -1,79 +1,85 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- -# Copyright: (c) 2018, Dag Wieers (dagwieers) -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# Copyright (c) 2018, Dag Wieers (dagwieers) +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or 
https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = r''' ---- +DOCUMENTATION = r""" module: cobbler_sync short_description: Sync Cobbler description: -- Sync Cobbler to commit changes. + - Sync Cobbler to commit changes. +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: none options: host: description: - - The name or IP address of the Cobbler system. + - The name or IP address of the Cobbler system. default: 127.0.0.1 type: str port: description: - - Port number to be used for REST connection. - - The default value depends on parameter C(use_ssl). + - Port number to be used for REST connection. + - The default value depends on parameter O(use_ssl). type: int username: description: - - The username to log in to Cobbler. + - The username to log in to Cobbler. default: cobbler type: str password: description: - - The password to log in to Cobbler. + - The password to log in to Cobbler. type: str use_ssl: description: - - If C(no), an HTTP connection will be used instead of the default HTTPS connection. + - If V(false), an HTTP connection is used instead of the default HTTPS connection. type: bool - default: 'yes' + default: true validate_certs: description: - - If C(no), SSL certificates will not be validated. - - This should only set to C(no) when used on personally controlled sites using self-signed certificates. + - If V(false), SSL certificates are not validated. + - This should only set to V(false) when used on personally controlled sites using self-signed certificates. type: bool - default: 'yes' + default: true author: -- Dag Wieers (@dagwieers) + - Dag Wieers (@dagwieers) todo: notes: -- Concurrently syncing Cobbler is bound to fail with weird errors. -- On python 2.7.8 and older (i.e. 
on RHEL7) you may need to tweak the python behaviour to disable certificate validation. - More information at L(Certificate verification in Python standard library HTTP clients,https://access.redhat.com/articles/2039753). -''' + - Concurrently syncing Cobbler is bound to fail with weird errors. +""" -EXAMPLES = r''' +EXAMPLES = r""" - name: Commit Cobbler changes community.general.cobbler_sync: host: cobbler01 username: cobbler password: MySuperSecureP4sswOrd - run_once: yes + run_once: true delegate_to: localhost -''' +""" -RETURN = r''' +RETURN = r""" # Default return values -''' +""" -import datetime import ssl +import xmlrpc.client as xmlrpc_client from ansible.module_utils.basic import AnsibleModule -from ansible.module_utils.six.moves import xmlrpc_client from ansible.module_utils.common.text.converters import to_text +from ansible_collections.community.general.plugins.module_utils.datetime import ( + now, +) + def main(): module = AnsibleModule( @@ -102,7 +108,7 @@ def main(): changed=True, ) - start = datetime.datetime.utcnow() + start = now() ssl_context = None if not validate_certs: @@ -134,7 +140,7 @@ def main(): except Exception as e: module.fail_json(msg="Failed to sync Cobbler. 
{error}".format(error=to_text(e))) - elapsed = datetime.datetime.utcnow() - start + elapsed = now() - start module.exit_json(elapsed=elapsed.seconds, **result) diff --git a/plugins/modules/remote_management/cobbler/cobbler_system.py b/plugins/modules/cobbler_system.py similarity index 77% rename from plugins/modules/remote_management/cobbler/cobbler_system.py rename to plugins/modules/cobbler_system.py index e97be01239..80a45854c9 100644 --- a/plugins/modules/remote_management/cobbler/cobbler_system.py +++ b/plugins/modules/cobbler_system.py @@ -1,82 +1,85 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- -# Copyright: (c) 2018, Dag Wieers (dagwieers) -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# Copyright (c) 2018, Dag Wieers (dagwieers) +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = r''' ---- +DOCUMENTATION = r""" module: cobbler_system short_description: Manage system objects in Cobbler description: -- Add, modify or remove systems in Cobbler + - Add, modify or remove systems in Cobbler. +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: full options: host: description: - - The name or IP address of the Cobbler system. + - The name or IP address of the Cobbler system. default: 127.0.0.1 type: str port: description: - - Port number to be used for REST connection. - - The default value depends on parameter C(use_ssl). + - Port number to be used for REST connection. + - The default value depends on parameter O(use_ssl). type: int username: description: - - The username to log in to Cobbler. + - The username to log in to Cobbler. 
default: cobbler type: str password: description: - - The password to log in to Cobbler. + - The password to log in to Cobbler. type: str use_ssl: description: - - If C(no), an HTTP connection will be used instead of the default HTTPS connection. + - If V(false), an HTTP connection is used instead of the default HTTPS connection. type: bool - default: 'yes' + default: true validate_certs: description: - - If C(no), SSL certificates will not be validated. - - This should only set to C(no) when used on personally controlled sites using self-signed certificates. + - If V(false), SSL certificates are not validated. + - This should only set to V(false) when used on personally controlled sites using self-signed certificates. type: bool - default: 'yes' + default: true name: description: - - The system name to manage. + - The system name to manage. type: str properties: description: - - A dictionary with system properties. + - A dictionary with system properties. type: dict interfaces: description: - - A list of dictionaries containing interface options. + - A list of dictionaries containing interface options. type: dict sync: description: - - Sync on changes. - - Concurrently syncing Cobbler is bound to fail. + - Sync on changes. + - Concurrently syncing Cobbler is bound to fail. type: bool - default: no + default: false state: description: - - Whether the system should be present, absent or a query is made. - choices: [ absent, present, query ] + - Whether the system should be present, absent or a query is made. + choices: [absent, present, query] default: present type: str author: -- Dag Wieers (@dagwieers) + - Dag Wieers (@dagwieers) notes: -- Concurrently syncing Cobbler is bound to fail with weird errors. -- On python 2.7.8 and older (i.e. on RHEL7) you may need to tweak the python behaviour to disable certificate validation. - More information at L(Certificate verification in Python standard library HTTP clients,https://access.redhat.com/articles/2039753). 
-''' + - Concurrently syncing Cobbler is bound to fail with weird errors. +""" -EXAMPLES = r''' +EXAMPLES = r""" - name: Ensure the system exists in Cobbler community.general.cobbler_system: host: cobbler01 @@ -85,7 +88,7 @@ EXAMPLES = r''' name: myhost properties: profile: CentOS6-x86_64 - name_servers: [ 2.3.4.5, 3.4.5.6 ] + name_servers: [2.3.4.5, 3.4.5.6] name_servers_search: foo.com, bar.com interfaces: eth0: @@ -100,7 +103,7 @@ EXAMPLES = r''' password: ins3965! name: bdsol-aci51-apic1.cisco.com properties: - netboot_enabled: yes + netboot_enabled: true state: present delegate_to: localhost @@ -131,28 +134,30 @@ EXAMPLES = r''' name: myhost state: absent delegate_to: localhost -''' +""" -RETURN = r''' +RETURN = r""" systems: - description: List of systems - returned: C(state=query) and C(name) is not provided + description: List of systems. + returned: O(state=query) and O(name) is not provided type: list system: - description: (Resulting) information about the system we are working with - returned: when C(name) is provided + description: (Resulting) information about the system we are working with. 
+ returned: when O(name) is provided type: dict -''' +""" -import copy -import datetime import ssl +import xmlrpc.client as xmlrpc_client from ansible.module_utils.basic import AnsibleModule -from ansible.module_utils.six import iteritems -from ansible.module_utils.six.moves import xmlrpc_client from ansible.module_utils.common.text.converters import to_text +from ansible_collections.community.general.plugins.module_utils.datetime import ( + now, +) +from ansible_collections.community.general.plugins.module_utils.version import LooseVersion + IFPROPS_MAPPING = dict( bondingopts='bonding_opts', bridgeopts='bridge_opts', @@ -225,7 +230,7 @@ def main(): changed=False, ) - start = datetime.datetime.utcnow() + start = now() ssl_context = None if not validate_certs: @@ -269,9 +274,13 @@ def main(): if system: # Update existing entry - system_id = conn.get_system_handle(name, token) + system_id = '' + if LooseVersion(str(conn.version())) >= LooseVersion('3.4'): + system_id = conn.get_system_handle(name) + else: + system_id = conn.get_system_handle(name, token) - for key, value in iteritems(module.params['properties']): + for key, value in module.params['properties'].items(): if key not in system: module.warn("Property '{0}' is not a valid system property.".format(key)) if system[key] != value: @@ -288,7 +297,7 @@ def main(): result['changed'] = True if module.params['properties']: - for key, value in iteritems(module.params['properties']): + for key, value in module.params['properties'].items(): try: conn.modify_system(system_id, key, value, token) except Exception as e: @@ -297,8 +306,8 @@ def main(): # Add interface properties interface_properties = dict() if module.params['interfaces']: - for device, values in iteritems(module.params['interfaces']): - for key, value in iteritems(values): + for device, values in module.params['interfaces'].items(): + for key, value in values.items(): if key == 'name': continue if key not in IFPROPS_MAPPING: @@ -333,7 +342,7 @@ def 
main(): if module._diff: result['diff'] = dict(before=system, after=result['system']) - elapsed = datetime.datetime.utcnow() - start + elapsed = now() - start module.exit_json(elapsed=elapsed.seconds, **result) diff --git a/plugins/modules/packaging/language/composer.py b/plugins/modules/composer.py similarity index 52% rename from plugins/modules/packaging/language/composer.py rename to plugins/modules/composer.py index 351a104658..8301e3174f 100644 --- a/plugins/modules/packaging/language/composer.py +++ b/plugins/modules/composer.py @@ -1,114 +1,120 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- -# (c) 2014, Dimitrios Tydeas Mengidis -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# Copyright (c) 2014, Dimitrios Tydeas Mengidis +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: composer author: - - "Dimitrios Tydeas Mengidis (@dmtrs)" - - "René Moser (@resmo)" + - "Dimitrios Tydeas Mengidis (@dmtrs)" + - "René Moser (@resmo)" short_description: Dependency Manager for PHP description: - - > - Composer is a tool for dependency management in PHP. It allows you to - declare the dependent libraries your project needs and it will install - them in your project for you. + - Composer is a tool for dependency management in PHP. It allows you to declare the dependent libraries your project needs + and it installs them in your project for you. +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: none options: - command: - type: str - description: - - Composer command like "install", "update" and so on. 
- default: install - arguments: - type: str - description: - - Composer arguments like required package, version and so on. - executable: - type: path - description: - - Path to PHP Executable on the remote host, if PHP is not in PATH. - aliases: [ php_path ] - working_dir: - type: path - description: - - Directory of your project (see --working-dir). This is required when - the command is not run globally. - - Will be ignored if C(global_command=true). - global_command: - description: - - Runs the specified command globally. - type: bool - default: false - prefer_source: - description: - - Forces installation from package sources when possible (see --prefer-source). - default: false - type: bool - prefer_dist: - description: - - Forces installation from package dist even for dev versions (see --prefer-dist). - default: false - type: bool - no_dev: - description: - - Disables installation of require-dev packages (see --no-dev). - default: true - type: bool - no_scripts: - description: - - Skips the execution of all scripts defined in composer.json (see --no-scripts). - default: false - type: bool - no_plugins: - description: - - Disables all plugins (see --no-plugins). - default: false - type: bool - optimize_autoloader: - description: - - Optimize autoloader during autoloader dump (see --optimize-autoloader). - - Convert PSR-0/4 autoloading to classmap to get a faster autoloader. - - Recommended especially for production, but can take a bit of time to run. - default: true - type: bool - classmap_authoritative: - description: - - Autoload classes from classmap only. - - Implicitely enable optimize_autoloader. - - Recommended especially for production, but can take a bit of time to run. 
- default: false - type: bool - apcu_autoloader: - description: - - Uses APCu to cache found/not-found classes - default: false - type: bool - ignore_platform_reqs: - description: - - Ignore php, hhvm, lib-* and ext-* requirements and force the installation even if the local machine does not fulfill these. - default: false - type: bool - composer_executable: - type: path - description: - - Path to composer executable on the remote host, if composer is not in C(PATH) or a custom composer is needed. - version_added: 3.2.0 + command: + type: str + description: + - Composer command like V(install), V(update) and so on. + default: install + arguments: + type: str + description: + - Composer arguments like required package, version and so on. + default: '' + executable: + type: path + description: + - Path to PHP executable on the remote host, if PHP is not in E(PATH). + aliases: [php_path] + working_dir: + type: path + description: + - Directory of your project (see C(--working-dir)). This is required when the command is not run globally. + - This is ignored if O(global_command=true). + global_command: + description: + - Runs the specified command globally. + type: bool + default: false + prefer_source: + description: + - Forces installation from package sources when possible (see C(--prefer-source)). + default: false + type: bool + prefer_dist: + description: + - Forces installation from package dist even for dev versions (see C(--prefer-dist)). + default: false + type: bool + no_dev: + description: + - Disables installation of require-dev packages (see C(--no-dev)). + default: true + type: bool + no_scripts: + description: + - Skips the execution of all scripts defined in composer.json (see C(--no-scripts)). + default: false + type: bool + no_plugins: + description: + - Disables all plugins (see C(--no-plugins)). + default: false + type: bool + optimize_autoloader: + description: + - Optimize autoloader during autoloader dump (see C(--optimize-autoloader)). 
+ - Convert PSR-0/4 autoloading to classmap to get a faster autoloader. + - Recommended especially for production, but can take a bit of time to run. + default: true + type: bool + classmap_authoritative: + description: + - Autoload classes from classmap only. + - Implicitly enable optimize_autoloader. + - Recommended especially for production, but can take a bit of time to run. + default: false + type: bool + apcu_autoloader: + description: + - Uses APCu to cache found/not-found classes. + default: false + type: bool + ignore_platform_reqs: + description: + - Ignore C(php), C(hhvm), C(lib-*) and C(ext-*) requirements and force the installation even if the local machine does not fulfill + these. + default: false + type: bool + composer_executable: + type: path + description: + - Path to composer executable on the remote host, if composer is not in E(PATH) or a custom composer is needed. + version_added: 3.2.0 requirements: - - php - - composer installed in bin path (recommended /usr/local/bin) or specified in I(composer_executable) + - php + - composer installed in bin path (recommended C(/usr/local/bin)) or specified in O(composer_executable) notes: - - Default options that are always appended in each execution are --no-ansi, --no-interaction and --no-progress if available. - - We received reports about issues on macOS if composer was installed by Homebrew. Please use the official install method to avoid issues. -''' + - Default options that are always appended in each execution are C(--no-ansi), C(--no-interaction) and C(--no-progress) + if available. + - We received reports about issues on macOS if composer was installed by Homebrew. Please use the official install method + to avoid issues. 
+""" -EXAMPLES = ''' +EXAMPLES = r""" - name: Download and installs all libs and dependencies outlined in the /path/to/project/composer.lock community.general.composer: command: install @@ -125,16 +131,17 @@ EXAMPLES = ''' command: create-project arguments: package/package /path/to/project ~1.0 working_dir: /path/to/project - prefer_dist: yes + prefer_dist: true - name: Install a package globally community.general.composer: command: require - global_command: yes + global_command: true arguments: my/package -''' +""" import re +import shlex from ansible.module_utils.basic import AnsibleModule @@ -152,7 +159,7 @@ def has_changed(string): def get_available_options(module, command='install'): # get all available options from a composer command using composer help to json - rc, out, err = composer_command(module, "help %s" % command, arguments="--no-interaction --format=json") + rc, out, err = composer_command(module, ["help", command], arguments=["--no-interaction", "--format=json"]) if rc != 0: output = parse_out(err) module.fail_json(msg=output) @@ -161,9 +168,19 @@ def get_available_options(module, command='install'): return command_help_json['definition']['options'] -def composer_command(module, command, arguments="", options=None, global_command=False): +def composer_command(module, command, arguments=None, options=None): if options is None: options = [] + if arguments is None: + arguments = [] + + global_command = module.params['global_command'] + + if global_command: + global_arg = ["global"] + else: + global_arg = [] + options.extend(['--working-dir', module.params['working_dir']]) if module.params['executable'] is None: php_path = module.get_bin_path("php", True, ["/usr/local/bin"]) @@ -175,7 +192,7 @@ def composer_command(module, command, arguments="", options=None, global_command else: composer_path = module.params['composer_executable'] - cmd = "%s %s %s %s %s %s" % (php_path, composer_path, "global" if global_command else "", command, " ".join(options), 
arguments) + cmd = [php_path, composer_path] + global_arg + command + options + arguments return module.run_command(cmd) @@ -207,8 +224,7 @@ def main(): if re.search(r"\s", command): module.fail_json(msg="Use the 'arguments' param for passing arguments with the 'command'") - arguments = module.params['arguments'] - global_command = module.params['global_command'] + arguments = shlex.split(module.params['arguments']) available_options = get_available_options(module=module, command=command) options = [] @@ -225,9 +241,6 @@ def main(): option = "--%s" % option options.append(option) - if not global_command: - options.extend(['--working-dir', "'%s'" % module.params['working_dir']]) - option_params = { 'prefer_source': 'prefer-source', 'prefer_dist': 'prefer-dist', @@ -251,7 +264,7 @@ def main(): else: module.exit_json(skipped=True, msg="command '%s' does not support check mode, skipping" % command) - rc, out, err = composer_command(module, command, arguments, options, global_command) + rc, out, err = composer_command(module, [command], arguments, options) if rc != 0: output = parse_out(err) diff --git a/plugins/modules/clustering/consul/consul.py b/plugins/modules/consul.py similarity index 56% rename from plugins/modules/clustering/consul/consul.py rename to plugins/modules/consul.py index 9707d5431a..456335babf 100644 --- a/plugins/modules/clustering/consul/consul.py +++ b/plugins/modules/consul.py @@ -1,156 +1,160 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- # -# (c) 2015, Steve Gargan -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# Copyright (c) 2015, Steve Gargan +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = ''' +DOCUMENTATION = r""" module: consul 
-short_description: "Add, modify & delete services within a consul cluster." +short_description: Add, modify & delete services within a Consul cluster description: - - Registers services and checks for an agent with a consul cluster. - A service is some process running on the agent node that should be advertised by - consul's discovery mechanism. It may optionally supply a check definition, - a periodic service test to notify the consul cluster of service's health. - - "Checks may also be registered per node e.g. disk usage, or cpu usage and - notify the health of the entire node to the cluster. - Service level checks do not require a check name or id as these are derived - by Consul from the Service name and id respectively by appending 'service:' - Node level checks require a I(check_name) and optionally a I(check_id)." - - Currently, there is no complete way to retrieve the script, interval or ttl - metadata for a registered check. Without this metadata it is not possible to - tell if the data supplied with ansible represents a change to a check. As a - result this does not attempt to determine changes and will always report a - changed occurred. An API method is planned to supply this metadata so at that - stage change management will be added. - - "See U(http://consul.io) for more details." + - Registers services and checks for an agent with a Consul cluster. A service is some process running on the agent node + that should be advertised by Consul's discovery mechanism. It may optionally supply a check definition, a periodic service + test to notify the Consul cluster of service's health. + - Checks may also be registered per node, for example disk usage, or cpu usage and notify the health of the entire node + to the cluster. Service level checks do not require a check name or ID as these are derived by Consul from the Service + name and ID respectively by appending V(service:) Node level checks require a O(check_name) and optionally a O(check_id). 
+ - Currently, there is no complete way to retrieve the script, interval or TTL metadata for a registered check. Without this + metadata it is not possible to tell if the data supplied with ansible represents a change to a check. As a result this + does not attempt to determine changes and it always reports a changed occurred. An API method is planned to supply this + metadata so at that stage change management is to be added. + - See U(http://consul.io) for more details. requirements: - - python-consul + - py-consul - requests author: "Steve Gargan (@sgargan)" +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: none + diff_mode: + support: none options: - state: - type: str - description: - - register or deregister the consul service, defaults to present - default: present - choices: ['present', 'absent'] - service_name: - type: str - description: - - Unique name for the service on a node, must be unique per node, - required if registering a service. May be omitted if registering - a node level check - service_id: - type: str - description: - - the ID for the service, must be unique per node. If I(state=absent), - defaults to the service name if supplied. - host: - type: str - description: - - host of the consul agent defaults to localhost - default: localhost - port: - type: int - description: - - the port on which the consul agent is running - default: 8500 - scheme: - type: str - description: - - the protocol scheme on which the consul agent is running - default: http - validate_certs: - description: - - whether to verify the TLS certificate of the consul agent - type: bool - default: 'yes' - notes: - type: str - description: - - Notes to attach to check when registering it. - service_port: - type: int - description: - - the port on which the service is listening. Can optionally be supplied for - registration of a service, i.e. 
if I(service_name) or I(service_id) is set - service_address: - type: str - description: - - the address to advertise that the service will be listening on. - This value will be passed as the I(address) parameter to Consul's - C(/v1/agent/service/register) API method, so refer to the Consul API - documentation for further details. - tags: - type: list - elements: str - description: - - tags that will be attached to the service registration. - script: - type: str - description: - - the script/command that will be run periodically to check the health - of the service. Scripts require I(interval) and vice versa. - interval: - type: str - description: - - the interval at which the service check will be run. This is a number - with a s or m suffix to signify the units of seconds or minutes e.g - C(15s) or C(1m). If no suffix is supplied, m will be used by default e.g. - C(1) will be C(1m). Required if the I(script) parameter is specified. - check_id: - type: str - description: - - an ID for the service check. If I(state=absent), defaults to - I(check_name). Ignored if part of a service definition. - check_name: - type: str - description: - - a name for the service check. Required if standalone, ignored if - part of service definition. - ttl: - type: str - description: - - checks can be registered with a ttl instead of a I(script) and I(interval) - this means that the service will check in with the agent before the - ttl expires. If it doesn't the check will be considered failed. - Required if registering a check and the script an interval are missing - Similar to the interval this is a number with a s or m suffix to - signify the units of seconds or minutes e.g C(15s) or C(1m). If no suffix - is supplied, C(m) will be used by default e.g. C(1) will be C(1m) - tcp: - type: str - description: - - Checks can be registered with a TCP port. 
This means that consul - will check if the connection attempt to that port is successful (that is, the port is currently accepting connections). - The format is C(host:port), for example C(localhost:80). - I(interval) must also be provided with this option. - version_added: '1.3.0' - http: - type: str - description: - - checks can be registered with an HTTP endpoint. This means that consul - will check that the http endpoint returns a successful HTTP status. - I(interval) must also be provided with this option. - timeout: - type: str - description: - - A custom HTTP check timeout. The consul default is 10 seconds. - Similar to the interval this is a number with a C(s) or C(m) suffix to - signify the units of seconds or minutes, e.g. C(15s) or C(1m). - token: - type: str - description: - - the token key identifying an ACL rule set. May be required to register services. -''' + state: + type: str + description: + - Register or deregister the Consul service, defaults to present. + default: present + choices: ['present', 'absent'] + service_name: + type: str + description: + - Unique name for the service on a node, must be unique per node, required if registering a service. May be omitted + if registering a node level check. + service_id: + type: str + description: + - The ID for the service, must be unique per node. If O(state=absent), defaults to the service name if supplied. + host: + type: str + description: + - Host of the Consul agent defaults to localhost. + default: localhost + port: + type: int + description: + - The port on which the Consul agent is running. + default: 8500 + scheme: + type: str + description: + - The protocol scheme on which the Consul agent is running. + default: http + validate_certs: + description: + - Whether to verify the TLS certificate of the Consul agent. + type: bool + default: true + notes: + type: str + description: + - Notes to attach to check when registering it. 
+ service_port: + type: int + description: + - The port on which the service is listening. Can optionally be supplied for registration of a service, that is if O(service_name) + or O(service_id) is set. + service_address: + type: str + description: + - The address to advertise that the service is listening on. This value is passed as the C(address) parameter to Consul's + C(/v1/agent/service/register) API method, so refer to the Consul API documentation for further details. + tags: + type: list + elements: str + description: + - Tags that are attached to the service registration. + script: + type: str + description: + - The script/command that is run periodically to check the health of the service. + - Requires O(interval) to be provided. + - Mutually exclusive with O(ttl), O(tcp) and O(http). + interval: + type: str + description: + - The interval at which the service check is run. This is a number with a V(s) or V(m) suffix to signify the units of + seconds or minutes, for example V(15s) or V(1m). If no suffix is supplied V(s) is used by default, for example V(10) + is V(10s). + - Required if one of the parameters O(script), O(http), or O(tcp) is specified. + check_id: + type: str + description: + - An ID for the service check. If O(state=absent), defaults to O(check_name). Ignored if part of a service definition. + check_name: + type: str + description: + - Name for the service check. Required if standalone, ignored if part of service definition. + check_node: + description: + - Node name. + type: str + check_host: + description: + - Host name. + type: str + ttl: + type: str + description: + - Checks can be registered with a TTL instead of a O(script) and O(interval) this means that the service checks in with + the agent before the TTL expires. If it does not the check is considered failed. 
Required if registering a check and + the script an interval are missing Similar to the interval this is a number with a V(s) or V(m) suffix to signify + the units of seconds or minutes, for example V(15s) or V(1m). If no suffix is supplied V(s) is used by default, for + example V(10) is equivalent to V(10s). + - Mutually exclusive with O(script), O(tcp) and O(http). + tcp: + type: str + description: + - Checks can be registered with a TCP port. This means that Consul checks if the connection attempt to that port is + successful (that is, the port is currently accepting connections). The format is V(host:port), for example V(localhost:80). + - Requires O(interval) to be provided. + - Mutually exclusive with O(script), O(ttl) and O(http). + version_added: '1.3.0' + http: + type: str + description: + - Checks can be registered with an HTTP endpoint. This means that Consul checks that the http endpoint returns a successful + HTTP status. + - Requires O(interval) to be provided. + - Mutually exclusive with O(script), O(ttl) and O(tcp). + timeout: + type: str + description: + - A custom HTTP check timeout. The Consul default is 10 seconds. Similar to the interval this is a number with a V(s) + or V(m) suffix to signify the units of seconds or minutes, for example V(15s) or V(1m). If no suffix is supplied V(s) + is used by default, for example V(10) is equivalent to V(10s). + token: + type: str + description: + - The token key identifying an ACL rule set. May be required to register services. 
+""" -EXAMPLES = ''' -- name: Register nginx service with the local consul agent +EXAMPLES = r""" +- name: Register nginx service with the local Consul agent community.general.consul: service_name: nginx service_port: 80 @@ -216,7 +220,7 @@ EXAMPLES = ''' service_id: nginx interval: 60s http: http://localhost:80/morestatus -''' +""" try: import consul @@ -240,7 +244,7 @@ from ansible.module_utils.basic import AnsibleModule def register_with_consul(module): - state = module.params.get('state') + state = module.params['state'] if state == 'present': add(module) @@ -266,10 +270,8 @@ def add(module): def remove(module): ''' removes a service or a check ''' - service_id = module.params.get('service_id') or module.params.get('service_name') - check_id = module.params.get('check_id') or module.params.get('check_name') - if not (service_id or check_id): - module.fail_json(msg='services and checks are removed by id or name. please supply a service id/name or a check id/name') + service_id = module.params['service_id'] or module.params['service_name'] + check_id = module.params['check_id'] or module.params['check_name'] if service_id: remove_service(module, service_id) else: @@ -342,63 +344,55 @@ def remove_service(module, service_id): consul_api = get_consul_api(module) service = get_service_by_id_or_name(consul_api, service_id) if service: - consul_api.agent.service.deregister(service_id, token=module.params.get('token')) + consul_api.agent.service.deregister(service_id, token=module.params['token']) module.exit_json(changed=True, id=service_id) module.exit_json(changed=False, id=service_id) def get_consul_api(module): - consulClient = consul.Consul(host=module.params.get('host'), - port=module.params.get('port'), - scheme=module.params.get('scheme'), - verify=module.params.get('validate_certs'), - token=module.params.get('token')) + consulClient = consul.Consul(host=module.params['host'], + port=module.params['port'], + scheme=module.params['scheme'], + 
verify=module.params['validate_certs'], + token=module.params['token']) consulClient.agent.service = PatchedConsulAgentService(consulClient) return consulClient def get_service_by_id_or_name(consul_api, service_id_or_name): ''' iterate the registered services and find one with the given id ''' - for name, service in consul_api.agent.services().items(): - if service['ID'] == service_id_or_name or service['Service'] == service_id_or_name: + for dummy, service in consul_api.agent.services().items(): + if service_id_or_name in (service['ID'], service['Service']): return ConsulService(loaded=service) def parse_check(module): - if len([p for p in (module.params.get('script'), module.params.get('ttl'), module.params.get('tcp'), module.params.get('http')) if p]) > 1: - module.fail_json( - msg='checks are either script, tcp, http or ttl driven, supplying more than one does not make sense') - - if module.params.get('check_id') or module.params.get('script') or module.params.get('ttl') or module.params.get('tcp') or module.params.get('http'): - + if module.params['check_id'] or any(module.params[p] is not None for p in ('script', 'ttl', 'tcp', 'http')): return ConsulCheck( - module.params.get('check_id'), - module.params.get('check_name'), - module.params.get('check_node'), - module.params.get('check_host'), - module.params.get('script'), - module.params.get('interval'), - module.params.get('ttl'), - module.params.get('notes'), - module.params.get('tcp'), - module.params.get('http'), - module.params.get('timeout'), - module.params.get('service_id'), + module.params['check_id'], + module.params['check_name'], + module.params['check_node'], + module.params['check_host'], + module.params['script'], + module.params['interval'], + module.params['ttl'], + module.params['notes'], + module.params['tcp'], + module.params['http'], + module.params['timeout'], + module.params['service_id'], ) def parse_service(module): - if module.params.get('service_name'): - return ConsulService( - 
module.params.get('service_id'), - module.params.get('service_name'), - module.params.get('service_address'), - module.params.get('service_port'), - module.params.get('tags'), - ) - elif not module.params.get('service_name'): - module.fail_json(msg="service_name is required to configure a service.") + return ConsulService( + module.params['service_id'], + module.params['service_name'], + module.params['service_address'], + module.params['service_port'], + module.params['tags'], + ) class ConsulService(object): @@ -492,19 +486,13 @@ class ConsulCheck(object): self.check = consul.Check.ttl(self.ttl) if http: - if interval is None: - raise Exception('http check must specify interval') - self.check = consul.Check.http(http, self.interval, self.timeout) if tcp: - if interval is None: - raise Exception('tcp check must specify interval') - - regex = r"(?P.*)(?::)(?P(?:[0-9]+))$" + regex = r"(?P.*):(?P(?:[0-9]+))$" match = re.match(regex, tcp) - if match is None: + if not match: raise Exception('tcp check must be in host:port format') self.check = consul.Check.tcp(match.group('host').strip('[]'), int(match.group('port')), self.interval) @@ -512,7 +500,7 @@ class ConsulCheck(object): def validate_duration(self, name, duration): if duration: duration_units = ['ns', 'us', 'ms', 's', 'm', 'h'] - if not any((duration.endswith(suffix) for suffix in duration_units)): + if not any(duration.endswith(suffix) for suffix in duration_units): duration = "{0}s".format(duration) return duration @@ -559,7 +547,7 @@ class ConsulCheck(object): def test_dependencies(module): if not python_consul_installed: - module.fail_json(msg="python-consul required for this module. see https://python-consul.readthedocs.io/en/latest/#installation") + module.fail_json(msg="py-consul required for this module. 
see https://github.com/criteo/py-consul?tab=readme-ov-file#installation") def main(): @@ -586,18 +574,36 @@ def main(): http=dict(type='str'), timeout=dict(type='str'), tags=dict(type='list', elements='str'), - token=dict(no_log=True) + token=dict(no_log=True), ), + mutually_exclusive=[ + ('script', 'ttl', 'tcp', 'http'), + ], + required_if=[ + ('state', 'present', ['service_name']), + ('state', 'absent', ['service_id', 'service_name', 'check_id', 'check_name'], True), + ], + required_by={ + 'script': 'interval', + 'http': 'interval', + 'tcp': 'interval', + }, supports_check_mode=False, ) + p = module.params test_dependencies(module) + if p['state'] == 'absent' and any(p[x] for x in ['script', 'ttl', 'tcp', 'http', 'interval']): + module.fail_json( + msg="The use of parameters 'script', 'ttl', 'tcp', 'http', 'interval' along with 'state=absent' is no longer allowed." + ) try: register_with_consul(module) + except SystemExit: + raise except ConnectionError as e: - module.fail_json(msg='Could not connect to consul agent at %s:%s, error was %s' % ( - module.params.get('host'), module.params.get('port'), str(e))) + module.fail_json(msg='Could not connect to consul agent at %s:%s, error was %s' % (p['host'], p['port'], str(e))) except Exception as e: module.fail_json(msg=str(e)) diff --git a/plugins/modules/consul_acl_bootstrap.py b/plugins/modules/consul_acl_bootstrap.py new file mode 100644 index 0000000000..d7d474e9c6 --- /dev/null +++ b/plugins/modules/consul_acl_bootstrap.py @@ -0,0 +1,104 @@ +#!/usr/bin/python +# +# Copyright (c) 2024, Florian Apolloner (@apollo13) +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + + +DOCUMENTATION = r""" +module: consul_acl_bootstrap +short_description: Bootstrap ACLs in Consul +version_added: 8.3.0 +description: + - Allows bootstrapping of ACLs in a Consul cluster, see 
U(https://developer.hashicorp.com/consul/api-docs/acl#bootstrap-acls) + for details. +author: + - Florian Apolloner (@apollo13) +extends_documentation_fragment: + - community.general.consul + - community.general.attributes +attributes: + check_mode: + support: none + diff_mode: + support: none +options: + state: + description: + - Whether the token should be present or absent. + choices: ['present', 'bootstrapped'] + default: present + type: str + bootstrap_secret: + description: + - The secret to be used as secret ID for the initial token. + - Needs to be an UUID. + type: str +""" + +EXAMPLES = r""" +- name: Bootstrap the ACL system + community.general.consul_acl_bootstrap: + bootstrap_secret: 22eaeed1-bdbd-4651-724e-42ae6c43e387 +""" + +RETURN = r""" +result: + description: + - The bootstrap result as returned by the Consul HTTP API. + - B(Note:) If O(bootstrap_secret) has been specified the C(SecretID) and C(ID) do not contain the secret but C(VALUE_SPECIFIED_IN_NO_LOG_PARAMETER). + If you pass O(bootstrap_secret), make sure your playbook/role does not depend on this return value! 
+ returned: changed + type: dict + sample: + AccessorID: 834a5881-10a9-a45b-f63c-490e28743557 + CreateIndex: 25 + CreateTime: '2024-01-21T20:26:27.114612038+01:00' + Description: Bootstrap Token (Global Management) + Hash: X2AgaFhnQGRhSSF/h0m6qpX1wj/HJWbyXcxkEM/5GrY= + ID: VALUE_SPECIFIED_IN_NO_LOG_PARAMETER + Local: false + ModifyIndex: 25 + Policies: + - ID: 00000000-0000-0000-0000-000000000001 + Name: global-management + SecretID: VALUE_SPECIFIED_IN_NO_LOG_PARAMETER +""" + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.consul import ( + AUTH_ARGUMENTS_SPEC, + RequestError, + _ConsulModule, +) + +_ARGUMENT_SPEC = { + "state": dict(type="str", choices=["present", "bootstrapped"], default="present"), + "bootstrap_secret": dict(type="str", no_log=True), +} +_ARGUMENT_SPEC.update(AUTH_ARGUMENTS_SPEC) +_ARGUMENT_SPEC.pop("token") + + +def main(): + module = AnsibleModule(_ARGUMENT_SPEC) + consul_module = _ConsulModule(module) + + data = {} + if "bootstrap_secret" in module.params: + data["BootstrapSecret"] = module.params["bootstrap_secret"] + + try: + response = consul_module.put("acl/bootstrap", data=data) + except RequestError as e: + if e.status == 403 and b"ACL bootstrap no longer allowed" in e.response_data: + return module.exit_json(changed=False) + raise + else: + return module.exit_json(changed=True, result=response) + + +if __name__ == "__main__": + main() diff --git a/plugins/modules/consul_agent_check.py b/plugins/modules/consul_agent_check.py new file mode 100644 index 0000000000..e241c8ddf4 --- /dev/null +++ b/plugins/modules/consul_agent_check.py @@ -0,0 +1,244 @@ +#!/usr/bin/python + +# Copyright (c) 2024, Michael Ilg +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + + +DOCUMENTATION = r""" +module: consul_agent_check +short_description: Add, modify, and 
delete checks within a Consul cluster +version_added: 9.1.0 +description: + - Allows the addition, modification and deletion of checks in a Consul cluster using the agent. For more details on using + and configuring Checks, see U(https://developer.hashicorp.com/consul/api-docs/agent/check). + - Currently, there is no complete way to retrieve the script, interval or TTL metadata for a registered check. Without this + metadata it is not possible to tell if the data supplied with ansible represents a change to a check. As a result, the + module does not attempt to determine changes and it always reports a changed occurred. An API method is planned to supply + this metadata so at that stage change management is to be added. +author: + - Michael Ilg (@Ilgmi) +extends_documentation_fragment: + - community.general.consul + - community.general.consul.actiongroup_consul + - community.general.consul.token + - community.general.attributes +attributes: + check_mode: + support: full + details: + - The result is the object as it is defined in the module options and not the object structure of the Consul API. For + a better overview of what the object structure looks like, take a look at U(https://developer.hashicorp.com/consul/api-docs/agent/check#list-checks). + diff_mode: + support: partial + details: + - In check mode the diff shows the object as it is defined in the module options and not the object structure of the + Consul API. +options: + state: + description: + - Whether the check should be present or absent. + choices: ['present', 'absent'] + default: present + type: str + name: + description: + - Required name for the service check. + type: str + id: + description: + - Specifies a unique ID for this check on the node. This defaults to the O(name) parameter, but it may be necessary + to provide an ID for uniqueness. This value is returned in the response as V(CheckId). + type: str + interval: + description: + - The interval at which the service check is run. 
This is a number with a V(s) or V(m) suffix to signify the units of + seconds or minutes, for example V(15s) or V(1m). If no suffix is supplied V(s) is used by default, for example V(10) + is equivalent to V(10s). + - Required if one of the parameters O(args), O(http), or O(tcp) is specified. + type: str + notes: + description: + - Notes to attach to check when registering it. + type: str + args: + description: + - Specifies command arguments to run to update the status of the check. + - Requires O(interval) to be provided. + - Mutually exclusive with O(ttl), O(tcp) and O(http). + type: list + elements: str + ttl: + description: + - Checks can be registered with a TTL instead of a O(args) and O(interval) this means that the service checks in with + the agent before the TTL expires. If it does not the check is considered failed. Required if registering a check and + the script an interval are missing Similar to the interval this is a number with a V(s) or V(m) suffix to signify + the units of seconds or minutes, for example V(15s) or V(1m). If no suffix is supplied V(s) is used by default, for + example V(10) is equivalent to V(10s). + - Mutually exclusive with O(args), O(tcp) and O(http). + type: str + tcp: + description: + - Checks can be registered with a TCP port. This means that Consul will check if the connection attempt to that port + is successful (that is, the port is currently accepting connections). The format is V(host:port), for example V(localhost:80). + - Requires O(interval) to be provided. + - Mutually exclusive with O(args), O(ttl) and O(http). + type: str + version_added: '1.3.0' + http: + description: + - Checks can be registered with an HTTP endpoint. This means that Consul checks that the HTTP endpoint returns a successful + HTTP status. + - Requires O(interval) to be provided. + - Mutually exclusive with O(args), O(ttl) and O(tcp). + type: str + timeout: + description: + - A custom HTTP check timeout. The Consul default is 10 seconds. 
Similar to the interval this is a number with a V(s) + or V(m) suffix to signify the units of seconds or minutes, for example V(15s) or V(1m). If no suffix is supplied V(s) + is used by default, for example V(10) is equivalent to V(10s). + type: str + service_id: + description: + - The ID for the service, must be unique per node. If O(state=absent), defaults to the service name if supplied. + type: str +""" + +EXAMPLES = r""" +- name: Register tcp check for service 'nginx' + community.general.consul_agent_check: + name: nginx_tcp_check + service_id: nginx + interval: 60s + tcp: localhost:80 + notes: "Nginx Check" + +- name: Register http check for service 'nginx' + community.general.consul_agent_check: + name: nginx_http_check + service_id: nginx + interval: 60s + http: http://localhost:80/status + notes: "Nginx Check" + +- name: Remove check for service 'nginx' + community.general.consul_agent_check: + state: absent + id: nginx_http_check + service_id: "{{ nginx_service.ID }}" +""" + +RETURN = r""" +check: + description: The check as returned by the Consul HTTP API. + returned: always + type: dict + sample: + CheckID: nginx_check + ServiceID: nginx + Interval: 30s + Type: http + Notes: Nginx Check +operation: + description: The operation performed. 
+ returned: changed + type: str + sample: update +""" + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.consul import ( + AUTH_ARGUMENTS_SPEC, + OPERATION_CREATE, + OPERATION_UPDATE, + OPERATION_DELETE, + OPERATION_READ, + _ConsulModule, + validate_check, +) + +_ARGUMENT_SPEC = { + "state": dict(default="present", choices=["present", "absent"]), + "name": dict(type='str'), + "id": dict(type='str'), + "interval": dict(type='str'), + "notes": dict(type='str'), + "args": dict(type='list', elements='str'), + "http": dict(type='str'), + "tcp": dict(type='str'), + "ttl": dict(type='str'), + "timeout": dict(type='str'), + "service_id": dict(type='str'), +} + +_MUTUALLY_EXCLUSIVE = [ + ('args', 'ttl', 'tcp', 'http'), +] + +_REQUIRED_IF = [ + ('state', 'present', ['name']), + ('state', 'absent', ('id', 'name'), True), +] + +_REQUIRED_BY = { + 'args': 'interval', + 'http': 'interval', + 'tcp': 'interval', +} + +_ARGUMENT_SPEC.update(AUTH_ARGUMENTS_SPEC) + + +class ConsulAgentCheckModule(_ConsulModule): + api_endpoint = "agent/check" + result_key = "check" + unique_identifiers = ["id", "name"] + operational_attributes = {"Node", "CheckID", "Output", "ServiceName", "ServiceTags", + "Status", "Type", "ExposedPort", "Definition"} + + def endpoint_url(self, operation, identifier=None): + if operation == OPERATION_READ: + return "agent/checks" + if operation in [OPERATION_CREATE, OPERATION_UPDATE]: + return "/".join([self.api_endpoint, "register"]) + if operation == OPERATION_DELETE: + return "/".join([self.api_endpoint, "deregister", identifier]) + + return super(ConsulAgentCheckModule, self).endpoint_url(operation, identifier) + + def read_object(self): + url = self.endpoint_url(OPERATION_READ) + checks = self.get(url) + identifier = self.id_from_obj(self.params) + if identifier in checks: + return checks[identifier] + return None + + def prepare_object(self, existing, obj): + existing = 
super(ConsulAgentCheckModule, self).prepare_object(existing, obj) + validate_check(existing) + return existing + + def delete_object(self, obj): + if not self._module.check_mode: + self.put(self.endpoint_url(OPERATION_DELETE, obj.get("CheckID"))) + return {} + + +def main(): + module = AnsibleModule( + _ARGUMENT_SPEC, + mutually_exclusive=_MUTUALLY_EXCLUSIVE, + required_if=_REQUIRED_IF, + required_by=_REQUIRED_BY, + supports_check_mode=True, + ) + + consul_module = ConsulAgentCheckModule(module) + consul_module.execute() + + +if __name__ == "__main__": + main() diff --git a/plugins/modules/consul_agent_service.py b/plugins/modules/consul_agent_service.py new file mode 100644 index 0000000000..7d7c94c05a --- /dev/null +++ b/plugins/modules/consul_agent_service.py @@ -0,0 +1,281 @@ +#!/usr/bin/python + +# Copyright (c) 2024, Michael Ilg +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + + +DOCUMENTATION = r""" +module: consul_agent_service +short_description: Add, modify and delete services within a Consul cluster +version_added: 9.1.0 +description: + - Allows the addition, modification and deletion of services in a Consul cluster using the agent. + - There are currently no plans to create services and checks in one. This is because the Consul API does not provide checks + for a service and the checks themselves do not match the module parameters. Therefore, only a service without checks can + be created in this module. +author: + - Michael Ilg (@Ilgmi) +extends_documentation_fragment: + - community.general.consul + - community.general.consul.actiongroup_consul + - community.general.consul.token + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: partial + details: + - In check mode the diff misses operational attributes. 
+options: + state: + description: + - Whether the service should be present or absent. + choices: ['present', 'absent'] + default: present + type: str + name: + description: + - Unique name for the service on a node, must be unique per node, required if registering a service. + type: str + id: + description: + - Specifies a unique ID for this service. This must be unique per agent. This defaults to the O(name) parameter if not + provided. If O(state=absent), defaults to the service name if supplied. + type: str + tags: + description: + - Tags that are attached to the service registration. + type: list + elements: str + address: + description: + - The address to advertise that the service listens on. This value is passed as the C(address) parameter to Consul's + C(/v1/agent/service/register) API method, so refer to the Consul API documentation for further details. + type: str + meta: + description: + - Optional meta data used for filtering. For keys, the characters C(A-Z), C(a-z), C(0-9), C(_), C(-) are allowed. Not + allowed characters are replaced with underscores. + type: dict + service_port: + description: + - The port on which the service is listening. Can optionally be supplied for registration of a service, that is if O(name) + or O(id) is set. + type: int + enable_tag_override: + description: + - Specifies to disable the anti-entropy feature for this service's tags. If C(EnableTagOverride) is set to true then + external agents can update this service in the catalog and modify the tags. + type: bool + default: false + weights: + description: + - Specifies weights for the service. + type: dict + suboptions: + passing: + description: + - Weights for passing. + type: int + default: 1 + warning: + description: + - Weights for warning. 
+ type: int + default: 1 + default: {"passing": 1, "warning": 1} +""" + +EXAMPLES = r""" +- name: Register nginx service with the local Consul agent + community.general.consul_agent_service: + host: consul1.example.com + token: some_management_acl + name: nginx + service_port: 80 + +- name: Register nginx with a tcp check + community.general.consul_agent_service: + host: consul1.example.com + token: some_management_acl + name: nginx + service_port: 80 + +- name: Register nginx with an http check + community.general.consul_agent_service: + host: consul1.example.com + token: some_management_acl + name: nginx + service_port: 80 + +- name: Register external service nginx available at 10.1.5.23 + community.general.consul_agent_service: + host: consul1.example.com + token: some_management_acl + name: nginx + service_port: 80 + address: 10.1.5.23 + +- name: Register nginx with some service tags + community.general.consul_agent_service: + host: consul1.example.com + token: some_management_acl + name: nginx + service_port: 80 + tags: + - prod + - webservers + +- name: Register nginx with some service meta + community.general.consul_agent_service: + host: consul1.example.com + token: some_management_acl + name: nginx + service_port: 80 + meta: + nginx_version: 1.25.3 + +- name: Remove nginx service + community.general.consul_agent_service: + host: consul1.example.com + token: some_management_acl + service_id: nginx + state: absent + +- name: Register celery worker service + community.general.consul_agent_service: + host: consul1.example.com + token: some_management_acl + name: celery-worker + tags: + - prod + - worker +""" + +RETURN = r""" +service: + description: The service as returned by the Consul HTTP API. 
+ returned: always + type: dict + sample: + ID: nginx + Service: nginx + Address: localhost + Port: 80 + Tags: + - http + Meta: + - nginx_version: 1.23.3 + Datacenter: dc1 + Weights: + Passing: 1 + Warning: 1 + ContentHash: 61a245cd985261ac + EnableTagOverride: false +operation: + description: The operation performed. + returned: changed + type: str + sample: update +""" + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.consul import ( + AUTH_ARGUMENTS_SPEC, + OPERATION_CREATE, + OPERATION_UPDATE, + OPERATION_DELETE, + _ConsulModule +) + +_CHECK_MUTUALLY_EXCLUSIVE = [('args', 'ttl', 'tcp', 'http')] +_CHECK_REQUIRED_BY = { + 'args': 'interval', + 'http': 'interval', + 'tcp': 'interval', +} + +_ARGUMENT_SPEC = { + "state": dict(default="present", choices=["present", "absent"]), + "name": dict(type='str'), + "id": dict(type='str'), + "tags": dict(type='list', elements='str'), + "address": dict(type='str'), + "meta": dict(type='dict'), + "service_port": dict(type='int'), + "enable_tag_override": dict(type='bool', default=False), + "weights": dict(type='dict', options=dict( + passing=dict(type='int', default=1, no_log=False), + warning=dict(type='int', default=1) + ), default={"passing": 1, "warning": 1}) +} + +_REQUIRED_IF = [ + ('state', 'present', ['name']), + ('state', 'absent', ('id', 'name'), True), +] + +_ARGUMENT_SPEC.update(AUTH_ARGUMENTS_SPEC) + + +class ConsulAgentServiceModule(_ConsulModule): + api_endpoint = "agent/service" + result_key = "service" + unique_identifiers = ["id", "name"] + operational_attributes = {"Service", "ContentHash", "Datacenter"} + + def endpoint_url(self, operation, identifier=None): + if operation in [OPERATION_CREATE, OPERATION_UPDATE]: + return "/".join([self.api_endpoint, "register"]) + if operation == OPERATION_DELETE: + return "/".join([self.api_endpoint, "deregister", identifier]) + + return super(ConsulAgentServiceModule, self).endpoint_url(operation, 
identifier) + + def prepare_object(self, existing, obj): + existing = super(ConsulAgentServiceModule, self).prepare_object(existing, obj) + if "ServicePort" in existing: + existing["Port"] = existing.pop("ServicePort") + + if "ID" not in existing: + existing["ID"] = existing["Name"] + + return existing + + def needs_update(self, api_obj, module_obj): + obj = {} + if "Service" in api_obj: + obj["Service"] = api_obj["Service"] + api_obj = self.prepare_object(api_obj, obj) + + if "Name" in module_obj: + module_obj["Service"] = module_obj.pop("Name") + if "ServicePort" in module_obj: + module_obj["Port"] = module_obj.pop("ServicePort") + + return super(ConsulAgentServiceModule, self).needs_update(api_obj, module_obj) + + def delete_object(self, obj): + if not self._module.check_mode: + url = self.endpoint_url(OPERATION_DELETE, self.id_from_obj(obj, camel_case=True)) + self.put(url) + return {} + + +def main(): + module = AnsibleModule( + _ARGUMENT_SPEC, + required_if=_REQUIRED_IF, + supports_check_mode=True, + ) + + consul_module = ConsulAgentServiceModule(module) + consul_module.execute() + + +if __name__ == "__main__": + main() diff --git a/plugins/modules/consul_auth_method.py b/plugins/modules/consul_auth_method.py new file mode 100644 index 0000000000..88842662bb --- /dev/null +++ b/plugins/modules/consul_auth_method.py @@ -0,0 +1,204 @@ +#!/usr/bin/python +# +# Copyright (c) 2024, Florian Apolloner (@apollo13) +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + + +DOCUMENTATION = r""" +module: consul_auth_method +short_description: Manipulate Consul auth methods +version_added: 8.3.0 +description: + - Allows the addition, modification and deletion of auth methods in a Consul cluster using the agent. For more details on + using and configuring ACLs, see U(https://www.consul.io/docs/guides/acl.html). 
+author: + - Florian Apolloner (@apollo13) +extends_documentation_fragment: + - community.general.consul + - community.general.consul.actiongroup_consul + - community.general.consul.token + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: partial + details: + - In check mode the diff misses operational attributes. +options: + state: + description: + - Whether the token should be present or absent. + choices: ['present', 'absent'] + default: present + type: str + name: + description: + - Specifies a name for the ACL auth method. + - The name can contain alphanumeric characters, dashes C(-), and underscores C(_). + type: str + required: true + type: + description: + - The type of auth method being configured. + - This field is immutable. + - Required when the auth method is created. + type: str + choices: ['kubernetes', 'jwt', 'oidc', 'aws-iam'] + description: + description: + - Free form human readable description of the auth method. + type: str + display_name: + description: + - An optional name to use instead of O(name) when displaying information about this auth method. + type: str + max_token_ttl: + description: + - This specifies the maximum life of any token created by this auth method. + - Can be specified in the form of V(60s) or V(5m) (that is, 60 seconds or 5 minutes, respectively). + type: str + token_locality: + description: + - Defines the kind of token that this auth method should produce. + type: str + choices: ['local', 'global'] + config: + description: + - The raw configuration to use for the chosen auth method. + - Contents vary depending upon the O(type) chosen. + - Required when the auth method is created. 
+ type: dict +""" + +EXAMPLES = r""" +- name: Create an auth method + community.general.consul_auth_method: + name: test + type: jwt + config: + jwt_validation_pubkeys: + - | + -----BEGIN PUBLIC KEY----- + MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAu1SU1LfVLPHCozMxH2Mo + 4lgOEePzNm0tRgeLezV6ffAt0gunVTLw7onLRnrq0/IzW7yWR7QkrmBL7jTKEn5u + +qKhbwKfBstIs+bMY2Zkp18gnTxKLxoS2tFczGkPLPgizskuemMghRniWaoLcyeh + kd3qqGElvW/VDL5AaWTg0nLVkjRo9z+40RQzuVaE8AkAFmxZzow3x+VJYKdjykkJ + 0iT9wCS0DRTXu269V264Vf/3jvredZiKRkgwlL9xNAwxXFg0x/XFw005UWVRIkdg + cKWTjpBP2dPwVZ4WWC+9aGVd+Gyn1o0CLelf4rEjGoXbAAEgAqeGUxrcIlbjXfbc + mwIDAQAB + -----END PUBLIC KEY----- + token: "{{ consul_management_token }}" + +- name: Delete auth method + community.general.consul_auth_method: + name: test + state: absent + token: "{{ consul_management_token }}" +""" + +RETURN = r""" +auth_method: + description: The auth method as returned by the Consul HTTP API. + returned: always + type: dict + sample: + Config: + JWTValidationPubkeys: + - |- + -----BEGIN PUBLIC KEY----- + MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAu1SU1LfVLPHCozMxH2Mo + 4lgOEePzNm0tRgeLezV6ffAt0gunVTLw7onLRnrq0/IzW7yWR7QkrmBL7jTKEn5u + +qKhbwKfBstIs+bMY2Zkp18gnTxKLxoS2tFczGkPLPgizskuemMghRniWaoLcyeh + kd3qqGElvW/VDL5AaWTg0nLVkjRo9z+40RQzuVaE8AkAFmxZzow3x+VJYKdjykkJ + 0iT9wCS0DRTXu269V264Vf/3jvredZiKRkgwlL9xNAwxXFg0x/XFw005UWVRIkdg + cKWTjpBP2dPwVZ4WWC+9aGVd+Gyn1o0CLelf4rEjGoXbAAEgAqeGUxrcIlbjXfbc + mwIDAQAB + -----END PUBLIC KEY----- + CreateIndex: 416 + ModifyIndex: 487 + Name: test + Type: jwt +operation: + description: The operation performed. 
+  returned: changed
+  type: str
+  sample: update
+"""
+
+import re
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.consul import (
+    AUTH_ARGUMENTS_SPEC,
+    _ConsulModule,
+    camel_case_key,
+)
+
+
+def normalize_ttl(ttl):
+    matches = re.findall(r"(\d+)(h|m|s)", ttl)
+    ttl = 0
+    for value, unit in matches:
+        value = int(value)
+        if unit == "m":
+            value *= 60
+        elif unit == "h":
+            value *= 60 * 60
+        ttl += value
+
+    new_ttl = ""
+    hours, remainder = divmod(ttl, 3600)
+    if hours:
+        new_ttl += "{0}h".format(hours)
+    minutes, seconds = divmod(remainder, 60)
+    if minutes:
+        new_ttl += "{0}m".format(minutes)
+    if seconds:
+        new_ttl += "{0}s".format(seconds)
+    return new_ttl
+
+
+class ConsulAuthMethodModule(_ConsulModule):
+    api_endpoint = "acl/auth-method"
+    result_key = "auth_method"
+    unique_identifiers = ["name"]
+
+    def map_param(self, k, v, is_update):
+        if k == "config" and v:
+            v = {camel_case_key(k2): v2 for k2, v2 in v.items()}
+        return super(ConsulAuthMethodModule, self).map_param(k, v, is_update)
+
+    def needs_update(self, api_obj, module_obj):
+        if "MaxTokenTTL" in module_obj:
+            module_obj["MaxTokenTTL"] = normalize_ttl(module_obj["MaxTokenTTL"])
+        return super(ConsulAuthMethodModule, self).needs_update(api_obj, module_obj)
+
+
+_ARGUMENT_SPEC = {
+    "name": dict(type="str", required=True),
+    "type": dict(type="str", choices=["kubernetes", "jwt", "oidc", "aws-iam"]),
+    "description": dict(type="str"),
+    "display_name": dict(type="str"),
+    "max_token_ttl": dict(type="str", no_log=False),
+    "token_locality": dict(type="str", choices=["local", "global"]),
+    "config": dict(type="dict"),
+    "state": dict(default="present", choices=["present", "absent"]),
+}
+_ARGUMENT_SPEC.update(AUTH_ARGUMENTS_SPEC)
+
+
+def main():
+    module = AnsibleModule(
+        _ARGUMENT_SPEC,
+        supports_check_mode=True,
+    )
+    consul_module = ConsulAuthMethodModule(module)
+    consul_module.execute()
+
+
+if __name__ ==
"__main__": + main() diff --git a/plugins/modules/consul_binding_rule.py b/plugins/modules/consul_binding_rule.py new file mode 100644 index 0000000000..de1fae9357 --- /dev/null +++ b/plugins/modules/consul_binding_rule.py @@ -0,0 +1,181 @@ +#!/usr/bin/python +# +# Copyright (c) 2024, Florian Apolloner (@apollo13) +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + + +DOCUMENTATION = r""" +module: consul_binding_rule +short_description: Manipulate Consul binding rules +version_added: 8.3.0 +description: + - Allows the addition, modification and deletion of binding rules in a Consul cluster using the agent. For more details + on using and configuring binding rules, see U(https://developer.hashicorp.com/consul/api-docs/acl/binding-rules). +author: + - Florian Apolloner (@apollo13) +extends_documentation_fragment: + - community.general.consul + - community.general.consul.actiongroup_consul + - community.general.consul.token + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: partial + details: + - In check mode the diff misses operational attributes. +options: + state: + description: + - Whether the binding rule should be present or absent. + choices: ['present', 'absent'] + default: present + type: str + name: + description: + - Specifies a name for the binding rule. + - 'Note: This is used to identify the binding rule. But since the API does not support a name, it is prefixed to the + description.' + type: str + required: true + description: + description: + - Free form human readable description of the binding rule. + type: str + auth_method: + description: + - The name of the auth method that this rule applies to. 
+ type: str + required: true + selector: + description: + - Specifies the expression used to match this rule against valid identities returned from an auth method validation. + - If empty this binding rule matches all valid identities returned from the auth method. + type: str + bind_type: + description: + - Specifies the way the binding rule affects a token created at login. + type: str + choices: [service, node, role, templated-policy] + bind_name: + description: + - The name to bind to a token at login-time. + - What it binds to can be adjusted with different values of the O(bind_type) parameter. + type: str + bind_vars: + description: + - Specifies the templated policy variables when O(bind_type) is set to V(templated-policy). + type: dict +""" + +EXAMPLES = r""" +- name: Create a binding rule + community.general.consul_binding_rule: + name: my_name + description: example rule + auth_method: minikube + bind_type: service + bind_name: "{{ serviceaccount.name }}" + token: "{{ consul_management_token }}" + +- name: Remove a binding rule + community.general.consul_binding_rule: + name: my_name + auth_method: minikube + state: absent +""" + +RETURN = r""" +binding_rule: + description: The binding rule as returned by the Consul HTTP API. + returned: always + type: dict + sample: + Description: "my_name: example rule" + AuthMethod: minikube + Selector: serviceaccount.namespace==default + BindType: service + BindName: "{{ serviceaccount.name }}" + CreateIndex: 30 + ID: 59c8a237-e481-4239-9202-45f117950c5f + ModifyIndex: 33 +operation: + description: The operation performed. 
+ returned: changed + type: str + sample: update +""" + + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.consul import ( + AUTH_ARGUMENTS_SPEC, + RequestError, + _ConsulModule, +) + + +class ConsulBindingRuleModule(_ConsulModule): + api_endpoint = "acl/binding-rule" + result_key = "binding_rule" + unique_identifiers = ["id"] + + def read_object(self): + url = "acl/binding-rules?authmethod={0}".format(self.params["auth_method"]) + try: + results = self.get(url) + for result in results: + if result.get("Description").startswith( + "{0}: ".format(self.params["name"]) + ): + return result + except RequestError as e: + if e.status == 404: + return + elif e.status == 403 and b"ACL not found" in e.response_data: + return + raise + + def module_to_obj(self, is_update): + obj = super(ConsulBindingRuleModule, self).module_to_obj(is_update) + del obj["Name"] + return obj + + def prepare_object(self, existing, obj): + final = super(ConsulBindingRuleModule, self).prepare_object(existing, obj) + name = self.params["name"] + description = final.pop("Description", "").split(": ", 1)[-1] + final["Description"] = "{0}: {1}".format(name, description) + return final + + +_ARGUMENT_SPEC = { + "name": dict(type="str", required=True), + "description": dict(type="str"), + "auth_method": dict(type="str", required=True), + "selector": dict(type="str"), + "bind_type": dict( + type="str", choices=["service", "node", "role", "templated-policy"] + ), + "bind_name": dict(type="str"), + "bind_vars": dict(type="dict"), + "state": dict(default="present", choices=["present", "absent"]), +} +_ARGUMENT_SPEC.update(AUTH_ARGUMENTS_SPEC) + + +def main(): + module = AnsibleModule( + _ARGUMENT_SPEC, + supports_check_mode=True, + ) + consul_module = ConsulBindingRuleModule(module) + consul_module.execute() + + +if __name__ == "__main__": + main() diff --git a/plugins/modules/clustering/consul/consul_kv.py b/plugins/modules/consul_kv.py 
similarity index 63% rename from plugins/modules/clustering/consul/consul_kv.py rename to plugins/modules/consul_kv.py index f7b33b856e..d9354e62c5 100644 --- a/plugins/modules/clustering/consul/consul_kv.py +++ b/plugins/modules/consul_kv.py @@ -1,112 +1,112 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- # -# (c) 2015, Steve Gargan -# (c) 2018 Genome Research Ltd. -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# Copyright (c) 2015, Steve Gargan +# Copyright (c) 2018 Genome Research Ltd. +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = ''' +DOCUMENTATION = r""" module: consul_kv -short_description: Manipulate entries in the key/value store of a consul cluster +short_description: Manipulate entries in the key/value store of a Consul cluster description: - - Allows the retrieval, addition, modification and deletion of key/value entries in a - consul cluster via the agent. The entire contents of the record, including - the indices, flags and session are returned as C(value). - - If the C(key) represents a prefix then note that when a value is removed, the existing - value if any is returned as part of the results. + - Allows the retrieval, addition, modification and deletion of key/value entries in a Consul cluster using the agent. The + entire contents of the record, including the indices, flags and session are returned as C(value). + - If the O(key) represents a prefix then note that when a value is removed, the existing value if any is returned as part + of the results. - See http://www.consul.io/docs/agent/http.html#kv for more details. 
requirements: - - python-consul + - py-consul - requests author: - Steve Gargan (@sgargan) - Colin Nolan (@colin-nolan) +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: none options: - state: - description: - - The action to take with the supplied key and value. If the state is 'present' and `value` is set, the key - contents will be set to the value supplied and `changed` will be set to `true` only if the value was - different to the current contents. If the state is 'present' and `value` is not set, the existing value - associated to the key will be returned. The state 'absent' will remove the key/value pair, - again 'changed' will be set to true only if the key actually existed - prior to the removal. An attempt can be made to obtain or free the - lock associated with a key/value pair with the states 'acquire' or - 'release' respectively. a valid session must be supplied to make the - attempt changed will be true if the attempt is successful, false - otherwise. - type: str - choices: [ absent, acquire, present, release ] - default: present - key: - description: - - The key at which the value should be stored. - type: str - required: yes - value: - description: - - The value should be associated with the given key, required if C(state) - is C(present). - type: str - recurse: - description: - - If the key represents a prefix, each entry with the prefix can be - retrieved by setting this to C(yes). - type: bool - retrieve: - description: - - If the I(state) is C(present) and I(value) is set, perform a - read after setting the value and return this value. - default: True - type: bool - session: - description: - - The session that should be used to acquire or release a lock - associated with a key/value pair. 
- type: str - token: - description: - - The token key identifying an ACL rule set that controls access to - the key value pair - type: str - cas: - description: - - Used when acquiring a lock with a session. If the C(cas) is C(0), then - Consul will only put the key if it does not already exist. If the - C(cas) value is non-zero, then the key is only set if the index matches - the ModifyIndex of that key. - type: str - flags: - description: - - Opaque positive integer value that can be passed when setting a value. - type: str - host: - description: - - Host of the consul agent. - type: str - default: localhost - port: - description: - - The port on which the consul agent is running. - type: int - default: 8500 - scheme: - description: - - The protocol scheme on which the consul agent is running. - type: str - default: http - validate_certs: - description: - - Whether to verify the tls certificate of the consul agent. - type: bool - default: 'yes' -''' + state: + description: + - The action to take with the supplied key and value. If the state is V(present) and O(value) is set, the key contents + is set to the value supplied and C(changed) is set to V(true) only if the value was different to the current contents. + If the state is V(present) and O(value) is not set, the existing value associated to the key is returned. The state + V(absent) is used to remove the key/value pair, again C(changed) is set to V(true) only if the key actually existed + prior to the removal. An attempt can be made to obtain or free the lock associated with a key/value pair with the + states V(acquire) or V(release) respectively. A valid session must be supplied to make the attempt C(changed) is V(true) + if the attempt is successful, V(false) otherwise. + type: str + choices: [absent, acquire, present, release] + default: present + key: + description: + - The key at which the value should be stored. 
+ type: str + required: true + value: + description: + - The value should be associated with the given key, required if O(state) is V(present). + type: str + recurse: + description: + - If the key represents a prefix, each entry with the prefix can be retrieved by setting this to V(true). + type: bool + retrieve: + description: + - If the O(state) is V(present) and O(value) is set, perform a read after setting the value and return this value. + default: true + type: bool + session: + description: + - The session that should be used to acquire or release a lock associated with a key/value pair. + type: str + token: + description: + - The token key identifying an ACL rule set that controls access to the key value pair. + type: str + cas: + description: + - Used when acquiring a lock with a session. If the O(cas) is V(0), then Consul only puts the key if it does not already + exist. If the O(cas) value is non-zero, then the key is only set if the index matches the ModifyIndex of that key. + type: str + flags: + description: + - Opaque positive integer value that can be passed when setting a value. + type: str + host: + description: + - Host of the Consul agent. + type: str + default: localhost + port: + description: + - The port on which the Consul agent is running. + type: int + default: 8500 + scheme: + description: + - The protocol scheme on which the Consul agent is running. + type: str + default: http + validate_certs: + description: + - Whether to verify the tls certificate of the Consul agent. + type: bool + default: true + datacenter: + description: + - The name of the datacenter to query. If unspecified, the query defaults to the datacenter of the Consul agent on O(host). 
+ type: str + version_added: 10.0.0 +""" -EXAMPLES = ''' +EXAMPLES = r""" # If the key does not exist, the value associated to the "data" property in `retrieved_key` will be `None` # If the key value is empty string, `retrieved_key["data"]["Value"]` will be `None` - name: Retrieve a value from the key/value store @@ -124,7 +124,7 @@ EXAMPLES = ''' key: somekey state: absent -- name: Add a node to an arbitrary group via consul inventory (see consul.ini) +- name: Add a node to an arbitrary group using Consul inventory (see consul.ini) community.general.consul_kv: key: ansible/groups/dc1/somenode value: top_secret @@ -135,7 +135,7 @@ EXAMPLES = ''' value: 20160509 session: "{{ sessionid }}" state: acquire -''' +""" from ansible.module_utils.common.text.converters import to_text @@ -148,8 +148,8 @@ except ImportError: from ansible.module_utils.basic import AnsibleModule -# Note: although the python-consul documentation implies that using a key with a value of `None` with `put` has a -# special meaning (https://python-consul.readthedocs.io/en/latest/#consul-kv), if not set in the subsequently API call, +# Note: although the py-consul implementation implies that using a key with a value of `None` with `put` has a special +# meaning (https://github.com/criteo/py-consul/blob/master/consul/api/kv.py), if not set in the subsequently API call, # the value just defaults to an empty string (https://www.consul.io/api/kv.html#create-update-key) NOT_SET = None @@ -278,12 +278,13 @@ def remove_value(module): data=existing) -def get_consul_api(module, token=None): +def get_consul_api(module): return consul.Consul(host=module.params.get('host'), port=module.params.get('port'), scheme=module.params.get('scheme'), verify=module.params.get('validate_certs'), - token=module.params.get('token')) + token=module.params.get('token'), + dc=module.params.get('datacenter')) def test_dependencies(module): @@ -297,6 +298,7 @@ def main(): module = AnsibleModule( argument_spec=dict( 
cas=dict(type='str'), + datacenter=dict(type='str'), flags=dict(type='str'), key=dict(type='str', required=True, no_log=False), host=dict(type='str', default='localhost'), diff --git a/plugins/modules/consul_policy.py b/plugins/modules/consul_policy.py new file mode 100644 index 0000000000..95d2ac48d0 --- /dev/null +++ b/plugins/modules/consul_policy.py @@ -0,0 +1,162 @@ +#!/usr/bin/python + +# Copyright (c) 2022, Håkon Lerring +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + + +DOCUMENTATION = r""" +module: consul_policy +short_description: Manipulate Consul policies +version_added: 7.2.0 +description: + - Allows the addition, modification and deletion of policies in a Consul cluster using the agent. For more details on using + and configuring ACLs, see U(https://www.consul.io/docs/guides/acl.html). +author: + - Håkon Lerring (@Hakon) +extends_documentation_fragment: + - community.general.consul + - community.general.consul.actiongroup_consul + - community.general.consul.token + - community.general.attributes +attributes: + check_mode: + support: full + version_added: 8.3.0 + diff_mode: + support: partial + version_added: 8.3.0 + details: + - In check mode the diff misses operational attributes. + action_group: + version_added: 8.3.0 +options: + state: + description: + - Whether the policy should be present or absent. + choices: ['present', 'absent'] + default: present + type: str + valid_datacenters: + description: + - Valid datacenters for the policy. All if list is empty. + type: list + elements: str + name: + description: + - The name that should be associated with the policy, this is opaque to Consul. + required: true + type: str + description: + description: + - Description of the policy. + type: str + rules: + type: str + description: + - Rule document that should be associated with the current policy. 
+""" + +EXAMPLES = r""" +- name: Create a policy with rules + community.general.consul_policy: + host: consul1.example.com + token: some_management_acl + name: foo-access + rules: | + key "foo" { + policy = "read" + } + key "private/foo" { + policy = "deny" + } + +- name: Update the rules associated to a policy + community.general.consul_policy: + host: consul1.example.com + token: some_management_acl + name: foo-access + rules: | + key "foo" { + policy = "read" + } + key "private/foo" { + policy = "deny" + } + event "bbq" { + policy = "write" + } + +- name: Remove a policy + community.general.consul_policy: + host: consul1.example.com + token: some_management_acl + name: foo-access + state: absent +""" + +RETURN = r""" +policy: + description: The policy as returned by the Consul HTTP API. + returned: always + type: dict + sample: + CreateIndex: 632 + Description: Testing + Hash: rj5PeDHddHslkpW7Ij4OD6N4bbSXiecXFmiw2SYXg2A= + Name: foo-access + Rules: |- + key "foo" { + policy = "read" + } + key "private/foo" { + policy = "deny" + } +operation: + description: The operation performed. 
+ returned: changed + type: str + sample: update +""" + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.consul import ( + AUTH_ARGUMENTS_SPEC, + OPERATION_READ, + _ConsulModule, +) + +_ARGUMENT_SPEC = { + "name": dict(required=True), + "description": dict(type="str"), + "rules": dict(type="str"), + "valid_datacenters": dict(type="list", elements="str"), + "state": dict(default="present", choices=["present", "absent"]), +} +_ARGUMENT_SPEC.update(AUTH_ARGUMENTS_SPEC) + + +class ConsulPolicyModule(_ConsulModule): + api_endpoint = "acl/policy" + result_key = "policy" + unique_identifiers = ["id"] + + def endpoint_url(self, operation, identifier=None): + if operation == OPERATION_READ: + return [self.api_endpoint, "name", self.params["name"]] + return super(ConsulPolicyModule, self).endpoint_url(operation, identifier) + + +def main(): + module = AnsibleModule( + _ARGUMENT_SPEC, + supports_check_mode=True, + ) + consul_module = ConsulPolicyModule(module) + consul_module.execute() + + +if __name__ == "__main__": + main() diff --git a/plugins/modules/consul_role.py b/plugins/modules/consul_role.py new file mode 100644 index 0000000000..968de022a2 --- /dev/null +++ b/plugins/modules/consul_role.py @@ -0,0 +1,283 @@ +#!/usr/bin/python + +# Copyright (c) 2022, Håkon Lerring +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + + +DOCUMENTATION = r""" +module: consul_role +short_description: Manipulate Consul roles +version_added: 7.5.0 +description: + - Allows the addition, modification and deletion of roles in a Consul cluster using the agent. For more details on using + and configuring ACLs, see U(https://www.consul.io/docs/guides/acl.html). 
+author: + - Håkon Lerring (@Hakon) +extends_documentation_fragment: + - community.general.consul + - community.general.consul.token + - community.general.consul.actiongroup_consul + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: partial + details: + - In check mode the diff misses operational attributes. + version_added: 8.3.0 + action_group: + version_added: 8.3.0 +options: + name: + description: + - A name used to identify the role. + required: true + type: str + state: + description: + - Whether the role should be present or absent. + choices: ['present', 'absent'] + default: present + type: str + description: + description: + - Description of the role. + - If not specified, the assigned description is not changed. + type: str + policies: + type: list + elements: dict + description: + - List of policies to attach to the role. Each policy is a dict. + - If the parameter is left blank, any policies currently assigned are not changed. + - Any empty array (V([])) clears any policies previously set. + suboptions: + name: + description: + - The name of the policy to attach to this role; see M(community.general.consul_policy) for more info. + - Either this or O(policies[].id) must be specified. + type: str + id: + description: + - The ID of the policy to attach to this role; see M(community.general.consul_policy) for more info. + - Either this or O(policies[].name) must be specified. + type: str + templated_policies: + description: + - The list of templated policies that should be applied to the role. + type: list + elements: dict + version_added: 8.3.0 + suboptions: + template_name: + description: + - The templated policy name. + type: str + required: true + template_variables: + description: + - The templated policy variables. + - Not all templated policies require variables. + type: dict + service_identities: + type: list + elements: dict + description: + - List of service identities to attach to the role. 
+ - If not specified, any service identities currently assigned are not changed. + - If the parameter is an empty array (V([])), any node identities assigned are unassigned. + suboptions: + service_name: + description: + - The name of the node. + - Must not be longer than 256 characters, must start and end with a lowercase alphanumeric character. + - May only contain lowercase alphanumeric characters as well as V(-) and V(_). + - This suboption has been renamed from O(service_identities[].name) to O(service_identities[].service_name) in community.general + 8.3.0. The old name can still be used. + type: str + required: true + aliases: + - name + datacenters: + description: + - The datacenters where the policies are effective. + - This results in effective policy only being valid in this datacenter. + - If an empty array (V([])) is specified, the policies are valid in all datacenters. + - Including those which do not yet exist but may in the future. + type: list + elements: str + node_identities: + type: list + elements: dict + description: + - List of node identities to attach to the role. + - If not specified, any node identities currently assigned are not changed. + - If the parameter is an empty array (V([])), any node identities assigned are unassigned. + suboptions: + node_name: + description: + - The name of the node. + - Must not be longer than 256 characters, must start and end with a lowercase alphanumeric character. + - May only contain lowercase alphanumeric characters as well as V(-) and V(_). + - This suboption has been renamed from O(node_identities[].name) to O(node_identities[].node_name) in community.general + 8.3.0. The old name can still be used. + type: str + required: true + aliases: + - name + datacenter: + description: + - The nodes datacenter. + - This results in effective policy only being valid in this datacenter. 
+ type: str + required: true +""" + +EXAMPLES = r""" +- name: Create a role with 2 policies + community.general.consul_role: + host: consul1.example.com + token: some_management_acl + name: foo-role + policies: + - id: 783beef3-783f-f41f-7422-7087dc272765 + - name: "policy-1" + +- name: Create a role with service identity + community.general.consul_role: + host: consul1.example.com + token: some_management_acl + name: foo-role-2 + service_identities: + - name: web + datacenters: + - dc1 + +- name: Create a role with node identity + community.general.consul_role: + host: consul1.example.com + token: some_management_acl + name: foo-role-3 + node_identities: + - name: node-1 + datacenter: dc2 + +- name: Remove a role + community.general.consul_role: + host: consul1.example.com + token: some_management_acl + name: foo-role-3 + state: absent +""" + +RETURN = r""" +role: + description: The role object. + returned: success + type: dict + sample: + { + "CreateIndex": 39, + "Description": "", + "Hash": "Trt0QJtxVEfvTTIcdTUbIJRr6Dsi6E4EcwSFxx9tCYM=", + "ID": "9a300b8d-48db-b720-8544-a37c0f5dafb5", + "ModifyIndex": 39, + "Name": "foo-role", + "Policies": [ + { + "ID": "b1a00172-d7a1-0e66-a12e-7a4045c4b774", + "Name": "foo-access" + } + ] + } +operation: + description: The operation performed on the role. 
+ returned: changed + type: str + sample: update +""" + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.consul import ( + AUTH_ARGUMENTS_SPEC, + OPERATION_READ, + _ConsulModule, +) + + +class ConsulRoleModule(_ConsulModule): + api_endpoint = "acl/role" + result_key = "role" + unique_identifiers = ["id"] + + def endpoint_url(self, operation, identifier=None): + if operation == OPERATION_READ: + return [self.api_endpoint, "name", self.params["name"]] + return super(ConsulRoleModule, self).endpoint_url(operation, identifier) + + +NAME_ID_SPEC = dict( + name=dict(type="str"), + id=dict(type="str"), +) + +NODE_ID_SPEC = dict( + node_name=dict(type="str", required=True, aliases=["name"]), + datacenter=dict(type="str", required=True), +) + +SERVICE_ID_SPEC = dict( + service_name=dict(type="str", required=True, aliases=["name"]), + datacenters=dict(type="list", elements="str"), +) + +TEMPLATE_POLICY_SPEC = dict( + template_name=dict(type="str", required=True), + template_variables=dict(type="dict"), +) + +_ARGUMENT_SPEC = { + "name": dict(type="str", required=True), + "description": dict(type="str"), + "policies": dict( + type="list", + elements="dict", + options=NAME_ID_SPEC, + mutually_exclusive=[("name", "id")], + required_one_of=[("name", "id")], + ), + "templated_policies": dict( + type="list", + elements="dict", + options=TEMPLATE_POLICY_SPEC, + ), + "node_identities": dict( + type="list", + elements="dict", + options=NODE_ID_SPEC, + ), + "service_identities": dict( + type="list", + elements="dict", + options=SERVICE_ID_SPEC, + ), + "state": dict(default="present", choices=["present", "absent"]), +} +_ARGUMENT_SPEC.update(AUTH_ARGUMENTS_SPEC) + + +def main(): + module = AnsibleModule( + _ARGUMENT_SPEC, + supports_check_mode=True, + ) + consul_module = ConsulRoleModule(module) + consul_module.execute() + + +if __name__ == "__main__": + main() diff --git a/plugins/modules/consul_session.py 
b/plugins/modules/consul_session.py new file mode 100644 index 0000000000..acfb8e5504 --- /dev/null +++ b/plugins/modules/consul_session.py @@ -0,0 +1,297 @@ +#!/usr/bin/python + +# Copyright (c) 2015, Steve Gargan +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + +DOCUMENTATION = r""" +module: consul_session +short_description: Manipulate Consul sessions +description: + - Allows the addition, modification and deletion of sessions in a Consul cluster. These sessions can then be used in conjunction + with key value pairs to implement distributed locks. In depth documentation for working with sessions can be found at + U(http://www.consul.io/docs/internals/sessions.html). +author: + - Steve Gargan (@sgargan) + - Håkon Lerring (@Hakon) +extends_documentation_fragment: + - community.general.consul + - community.general.consul.actiongroup_consul + - community.general.consul.token + - community.general.attributes +attributes: + check_mode: + support: none + diff_mode: + support: none + action_group: + version_added: 8.3.0 +options: + id: + description: + - ID of the session, required when O(state) is either V(info) or V(remove). + type: str + state: + description: + - Whether the session should be present, in other words it should be created if it does not exist, or absent, removed + if present. If created, the O(id) for the session is returned in the output. If V(absent), O(id) is required to remove + the session. Info for a single session, all the sessions for a node or all available sessions can be retrieved by + specifying V(info), V(node) or V(list) for the O(state); for V(node) or V(info), the node O(name) or session O(id) + is required as parameter. + choices: [absent, info, list, node, present] + type: str + default: present + name: + description: + - The name that should be associated with the session. 
Required when O(state=node) is used. + type: str + delay: + description: + - The optional lock delay that can be attached to the session when it is created. Locks for invalidated sessions ar + blocked from being acquired until this delay has expired. Durations are in seconds. + type: int + default: 15 + node: + description: + - The name of the node that with which the session is associated. By default this is the name of the agent. + type: str + datacenter: + description: + - The name of the datacenter in which the session exists or should be created. + type: str + checks: + description: + - Checks that are used to verify the session health. If all the checks fail, the session is invalidated and any locks + associated with the session are released and can be acquired once the associated lock delay has expired. + type: list + elements: str + behavior: + description: + - The optional behavior that can be attached to the session when it is created. This controls the behavior when a session + is invalidated. + choices: [delete, release] + type: str + default: release + ttl: + description: + - Specifies the duration of a session in seconds (between 10 and 86400). 
+ type: int + version_added: 5.4.0 + token: + version_added: 5.6.0 +""" + +EXAMPLES = r""" +- name: Register basic session with Consul + community.general.consul_session: + name: session1 + +- name: Register a session with an existing check + community.general.consul_session: + name: session_with_check + checks: + - existing_check_name + +- name: Register a session with lock_delay + community.general.consul_session: + name: session_with_delay + delay: 20s + +- name: Retrieve info about session by id + community.general.consul_session: + id: session_id + state: info + +- name: Retrieve active sessions + community.general.consul_session: + state: list + +- name: Register session with a ttl + community.general.consul_session: + name: session-with-ttl + ttl: 600 # sec +""" + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.consul import ( + AUTH_ARGUMENTS_SPEC, _ConsulModule +) + + +def execute(module, consul_module): + + state = module.params.get('state') + + if state in ['info', 'list', 'node']: + lookup_sessions(module, consul_module) + elif state == 'present': + update_session(module, consul_module) + else: + remove_session(module, consul_module) + + +def list_sessions(consul_module, datacenter): + return consul_module.get( + 'session/list', + params={'dc': datacenter}) + + +def list_sessions_for_node(consul_module, node, datacenter): + return consul_module.get( + ('session', 'node', node), + params={'dc': datacenter}) + + +def get_session_info(consul_module, session_id, datacenter): + return consul_module.get( + ('session', 'info', session_id), + params={'dc': datacenter}) + + +def lookup_sessions(module, consul_module): + + datacenter = module.params.get('datacenter') + + state = module.params.get('state') + try: + if state == 'list': + sessions_list = list_sessions(consul_module, datacenter) + # Ditch the index, this can be grabbed from the results + if sessions_list and len(sessions_list) >= 
2: + sessions_list = sessions_list[1] + module.exit_json(changed=True, + sessions=sessions_list) + elif state == 'node': + node = module.params.get('node') + sessions = list_sessions_for_node(consul_module, node, datacenter) + module.exit_json(changed=True, + node=node, + sessions=sessions) + elif state == 'info': + session_id = module.params.get('id') + + session_by_id = get_session_info(consul_module, session_id, datacenter) + module.exit_json(changed=True, + session_id=session_id, + sessions=session_by_id) + + except Exception as e: + module.fail_json(msg="Could not retrieve session info %s" % e) + + +def create_session(consul_module, name, behavior, ttl, node, + lock_delay, datacenter, checks): + create_data = { + "LockDelay": lock_delay, + "Node": node, + "Name": name, + "Checks": checks, + "Behavior": behavior, + } + if ttl is not None: + create_data["TTL"] = "%ss" % str(ttl) # TTL is in seconds + create_session_response_dict = consul_module.put( + 'session/create', + params={ + 'dc': datacenter}, + data=create_data) + return create_session_response_dict["ID"] + + +def update_session(module, consul_module): + + name = module.params.get('name') + delay = module.params.get('delay') + checks = module.params.get('checks') + datacenter = module.params.get('datacenter') + node = module.params.get('node') + behavior = module.params.get('behavior') + ttl = module.params.get('ttl') + + try: + session = create_session(consul_module, + name=name, + behavior=behavior, + ttl=ttl, + node=node, + lock_delay=delay, + datacenter=datacenter, + checks=checks + ) + module.exit_json(changed=True, + session_id=session, + name=name, + behavior=behavior, + ttl=ttl, + delay=delay, + checks=checks, + node=node) + except Exception as e: + module.fail_json(msg="Could not create/update session %s" % e) + + +def destroy_session(consul_module, session_id): + return consul_module.put(('session', 'destroy', session_id)) + + +def remove_session(module, consul_module): + session_id = 
module.params.get('id') + + try: + destroy_session(consul_module, session_id) + + module.exit_json(changed=True, + session_id=session_id) + except Exception as e: + module.fail_json(msg="Could not remove session with id '%s' %s" % ( + session_id, e)) + + +def main(): + argument_spec = dict( + checks=dict(type='list', elements='str'), + delay=dict(type='int', default='15'), + behavior=dict( + type='str', + default='release', + choices=[ + 'release', + 'delete']), + ttl=dict(type='int'), + id=dict(type='str'), + name=dict(type='str'), + node=dict(type='str'), + state=dict( + type='str', + default='present', + choices=[ + 'absent', + 'info', + 'list', + 'node', + 'present']), + datacenter=dict(type='str'), + **AUTH_ARGUMENTS_SPEC + ) + + module = AnsibleModule( + argument_spec=argument_spec, + required_if=[ + ('state', 'node', ['name']), + ('state', 'info', ['id']), + ('state', 'remove', ['id']), + ], + supports_check_mode=False + ) + consul_module = _ConsulModule(module) + + try: + execute(module, consul_module) + except Exception as e: + module.fail_json(msg=str(e)) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/consul_token.py b/plugins/modules/consul_token.py new file mode 100644 index 0000000000..cbe49ee2af --- /dev/null +++ b/plugins/modules/consul_token.py @@ -0,0 +1,326 @@ +#!/usr/bin/python +# +# Copyright (c) 2024, Florian Apolloner (@apollo13) +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + + +DOCUMENTATION = r""" +module: consul_token +short_description: Manipulate Consul tokens +version_added: 8.3.0 +description: + - Allows the addition, modification and deletion of tokens in a Consul cluster using the agent. For more details on using + and configuring ACLs, see U(https://www.consul.io/docs/guides/acl.html). 
+author: + - Florian Apolloner (@apollo13) +extends_documentation_fragment: + - community.general.consul + - community.general.consul.token + - community.general.consul.actiongroup_consul + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: partial + details: + - In check mode the diff misses operational attributes. + action_group: + version_added: 8.3.0 +options: + state: + description: + - Whether the token should be present or absent. + choices: ['present', 'absent'] + default: present + type: str + accessor_id: + description: + - Specifies a UUID to use as the token's Accessor ID. If not specified a UUID is generated for this field. + type: str + secret_id: + description: + - Specifies a UUID to use as the token's Secret ID. If not specified a UUID is generated for this field. + type: str + description: + description: + - Free form human readable description of the token. + type: str + policies: + type: list + elements: dict + description: + - List of policies to attach to the token. Each policy is a dict. + - If the parameter is left blank, any policies currently assigned are not changed. + - Any empty array (V([])) clears any policies previously set. + suboptions: + name: + description: + - The name of the policy to attach to this token; see M(community.general.consul_policy) for more info. + - Either this or O(policies[].id) must be specified. + type: str + id: + description: + - The ID of the policy to attach to this token; see M(community.general.consul_policy) for more info. + - Either this or O(policies[].name) must be specified. + type: str + roles: + type: list + elements: dict + description: + - List of roles to attach to the token. Each role is a dict. + - If the parameter is left blank, any roles currently assigned are not changed. + - Any empty array (V([])) clears any roles previously set. 
+ suboptions: + name: + description: + - The name of the role to attach to this token; see M(community.general.consul_role) for more info. + - Either this or O(roles[].id) must be specified. + type: str + id: + description: + - The ID of the role to attach to this token; see M(community.general.consul_role) for more info. + - Either this or O(roles[].name) must be specified. + type: str + templated_policies: + description: + - The list of templated policies that should be applied to the role. + type: list + elements: dict + suboptions: + template_name: + description: + - The templated policy name. + type: str + required: true + template_variables: + description: + - The templated policy variables. + - Not all templated policies require variables. + type: dict + service_identities: + type: list + elements: dict + description: + - List of service identities to attach to the token. + - If not specified, any service identities currently assigned are not changed. + - If the parameter is an empty array (V([])), any node identities assigned are unassigned. + suboptions: + service_name: + description: + - The name of the service. + - Must not be longer than 256 characters, must start and end with a lowercase alphanumeric character. + - May only contain lowercase alphanumeric characters as well as V(-) and V(_). + type: str + required: true + datacenters: + description: + - The datacenters where the token is effective. + - If an empty array (V([])) is specified, the token is valid in all datacenters. + - Including those which do not yet exist but may in the future. + type: list + elements: str + node_identities: + type: list + elements: dict + description: + - List of node identities to attach to the token. + - If not specified, any node identities currently assigned are not changed. + - If the parameter is an empty array (V([])), any node identities assigned are unassigned. + suboptions: + node_name: + description: + - The name of the node. 
+ - Must not be longer than 256 characters, must start and end with a lowercase alphanumeric character. + - May only contain lowercase alphanumeric characters as well as V(-) and V(_). + type: str + required: true + datacenter: + description: + - The nodes datacenter. + - This results in effective token only being valid in this datacenter. + type: str + required: true + local: + description: + - If true, indicates that the token should not be replicated globally and instead be local to the current datacenter. + type: bool + expiration_ttl: + description: + - This is a convenience field and if set it initializes the C(expiration_time). Can be specified in the form of V(60s) + or V(5m) (that is, 60 seconds or 5 minutes, respectively). Ingored when the token is updated! + type: str +""" + +EXAMPLES = r""" +- name: Create / Update a token by accessor_id + community.general.consul_token: + state: present + accessor_id: 07a7de84-c9c7-448a-99cc-beaf682efd21 + token: 8adddd91-0bd6-d41d-ae1a-3b49cfa9a0e8 + roles: + - name: role1 + - name: role2 + service_identities: + - service_name: service1 + datacenters: [dc1, dc2] + node_identities: + - node_name: node1 + datacenter: dc1 + expiration_ttl: 50m + +- name: Delete a token + community.general.consul_token: + state: absent + accessor_id: 07a7de84-c9c7-448a-99cc-beaf682efd21 + token: 8adddd91-0bd6-d41d-ae1a-3b49cfa9a0e8 +""" + +RETURN = r""" +token: + description: The token as returned by the Consul HTTP API. + returned: always + type: dict + sample: + AccessorID: 07a7de84-c9c7-448a-99cc-beaf682efd21 + CreateIndex: 632 + CreateTime: "2024-01-14T21:53:01.402749174+01:00" + Description: Testing + Hash: rj5PeDHddHslkpW7Ij4OD6N4bbSXiecXFmiw2SYXg2A= + Local: false + ModifyIndex: 633 + SecretID: bd380fba-da17-7cee-8576-8d6427c6c930 + ServiceIdentities: ["ServiceName": "test"] +operation: + description: The operation performed. 
+ returned: changed + type: str + sample: update +""" + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.consul import ( + AUTH_ARGUMENTS_SPEC, + _ConsulModule, +) + + +def normalize_link_obj(api_obj, module_obj, key): + api_objs = api_obj.get(key) + module_objs = module_obj.get(key) + if api_objs is None or module_objs is None: + return + name_to_id = {i["Name"]: i["ID"] for i in api_objs} + id_to_name = {i["ID"]: i["Name"] for i in api_objs} + + for obj in module_objs: + identifier = obj.get("ID") + name = obj.get("Name") + if identifier and not name and identifier in id_to_name: + obj["Name"] = id_to_name[identifier] + if not identifier and name and name in name_to_id: + obj["ID"] = name_to_id[name] + + +class ConsulTokenModule(_ConsulModule): + api_endpoint = "acl/token" + result_key = "token" + unique_identifiers = ["accessor_id"] + + create_only_fields = {"expiration_ttl"} + + def read_object(self): + # if `accessor_id` is not supplied we can only create objects and are not idempotent + if not self.id_from_obj(self.params): + return None + return super(ConsulTokenModule, self).read_object() + + def needs_update(self, api_obj, module_obj): + # SecretID is usually not supplied + if "SecretID" not in module_obj and "SecretID" in api_obj: + del api_obj["SecretID"] + normalize_link_obj(api_obj, module_obj, "Roles") + normalize_link_obj(api_obj, module_obj, "Policies") + # ExpirationTTL is only supported on create, not for update + # it writes to ExpirationTime, so we need to remove that as well + if "ExpirationTTL" in module_obj: + del module_obj["ExpirationTTL"] + return super(ConsulTokenModule, self).needs_update(api_obj, module_obj) + + +NAME_ID_SPEC = dict( + name=dict(type="str"), + id=dict(type="str"), +) + +NODE_ID_SPEC = dict( + node_name=dict(type="str", required=True), + datacenter=dict(type="str", required=True), +) + +SERVICE_ID_SPEC = dict( + service_name=dict(type="str", 
required=True), + datacenters=dict(type="list", elements="str"), +) + +TEMPLATE_POLICY_SPEC = dict( + template_name=dict(type="str", required=True), + template_variables=dict(type="dict"), +) + + +_ARGUMENT_SPEC = { + "description": dict(), + "accessor_id": dict(), + "secret_id": dict(no_log=True), + "roles": dict( + type="list", + elements="dict", + options=NAME_ID_SPEC, + mutually_exclusive=[("name", "id")], + required_one_of=[("name", "id")], + ), + "policies": dict( + type="list", + elements="dict", + options=NAME_ID_SPEC, + mutually_exclusive=[("name", "id")], + required_one_of=[("name", "id")], + ), + "templated_policies": dict( + type="list", + elements="dict", + options=TEMPLATE_POLICY_SPEC, + ), + "node_identities": dict( + type="list", + elements="dict", + options=NODE_ID_SPEC, + ), + "service_identities": dict( + type="list", + elements="dict", + options=SERVICE_ID_SPEC, + ), + "local": dict(type="bool"), + "expiration_ttl": dict(type="str"), + "state": dict(default="present", choices=["present", "absent"]), +} +_ARGUMENT_SPEC.update(AUTH_ARGUMENTS_SPEC) + + +def main(): + module = AnsibleModule( + _ARGUMENT_SPEC, + required_if=[("state", "absent", ["accessor_id"])], + supports_check_mode=True, + ) + consul_module = ConsulTokenModule(module) + consul_module.execute() + + +if __name__ == "__main__": + main() diff --git a/plugins/modules/packaging/os/copr.py b/plugins/modules/copr.py similarity index 84% rename from plugins/modules/packaging/os/copr.py rename to plugins/modules/copr.py index cb31e8c9fb..4d627ceb8f 100644 --- a/plugins/modules/packaging/os/copr.py +++ b/plugins/modules/copr.py @@ -1,49 +1,66 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- -# Copyright: (c) 2020, Silvie Chlupova -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# Copyright (c) 2020, Silvie Chlupova +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# 
SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations DOCUMENTATION = r""" ---- module: copr short_description: Manage one of the Copr repositories version_added: 2.0.0 description: This module can enable, disable or remove the specified repository. author: Silvie Chlupova (@schlupov) requirements: - - dnf - - dnf-plugins-core + - dnf + - dnf-plugins-core notes: - - Supports C(check_mode). + - Supports C(check_mode). +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: none options: - host: - description: The Copr host to work with. - default: copr.fedorainfracloud.org - type: str - protocol: - description: This indicate which protocol to use with the host. - default: https - type: str - name: - description: Copr directory name, for example C(@copr/copr-dev). - required: true - type: str - state: - description: - - Whether to set this project as C(enabled), C(disabled) or C(absent). - default: enabled - type: str - choices: [absent, enabled, disabled] - chroot: - description: - - The name of the chroot that you want to enable/disable/remove in the project, - for example C(epel-7-x86_64). Default chroot is determined by the operating system, - version of the operating system, and architecture on which the module is run. - type: str + host: + description: The Copr host to work with. + default: copr.fedorainfracloud.org + type: str + protocol: + description: This indicate which protocol to use with the host. + default: https + type: str + name: + description: Copr directory name, for example C(@copr/copr-dev). + required: true + type: str + state: + description: + - Whether to set this project as V(enabled), V(disabled), or V(absent). 
+ default: enabled + type: str + choices: [absent, enabled, disabled] + chroot: + description: + - The name of the chroot that you want to enable/disable/remove in the project, for example V(epel-7-x86_64). Default + chroot is determined by the operating system, version of the operating system, and architecture on which the module + is run. + type: str + includepkgs: + description: List of packages to include. + required: false + type: list + elements: str + version_added: 9.4.0 + excludepkgs: + description: List of packages to exclude. + required: false + type: list + elements: str + version_added: 9.4.0 """ EXAMPLES = r""" @@ -58,6 +75,13 @@ EXAMPLES = r""" community.general.copr: state: absent name: '@copr/integration_tests' + +- name: Install Caddy + community.general.copr: + name: '@caddy/caddy' + chroot: fedora-rawhide-{{ ansible_facts.architecture }} + includepkgs: + - caddy """ RETURN = r""" @@ -77,6 +101,7 @@ repo: import stat import os import traceback +from urllib.error import HTTPError try: import dnf @@ -84,15 +109,29 @@ try: import dnf.repodict from dnf.conf import Conf HAS_DNF_PACKAGES = True + DNF_IMP_ERR = None except ImportError: DNF_IMP_ERR = traceback.format_exc() HAS_DNF_PACKAGES = False -from ansible.module_utils.six.moves.urllib.error import HTTPError +from ansible.module_utils.common import respawn from ansible.module_utils.basic import missing_required_lib -from ansible.module_utils import distro # pylint: disable=import-error -from ansible.module_utils.basic import AnsibleModule # pylint: disable=import-error -from ansible.module_utils.urls import open_url # pylint: disable=import-error +from ansible.module_utils import distro +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.urls import open_url + + +def _respawn_dnf(): + if respawn.has_respawned(): + return + system_interpreters = ( + "/usr/libexec/platform-python", + "/usr/bin/python3", + "/usr/bin/python", + ) + interpreter = 
respawn.probe_interpreters_for_module(system_interpreters, "dnf") + if interpreter: + respawn.respawn_module(interpreter) class CoprModule(object): @@ -231,6 +270,12 @@ class CoprModule(object): """ if not repo_content: repo_content = self._download_repo_info() + if self.ansible_module.params["includepkgs"]: + includepkgs_value = ','.join(self.ansible_module.params['includepkgs']) + repo_content = repo_content.rstrip('\n') + '\nincludepkgs={0}\n'.format(includepkgs_value) + if self.ansible_module.params["excludepkgs"]: + excludepkgs_value = ','.join(self.ansible_module.params['excludepkgs']) + repo_content = repo_content.rstrip('\n') + '\nexcludepkgs={0}\n'.format(excludepkgs_value) if self._compare_repo_content(repo_filename_path, repo_content): return False if not self.check_mode: @@ -446,11 +491,14 @@ def run_module(): name=dict(type="str", required=True), state=dict(type="str", choices=["enabled", "disabled", "absent"], default="enabled"), chroot=dict(type="str"), + includepkgs=dict(type='list', elements="str"), + excludepkgs=dict(type='list', elements="str"), ) module = AnsibleModule(argument_spec=module_args, supports_check_mode=True) params = module.params if not HAS_DNF_PACKAGES: + _respawn_dnf() module.fail_json(msg=missing_required_lib("dnf"), exception=DNF_IMP_ERR) CoprModule.ansible_module = module diff --git a/plugins/modules/cpanm.py b/plugins/modules/cpanm.py new file mode 100644 index 0000000000..39844d5f74 --- /dev/null +++ b/plugins/modules/cpanm.py @@ -0,0 +1,294 @@ +#!/usr/bin/python + +# Copyright (c) 2012, Franck Cuny +# Copyright (c) 2021, Alexei Znamensky +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + + +DOCUMENTATION = r""" +module: cpanm +short_description: Manages Perl library dependencies +description: + - Manage Perl library dependencies using cpanminus. 
+extends_documentation_fragment:
+  - community.general.attributes
+attributes:
+  check_mode:
+    support: none
+  diff_mode:
+    support: none
+options:
+  name:
+    type: str
+    description:
+      - The Perl library to install. Valid values change according to the O(mode), see notes for more details.
+      - Note that for installing from a local path the parameter O(from_path) should be used.
+    aliases: [pkg]
+  from_path:
+    type: path
+    description:
+      - The local directory or C(tar.gz) file to install from.
+  notest:
+    description:
+      - Do not run unit tests.
+    type: bool
+    default: false
+  locallib:
+    description:
+      - Specify the install base to install modules.
+    type: path
+  mirror:
+    description:
+      - Specifies the base URL for the CPAN mirror to use.
+    type: str
+  mirror_only:
+    description:
+      - Use the mirror's index file instead of the CPAN Meta DB.
+    type: bool
+    default: false
+  installdeps:
+    description:
+      - Only install dependencies.
+    type: bool
+    default: false
+  install_recommendations:
+    description:
+      - If V(true), installs dependencies declared as recommends per META spec.
+      - If V(false), it ensures the dependencies declared as recommends are not installed, overriding any decision made earlier
+        in E(PERL_CPANM_OPT).
+      - If parameter is not set, C(cpanm) uses its existing defaults.
+      - When these dependencies fail to install, cpanm continues the installation, since they are just a recommendation.
+    type: bool
+    version_added: 10.3.0
+  install_suggestions:
+    description:
+      - If V(true), installs dependencies declared as suggests per META spec.
+      - If V(false), it ensures the dependencies declared as suggests are not installed, overriding any decision made earlier
+        in E(PERL_CPANM_OPT).
+      - If parameter is not set, C(cpanm) uses its existing defaults.
+      - When these dependencies fail to install, cpanm continues the installation, since they are just a suggestion.
+    type: bool
+    version_added: 10.3.0
+  version:
+    description:
+      - Version specification for the perl module. When O(mode) is V(new), C(cpanm) version operators are accepted.
+    type: str
+  executable:
+    description:
+      - Override the path to the cpanm executable.
+    type: path
+  mode:
+    description:
+      - Controls the module behavior. See notes below for more details.
+      - The default changed from V(compatibility) to V(new) in community.general 9.0.0.
+      - 'O(mode=new): The O(name) parameter may refer to a module name, a distribution file, a HTTP URL or a git repository
+        URL as described in C(cpanminus) documentation. C(cpanm) version specifiers are recognized. This is the default mode
+        from community.general 9.0.0 onwards.'
+      - 'O(mode=compatibility): This was the default mode before community.general 9.0.0. O(name) must be either a module
+        name or a distribution file. If the perl module given by O(name) is installed (at the exact O(version) when specified),
+        then nothing happens. Otherwise, it is installed using the C(cpanm) executable. O(name) cannot be an URL, or a git
+        URL. C(cpanm) version specifiers do not work in this mode.'
+      - 'B(ATTENTION): V(compatibility) mode is deprecated and will be removed in community.general 13.0.0.'
+    type: str
+    choices: [compatibility, new]
+    default: new
+    version_added: 3.0.0
+  name_check:
+    description:
+      - When O(mode=new), this parameter can be used to check if there is a module O(name) installed (at O(version), when
+        specified).
+    type: str
+    version_added: 3.0.0
+notes:
+  - Please note that L(cpanm, https://metacpan.org/dist/App-cpanminus/view/bin/cpanm) must be installed on the remote host.
+seealso:
+  - name: C(cpanm) command manual page
+    description: Manual page for the command.
+ link: https://metacpan.org/dist/App-cpanminus/view/bin/cpanm +author: + - "Franck Cuny (@fcuny)" + - "Alexei Znamensky (@russoz)" +""" + +EXAMPLES = r""" +- name: Install Dancer perl package + community.general.cpanm: + name: Dancer + +- name: Install version 0.99_05 of the Plack perl package + community.general.cpanm: + name: MIYAGAWA/Plack-0.99_05.tar.gz + +- name: Install Dancer into the specified locallib + community.general.cpanm: + name: Dancer + locallib: /srv/webapps/my_app/extlib + +- name: Install perl dependencies from local directory + community.general.cpanm: + from_path: /srv/webapps/my_app/src/ + +- name: Install Dancer perl package without running the unit tests in indicated locallib + community.general.cpanm: + name: Dancer + notest: true + locallib: /srv/webapps/my_app/extlib + +- name: Install Dancer perl package from a specific mirror + community.general.cpanm: + name: Dancer + mirror: 'http://cpan.cpantesters.org/' + +- name: Install Dancer perl package into the system root path + become: true + community.general.cpanm: + name: Dancer + +- name: Install Dancer if it is not already installed OR the installed version is older than version 1.0 + community.general.cpanm: + name: Dancer + version: '1.0' +""" + +RETURN = r""" +cpanm_version: + description: Version of CPANMinus. 
+ type: str + returned: always + sample: "1.7047" + version_added: 10.0.0 +""" + + +import os +import re + +from ansible_collections.community.general.plugins.module_utils.cmd_runner import CmdRunner, cmd_runner_fmt +from ansible_collections.community.general.plugins.module_utils.module_helper import ModuleHelper + + +class CPANMinus(ModuleHelper): + output_params = ['name', 'version'] + module = dict( + argument_spec=dict( + name=dict(type='str', aliases=['pkg']), + version=dict(type='str'), + from_path=dict(type='path'), + notest=dict(type='bool', default=False), + locallib=dict(type='path'), + mirror=dict(type='str'), + mirror_only=dict(type='bool', default=False), + installdeps=dict(type='bool', default=False), + install_recommendations=dict(type='bool'), + install_suggestions=dict(type='bool'), + executable=dict(type='path'), + mode=dict(type='str', default='new', choices=['compatibility', 'new']), + name_check=dict(type='str') + ), + required_one_of=[('name', 'from_path')], + + ) + command = 'cpanm' + command_args_formats = dict( + notest=cmd_runner_fmt.as_bool("--notest"), + locallib=cmd_runner_fmt.as_opt_val('--local-lib'), + mirror=cmd_runner_fmt.as_opt_val('--mirror'), + mirror_only=cmd_runner_fmt.as_bool("--mirror-only"), + installdeps=cmd_runner_fmt.as_bool("--installdeps"), + install_recommendations=cmd_runner_fmt.as_bool("--with-recommends", "--without-recommends", ignore_none=True), + install_suggestions=cmd_runner_fmt.as_bool("--with-suggests", "--without-suggests", ignore_none=True), + pkg_spec=cmd_runner_fmt.as_list(), + cpanm_version=cmd_runner_fmt.as_fixed("--version"), + ) + + def __init_module__(self): + v = self.vars + if v.mode == "compatibility": + if v.name_check: + self.do_raise("Parameter name_check can only be used with mode=new") + self.deprecate("'mode=compatibility' is deprecated, use 'mode=new' instead", version='13.0.0', collection_name="community.general") + else: + if v.name and v.from_path: + self.do_raise("Parameters 'name' and 
'from_path' are mutually exclusive when 'mode=new'") + + self.command = v.executable if v.executable else self.command + self.runner = CmdRunner(self.module, self.command, self.command_args_formats, check_rc=True) + self.vars.binary = self.runner.binary + + with self.runner("cpanm_version") as ctx: + rc, out, err = ctx.run() + line = out.split('\n')[0] + match = re.search(r"version\s+([\d\.]+)\s+", line) + if not match: + self.do_raise("Failed to determine version number. First line of output: {0}".format(line)) + self.vars.cpanm_version = match.group(1) + + def _is_package_installed(self, name, locallib, version): + def process(rc, out, err): + return rc == 0 + + if name is None or name.endswith('.tar.gz'): + return False + version = "" if version is None else " " + version + + env = {"PERL5LIB": "%s/lib/perl5" % locallib} if locallib else {} + runner = CmdRunner(self.module, ["perl", "-le"], {"mod": cmd_runner_fmt.as_list()}, check_rc=False, environ_update=env) + with runner("mod", output_process=process) as ctx: + return ctx.run(mod='use %s%s;' % (name, version)) + + def sanitize_pkg_spec_version(self, pkg_spec, version): + if version is None: + return pkg_spec + if pkg_spec.endswith('.tar.gz'): + self.do_raise(msg="parameter 'version' must not be used when installing from a file") + if os.path.isdir(pkg_spec): + self.do_raise(msg="parameter 'version' must not be used when installing from a directory") + if pkg_spec.endswith('.git'): + if version.startswith('~'): + self.do_raise(msg="operator '~' not allowed in version parameter when installing from git repository") + version = version if version.startswith('@') else '@' + version + elif version[0] not in ('@', '~'): + version = '~' + version + return pkg_spec + version + + def __run__(self): + def process(rc, out, err): + if self.vars.mode == "compatibility" and rc != 0: + self.do_raise(msg=err, cmd=self.vars.cmd_args) + return 'is up to date' not in err and 'is up to date' not in out + + v = self.vars + 
pkg_param = 'from_path' if v.from_path else 'name' + + if v.mode == 'compatibility': + if self._is_package_installed(v.name, v.locallib, v.version): + return + pkg_spec = v[pkg_param] + else: + installed = self._is_package_installed(v.name_check, v.locallib, v.version) if v.name_check else False + if installed: + return + pkg_spec = self.sanitize_pkg_spec_version(v[pkg_param], v.version) + + with self.runner([ + 'notest', + 'locallib', + 'mirror', + 'mirror_only', + 'installdeps', + 'install_recommendations', + 'install_suggestions', + 'pkg_spec' + ], output_process=process) as ctx: + self.changed = ctx.run(pkg_spec=pkg_spec) + + +def main(): + CPANMinus.execute() + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/system/cronvar.py b/plugins/modules/cronvar.py similarity index 89% rename from plugins/modules/system/cronvar.py rename to plugins/modules/cronvar.py index 9871668ac0..b67b94fe95 100644 --- a/plugins/modules/system/cronvar.py +++ b/plugins/modules/cronvar.py @@ -1,8 +1,8 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- -# Copyright: (c) 2017, Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# Copyright (c) 2017, Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later # Cronvar Plugin: The goal of this plugin is to provide an idempotent # method for set cron variable values. It should play well with the @@ -13,67 +13,71 @@ # This module is based on the crontab module. -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = r''' ---- +DOCUMENTATION = r""" module: cronvar short_description: Manage variables in crontabs description: - Use this module to manage crontab variables. - This module allows you to create, update, or delete cron variable definitions. 
+extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: none + diff_mode: + support: none options: name: description: - Name of the crontab variable. type: str - required: yes + required: true value: description: - The value to set this variable to. - - Required if C(state=present). + - Required if O(state=present). type: str insertafter: description: - - If specified, the variable will be inserted after the variable specified. - - Used with C(state=present). + - If specified, the variable is inserted after the variable specified. + - Used with O(state=present). type: str insertbefore: description: - - Used with C(state=present). If specified, the variable will be inserted - just before the variable specified. + - Used with O(state=present). If specified, the variable is inserted just before the variable specified. type: str state: description: - Whether to ensure that the variable is present or absent. type: str - choices: [ absent, present ] + choices: [absent, present] default: present user: description: - The specific user whose crontab should be modified. - - This parameter defaults to C(root) when unset. + - This parameter defaults to V(root) when unset. type: str cron_file: description: - If specified, uses this file instead of an individual user's crontab. - - Without a leading C(/), this is assumed to be in I(/etc/cron.d). - - With a leading C(/), this is taken as absolute. + - Without a leading V(/), this is assumed to be in C(/etc/cron.d). + - With a leading V(/), this is taken as absolute. type: str backup: description: - - If set, create a backup of the crontab before it is modified. - The location of the backup is returned in the C(backup) variable by this module. + - If set, create a backup of the crontab before it is modified. The location of the backup is returned in the C(backup) + variable by this module. 
type: bool - default: no + default: false requirements: - cron author: -- Doug Luce (@dougluce) -''' + - Doug Luce (@dougluce) +""" -EXAMPLES = r''' +EXAMPLES = r""" - name: Ensure entry like "EMAIL=doug@ansibmod.con.com" exists community.general.cronvar: name: EMAIL @@ -90,7 +94,7 @@ EXAMPLES = r''' value: /var/log/yum-autoupdate.log user: root cron_file: ansible_yum-autoupdate -''' +""" import os import platform @@ -99,9 +103,9 @@ import re import shlex import sys import tempfile +from shlex import quote as shlex_quote from ansible.module_utils.basic import AnsibleModule -from ansible.module_utils.six.moves import shlex_quote class CronVarError(Exception): @@ -129,6 +133,9 @@ class CronVar(object): self.cron_file = cron_file else: self.cron_file = os.path.join('/etc/cron.d', cron_file) + parent_dir = os.path.dirname(self.cron_file) + if parent_dir and not os.path.isdir(parent_dir): + module.fail_json(msg="Parent directory '{}' does not exist for cron_file: '{}'".format(parent_dir, cron_file)) else: self.cron_file = None @@ -140,9 +147,8 @@ class CronVar(object): if self.cron_file: # read the cronfile try: - f = open(self.cron_file, 'r') - self.lines = f.read().splitlines() - f.close() + with open(self.cron_file, 'r') as f: + self.lines = f.read().splitlines() except IOError: # cron file does not exist return @@ -174,6 +180,7 @@ class CronVar(object): fileh = open(backup_file, 'w') elif self.cron_file: fileh = open(self.cron_file, 'w') + path = None else: filed, path = tempfile.mkstemp(prefix='crontab') fileh = os.fdopen(filed, 'w') @@ -387,6 +394,8 @@ def main(): old_value = cronvar.find_variable(name) if ensure_present: + if value == "" and old_value != "": + value = '""' if old_value is None: cronvar.add_variable(name, value, insertbefore, insertafter) changed = True diff --git a/plugins/modules/system/crypttab.py b/plugins/modules/crypttab.py similarity index 84% rename from plugins/modules/system/crypttab.py rename to plugins/modules/crypttab.py index 
8eeec56d3d..4eb8e4b6c2 100644 --- a/plugins/modules/system/crypttab.py +++ b/plugins/modules/crypttab.py @@ -1,62 +1,63 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- -# Copyright: (c) 2014, Steve -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# Copyright (c) 2014, Steve +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = r''' ---- +DOCUMENTATION = r""" module: crypttab short_description: Encrypted Linux block devices description: - Control Linux encrypted block devices that are set up during system boot in C(/etc/crypttab). +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: none options: name: description: - - Name of the encrypted block device as it appears in the C(/etc/crypttab) file, or - optionally prefixed with C(/dev/mapper/), as it appears in the filesystem. I(/dev/mapper/) - will be stripped from I(name). + - Name of the encrypted block device as it appears in the C(/etc/crypttab) file, or optionally prefixed with V(/dev/mapper/), + as it appears in the filesystem. V(/dev/mapper/) is stripped from O(name). type: str - required: yes + required: true state: description: - - Use I(present) to add a line to C(/etc/crypttab) or update its definition - if already present. - - Use I(absent) to remove a line with matching I(name). - - Use I(opts_present) to add options to those already present; options with - different values will be updated. - - Use I(opts_absent) to remove options from the existing set. + - Use V(present) to add a line to C(/etc/crypttab) or update its definition if already present. + - Use V(absent) to remove a line with matching O(name). 
+ - Use V(opts_present) to add options to those already present; options with different values are updated. + - Use V(opts_absent) to remove options from the existing set. type: str - required: yes - choices: [ absent, opts_absent, opts_present, present ] + required: true + choices: [absent, opts_absent, opts_present, present] backing_device: description: - - Path to the underlying block device or file, or the UUID of a block-device - prefixed with I(UUID=). + - Path to the underlying block device or file, or the UUID of a block-device prefixed with V(UUID=). type: str password: description: - - Encryption password, the path to a file containing the password, or - C(-) or unset if the password should be entered at boot. + - Encryption password, the path to a file containing the password, or V(-) or unset if the password should be entered + at boot. type: path opts: description: - - A comma-delimited list of options. See C(crypttab(5) ) for details. + - A comma-delimited list of options. See V(crypttab(5\)) for details. type: str path: description: - - Path to file to use instead of C(/etc/crypttab). + - Path to file to use instead of V(/etc/crypttab). - This might be useful in a chroot environment. 
type: path default: /etc/crypttab author: -- Steve (@groks) -''' + - Steve (@groks) +""" -EXAMPLES = r''' +EXAMPLES = r""" - name: Set the options explicitly a device which must already exist community.general.crypttab: name: luks-home @@ -69,8 +70,16 @@ EXAMPLES = r''' state: opts_present opts: discard loop: '{{ ansible_mounts }}' - when: "'/dev/mapper/luks-' in {{ item.device }}" -''' + when: "'/dev/mapper/luks-' in item.device" + +- name: Add entry to /etc/crypttab for luks-home with password file + community.general.crypttab: + name: luks-home + backing_device: UUID=123e4567-e89b-12d3-a456-426614174000 + password: /root/keys/luks-home.key + opts: discard,cipher=aes-cbc-essiv:sha256 + state: present +""" import os import traceback @@ -113,7 +122,7 @@ def main(): ('backing_device', backing_device), ('password', password), ('opts', opts)): - if (arg is not None and (' ' in arg or '\t' in arg or arg == '')): + if arg is not None and (' ' in arg or '\t' in arg or arg == ''): module.fail_json(msg="invalid '%s': contains white space or is empty" % arg_name, **module.params) @@ -151,11 +160,8 @@ def main(): changed, reason = existing_line.opts.remove(opts) if changed and not module.check_mode: - try: - f = open(path, 'wb') + with open(path, 'wb') as f: f.write(to_bytes(crypttab, errors='surrogate_or_strict')) - finally: - f.close() module.exit_json(changed=changed, msg=reason, **module.params) @@ -170,12 +176,9 @@ class Crypttab(object): os.makedirs(os.path.dirname(path)) open(path, 'a').close() - try: - f = open(path, 'r') + with open(path, 'r') as f: for line in f.readlines(): self._lines.append(Line(line)) - finally: - f.close() def add(self, line): self._lines.append(line) diff --git a/plugins/modules/database/mssql/mssql_script.py b/plugins/modules/database/mssql/mssql_script.py deleted file mode 100644 index bb80607ccf..0000000000 --- a/plugins/modules/database/mssql/mssql_script.py +++ /dev/null @@ -1,301 +0,0 @@ -#!/usr/bin/python - -# Copyright: (c) 2021, Kris 
Budde = 2.7 - - pymssql - -author: - - Kris Budde (@kbudde) -''' - -EXAMPLES = r''' -- name: Check DB connection - community.general.mssql_script: - login_user: "{{ mssql_login_user }}" - login_password: "{{ mssql_login_password }}" - login_host: "{{ mssql_host }}" - login_port: "{{ mssql_port }}" - db: master - script: "SELECT 1" - -- name: Query with parameter - community.general.mssql_script: - login_user: "{{ mssql_login_user }}" - login_password: "{{ mssql_login_password }}" - login_host: "{{ mssql_host }}" - login_port: "{{ mssql_port }}" - script: | - SELECT name, state_desc FROM sys.databases WHERE name = %(dbname)s - params: - dbname: msdb - register: result_params -- assert: - that: - - result_params.query_results[0][0][0][0] == 'msdb' - - result_params.query_results[0][0][0][1] == 'ONLINE' - -- name: two batches with default output - community.general.mssql_script: - login_user: "{{ mssql_login_user }}" - login_password: "{{ mssql_login_password }}" - login_host: "{{ mssql_host }}" - login_port: "{{ mssql_port }}" - script: | - SELECT 'Batch 0 - Select 0' - SELECT 'Batch 0 - Select 1' - GO - SELECT 'Batch 1 - Select 0' - register: result_batches -- assert: - that: - - result_batches.query_results | length == 2 # two batch results - - result_batches.query_results[0] | length == 2 # two selects in first batch - - result_batches.query_results[0][0] | length == 1 # one row in first select - - result_batches.query_results[0][0][0] | length == 1 # one column in first row - - result_batches.query_results[0][0][0][0] == 'Batch 0 - Select 0' # each row contains a list of values. 
- -- name: two batches with dict output - community.general.mssql_script: - login_user: "{{ mssql_login_user }}" - login_password: "{{ mssql_login_password }}" - login_host: "{{ mssql_host }}" - login_port: "{{ mssql_port }}" - output: dict - script: | - SELECT 'Batch 0 - Select 0' as b0s0 - SELECT 'Batch 0 - Select 1' as b0s1 - GO - SELECT 'Batch 1 - Select 0' as b1s0 - register: result_batches_dict -- assert: - that: - - result_batches_dict.query_results_dict | length == 2 # two batch results - - result_batches_dict.query_results_dict[0] | length == 2 # two selects in first batch - - result_batches_dict.query_results_dict[0][0] | length == 1 # one row in first select - - result_batches_dict.query_results_dict[0][0][0]['b0s0'] == 'Batch 0 - Select 0' # column 'b0s0' of first row -''' - -RETURN = r''' -query_results: - description: List of batches (queries separated by C(GO) keyword). - type: list - elements: list - returned: success and I(output=default) - sample: [[[["Batch 0 - Select 0"]], [["Batch 0 - Select 1"]]], [[["Batch 1 - Select 0"]]]] - contains: - queries: - description: - - List of result sets of each query. - - If a query returns no results, the results of this and all the following queries will not be included in the output. - - Use the C(GO) keyword in I(script) to separate queries. - type: list - elements: list - contains: - rows: - description: List of rows returned by query. - type: list - elements: list - contains: - column_value: - description: - - List of column values. - - Any non-standard JSON type is converted to string. - type: list - example: ["Batch 0 - Select 0"] - returned: success, if output is default -query_results_dict: - description: List of batches (queries separated by C(GO) keyword). - type: list - elements: list - returned: success and I(output=dict) - sample: [[[["Batch 0 - Select 0"]], [["Batch 0 - Select 1"]]], [[["Batch 1 - Select 0"]]]] - contains: - queries: - description: - - List of result sets of each query. 
- - If a query returns no results, the results of this and all the following queries will not be included in the output. - Use 'GO' keyword to separate queries. - type: list - elements: list - contains: - rows: - description: List of rows returned by query. - type: list - elements: list - contains: - column_dict: - description: - - Dictionary of column names and values. - - Any non-standard JSON type is converted to string. - type: dict - example: {"col_name": "Batch 0 - Select 0"} - returned: success, if output is dict -''' - -from ansible.module_utils.basic import AnsibleModule, missing_required_lib -import traceback -import json -PYMSSQL_IMP_ERR = None -try: - import pymssql -except ImportError: - PYMSSQL_IMP_ERR = traceback.format_exc() - MSSQL_FOUND = False -else: - MSSQL_FOUND = True - - -def clean_output(o): - return str(o) - - -def run_module(): - module_args = dict( - name=dict(required=False, aliases=['db'], default=''), - login_user=dict(), - login_password=dict(no_log=True), - login_host=dict(required=True), - login_port=dict(type='int', default=1433), - script=dict(required=True), - output=dict(default='default', choices=['dict', 'default']), - params=dict(type='dict'), - ) - - result = dict( - changed=False, - ) - - module = AnsibleModule( - argument_spec=module_args, - supports_check_mode=True - ) - if not MSSQL_FOUND: - module.fail_json(msg=missing_required_lib( - 'pymssql'), exception=PYMSSQL_IMP_ERR) - - db = module.params['name'] - login_user = module.params['login_user'] - login_password = module.params['login_password'] - login_host = module.params['login_host'] - login_port = module.params['login_port'] - script = module.params['script'] - output = module.params['output'] - sql_params = module.params['params'] - - login_querystring = login_host - if login_port != 1433: - login_querystring = "%s:%s" % (login_host, login_port) - - if login_user is not None and login_password is None: - module.fail_json( - msg="when supplying login_user argument, 
login_password must also be provided") - - try: - conn = pymssql.connect( - user=login_user, password=login_password, host=login_querystring, database=db) - cursor = conn.cursor() - except Exception as e: - if "Unknown database" in str(e): - errno, errstr = e.args - module.fail_json(msg="ERROR: %s %s" % (errno, errstr)) - else: - module.fail_json(msg="unable to connect, check login_user and login_password are correct, or alternatively check your " - "@sysconfdir@/freetds.conf / ${HOME}/.freetds.conf") - - conn.autocommit(True) - - query_results_key = 'query_results' - if output == 'dict': - cursor = conn.cursor(as_dict=True) - query_results_key = 'query_results_dict' - - queries = script.split('\nGO\n') - result['changed'] = True - if module.check_mode: - module.exit_json(**result) - - query_results = [] - try: - for query in queries: - cursor.execute(query, sql_params) - qry_result = [] - rows = cursor.fetchall() - while rows: - qry_result.append(rows) - rows = cursor.fetchall() - query_results.append(qry_result) - except Exception as e: - return module.fail_json(msg="query failed", query=query, error=str(e), **result) - - # ensure that the result is json serializable - qry_results = json.loads(json.dumps(query_results, default=clean_output)) - - result[query_results_key] = qry_results - module.exit_json(**result) - - -def main(): - run_module() - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/database/saphana/hana_query.py b/plugins/modules/database/saphana/hana_query.py deleted file mode 100644 index ac026d5adc..0000000000 --- a/plugins/modules/database/saphana/hana_query.py +++ /dev/null @@ -1,213 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Copyright: (c) 2021, Rainer Leber -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - -DOCUMENTATION = r''' ---- -module: hana_query -short_description: 
Execute SQL on HANA -version_added: 3.2.0 -description: This module executes SQL statements on HANA with hdbsql. -options: - sid: - description: The system ID. - type: str - required: true - instance: - description: The instance number. - type: str - required: true - user: - description: A dedicated username. The user could be also in hdbuserstore. Defaults to C(SYSTEM). - type: str - default: SYSTEM - userstore: - description: If C(true) the user must be in hdbuserstore. - type: bool - default: false - version_added: 3.5.0 - password: - description: - - The password to connect to the database. - - "B(Note:) Since the passwords have to be passed as command line arguments, I(userstore=true) should - be used whenever possible, as command line arguments can be seen by other users - on the same machine." - type: str - autocommit: - description: Autocommit the statement. - type: bool - default: true - host: - description: The Host IP address. The port can be defined as well. - type: str - database: - description: Define the database on which to connect. - type: str - encrypted: - description: Use encrypted connection. Defaults to C(false). - type: bool - default: false - filepath: - description: - - One or more files each containing one SQL query to run. - - Must be a string or list containing strings. - type: list - elements: path - query: - description: - - SQL query to run. - - Must be a string or list containing strings. Please note that if you supply a string, it will be split by commas (C(,)) to a list. - It is better to supply a one-element list instead to avoid mangled input. - type: list - elements: str -notes: - - Does not support C(check_mode). 
-author: - - Rainer Leber (@rainerleber) -''' - -EXAMPLES = r''' -- name: Simple select query - community.general.hana_query: - sid: "hdb" - instance: "01" - password: "Test123" - query: "select user_name from users" - -- name: Run several queries - community.general.hana_query: - sid: "hdb" - instance: "01" - password: "Test123" - query: - - "select user_name from users;" - - select * from SYSTEM; - host: "localhost" - autocommit: False - -- name: Run several queries from file - community.general.hana_query: - sid: "hdb" - instance: "01" - password: "Test123" - filepath: - - /tmp/HANA_CPU_UtilizationPerCore_2.00.020+.txt - - /tmp/HANA.txt - host: "localhost" - -- name: Run several queries from user store - community.general.hana_query: - sid: "hdb" - instance: "01" - user: hdbstoreuser - userstore: true - query: - - "select user_name from users;" - - select * from users; - autocommit: False -''' - -RETURN = r''' -query_result: - description: List containing results of all queries executed (one sublist for every query). 
- returned: on success - type: list - elements: list - sample: [[{"Column": "Value1"}, {"Column": "Value2"}], [{"Column": "Value1"}, {"Column": "Value2"}]] -''' - -import csv -from ansible.module_utils.basic import AnsibleModule -from ansible.module_utils.six import StringIO -from ansible.module_utils.common.text.converters import to_native - - -def csv_to_list(rawcsv): - reader_raw = csv.DictReader(StringIO(rawcsv)) - reader = [dict((k, v.strip()) for k, v in row.items()) for row in reader_raw] - return list(reader) - - -def main(): - module = AnsibleModule( - argument_spec=dict( - sid=dict(type='str', required=True), - instance=dict(type='str', required=True), - encrypted=dict(type='bool', default=False), - host=dict(type='str', required=False), - user=dict(type='str', default="SYSTEM"), - userstore=dict(type='bool', default=False), - password=dict(type='str', no_log=True), - database=dict(type='str', required=False), - query=dict(type='list', elements='str', required=False), - filepath=dict(type='list', elements='path', required=False), - autocommit=dict(type='bool', default=True), - ), - required_one_of=[('query', 'filepath')], - required_if=[('userstore', False, ['password'])], - supports_check_mode=False, - ) - rc, out, err, out_raw = [0, [], "", ""] - - params = module.params - - sid = (params['sid']).upper() - instance = params['instance'] - user = params['user'] - userstore = params['userstore'] - password = params['password'] - autocommit = params['autocommit'] - host = params['host'] - database = params['database'] - encrypted = params['encrypted'] - - filepath = params['filepath'] - query = params['query'] - - bin_path = "/usr/sap/{sid}/HDB{instance}/exe/hdbsql".format(sid=sid, instance=instance) - - try: - command = [module.get_bin_path(bin_path, required=True)] - except Exception as e: - module.fail_json(msg='Failed to find hdbsql at the expected path "{0}". 
Please check SID and instance number: "{1}"'.format(bin_path, to_native(e))) - - if encrypted is True: - command.extend(['-attemptencrypt']) - if autocommit is False: - command.extend(['-z']) - if host is not None: - command.extend(['-n', host]) - if database is not None: - command.extend(['-d', database]) - # -x Suppresses additional output, such as the number of selected rows in a result set. - if userstore: - command.extend(['-x', '-U', user]) - else: - command.extend(['-x', '-i', instance, '-u', user, '-p', password]) - - if filepath is not None: - command.extend(['-I']) - for p in filepath: - # makes a command like hdbsql -i 01 -u SYSTEM -p secret123# -I /tmp/HANA_CPU_UtilizationPerCore_2.00.020+.txt, - # iterates through files and append the output to var out. - query_command = command + [p] - (rc, out_raw, err) = module.run_command(query_command) - out.append(csv_to_list(out_raw)) - if query is not None: - for q in query: - # makes a command like hdbsql -i 01 -u SYSTEM -p secret123# "select user_name from users", - # iterates through multiple commands and append the output to var out. 
- query_command = command + [q] - (rc, out_raw, err) = module.run_command(query_command) - out.append(csv_to_list(out_raw)) - changed = True - - module.exit_json(changed=changed, rc=rc, query_result=out, stderr=err) - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/monitoring/datadog/datadog_downtime.py b/plugins/modules/datadog_downtime.py similarity index 51% rename from plugins/modules/monitoring/datadog/datadog_downtime.py rename to plugins/modules/datadog_downtime.py index ef308bdabe..82365ff06a 100644 --- a/plugins/modules/monitoring/datadog/datadog_downtime.py +++ b/plugins/modules/datadog_downtime.py @@ -1,148 +1,154 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- -# Copyright: (c) 2020, Datadog, Inc -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# Copyright (c) 2020, Datadog, Inc +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import absolute_import, division, print_function +from __future__ import annotations -__metaclass__ = type -DOCUMENTATION = """ ---- +DOCUMENTATION = r""" module: datadog_downtime short_description: Manages Datadog downtimes version_added: 2.0.0 description: - Manages downtimes within Datadog. - - Options as described on U(https://docs.datadoghq.com/api/v1/downtimes/s). + - Options as described on U(https://docs.datadoghq.com/api/v1/downtimes/). author: - Datadog (@Datadog) requirements: - datadog-api-client - Python 3.6+ +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: none + diff_mode: + support: none options: - api_key: - description: - - Your Datadog API key. - required: true - type: str - api_host: - description: - - The URL to the Datadog API. - - This value can also be set with the C(DATADOG_HOST) environment variable. 
- required: false - default: https://api.datadoghq.com - type: str - app_key: - description: - - Your Datadog app key. - required: true - type: str - state: - description: - - The designated state of the downtime. - required: false - choices: ["present", "absent"] - default: present - type: str - id: - description: - - The identifier of the downtime. - - If empty, a new downtime gets created, otherwise it is either updated or deleted depending of the C(state). - - To keep your playbook idempotent, you should save the identifier in a file and read it in a lookup. - type: int + api_key: + description: + - Your Datadog API key. + required: true + type: str + api_host: + description: + - The URL to the Datadog API. + - This value can also be set with the E(DATADOG_HOST) environment variable. + required: false + default: https://api.datadoghq.com + type: str + app_key: + description: + - Your Datadog app key. + required: true + type: str + state: + description: + - The designated state of the downtime. + required: false + choices: ["present", "absent"] + default: present + type: str + id: + description: + - The identifier of the downtime. + - If empty, a new downtime gets created, otherwise it is either updated or deleted depending of the O(state). + - To keep your playbook idempotent, you should save the identifier in a file and read it in a lookup. + type: int + monitor_tags: + description: + - A list of monitor tags to which the downtime applies. + - The resulting downtime applies to monitors that match ALL provided monitor tags. + type: list + elements: str + scope: + description: + - A list of scopes to which the downtime applies. + - The resulting downtime applies to sources that matches ALL provided scopes. + type: list + elements: str + monitor_id: + description: + - The ID of the monitor to mute. If not provided, the downtime applies to all monitors. + type: int + downtime_message: + description: + - A message to include with notifications for this downtime. 
+ - Email notifications can be sent to specific users by using the same "@username" notation as events. + type: str + start: + type: int + description: + - POSIX timestamp to start the downtime. If not provided, the downtime starts the moment it is created. + end: + type: int + description: + - POSIX timestamp to end the downtime. If not provided, the downtime is in effect until you cancel it. + timezone: + description: + - The timezone for the downtime. + type: str + rrule: + description: + - The C(RRULE) standard for defining recurring events. + - For example, to have a recurring event on the first day of each month, select a type of rrule and set the C(FREQ) + to C(MONTHLY) and C(BYMONTHDAY) to C(1). + - Most common rrule options from the iCalendar Spec are supported. + - Attributes specifying the duration in C(RRULE) are not supported (for example C(DTSTART), C(DTEND), C(DURATION)). + type: str +""" + +EXAMPLES = r""" +- name: Create a downtime + register: downtime_var + community.general.datadog_downtime: + state: present monitor_tags: - description: - - A list of monitor tags to which the downtime applies. - - The resulting downtime applies to monitors that match ALL provided monitor tags. - type: list - elements: str - scope: - description: - - A list of scopes to which the downtime applies. - - The resulting downtime applies to sources that matches ALL provided scopes. - type: list - elements: str - monitor_id: - description: - - The ID of the monitor to mute. If not provided, the downtime applies to all monitors. - type: int - downtime_message: - description: - - A message to include with notifications for this downtime. - - Email notifications can be sent to specific users by using the same "@username" notation as events. - type: str - start: - type: int - description: - - POSIX timestamp to start the downtime. If not provided, the downtime starts the moment it is created. - end: - type: int - description: - - POSIX timestamp to end the downtime. 
If not provided, the downtime is in effect until you cancel it. - timezone: - description: - - The timezone for the downtime. - type: str - rrule: - description: - - The C(RRULE) standard for defining recurring events. - - For example, to have a recurring event on the first day of each month, - select a type of rrule and set the C(FREQ) to C(MONTHLY) and C(BYMONTHDAY) to C(1). - - Most common rrule options from the iCalendar Spec are supported. - - Attributes specifying the duration in C(RRULE) are not supported (e.g. C(DTSTART), C(DTEND), C(DURATION)). - type: str + - "foo:bar" + downtime_message: "Downtime for foo:bar" + scope: "test" + api_key: "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" + app_key: "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" + # Lookup the id in the file and ignore errors if the file doesn't exits, so downtime gets created + id: "{{ lookup('file', inventory_hostname ~ '_downtime_id.txt', errors='ignore') }}" +- name: Save downtime id to file for later updates and idempotence + delegate_to: localhost + copy: + content: "{{ downtime.downtime.id }}" + dest: "{{ inventory_hostname ~ '_downtime_id.txt' }}" """ -EXAMPLES = """ - - name: Create a downtime - register: downtime_var - community.general.datadog_downtime: - state: present - monitor_tags: - - "foo:bar" - downtime_message: "Downtime for foo:bar" - scope: "test" - api_key: "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" - app_key: "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" - # Lookup the id in the file and ignore errors if the file doesn't exits, so downtime gets created - id: "{{ lookup('file', inventory_hostname ~ '_downtime_id.txt', errors='ignore') }}" - - name: Save downtime id to file for later updates and idempotence - delegate_to: localhost - copy: - content: "{{ downtime.downtime.id }}" - dest: "{{ inventory_hostname ~ '_downtime_id.txt' }}" -""" - -RETURN = """ +RETURN = r""" # Returns the downtime JSON dictionary from the API response under the C(downtime) key. 
# See https://docs.datadoghq.com/api/v1/downtimes/#schedule-a-downtime for more details. downtime: - description: The downtime returned by the API. - type: dict - returned: always - sample: { - "active": true, - "canceled": null, - "creator_id": 1445416, - "disabled": false, - "downtime_type": 2, - "end": null, - "id": 1055751000, - "message": "Downtime for foo:bar", - "monitor_id": null, - "monitor_tags": [ - "foo:bar" - ], - "parent_id": null, - "recurrence": null, - "scope": [ - "test" - ], - "start": 1607015009, - "timezone": "UTC", - "updater_id": null + description: The downtime returned by the API. + type: dict + returned: always + sample: + { + "active": true, + "canceled": null, + "creator_id": 1445416, + "disabled": false, + "downtime_type": 2, + "end": null, + "id": 1055751000, + "message": "Downtime for foo:bar", + "monitor_id": null, + "monitor_tags": [ + "foo:bar" + ], + "parent_id": null, + "recurrence": null, + "scope": [ + "test" + ], + "start": 1607015009, + "timezone": "UTC", + "updater_id": null } """ @@ -150,7 +156,6 @@ import traceback from ansible.module_utils.basic import AnsibleModule, missing_required_lib # Import Datadog -from ansible.module_utils.common.text.converters import to_native DATADOG_IMP_ERR = None HAS_DATADOG = True @@ -168,18 +173,18 @@ def main(): module = AnsibleModule( argument_spec=dict( api_key=dict(required=True, no_log=True), - api_host=dict(required=False, default="https://api.datadoghq.com"), + api_host=dict(default="https://api.datadoghq.com"), app_key=dict(required=True, no_log=True), - state=dict(required=False, choices=["present", "absent"], default="present"), - monitor_tags=dict(required=False, type="list", elements="str"), - scope=dict(required=False, type="list", elements="str"), - monitor_id=dict(required=False, type="int"), - downtime_message=dict(required=False, no_log=True), - start=dict(required=False, type="int"), - end=dict(required=False, type="int"), - timezone=dict(required=False, type="str"), - 
rrule=dict(required=False, type="str"), - id=dict(required=False, type="int"), + state=dict(choices=["present", "absent"], default="present"), + monitor_tags=dict(type="list", elements="str"), + scope=dict(type="list", elements="str"), + monitor_id=dict(type="int"), + downtime_message=dict(no_log=True), + start=dict(type="int"), + end=dict(type="int"), + timezone=dict(type="str"), + rrule=dict(type="str"), + id=dict(type="int"), ) ) @@ -241,7 +246,8 @@ def build_downtime(module): downtime.timezone = module.params["timezone"] if module.params["rrule"]: downtime.recurrence = DowntimeRecurrence( - rrule=module.params["rrule"] + rrule=module.params["rrule"], + type="rrule", ) return downtime diff --git a/plugins/modules/monitoring/datadog/datadog_event.py b/plugins/modules/datadog_event.py similarity index 63% rename from plugins/modules/monitoring/datadog/datadog_event.py rename to plugins/modules/datadog_event.py index 6284b5bf23..c34951992e 100644 --- a/plugins/modules/monitoring/datadog/datadog_event.py +++ b/plugins/modules/datadog_event.py @@ -1,85 +1,99 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- # # Author: Artūras 'arturaz' Šlajus # Author: Naoya Nakazawa # # This module is proudly sponsored by iGeolise (www.igeolise.com) and # Tiny Lab Productions (www.tinylabproductions.com). -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# Copyright (c) Ansible project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: datadog_event -short_description: Posts events to Datadog service +short_description: Posts events to Datadog service description: -- "Allows to post events to Datadog (www.datadoghq.com) service." 
-- "Uses http://docs.datadoghq.com/api/#events API." + - Allows to post events to Datadog (www.datadoghq.com) service. + - Uses http://docs.datadoghq.com/api/#events API. author: -- "Artūras `arturaz` Šlajus (@arturaz)" -- "Naoya Nakazawa (@n0ts)" + - "Artūras 'arturaz' Šlajus (@arturaz)" + - "Naoya Nakazawa (@n0ts)" +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: none + diff_mode: + support: none options: - api_key: - type: str - description: ["Your DataDog API key."] - required: true - app_key: - type: str - description: ["Your DataDog app key."] - required: true - title: - type: str - description: ["The event title."] - required: true - text: - type: str - description: ["The body of the event."] - required: true - date_happened: - type: int - description: - - POSIX timestamp of the event. - - Default value is now. - priority: - type: str - description: ["The priority of the event."] - default: normal - choices: [normal, low] - host: - type: str - description: - - Host name to associate with the event. - - If not specified, it defaults to the remote system's hostname. - api_host: - type: str - description: - - DataDog API endpoint URL. - version_added: '3.3.0' - tags: - type: list - elements: str - description: ["Comma separated list of tags to apply to the event."] - alert_type: - type: str - description: ["Type of alert."] - default: info - choices: ['error', 'warning', 'info', 'success'] - aggregation_key: - type: str - description: ["An arbitrary string to use for aggregation."] - validate_certs: - description: - - If C(no), SSL certificates will not be validated. This should only be used - on personally controlled sites using self-signed certificates. - type: bool - default: 'yes' -''' + api_key: + type: str + description: + - Your DataDog API key. + required: true + app_key: + type: str + description: + - Your DataDog app key. + required: true + title: + type: str + description: + - The event title. 
+ required: true + text: + type: str + description: + - The body of the event. + required: true + date_happened: + type: int + description: + - POSIX timestamp of the event. + - Default value is now. + priority: + type: str + description: + - The priority of the event. + default: normal + choices: [normal, low] + host: + type: str + description: + - Host name to associate with the event. + - If not specified, it defaults to the remote system's hostname. + api_host: + type: str + description: + - DataDog API endpoint URL. + version_added: '3.3.0' + tags: + type: list + elements: str + description: + - Comma separated list of tags to apply to the event. + alert_type: + type: str + description: + - Type of alert. + default: info + choices: ['error', 'warning', 'info', 'success'] + aggregation_key: + type: str + description: + - An arbitrary string to use for aggregation. + validate_certs: + description: + - If V(false), SSL certificates are not validated. This should only be used on personally controlled sites using self-signed + certificates. 
+ type: bool + default: true +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: Post an event with low priority community.general.datadog_event: title: Testing from ansible @@ -107,8 +121,7 @@ EXAMPLES = ''' - aa - b - '#host:{{ inventory_hostname }}' - -''' +""" import platform import traceback diff --git a/plugins/modules/monitoring/datadog/datadog_monitor.py b/plugins/modules/datadog_monitor.py similarity index 58% rename from plugins/modules/monitoring/datadog/datadog_monitor.py rename to plugins/modules/datadog_monitor.py index ffc2bcd657..2b84d7dbd8 100644 --- a/plugins/modules/monitoring/datadog/datadog_monitor.py +++ b/plugins/modules/datadog_monitor.py @@ -1,172 +1,207 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- -# Copyright: (c) 2015, Sebastian Kornehl -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# Copyright (c) 2015, Sebastian Kornehl +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: datadog_monitor short_description: Manages Datadog monitors description: - Manages monitors within Datadog. - Options as described on https://docs.datadoghq.com/api/. - - The type C(event-v2) was added in community.general 4.8.0. author: Sebastian Kornehl (@skornehl) requirements: [datadog] +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: none + diff_mode: + support: none options: - api_key: - description: - - Your Datadog API key. - required: true - type: str - api_host: - description: - - The URL to the Datadog API. Default value is C(https://api.datadoghq.com). - - This value can also be set with the C(DATADOG_HOST) environment variable. 
- required: false - type: str - version_added: '0.2.0' - app_key: - description: - - Your Datadog app key. - required: true - type: str - state: - description: - - The designated state of the monitor. - required: true - choices: ['present', 'absent', 'mute', 'unmute'] - type: str - tags: - description: - - A list of tags to associate with your monitor when creating or updating. - - This can help you categorize and filter monitors. - type: list - elements: str - type: - description: - - The type of the monitor. - - The types C(query alert), C(trace-analytics alert) and C(rum alert) were added in community.general 2.1.0. - - The type C(composite) was added in community.general 3.4.0. - choices: - - metric alert - - service check - - event alert - - event-v2 alert - - process alert - - log alert - - query alert - - trace-analytics alert - - rum alert - - composite - type: str - query: - description: - - The monitor query to notify on. - - Syntax varies depending on what type of monitor you are creating. - type: str - name: - description: - - The name of the alert. - required: true - type: str - notification_message: - description: - - A message to include with notifications for this monitor. - - Email notifications can be sent to specific users by using the same '@username' notation as events. - - Monitor message template variables can be accessed by using double square brackets, i.e '[[' and ']]'. - type: str - silenced: - type: dict - description: - - Dictionary of scopes to silence, with timestamps or None. - - Each scope will be muted until the given POSIX timestamp or forever if the value is None. - notify_no_data: - description: - - Whether this monitor will notify when data stops reporting. - type: bool - default: 'no' - no_data_timeframe: - description: - - The number of minutes before a monitor will notify when data stops reporting. - - Must be at least 2x the monitor timeframe for metric alerts or 2 minutes for service checks. 
- - If not specified, it defaults to 2x timeframe for metric, 2 minutes for service. - type: str - timeout_h: - description: - - The number of hours of the monitor not reporting data before it will automatically resolve from a triggered state. - type: str - renotify_interval: - description: - - The number of minutes after the last notification before a monitor will re-notify on the current status. - - It will only re-notify if it is not resolved. - type: str - escalation_message: - description: - - A message to include with a re-notification. Supports the '@username' notification we allow elsewhere. - - Not applicable if I(renotify_interval=None). - type: str - notify_audit: - description: - - Whether tagged users will be notified on changes to this monitor. - type: bool - default: 'no' - thresholds: - type: dict - description: - - A dictionary of thresholds by status. - - Only available for service checks and metric alerts. - - Because each of them can have multiple thresholds, we do not define them directly in the query. - - "If not specified, it defaults to: C({'ok': 1, 'critical': 1, 'warning': 1})." - locked: - description: - - Whether changes to this monitor should be restricted to the creator or admins. - type: bool - default: 'no' - require_full_window: - description: - - Whether this monitor needs a full window of data before it gets evaluated. - - We highly recommend you set this to False for sparse metrics, otherwise some evaluations will be skipped. - type: bool - new_host_delay: - description: - - A positive integer representing the number of seconds to wait before evaluating the monitor for new hosts. - - This gives the host time to fully initialize. - type: str - evaluation_delay: - description: - - Time to delay evaluation (in seconds). - - Effective for sparse values. - type: str - id: - description: - - The ID of the alert. - - If set, will be used instead of the name to locate the alert. 
- type: str - include_tags: - description: - - Whether notifications from this monitor automatically inserts its triggering tags into the title. - type: bool - default: yes - version_added: 1.3.0 - priority: - description: - - Integer from 1 (high) to 5 (low) indicating alert severity. - type: int - version_added: 4.6.0 -''' + api_key: + description: + - Your Datadog API key. + required: true + type: str + api_host: + description: + - The URL to the Datadog API. Default value is V(https://api.datadoghq.com). + - This value can also be set with the E(DATADOG_HOST) environment variable. + required: false + type: str + version_added: '0.2.0' + app_key: + description: + - Your Datadog app key. + required: true + type: str + state: + description: + - The designated state of the monitor. + required: true + choices: ['present', 'absent', 'mute', 'unmute'] + type: str + tags: + description: + - A list of tags to associate with your monitor when creating or updating. + - This can help you categorize and filter monitors. + type: list + elements: str + type: + description: + - The type of the monitor. + - The types V(query alert), V(trace-analytics alert) and V(rum alert) were added in community.general 2.1.0. + - The type V(composite) was added in community.general 3.4.0. + - The type V(event-v2 alert) was added in community.general 4.8.0. + choices: + - metric alert + - service check + - event alert + - event-v2 alert + - process alert + - log alert + - query alert + - trace-analytics alert + - rum alert + - composite + type: str + query: + description: + - The monitor query to notify on. + - Syntax varies depending on what type of monitor you are creating. + type: str + name: + description: + - The name of the alert. + required: true + type: str + notification_message: + description: + - A message to include with notifications for this monitor. + - Email notifications can be sent to specific users by using the same '@username' notation as events. 
+ - Monitor message template variables can be accessed by using double square brackets, in other words C([[) and C(]]). + type: str + silenced: + type: dict + description: + - Dictionary of scopes to silence, with timestamps or None. + - Each scope is muted until the given POSIX timestamp or forever if the value is V(None). + notify_no_data: + description: + - Whether this monitor notifies when data stops reporting. + type: bool + default: false + no_data_timeframe: + description: + - The number of minutes before a monitor notifies when data stops reporting. + - Must be at least 2x the monitor timeframe for metric alerts or 2 minutes for service checks. + - If not specified, it defaults to 2x timeframe for metric, 2 minutes for service. + type: str + timeout_h: + description: + - The number of hours of the monitor not reporting data before it automatically resolves from a triggered state. + type: str + renotify_interval: + description: + - The number of minutes after the last notification before a monitor re-notifies on the current status. + - It only re-notifies if it is not resolved. + type: str + escalation_message: + description: + - A message to include with a re-notification. Supports the '@username' notification we allow elsewhere. + - Not applicable if O(renotify_interval=none). + type: str + notify_audit: + description: + - Whether tagged users are notified on changes to this monitor. + type: bool + default: false + thresholds: + type: dict + description: + - A dictionary of thresholds by status. + - Only available for service checks and metric alerts. + - Because each of them can have multiple thresholds, we do not define them directly in the query. + - "If not specified, it defaults to: V({'ok': 1, 'critical': 1, 'warning': 1})." + locked: + description: + - Whether changes to this monitor should be restricted to the creator or admins. 
+ type: bool + default: false + require_full_window: + description: + - Whether this monitor needs a full window of data before it gets evaluated. + - We highly recommend you set this to V(false) for sparse metrics, otherwise some evaluations are skipped. + type: bool + new_host_delay: + description: + - A positive integer representing the number of seconds to wait before evaluating the monitor for new hosts. + - This gives the host time to fully initialize. + type: str + evaluation_delay: + description: + - Time to delay evaluation (in seconds). + - Effective for sparse values. + type: str + id: + description: + - The ID of the alert. + - If set, it is used instead of O(name) to locate the alert. + type: str + include_tags: + description: + - Whether notifications from this monitor automatically inserts its triggering tags into the title. + type: bool + default: true + version_added: 1.3.0 + priority: + description: + - Integer from V(1) (high) to V(5) (low) indicating alert severity. + type: int + version_added: 4.6.0 + notification_preset_name: + description: + - Toggles the display of additional content sent in the monitor notification. + choices: + - show_all + - hide_query + - hide_handles + - hide_all + type: str + version_added: 7.1.0 + renotify_occurrences: + description: + - The number of times re-notification messages should be sent on the current status at the provided re-notification + interval. + type: int + version_added: 7.1.0 + renotify_statuses: + description: + - The types of monitor statuses for which re-notification messages are sent. 
+ choices: + - alert + - warn + - no data + type: list + elements: str + version_added: 7.1.0 +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: Create a metric monitor community.general.datadog_monitor: type: "metric alert" name: "Test monitor" state: "present" + renotify_interval: 30 + renotify_occurrences: 1 + renotify_statuses: ["warn"] + notification_preset_name: "show_all" query: "datadog.agent.up.over('host:host1').last(2).count_by_status()" notification_message: "Host [[host.name]] with IP [[host.ip]] is failing to report to datadog." api_key: "9775a026f1ca7d1c6c5af9d94d9595a4" @@ -201,7 +236,8 @@ EXAMPLES = ''' api_host: https://api.datadoghq.eu api_key: "9775a026f1ca7d1c6c5af9d94d9595a4" app_key: "87ce4a24b5553d2e482ea8a8500e71b8ad4554ff" -''' +""" + import traceback # Import Datadog @@ -237,15 +273,18 @@ def main(): renotify_interval=dict(), escalation_message=dict(), notify_audit=dict(default=False, type='bool'), - thresholds=dict(type='dict', default=None), - tags=dict(type='list', elements='str', default=None), + thresholds=dict(type='dict'), + tags=dict(type='list', elements='str'), locked=dict(default=False, type='bool'), require_full_window=dict(type='bool'), new_host_delay=dict(), evaluation_delay=dict(), id=dict(), - include_tags=dict(required=False, default=True, type='bool'), + include_tags=dict(default=True, type='bool'), priority=dict(type='int'), + notification_preset_name=dict(choices=['show_all', 'hide_query', 'hide_handles', 'hide_all']), + renotify_occurrences=dict(type='int'), + renotify_statuses=dict(type='list', elements='str', choices=['alert', 'warn', 'no data']), ) ) @@ -360,6 +399,9 @@ def install_monitor(module): "new_host_delay": module.params['new_host_delay'], "evaluation_delay": module.params['evaluation_delay'], "include_tags": module.params['include_tags'], + "notification_preset_name": module.params['notification_preset_name'], + "renotify_occurrences": module.params['renotify_occurrences'], + "renotify_statuses": 
module.params['renotify_statuses'], } if module.params['type'] == "service check": @@ -391,7 +433,7 @@ def mute_monitor(module): module.fail_json(msg="Monitor %s not found!" % module.params['name']) elif monitor['options']['silenced']: module.fail_json(msg="Monitor is already muted. Datadog does not allow to modify muted alerts, consider unmuting it first.") - elif (module.params['silenced'] is not None and len(set(monitor['options']['silenced']) ^ set(module.params['silenced'])) == 0): + elif module.params['silenced'] is not None and len(set(monitor['options']['silenced']) ^ set(module.params['silenced'])) == 0: module.exit_json(changed=False) try: if module.params['silenced'] is None or module.params['silenced'] == "": diff --git a/plugins/modules/system/dconf.py b/plugins/modules/dconf.py similarity index 59% rename from plugins/modules/system/dconf.py rename to plugins/modules/dconf.py index 636ca536ee..e9e9d82514 100644 --- a/plugins/modules/system/dconf.py +++ b/plugins/modules/dconf.py @@ -1,48 +1,52 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- -# Copyright: (c) 2017, Branko Majic -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# Copyright (c) 2017, Branko Majic +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = r''' +DOCUMENTATION = r""" module: dconf author: - - "Branko Majic (@azaghal)" + - "Branko Majic (@azaghal)" short_description: Modify and read dconf database description: - - This module allows modifications and reading of C(dconf) database. The module - is implemented as a wrapper around C(dconf) tool. Please see the dconf(1) man - page for more details. 
- - Since C(dconf) requires a running D-Bus session to change values, the module - will try to detect an existing session and reuse it, or run the tool via - C(dbus-run-session). + - This module allows modifications and reading of C(dconf) database. The module is implemented as a wrapper around C(dconf) + tool. Please see the dconf(1) man page for more details. + - Since C(dconf) requires a running D-Bus session to change values, the module tries to detect an existing session and reuse + it, or run the tool using C(dbus-run-session). +requirements: + - Optionally the C(gi.repository) Python library (usually included in the OS on hosts which have C(dconf)); this is to become + a non-optional requirement in a future major release of community.general. notes: - - This module depends on C(psutil) Python library (version 4.0.0 and upwards), - C(dconf), C(dbus-send), and C(dbus-run-session) binaries. Depending on - distribution you are using, you may need to install additional packages to - have these available. - - Detection of existing, running D-Bus session, required to change settings - via C(dconf), is not 100% reliable due to implementation details of D-Bus - daemon itself. This might lead to running applications not picking-up - changes on the fly if options are changed via Ansible and - C(dbus-run-session). - - Keep in mind that the C(dconf) CLI tool, which this module wraps around, - utilises an unusual syntax for the values (GVariant). For example, if you - wanted to provide a string value, the correct syntax would be - C(value="'myvalue'") - with single quotes as part of the Ansible parameter - value. - - When using loops in combination with a value like - :code:`"[('xkb', 'us'), ('xkb', 'se')]"`, you need to be aware of possible - type conversions. Applying a filter :code:`"{{ item.value | string }}"` - to the parameter variable can avoid potential conversion problems. 
- - The easiest way to figure out exact syntax/value you need to provide for a - key is by making the configuration change in application affected by the - key, and then having a look at value set via commands C(dconf dump - /path/to/dir/) or C(dconf read /path/to/key). + - This module depends on C(psutil) Python library (version 4.0.0 and upwards), C(dconf), C(dbus-send), and C(dbus-run-session) + binaries. Depending on distribution you are using, you may need to install additional packages to have these available. + - This module uses the C(gi.repository) Python library when available for accurate comparison of values in C(dconf) to values + specified in Ansible code. C(gi.repository) is likely to be present on most systems which have C(dconf) but may not be + present everywhere. When it is missing, a simple string comparison between values is used, and there may be false positives, + that is, Ansible may think that a value is being changed when it is not. This fallback is to be removed in a future version + of this module, at which point the module will stop working on hosts without C(gi.repository). + - Detection of existing, running D-Bus session, required to change settings using C(dconf), is not 100% reliable due to + implementation details of D-Bus daemon itself. This might lead to running applications not picking-up changes on-the-fly + if options are changed using Ansible and C(dbus-run-session). + - Keep in mind that the C(dconf) CLI tool, which this module wraps around, utilises an unusual syntax for the values (GVariant). + For example, if you wanted to provide a string value, the correct syntax would be O(value="'myvalue'") - with single quotes + as part of the Ansible parameter value. + - When using loops in combination with a value like V("[('xkb', 'us'\), ('xkb', 'se'\)]"), you need to be aware of possible + type conversions. Applying a filter V({{ item.value | string }}) to the parameter variable can avoid potential conversion + problems. 
+ - The easiest way to figure out exact syntax/value you need to provide for a key is by making the configuration change in + application affected by the key, and then having a look at value set using commands C(dconf dump /path/to/dir/) or C(dconf + read /path/to/key). +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: none options: key: type: str @@ -50,28 +54,30 @@ options: description: - A dconf key to modify or read from the dconf database. value: - type: str + type: raw required: false description: - - Value to set for the specified dconf key. Value should be specified in - GVariant format. Due to complexity of this format, it is best to have a - look at existing values in the dconf database. - - Required for I(state=present). + - Value to set for the specified dconf key. Value should be specified in GVariant format. Due to complexity of this + format, it is best to have a look at existing values in the dconf database. + - Required for O(state=present). + - Although the type is specified as "raw", it should typically be specified as a string. However, boolean values in + particular are handled properly even when specified as booleans rather than strings (in fact, handling booleans properly + is why the type of this parameter is "raw"). state: type: str required: false default: present - choices: [ 'read', 'present', 'absent' ] + choices: ['read', 'present', 'absent'] description: - The action to take upon the key/value. -''' +""" RETURN = r""" value: - description: value associated with the requested key - returned: success, state was "read" - type: str - sample: "'Default'" + description: Value associated with the requested key. 
+ returned: success, state was "read" + type: str + sample: "'Default'" """ EXAMPLES = r""" @@ -118,17 +124,27 @@ EXAMPLES = r""" import os -import traceback +import sys + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.common.respawn import ( + has_respawned, + probe_interpreters_for_module, + respawn_module, +) +from ansible.module_utils.common.text.converters import to_native +from ansible_collections.community.general.plugins.module_utils import deps + +glib_module_name = 'gi.repository.GLib' -PSUTIL_IMP_ERR = None try: - import psutil - HAS_PSUTIL = True + from gi.repository.GLib import Variant, GError except ImportError: - PSUTIL_IMP_ERR = traceback.format_exc() - HAS_PSUTIL = False + Variant = None + GError = AttributeError -from ansible.module_utils.basic import AnsibleModule, missing_required_lib +with deps.declare("psutil"): + import psutil class DBusWrapper(object): @@ -250,6 +266,29 @@ class DconfPreference(object): # Check if dconf binary exists self.dconf_bin = self.module.get_bin_path('dconf', required=True) + @staticmethod + def variants_are_equal(canonical_value, user_value): + """Compare two string GVariant representations for equality. + + Assumes `canonical_value` is "canonical" in the sense that the type of + the variant is specified explicitly if it cannot be inferred; this is + true for textual representations of variants generated by the `dconf` + command. The type of `canonical_value` is used to parse `user_value`, + so the latter does not need to be explicitly typed. + + Returns True if the two values are equal. + """ + if canonical_value is None: + # It's unset in dconf database, so anything the user is trying to + # set is a change. 
+ return False + try: + variant1 = Variant.parse(None, canonical_value) + variant2 = Variant.parse(variant1.get_type(), user_value) + return variant1 == variant2 + except GError: + return canonical_value == user_value + def read(self, key): """ Retrieves current value associated with the dconf key. @@ -290,7 +329,7 @@ class DconfPreference(object): """ # If no change is needed (or won't be done due to check_mode), notify # caller straight away. - if value == self.read(key): + if self.variants_are_equal(self.read(key), value): return False elif self.check_mode: return True @@ -304,7 +343,7 @@ class DconfPreference(object): rc, out, err = dbus_wrapper.run_command(command) if rc != 0: - self.module.fail_json(msg='dconf failed while write the value with error: %s' % err, + self.module.fail_json(msg='dconf failed while writing key %s, value %s with error: %s' % (key, value, err), out=out, err=err) @@ -342,7 +381,7 @@ class DconfPreference(object): rc, out, err = dbus_wrapper.run_command(command) if rc != 0: - self.module.fail_json(msg='dconf failed while reseting the value with error: %s' % err, + self.module.fail_json(msg='dconf failed while resetting the value with error: %s' % err, out=out, err=err) @@ -356,17 +395,61 @@ def main(): argument_spec=dict( state=dict(default='present', choices=['present', 'absent', 'read']), key=dict(required=True, type='str', no_log=False), - value=dict(required=False, default=None, type='str'), + # Converted to str below after special handling of bool. + value=dict(type='raw'), ), - supports_check_mode=True + supports_check_mode=True, + required_if=[ + ('state', 'present', ['value']), + ], ) - if not HAS_PSUTIL: - module.fail_json(msg=missing_required_lib("psutil"), exception=PSUTIL_IMP_ERR) + if Variant is None: + # This interpreter can't see the GLib module. To try to fix that, we'll + # look in common locations for system-owned interpreters that can see + # it; if we find one, we'll respawn under it. 
Otherwise we'll proceed + # with degraded performance, without the ability to parse GVariants. + # Later (in a different PR) we'll actually deprecate this degraded + # performance level and fail with an error if the library can't be + # found. - # If present state was specified, value must be provided. - if module.params['state'] == 'present' and module.params['value'] is None: - module.fail_json(msg='State "present" requires "value" to be set.') + if has_respawned(): + # This shouldn't be possible; short-circuit early if it happens. + module.fail_json( + msg="%s must be installed and visible from %s." % + (glib_module_name, sys.executable)) + + interpreters = ['/usr/bin/python3', '/usr/bin/python'] + + interpreter = probe_interpreters_for_module( + interpreters, glib_module_name) + + if interpreter: + # Found the Python bindings; respawn this module under the + # interpreter where we found them. + respawn_module(interpreter) + # This is the end of the line for this process, it will exit here + # once the respawned module has completed. + + # Try to be forgiving about the user specifying a boolean as the value, or + # more accurately about the fact that YAML and Ansible are quite insistent + # about converting strings that look like booleans into booleans. Convert + # the boolean into a string of the type dconf will understand. Any type for + # the value other than boolean is just converted into a string directly. + if module.params['value'] is not None: + if isinstance(module.params['value'], bool): + module.params['value'] = 'true' if module.params['value'] else 'false' + else: + module.params['value'] = to_native( + module.params['value'], errors='surrogate_or_strict') + + if Variant is None: + module.warn( + 'WARNING: The gi.repository Python library is not available; ' + 'using string comparison to check value equality. This fallback ' + 'will be deprecated in a future version of community.general.') + + deps.validate(module) # Create wrapper instance. 
dconf = DconfPreference(module, module.check_mode) diff --git a/plugins/modules/decompress.py b/plugins/modules/decompress.py new file mode 100644 index 0000000000..3746810ca9 --- /dev/null +++ b/plugins/modules/decompress.py @@ -0,0 +1,201 @@ +#!/usr/bin/python + +# Copyright (c) 2024, Stanislav Shamilov +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + + +DOCUMENTATION = r""" +module: decompress +short_description: Decompresses compressed files +version_added: 10.1.0 +description: + - Decompresses compressed files. + - The source (compressed) file and destination (decompressed) files are on the remote host. + - Source file can be deleted after decompression. +extends_documentation_fragment: + - ansible.builtin.files + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: none +options: + src: + description: + - Remote absolute path for the file to decompress. + type: path + required: true + dest: + description: + - The file name of the destination file where the compressed file is decompressed. + - If the destination file exists, it is truncated and overwritten. + - If not specified, the destination filename is derived from O(src) by removing the compression format extension. For + example, when O(src) is V(/path/to/file.txt.gz) and O(format) is V(gz), O(dest) is V(/path/to/file.txt). If the O(src) + file does not have an extension for the current O(format), the O(dest) filename is made by appending C(_decompressed) + to the O(src) filename. For instance, when O(src) is V(/path/to/file.myextension), the (dest) filename is V(/path/to/file.myextension_decompressed). + type: path + format: + description: + - The type of compression to use to decompress. 
+ type: str + choices: [gz, bz2, xz] + default: gz + remove: + description: + - Remove original compressed file after decompression. + type: bool + default: false +requirements: + - Requires C(lzma) (standard library of Python 3) if using C(xz) format. +author: + - Stanislav Shamilov (@shamilovstas) +""" + +EXAMPLES = r""" +- name: Decompress file /path/to/file.txt.gz into /path/to/file.txt (gz compression is used by default) + community.general.decompress: + src: /path/to/file.txt.gz + dest: /path/to/file.txt + +- name: Decompress file /path/to/file.txt.gz into /path/to/file.txt + community.general.decompress: + src: /path/to/file.txt.gz + +- name: Decompress file compressed with bzip2 + community.general.decompress: + src: /path/to/file.txt.bz2 + dest: /path/to/file.bz2 + format: bz2 + +- name: Decompress file and delete the compressed file afterwards + community.general.decompress: + src: /path/to/file.txt.gz + dest: /path/to/file.txt + remove: true +""" + +RETURN = r""" +dest: + description: Path to decompressed file. 
+ type: str + returned: success + sample: /path/to/file.txt +""" + +import bz2 +import filecmp +import gzip +import os +import shutil +import tempfile + +from ansible_collections.community.general.plugins.module_utils.mh.module_helper import ModuleHelper +from ansible.module_utils.common.text.converters import to_native, to_bytes +from ansible_collections.community.general.plugins.module_utils import deps + +with deps.declare("lzma"): + import lzma + + +def lzma_decompress(src): + return lzma.open(src, "rb") + + +def bz2_decompress(src): + return bz2.open(src, "rb") + + +def gzip_decompress(src): + return gzip.open(src, "rb") + + +def decompress(b_src, b_dest, handler): + with handler(b_src) as src_file: + with open(b_dest, "wb") as dest_file: + shutil.copyfileobj(src_file, dest_file) + + +class Decompress(ModuleHelper): + destination_filename_template = "%s_decompressed" + output_params = 'dest' + + module = dict( + argument_spec=dict( + src=dict(type='path', required=True), + dest=dict(type='path'), + format=dict(type='str', default='gz', choices=['gz', 'bz2', 'xz']), + remove=dict(type='bool', default=False) + ), + add_file_common_args=True, + supports_check_mode=True + ) + + def __init_module__(self): + self.handlers = {"gz": gzip_decompress, "bz2": bz2_decompress, "xz": lzma_decompress} + if self.vars.dest is None: + self.vars.dest = self.get_destination_filename() + deps.validate(self.module) + self.configure() + + def configure(self): + b_dest = to_bytes(self.vars.dest, errors='surrogate_or_strict') + b_src = to_bytes(self.vars.src, errors='surrogate_or_strict') + if not os.path.exists(b_src): + if self.vars.remove and os.path.exists(b_dest): + self.module.exit_json(changed=False) + else: + self.do_raise(msg="Path does not exist: '%s'" % b_src) + if os.path.isdir(b_src): + self.do_raise(msg="Cannot decompress directory '%s'" % b_src) + if os.path.isdir(b_dest): + self.do_raise(msg="Destination is a directory, cannot decompress: '%s'" % b_dest) + + def 
__run__(self): + b_dest = to_bytes(self.vars.dest, errors='surrogate_or_strict') + b_src = to_bytes(self.vars.src, errors='surrogate_or_strict') + + file_args = self.module.load_file_common_arguments(self.module.params, path=self.vars.dest) + handler = self.handlers[self.vars.format] + try: + tempfd, temppath = tempfile.mkstemp(dir=self.module.tmpdir) + self.module.add_cleanup_file(temppath) + b_temppath = to_bytes(temppath, errors='surrogate_or_strict') + decompress(b_src, b_temppath, handler) + except OSError as e: + self.do_raise(msg="Unable to create temporary file '%s'" % to_native(e)) + + if os.path.exists(b_dest): + self.changed = not filecmp.cmp(b_temppath, b_dest, shallow=False) + else: + self.changed = True + + if self.changed and not self.module.check_mode: + try: + self.module.atomic_move(b_temppath, b_dest) + except OSError: + self.do_raise(msg="Unable to move temporary file '%s' to '%s'" % (b_temppath, self.vars.dest)) + + if self.vars.remove and not self.check_mode: + os.remove(b_src) + self.changed = self.module.set_fs_attributes_if_different(file_args, self.changed) + + def get_destination_filename(self): + src = self.vars.src + fmt_extension = ".%s" % self.vars.format + if src.endswith(fmt_extension) and len(src) > len(fmt_extension): + filename = src[:-len(fmt_extension)] + else: + filename = Decompress.destination_filename_template % src + return filename + + +def main(): + Decompress.execute() + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/web_infrastructure/deploy_helper.py b/plugins/modules/deploy_helper.py similarity index 78% rename from plugins/modules/web_infrastructure/deploy_helper.py rename to plugins/modules/deploy_helper.py index f73c9c1f18..d9380d36f4 100644 --- a/plugins/modules/web_infrastructure/deploy_helper.py +++ b/plugins/modules/deploy_helper.py @@ -1,120 +1,114 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- -# (c) 2014, Jasper N. Brouwer -# (c) 2014, Ramon de la Fuente +# Copyright (c) 2014, Jasper N. 
Brouwer +# Copyright (c) 2014, Ramon de la Fuente # -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: deploy_helper author: "Ramon de la Fuente (@ramondelafuente)" -short_description: Manages some of the steps common in deploying projects. +short_description: Manages some of the steps common in deploying projects description: - - The Deploy Helper manages some of the steps common in deploying software. - It creates a folder structure, manages a symlink for the current release - and cleans up old releases. - - "Running it with the C(state=query) or C(state=present) will return the C(deploy_helper) fact. - C(project_path), whatever you set in the path parameter, - C(current_path), the path to the symlink that points to the active release, - C(releases_path), the path to the folder to keep releases in, - C(shared_path), the path to the folder to keep shared resources in, - C(unfinished_filename), the file to check for to recognize unfinished builds, - C(previous_release), the release the 'current' symlink is pointing to, - C(previous_release_path), the full path to the 'current' symlink target, - C(new_release), either the 'release' parameter or a generated timestamp, - C(new_release_path), the path to the new release folder (not created by the module)." + - The Deploy Helper manages some of the steps common in deploying software. It creates a folder structure, manages a symlink + for the current release and cleans up old releases. + - Running it with the O(state=query) or O(state=present) returns the C(deploy_helper) fact. 
C(project_path), whatever you + set in the O(path) parameter, C(current_path), the path to the symlink that points to the active release, C(releases_path), + the path to the folder to keep releases in, C(shared_path), the path to the folder to keep shared resources in, C(unfinished_filename), + the file to check for to recognize unfinished builds, C(previous_release), the release the 'current' symlink is pointing + to, C(previous_release_path), the full path to the 'current' symlink target, C(new_release), either the O(release) parameter + or a generated timestamp, C(new_release_path), the path to the new release folder (not created by the module). +attributes: + check_mode: + support: full + diff_mode: + support: none options: path: type: path - required: True + required: true aliases: ['dest'] description: - - the root path of the project. Alias I(dest). - Returned in the C(deploy_helper.project_path) fact. - + - The root path of the project. Returned in the C(deploy_helper.project_path) fact. state: type: str description: - - the state of the project. - C(query) will only gather facts, - C(present) will create the project I(root) folder, and in it the I(releases) and I(shared) folders, - C(finalize) will remove the unfinished_filename file, create a symlink to the newly - deployed release and optionally clean old releases, - C(clean) will remove failed & old releases, - C(absent) will remove the project folder (synonymous to the M(ansible.builtin.file) module with C(state=absent)) - choices: [ present, finalize, absent, clean, query ] + - The state of the project. + - V(query) gathers facts. + - V(present) creates the project C(root) folder, and in it the C(releases) and C(shared) folders. + - V(finalize) removes the unfinished_filename file, creates a symlink to the newly deployed release and optionally cleans + old releases. + - V(clean) removes failed & old releases. 
+ - V(absent) removes the project folder (synonymous to the M(ansible.builtin.file) module with O(state=absent)). + choices: [present, finalize, absent, clean, query] default: present release: type: str description: - - the release version that is being deployed. Defaults to a timestamp format %Y%m%d%H%M%S (i.e. '20141119223359'). - This parameter is optional during C(state=present), but needs to be set explicitly for C(state=finalize). - You can use the generated fact C(release={{ deploy_helper.new_release }}). - + - The release version that is being deployed. Defaults to a timestamp format C(%Y%m%d%H%M%S) (for example V(20141119223359)). + This parameter is optional during O(state=present), but needs to be set explicitly for O(state=finalize). You can + use the generated fact C(release={{ deploy_helper.new_release }}). releases_path: type: str description: - - the name of the folder that will hold the releases. This can be relative to C(path) or absolute. - Returned in the C(deploy_helper.releases_path) fact. + - The name of the folder that holds the releases. This can be relative to O(path) or absolute. Returned in the C(deploy_helper.releases_path) + fact. default: releases shared_path: type: path description: - - the name of the folder that will hold the shared resources. This can be relative to C(path) or absolute. - If this is set to an empty string, no shared folder will be created. - Returned in the C(deploy_helper.shared_path) fact. + - The name of the folder that holds the shared resources. This can be relative to O(path) or absolute. If this is set + to an empty string, no shared folder is created. Returned in the C(deploy_helper.shared_path) fact. default: shared current_path: type: path description: - - the name of the symlink that is created when the deploy is finalized. Used in C(finalize) and C(clean). + - The name of the symlink that is created when the deploy is finalized. Used in O(state=finalize) and O(state=clean). 
Returned in the C(deploy_helper.current_path) fact. default: current unfinished_filename: type: str description: - - the name of the file that indicates a deploy has not finished. All folders in the releases_path that - contain this file will be deleted on C(state=finalize) with clean=True, or C(state=clean). This file is - automatically deleted from the I(new_release_path) during C(state=finalize). + - The name of the file that indicates a deploy has not finished. All folders in the O(releases_path) that contain this + file are deleted on O(state=finalize) with O(clean=true), or O(state=clean). This file is automatically deleted from + the C(new_release_path) during O(state=finalize). default: DEPLOY_UNFINISHED clean: description: - - Whether to run the clean procedure in case of C(state=finalize). + - Whether to run the clean procedure in case of O(state=finalize). type: bool - default: 'yes' + default: true keep_releases: type: int description: - - the number of old releases to keep when cleaning. Used in C(finalize) and C(clean). Any unfinished builds - will be deleted first, so only correct releases will count. The current version will not count. + - The number of old releases to keep when cleaning. Used in O(state=finalize) and O(state=clean). Any unfinished builds + are deleted first, so only correct releases count. The current version does not count. default: 5 notes: - - Facts are only returned for C(state=query) and C(state=present). If you use both, you should pass any overridden - parameters to both calls, otherwise the second call will overwrite the facts of the first one. - - When using C(state=clean), the releases are ordered by I(creation date). You should be able to switch to a - new naming strategy without problems. - - Because of the default behaviour of generating the I(new_release) fact, this module will not be idempotent - unless you pass your own release name with C(release). 
Due to the nature of deploying software, this should not - be much of a problem. -extends_documentation_fragment: files -''' - -EXAMPLES = ''' + - Facts are only returned for O(state=query) and O(state=present). If you use both, you should pass any overridden parameters + to both calls, otherwise the second call overwrites the facts of the first one. + - When using O(state=clean), the releases are ordered by I(creation date). You should be able to switch to a new naming + strategy without problems. + - Because of the default behaviour of generating the C(new_release) fact, this module is not idempotent unless you pass + your own release name with O(release). Due to the nature of deploying software, this should not be much of a problem. +extends_documentation_fragment: + - ansible.builtin.files + - community.general.attributes +""" +EXAMPLES = r""" # General explanation, starting with an example folder structure for a project: # root: @@ -182,10 +176,10 @@ EXAMPLES = ''' src: '{{ deploy_helper.shared_path }}/{{ item.src }}' state: link with_items: - - path: app/sessions - src: sessions - - path: web/uploads - src: uploads + - path: app/sessions + src: sessions + - path: web/uploads + src: uploads - name: Finalize the deploy, removing the unfinished file and switching the symlink community.general.deploy_helper: path: /path/to/root @@ -233,7 +227,7 @@ EXAMPLES = ''' path: /path/to/root release: '{{ deploy_helper.new_release }}' state: finalize - clean: False + clean: false - community.general.deploy_helper: path: /path/to/root state: clean @@ -267,7 +261,8 @@ EXAMPLES = ''' path: /path/to/root - ansible.builtin.debug: var: deploy_helper -''' +""" + import os import shutil import time diff --git a/plugins/modules/cloud/dimensiondata/dimensiondata_network.py b/plugins/modules/dimensiondata_network.py similarity index 83% rename from plugins/modules/cloud/dimensiondata/dimensiondata_network.py rename to plugins/modules/dimensiondata_network.py index 
64cc8b118a..80ac17d47d 100644 --- a/plugins/modules/cloud/dimensiondata/dimensiondata_network.py +++ b/plugins/modules/dimensiondata_network.py @@ -1,5 +1,4 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- # # Copyright (c) 2016 Dimension Data # Authors: @@ -7,23 +6,28 @@ # - Bert Diwa # - Adam Friedman # -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: dimensiondata_network short_description: Create, update, and delete MCP 1.0 & 2.0 networks extends_documentation_fragment: -- community.general.dimensiondata -- community.general.dimensiondata_wait + - community.general.dimensiondata + - community.general.dimensiondata_wait + - community.general.attributes description: - - Create, update, and delete MCP 1.0 & 2.0 networks + - Create, update, and delete MCP 1.0 & 2.0 networks. author: 'Aimon Bustardo (@aimonb)' +attributes: + check_mode: + support: none + diff_mode: + support: none options: name: description: @@ -48,9 +52,9 @@ options: choices: [present, absent] default: present type: str -''' +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: Create an MCP 1.0 network community.general.dimensiondata_network: region: na @@ -72,43 +76,43 @@ EXAMPLES = ''' location: NA1 name: mynet state: absent -''' +""" -RETURN = ''' +RETURN = r""" network: - description: Dictionary describing the network. - returned: On success when I(state=present). - type: complex - contains: - id: - description: Network ID. - type: str - sample: "8c787000-a000-4050-a215-280893411a7d" - name: - description: Network name. - type: str - sample: "My network" - description: - description: Network description. 
- type: str - sample: "My network description" - location: - description: Datacenter location. - type: str - sample: NA3 - status: - description: Network status. (MCP 2.0 only) - type: str - sample: NORMAL - private_net: - description: Private network subnet. (MCP 1.0 only) - type: str - sample: "10.2.3.0" - multicast: - description: Multicast enabled? (MCP 1.0 only) - type: bool - sample: false -''' + description: Dictionary describing the network. + returned: On success when O(state=present). + type: complex + contains: + id: + description: Network ID. + type: str + sample: "8c787000-a000-4050-a215-280893411a7d" + name: + description: Network name. + type: str + sample: "My network" + description: + description: Network description. + type: str + sample: "My network description" + location: + description: Datacenter location. + type: str + sample: NA3 + status: + description: Network status. (MCP 2.0 only). + type: str + sample: NORMAL + private_net: + description: Private network subnet. (MCP 1.0 only). + type: str + sample: "10.2.3.0" + multicast: + description: Multicast enabled? (MCP 1.0 only). 
+ type: bool + sample: false +""" import traceback from ansible.module_utils.basic import AnsibleModule @@ -134,7 +138,7 @@ class DimensionDataNetworkModule(DimensionDataModule): module=AnsibleModule( argument_spec=DimensionDataModule.argument_spec_with_wait( name=dict(type='str', required=True), - description=dict(type='str', required=False), + description=dict(type='str'), service_plan=dict(default='ESSENTIALS', choices=['ADVANCED', 'ESSENTIALS']), state=dict(default='present', choices=['present', 'absent']) ), diff --git a/plugins/modules/cloud/dimensiondata/dimensiondata_vlan.py b/plugins/modules/dimensiondata_vlan.py similarity index 83% rename from plugins/modules/cloud/dimensiondata/dimensiondata_vlan.py rename to plugins/modules/dimensiondata_vlan.py index 26c621f44b..8f3de75b25 100644 --- a/plugins/modules/cloud/dimensiondata/dimensiondata_vlan.py +++ b/plugins/modules/dimensiondata_vlan.py @@ -1,39 +1,29 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- -# # Copyright (c) 2016 Dimension Data -# -# This module is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# This software is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this software. If not, see . 
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later # # Authors: # - Adam Friedman -# -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: dimensiondata_vlan -short_description: Manage a VLAN in a Cloud Control network domain. +short_description: Manage a VLAN in a Cloud Control network domain extends_documentation_fragment: -- community.general.dimensiondata -- community.general.dimensiondata_wait + - community.general.dimensiondata + - community.general.dimensiondata_wait + - community.general.attributes description: - Manage VLANs in Cloud Control network domains. author: 'Adam Friedman (@tintoy)' +attributes: + check_mode: + support: none + diff_mode: + support: none options: name: description: @@ -44,37 +34,41 @@ options: description: - A description of the VLAN. type: str + default: '' network_domain: description: - - The Id or name of the target network domain. + - The ID or name of the target network domain. required: true type: str private_ipv4_base_address: description: - - The base address for the VLAN's IPv4 network (e.g. 192.168.1.0). + - The base address for the VLAN's IPv4 network (for example V(192.168.1.0)). type: str + default: '' private_ipv4_prefix_size: description: - - The size of the IPv4 address space, e.g 24. - - Required, if C(private_ipv4_base_address) is specified. + - The size of the IPv4 address space, for example V(24). + - Required, if O(private_ipv4_base_address) is specified. type: int + default: 0 state: description: - The desired state for the target VLAN. - - C(readonly) ensures that the state is only ever read, not modified (the module will fail if the resource does not exist). 
+ - V(readonly) ensures that the state is only ever read, not modified (the module fails if the resource does not exist). choices: [present, absent, readonly] default: present type: str allow_expand: description: - - Permit expansion of the target VLAN's network if the module parameters specify a larger network than the VLAN currently possesses. - - If C(False), the module will fail under these conditions. + - Permit expansion of the target VLAN's network if the module parameters specify a larger network than the VLAN currently + possesses. + - If V(false), the module fails under these conditions. - This is intended to prevent accidental expansion of a VLAN's network (since this operation is not reversible). type: bool - default: 'no' -''' + default: false +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: Add or update VLAN community.general.dimensiondata_vlan: region: na @@ -85,7 +79,7 @@ EXAMPLES = ''' private_ipv4_base_address: 192.168.23.0 private_ipv4_prefix_size: 24 state: present - wait: yes + wait: true - name: Read / get VLAN details community.general.dimensiondata_vlan: @@ -94,7 +88,7 @@ EXAMPLES = ''' network_domain: test_network name: my_vlan1 state: readonly - wait: yes + wait: true - name: Delete a VLAN community.general.dimensiondata_vlan: @@ -103,60 +97,60 @@ EXAMPLES = ''' network_domain: test_network name: my_vlan_1 state: absent - wait: yes -''' + wait: true +""" -RETURN = ''' +RETURN = r""" vlan: - description: Dictionary describing the VLAN. - returned: On success when I(state) is 'present' - type: complex - contains: - id: - description: VLAN ID. - type: str - sample: "aaaaa000-a000-4050-a215-2808934ccccc" - name: - description: VLAN name. - type: str - sample: "My VLAN" - description: - description: VLAN description. - type: str - sample: "My VLAN description" - location: - description: Datacenter location. - type: str - sample: NA3 - private_ipv4_base_address: - description: The base address for the VLAN's private IPV4 network. 
- type: str - sample: 192.168.23.0 - private_ipv4_prefix_size: - description: The prefix size for the VLAN's private IPV4 network. - type: int - sample: 24 - private_ipv4_gateway_address: - description: The gateway address for the VLAN's private IPV4 network. - type: str - sample: 192.168.23.1 - private_ipv6_base_address: - description: The base address for the VLAN's IPV6 network. - type: str - sample: 2402:9900:111:1195:0:0:0:0 - private_ipv6_prefix_size: - description: The prefix size for the VLAN's IPV6 network. - type: int - sample: 64 - private_ipv6_gateway_address: - description: The gateway address for the VLAN's IPV6 network. - type: str - sample: 2402:9900:111:1195:0:0:0:1 - status: - description: VLAN status. - type: str - sample: NORMAL -''' + description: Dictionary describing the VLAN. + returned: On success when O(state=present) + type: complex + contains: + id: + description: VLAN ID. + type: str + sample: "aaaaa000-a000-4050-a215-2808934ccccc" + name: + description: VLAN name. + type: str + sample: "My VLAN" + description: + description: VLAN description. + type: str + sample: "My VLAN description" + location: + description: Datacenter location. + type: str + sample: NA3 + private_ipv4_base_address: + description: The base address for the VLAN's private IPV4 network. + type: str + sample: 192.168.23.0 + private_ipv4_prefix_size: + description: The prefix size for the VLAN's private IPV4 network. + type: int + sample: 24 + private_ipv4_gateway_address: + description: The gateway address for the VLAN's private IPV4 network. + type: str + sample: 192.168.23.1 + private_ipv6_base_address: + description: The base address for the VLAN's IPV6 network. + type: str + sample: 2402:9900:111:1195:0:0:0:0 + private_ipv6_prefix_size: + description: The prefix size for the VLAN's IPV6 network. + type: int + sample: 64 + private_ipv6_gateway_address: + description: The gateway address for the VLAN's IPV6 network. 
+ type: str + sample: 2402:9900:111:1195:0:0:0:1 + status: + description: VLAN status. + type: str + sample: NORMAL +""" from ansible.module_utils.basic import AnsibleModule from ansible_collections.community.general.plugins.module_utils.dimensiondata import DimensionDataModule, UnknownNetworkError @@ -190,7 +184,7 @@ class DimensionDataVlanModule(DimensionDataModule): network_domain=dict(required=True, type='str'), private_ipv4_base_address=dict(default='', type='str'), private_ipv4_prefix_size=dict(default=0, type='int'), - allow_expand=dict(required=False, default=False, type='bool'), + allow_expand=dict(default=False, type='bool'), state=dict(default='present', choices=['present', 'absent', 'readonly']) ), required_together=DimensionDataModule.required_together() diff --git a/plugins/modules/notification/discord.py b/plugins/modules/discord.py similarity index 85% rename from plugins/modules/notification/discord.py rename to plugins/modules/discord.py index 27dc6fc85c..9cb732eb02 100644 --- a/plugins/modules/notification/discord.py +++ b/plugins/modules/discord.py @@ -1,14 +1,12 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- -# Copyright: (c) 2021, Christian Wollinger -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# Copyright (c) 2021, Christian Wollinger +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: discord short_description: Send Discord messages version_added: 3.1.0 @@ -17,25 +15,32 @@ description: author: Christian Wollinger (@cwollinger) seealso: - name: API documentation - description: Documentation for Discord API + description: Documentation for Discord API. 
link: https://discord.com/developers/docs/resources/webhook#execute-webhook +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: none options: webhook_id: description: - The webhook ID. - - "Format from Discord webhook URL: C(/webhooks/{webhook.id}/{webhook.token})." - required: yes + - 'Format from Discord webhook URL: C(/webhooks/{webhook.id}/{webhook.token}).' + required: true type: str webhook_token: description: - The webhook token. - - "Format from Discord webhook URL: C(/webhooks/{webhook.id}/{webhook.token})." - required: yes + - 'Format from Discord webhook URL: C(/webhooks/{webhook.id}/{webhook.token}).' + required: true type: str content: description: - Content of the message to the Discord channel. - - At least one of I(content) and I(embeds) must be specified. + - At least one of O(content) and O(embeds) must be specified. type: str username: description: @@ -47,20 +52,20 @@ options: type: str tts: description: - - Set this to C(true) if this is a TTS (Text to Speech) message. + - Set this to V(true) if this is a TTS (Text to Speech) message. type: bool default: false embeds: description: - Send messages as Embeds to the Discord channel. - Embeds can have a colored border, embedded images, text fields and more. - - "Allowed parameters are described in the Discord Docs: U(https://discord.com/developers/docs/resources/channel#embed-object)" - - At least one of I(content) and I(embeds) must be specified. + - 'Allowed parameters are described in the Discord Docs: U(https://discord.com/developers/docs/resources/channel#embed-object).' + - At least one of O(content) and O(embeds) must be specified. 
type: list elements: dict -''' +""" -EXAMPLES = """ +EXAMPLES = r""" - name: Send a message to the Discord channel community.general.discord: webhook_id: "00000" @@ -111,7 +116,7 @@ EXAMPLES = """ timestamp: "{{ ansible_date_time.iso8601 }}" """ -RETURN = """ +RETURN = r""" http_code: description: - Response Code returned by Discord API. diff --git a/plugins/modules/django_check.py b/plugins/modules/django_check.py new file mode 100644 index 0000000000..f2ee357072 --- /dev/null +++ b/plugins/modules/django_check.py @@ -0,0 +1,114 @@ +#!/usr/bin/python +# Copyright (c) 2024, Alexei Znamensky +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + +DOCUMENTATION = r""" +module: django_check +author: + - Alexei Znamensky (@russoz) +short_description: Wrapper for C(django-admin check) +version_added: 9.1.0 +description: + - This module is a wrapper for the execution of C(django-admin check). +extends_documentation_fragment: + - community.general.attributes + - community.general.django +options: + databases: + description: + - Specify databases to run checks against. + - If not specified, Django does not run database tests. + - The parameter has been renamed to O(databases) in community.general 11.3.0. The old name is still available as an alias. + type: list + elements: str + aliases: ["database"] + deploy: + description: + - Include additional checks relevant in a deployment setting. + type: bool + default: false + fail_level: + description: + - Message level that triggers failure. + - Default is the Django default value. Check the documentation for the version being used. + type: str + choices: [CRITICAL, ERROR, WARNING, INFO, DEBUG] + tags: + description: + - Restrict checks to specific tags. + type: list + elements: str + apps: + description: + - Restrict checks to specific applications. 
+      - Default is to check all applications.
+    type: list
+    elements: str
+notes:
+  - The outcome of the module is found in the common return values RV(ignore:stdout), RV(ignore:stderr), RV(ignore:rc).
+  - The module fails if RV(ignore:rc) is not zero.
+attributes:
+  check_mode:
+    support: full
+  diff_mode:
+    support: none
+"""
+
+EXAMPLES = r"""
+- name: Check the entire project
+  community.general.django_check:
+    settings: myproject.settings
+
+- name: Check the project using specific databases
+  community.general.django_check:
+    database:
+      - somedb
+      - myotherdb
+    settings: fancysite.settings
+    pythonpath: /home/joedoe/project/fancysite
+    venv: /home/joedoe/project/fancysite/venv
+"""
+
+RETURN = r"""
+run_info:
+  description: Command-line execution information.
+  type: dict
+  returned: success and O(verbosity) >= 3
+version:
+  description: Version of Django.
+  type: str
+  returned: always
+  sample: 5.1.2
+  version_added: 10.0.0
+"""
+
+from ansible_collections.community.general.plugins.module_utils.django import DjangoModuleHelper
+
+
+class DjangoCheck(DjangoModuleHelper):
+    module = dict(
+        argument_spec=dict(
+            databases=dict(type="list", elements="str", aliases=["database"]),
+            deploy=dict(type="bool", default=False),
+            fail_level=dict(type="str", choices=["CRITICAL", "ERROR", "WARNING", "INFO", "DEBUG"]),
+            tags=dict(type="list", elements="str"),
+            apps=dict(type="list", elements="str"),
+        ),
+        supports_check_mode=True,
+    )
+    django_admin_cmd = "check"
+    django_admin_arg_order = "database_stacked_dash deploy fail_level tags apps"
+
+    def __init_module__(self):
+        self.vars.set("database_stacked_dash", self.vars.databases, output=False)
+
+
+def main():
+    DjangoCheck.execute()
+
+
+if __name__ == '__main__':
+    main()
diff --git a/plugins/modules/django_command.py b/plugins/modules/django_command.py
new file mode 100644
index 0000000000..a6c3f409e5
--- /dev/null
+++ b/plugins/modules/django_command.py
@@ -0,0 +1,92 @@
+#!/usr/bin/python
+# Copyright (c) 2024, 
Alexei Znamensky +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + +DOCUMENTATION = r""" +module: django_command +author: + - Alexei Znamensky (@russoz) +short_description: Run Django admin commands +version_added: 9.0.0 +description: + - This module allows the execution of arbitrary Django admin commands. +extends_documentation_fragment: + - community.general.attributes + - community.general.django +attributes: + check_mode: + support: none + diff_mode: + support: none +options: + command: + description: + - Django admin command. It must be a valid command accepted by C(python -m django) at the target system. + type: str + required: true + extra_args: + type: list + elements: str + description: + - List of extra arguments passed to the django admin command. +""" + +EXAMPLES = r""" +- name: Check the project + community.general.django_command: + command: check + settings: myproject.settings + +- name: Check the project in specified python path, using virtual environment + community.general.django_command: + command: check + settings: fancysite.settings + pythonpath: /home/joedoe/project/fancysite + venv: /home/joedoe/project/fancysite/venv +""" + +RETURN = r""" +run_info: + description: Command-line execution information. + type: dict + returned: success and O(verbosity) >= 3 +version: + description: Version of Django. 
+ type: str + returned: always + sample: 5.1.2 + version_added: 10.0.0 +""" + +import shlex + +from ansible_collections.community.general.plugins.module_utils.django import DjangoModuleHelper +from ansible_collections.community.general.plugins.module_utils.cmd_runner import cmd_runner_fmt + + +class DjangoCommand(DjangoModuleHelper): + module = dict( + argument_spec=dict( + command=dict(type="str", required=True), + extra_args=dict(type="list", elements="str"), + ), + supports_check_mode=False, + ) + arg_formats = dict( + extra_args=cmd_runner_fmt.as_list(), + ) + django_admin_arg_order = "extra_args" + + def __init_module__(self): + self.vars.command = shlex.split(self.vars.command) + + +def main(): + DjangoCommand.execute() + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/django_createcachetable.py b/plugins/modules/django_createcachetable.py new file mode 100644 index 0000000000..76a31ab0b1 --- /dev/null +++ b/plugins/modules/django_createcachetable.py @@ -0,0 +1,74 @@ +#!/usr/bin/python +# Copyright (c) 2024, Alexei Znamensky +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + +DOCUMENTATION = r""" +module: django_createcachetable +author: + - Alexei Znamensky (@russoz) +short_description: Wrapper for C(django-admin createcachetable) +version_added: 9.1.0 +description: + - This module is a wrapper for the execution of C(django-admin createcachetable). 
+extends_documentation_fragment: + - community.general.attributes + - community.general.django + - community.general.django.database +attributes: + check_mode: + support: full + diff_mode: + support: none +""" + +EXAMPLES = r""" +- name: Create cache table in the default database + community.general.django_createcachetable: + settings: myproject.settings + +- name: Create cache table in the other database + community.general.django_createcachetable: + database: myotherdb + settings: fancysite.settings + pythonpath: /home/joedoe/project/fancysite + venv: /home/joedoe/project/fancysite/venv +""" + +RETURN = r""" +run_info: + description: Command-line execution information. + type: dict + returned: success and O(verbosity) >= 3 +version: + description: Version of Django. + type: str + returned: always + sample: 5.1.2 + version_added: 10.0.0 +""" + +from ansible_collections.community.general.plugins.module_utils.django import DjangoModuleHelper + + +class DjangoCreateCacheTable(DjangoModuleHelper): + module = dict( + supports_check_mode=True, + ) + django_admin_cmd = "createcachetable" + django_admin_arg_order = "noinput database_dash dry_run" + _django_args = ["database_dash"] + _check_mode_arg = "dry_run" + + def __init_module__(self): + self.vars.set("database_dash", self.vars.database, output=False) + + +def main(): + DjangoCreateCacheTable.execute() + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/django_dumpdata.py b/plugins/modules/django_dumpdata.py new file mode 100644 index 0000000000..5c819b2755 --- /dev/null +++ b/plugins/modules/django_dumpdata.py @@ -0,0 +1,124 @@ +#!/usr/bin/python +# Copyright (c) 2025, Alexei Znamensky +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + +DOCUMENTATION = r""" +module: django_dumpdata +author: + - Alexei Znamensky (@russoz) +short_description: Wrapper 
for C(django-admin dumpdata) +version_added: 11.3.0 +description: + - This module is a wrapper for the execution of C(django-admin dumpdata). +extends_documentation_fragment: + - community.general.attributes + - community.general.django + - community.general.django.database + - community.general.django.data +attributes: + check_mode: + support: none + diff_mode: + support: none +options: + all: + description: Dump all records, including those which might otherwise be filtered or modified by a custom manager. + type: bool + indent: + description: + - Indentation size for the output. + - Default is not to indent, so the output is generated in one single line. + type: int + natural_foreign: + description: Use natural keys when serializing for foreign keys. + type: bool + natural_primary: + description: Omit primary keys when serializing. + type: bool + primary_keys: + description: + - List of primary keys to include in the dump. + - Only available when dumping one single model. + type: list + elements: str + aliases: ["pks"] + fixture: + description: + - Path to the output file. + - The fixture filename may end with V(.bz2), V(.gz), V(.lzma) or V(.xz), in which case the corresponding + compression format will be used. + - This corresponds to the C(--output) parameter for the C(django-admin dumpdata) command. + type: path + aliases: [output] + required: true + apps_models: + description: + - Dump only the applications and models listed in the dump. + - Format must be either V(app_label) or V(app_label.ModelName). + - If not passed, all applications and models are to be dumped. 
+ type: list + elements: str +""" + +EXAMPLES = r""" +- name: Dump all data + community.general.django_dumpdata: + settings: myproject.settings + fixture: /tmp/mydata.json + +- name: Dump data excluding certain apps, into a compressed JSON file + community.general.django_dumpdata: + settings: myproject.settings + database: myotherdb + excludes: + - auth + - contenttypes + fixture: /tmp/mydata.json.gz +""" + +RETURN = r""" +run_info: + description: Command-line execution information. + type: dict + returned: success and O(verbosity) >= 3 +version: + description: Version of Django. + type: str + returned: always + sample: 5.1.2 +""" + +from ansible_collections.community.general.plugins.module_utils.django import DjangoModuleHelper + + +class DjangoDumpData(DjangoModuleHelper): + module = dict( + argument_spec=dict( + all=dict(type="bool"), + indent=dict(type="int"), + natural_foreign=dict(type="bool"), + natural_primary=dict(type="bool"), + primary_keys=dict(type="list", elements="str", aliases=["pks"], no_log=False), + # the underlying vardict does not allow the name "output" + fixture=dict(type="path", required=True, aliases=["output"]), + apps_models=dict(type="list", elements="str"), + ), + supports_check_mode=False, + ) + django_admin_cmd = "dumpdata" + django_admin_arg_order = "all format indent excludes database_dash natural_foreign natural_primary primary_keys fixture apps_models" + _django_args = ["data", "database_dash"] + + def __init_module__(self): + self.vars.set("database_dash", self.vars.database, output=False) + + +def main(): + DjangoDumpData.execute() + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/django_loaddata.py b/plugins/modules/django_loaddata.py new file mode 100644 index 0000000000..75b388de9a --- /dev/null +++ b/plugins/modules/django_loaddata.py @@ -0,0 +1,90 @@ +#!/usr/bin/python +# Copyright (c) 2025, Alexei Znamensky +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or 
https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import annotations
+
+DOCUMENTATION = r"""
+module: django_loaddata
+author:
+  - Alexei Znamensky (@russoz)
+short_description: Wrapper for C(django-admin loaddata)
+version_added: 11.3.0
+description:
+  - This module is a wrapper for the execution of C(django-admin loaddata).
+extends_documentation_fragment:
+  - community.general.attributes
+  - community.general.django
+  - community.general.django.database
+  - community.general.django.data
+attributes:
+  check_mode:
+    support: none
+  diff_mode:
+    support: none
+options:
+  app:
+    description: Specifies a single app to look for fixtures in rather than looking in all apps.
+    type: str
+  ignore_non_existent:
+    description: Ignores fields and models that may have been removed since the fixture was originally generated.
+    type: bool
+  fixtures:
+    description:
+      - List of paths to the fixture files.
+    type: list
+    elements: path
+"""
+
+EXAMPLES = r"""
+- name: Load all fixtures found in the installed apps
+  community.general.django_loaddata:
+    settings: myproject.settings
+
+- name: Load fixtures from the other database
+  community.general.django_loaddata:
+    database: myotherdb
+    settings: fancysite.settings
+    pythonpath: /home/joedoe/project/fancysite
+    venv: /home/joedoe/project/fancysite/venv
+"""
+
+RETURN = r"""
+run_info:
+  description: Command-line execution information.
+  type: dict
+  returned: success and O(verbosity) >= 3
+version:
+  description: Version of Django.
+ type: str + returned: always + sample: 5.1.2 +""" + +from ansible_collections.community.general.plugins.module_utils.django import DjangoModuleHelper + + +class DjangoLoadData(DjangoModuleHelper): + module = dict( + argument_spec=dict( + app=dict(type="str"), + ignore_non_existent=dict(type="bool"), + fixtures=dict(type="list", elements="path"), + ), + supports_check_mode=False, + ) + django_admin_cmd = "loaddata" + django_admin_arg_order = "database_dash ignore_non_existent app format excludes fixtures" + _django_args = ["data", "database_dash"] + + def __init_module__(self): + self.vars.set("database_dash", self.vars.database, output=False) + + +def main(): + DjangoLoadData.execute() + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/web_infrastructure/django_manage.py b/plugins/modules/django_manage.py similarity index 62% rename from plugins/modules/web_infrastructure/django_manage.py rename to plugins/modules/django_manage.py index 4ced7452bb..ddda99849e 100644 --- a/plugins/modules/web_infrastructure/django_manage.py +++ b/plugins/modules/django_manage.py @@ -1,133 +1,158 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- -# (c) 2013, Scott Anderson -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# Copyright (c) 2022, Alexei Znamensky +# Copyright (c) 2013, Scott Anderson +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: django_manage -short_description: Manages a Django application. +short_description: Manages a Django application description: - - Manages a Django application using the C(manage.py) application frontend to C(django-admin). 
With the - C(virtualenv) parameter, all management commands will be executed by the given C(virtualenv) installation. + - Manages a Django application using the C(manage.py) application frontend to C(django-admin). With the O(virtualenv) parameter, + all management commands are executed by the given C(virtualenv) installation. +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: none + diff_mode: + support: none options: command: description: - - The name of the Django management command to run. Built in commands are C(cleanup), C(collectstatic), - C(flush), C(loaddata), C(migrate), C(syncdb), C(test), and C(validate). - - Other commands can be entered, but will fail if they're unknown to Django. Other commands that may - prompt for user input should be run with the C(--noinput) flag. - - The module will perform some basic parameter validation (when applicable) to the commands C(cleanup), - C(collectstatic), C(createcachetable), C(flush), C(loaddata), C(migrate), C(syncdb), C(test), and C(validate). + - The name of the Django management command to run. The commands listed below are built in this module and have some + basic parameter validation. + - V(collectstatic) - Collects the static files into C(STATIC_ROOT). + - V(createcachetable) - Creates the cache tables for use with the database cache backend. + - V(flush) - Removes all data from the database. + - V(loaddata) - Searches for and loads the contents of the named O(fixtures) into the database. + - V(migrate) - Synchronizes the database state with models and migrations. + - V(test) - Runs tests for all installed apps. + - Custom commands can be entered, but they fail unless they are known to Django. Custom commands that may prompt for + user input should be run with the C(--noinput) flag. + - Support for the values V(cleanup), V(syncdb), V(validate) was removed in community.general 9.0.0. See note about supported + versions of Django. 
type: str required: true project_path: description: - - The path to the root of the Django application where B(manage.py) lives. + - The path to the root of the Django application where C(manage.py) lives. type: path required: true aliases: [app_path, chdir] settings: description: - - The Python path to the application's settings module, such as C(myapp.settings). + - The Python path to the application's settings module, such as V(myapp.settings). type: path required: false pythonpath: description: - - A directory to add to the Python path. Typically used to include the settings module if it is located - external to the application directory. + - A directory to add to the Python path. Typically used to include the settings module if it is located external to + the application directory. + - This would be equivalent to adding O(pythonpath)'s value to the E(PYTHONPATH) environment variable. type: path required: false aliases: [python_path] virtualenv: description: - - An optional path to a I(virtualenv) installation to use while running the manage application. + - An optional path to a C(virtualenv) installation to use while running the manage application. + - The virtual environment must exist, otherwise the module fails. type: path aliases: [virtual_env] apps: description: - - A list of space-delimited apps to target. Used by the C(test) command. + - A list of space-delimited apps to target. Used by the V(test) command. type: str required: false cache_table: description: - - The name of the table used for database-backed caching. Used by the C(createcachetable) command. + - The name of the table used for database-backed caching. Used by the V(createcachetable) command. type: str required: false clear: description: - Clear the existing files before trying to copy or link the original file. - - Used only with the C(collectstatic) command. The C(--noinput) argument will be added automatically. + - Used only with the V(collectstatic) command. 
The C(--noinput) argument is added automatically. required: false - default: no + default: false type: bool database: description: - - The database to target. Used by the C(createcachetable), C(flush), C(loaddata), C(syncdb), - and C(migrate) commands. + - The database to target. Used by the V(createcachetable), V(flush), V(loaddata), V(syncdb), and V(migrate) commands. type: str required: false failfast: description: - - Fail the command immediately if a test fails. Used by the C(test) command. + - Fail the command immediately if a test fails. Used by the V(test) command. required: false default: false type: bool aliases: [fail_fast] fixtures: description: - - A space-delimited list of fixture file names to load in the database. B(Required) by the C(loaddata) command. + - A space-delimited list of fixture file names to load in the database. B(Required) by the V(loaddata) command. type: str required: false skip: description: - - Will skip over out-of-order missing migrations, you can only use this parameter with C(migrate) command. + - Skips over out-of-order missing migrations, you can only use this parameter with V(migrate) command. required: false type: bool merge: description: - - Will run out-of-order or missing migrations as they are not rollback migrations, you can only use this - parameter with C(migrate) command. + - Runs out-of-order or missing migrations as they are not rollback migrations, you can only use this parameter with + V(migrate) command. required: false type: bool link: description: - - Will create links to the files instead of copying them, you can only use this parameter with - C(collectstatic) command. + - Creates links to the files instead of copying them, you can only use this parameter with V(collectstatic) command. required: false type: bool testrunner: description: - - "From the Django docs: Controls the test runner class that is used to execute tests." + - Controls the test runner class that is used to execute tests. 
- This parameter is passed as-is to C(manage.py). type: str required: false aliases: [test_runner] -notes: - - C(virtualenv) (U(http://www.virtualenv.org)) must be installed on the remote host if the I(virtualenv) parameter - is specified. - - This module will create a virtualenv if the I(virtualenv) parameter is specified and a virtual environment does not already - exist at the given location. - - This module assumes English error messages for the C(createcachetable) command to detect table existence, - unfortunately. - - To be able to use the C(migrate) command with django versions < 1.7, you must have C(south) installed and added - as an app in your settings. - - To be able to use the C(collectstatic) command, you must have enabled staticfiles in your settings. - - Your C(manage.py) application must be executable (rwxr-xr-x), and must have a valid shebang, - i.e. C(#!/usr/bin/env python), for invoking the appropriate Python interpreter. -requirements: [ "virtualenv", "django" ] -author: "Scott Anderson (@tastychutney)" -''' -EXAMPLES = """ +notes: + - 'B(ATTENTION): Support for Django releases older than 4.1 has been removed in community.general version 9.0.0. While the + module allows for free-form commands, not verifying the version of Django being used, it is B(strongly recommended) to + use a more recent version of the framework.' + - Please notice that Django 4.1 requires Python 3.8 or greater. + - This module does not create a virtualenv if the O(virtualenv) parameter is specified and a virtual environment does not + already exist at the given location. This behavior changed in community.general version 9.0.0. + - The recommended way to create a virtual environment in Ansible is by using M(ansible.builtin.pip). + - This module assumes English error messages for the V(createcachetable) command to detect table existence, unfortunately. + - To be able to use the V(collectstatic) command, you must have enabled C(staticfiles) in your settings. 
+ - Your C(manage.py) application must be executable (C(rwxr-xr-x)), and must have a valid shebang, for example C(#!/usr/bin/env + python), for invoking the appropriate Python interpreter. +seealso: + - name: django-admin and manage.py Reference + description: Reference for C(django-admin) or C(manage.py) commands. + link: https://docs.djangoproject.com/en/4.1/ref/django-admin/ + - name: Django Download page + description: The page showing how to get Django and the timeline of supported releases. + link: https://www.djangoproject.com/download/ + - name: What Python version can I use with Django? + description: From the Django FAQ, the response to Python requirements for the framework. + link: https://docs.djangoproject.com/en/dev/faq/install/#what-python-version-can-i-use-with-django +requirements: ["django >= 4.1"] +author: + - Alexei Znamensky (@russoz) + - Scott Anderson (@tastychutney) +""" + +EXAMPLES = r""" - name: Run cleanup on the application installed in django_dir community.general.django_manage: - command: cleanup + command: clearsessions project_path: "{{ django_dir }}" - name: Load the initial_data fixture into the application @@ -138,7 +163,7 @@ EXAMPLES = """ - name: Run syncdb on the application community.general.django_manage: - command: syncdb + command: migrate project_path: "{{ django_dir }}" settings: "{{ settings_app_name }}" pythonpath: "{{ settings_dir }}" @@ -182,11 +207,7 @@ def _ensure_virtualenv(module): activate = os.path.join(vbin, 'activate') if not os.path.exists(activate): - virtualenv = module.get_bin_path('virtualenv', True) - vcmd = [virtualenv, venv_param] - rc, out_venv, err_venv = module.run_command(vcmd) - if rc != 0: - _fail(module, vcmd, out_venv, err_venv) + module.fail_json(msg='%s does not point to a valid virtual environment' % venv_param) os.environ["PATH"] = "%s:%s" % (vbin, os.environ["PATH"]) os.environ["VIRTUAL_ENV"] = venv_param @@ -204,11 +225,6 @@ def loaddata_filter_output(line): return "Installed" in line and 
"Installed 0 object" not in line -def syncdb_filter_output(line): - return ("Creating table " in line) \ - or ("Installed" in line and "Installed 0 object" not in line) - - def migrate_filter_output(line): return ("Migrating forwards " in line) \ or ("Installed" in line and "Installed 0 object" not in line) \ @@ -221,13 +237,10 @@ def collectstatic_filter_output(line): def main(): command_allowed_param_map = dict( - cleanup=(), createcachetable=('cache_table', 'database', ), flush=('database', ), loaddata=('database', 'fixtures', ), - syncdb=('database', ), test=('failfast', 'testrunner', 'apps', ), - validate=(), migrate=('apps', 'skip', 'merge', 'database',), collectstatic=('clear', 'link', ), ) @@ -239,7 +252,6 @@ def main(): # forces --noinput on every command that needs it noinput_commands = ( 'flush', - 'syncdb', 'migrate', 'test', 'collectstatic', diff --git a/plugins/modules/dnf_config_manager.py b/plugins/modules/dnf_config_manager.py new file mode 100644 index 0000000000..847e912115 --- /dev/null +++ b/plugins/modules/dnf_config_manager.py @@ -0,0 +1,225 @@ +#!/usr/bin/python + +# Copyright (c) 2023, Andrew Hyatt +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later +from __future__ import annotations + +DOCUMENTATION = r""" +module: dnf_config_manager +short_description: Enable or disable dnf repositories using config-manager +version_added: 8.2.0 +description: + - This module enables or disables repositories using the C(dnf config-manager) sub-command. +author: Andrew Hyatt (@ahyattdev) +requirements: + - dnf + - dnf-plugins-core +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: none +options: + name: + description: + - Repository ID, for example V(crb). 
+ default: [] + required: false + type: list + elements: str + state: + description: + - Whether the repositories should be V(enabled) or V(disabled). + default: enabled + required: false + type: str + choices: [enabled, disabled] +notes: + - Does not work with C(dnf5). +seealso: + - module: ansible.builtin.dnf + - module: ansible.builtin.yum_repository +""" + +EXAMPLES = r""" +- name: Ensure the crb repository is enabled + community.general.dnf_config_manager: + name: crb + state: enabled + +- name: Ensure the appstream and zfs repositories are disabled + community.general.dnf_config_manager: + name: + - appstream + - zfs + state: disabled +""" + +RETURN = r""" +repo_states_pre: + description: Repo IDs before action taken. + returned: success + type: dict + contains: + enabled: + description: Enabled repository IDs. + returned: success + type: list + elements: str + disabled: + description: Disabled repository IDs. + returned: success + type: list + elements: str + sample: + enabled: + - appstream + - baseos + - crb + disabled: + - appstream-debuginfo + - appstream-source + - baseos-debuginfo + - baseos-source + - crb-debug + - crb-source +repo_states_post: + description: Repository states after action taken. + returned: success + type: dict + contains: + enabled: + description: Enabled repository IDs. + returned: success + type: list + elements: str + disabled: + description: Disabled repository IDs. + returned: success + type: list + elements: str + sample: + enabled: + - appstream + - baseos + - crb + disabled: + - appstream-debuginfo + - appstream-source + - baseos-debuginfo + - baseos-source + - crb-debug + - crb-source +changed_repos: + description: Repositories changed. 
+ returned: success + type: list + elements: str + sample: ["crb"] +""" + +from ansible.module_utils.basic import AnsibleModule +import os +import re + +DNF_BIN = "/usr/bin/dnf" +REPO_ID_RE = re.compile(r'^Repo-id\s*:\s*(\S+)$') +REPO_STATUS_RE = re.compile(r'^Repo-status\s*:\s*(disabled|enabled)$') + + +def get_repo_states(module): + rc, out, err = module.run_command([DNF_BIN, 'repolist', '--all', '--verbose'], check_rc=True) + + repos = dict() + last_repo = '' + for i, line in enumerate(out.split('\n')): + m = REPO_ID_RE.match(line) + if m: + if len(last_repo) > 0: + module.fail_json(msg='dnf repolist parse failure: parsed another repo id before next status') + last_repo = m.group(1) + continue + m = REPO_STATUS_RE.match(line) + if m: + if len(last_repo) == 0: + module.fail_json(msg='dnf repolist parse failure: parsed status before repo id') + repos[last_repo] = m.group(1) + last_repo = '' + return repos + + +def set_repo_states(module, repo_ids, state): + module.run_command([DNF_BIN, 'config-manager', '--assumeyes', '--set-{0}'.format(state)] + repo_ids, check_rc=True) + + +def pack_repo_states_for_return(states): + enabled = [] + disabled = [] + for repo_id in states: + if states[repo_id] == 'enabled': + enabled.append(repo_id) + else: + disabled.append(repo_id) + + # Sort for consistent results + enabled.sort() + disabled.sort() + + return {'enabled': enabled, 'disabled': disabled} + + +def main(): + module_args = dict( + name=dict(type='list', elements='str', default=[]), + state=dict(type='str', choices=['enabled', 'disabled'], default='enabled') + ) + + result = dict( + changed=False + ) + + module = AnsibleModule( + argument_spec=module_args, + supports_check_mode=True + ) + module.run_command_environ_update = dict(LANGUAGE='C', LC_ALL='C') + + if not os.path.exists(DNF_BIN): + module.fail_json(msg="%s was not found" % DNF_BIN) + + repo_states = get_repo_states(module) + result['repo_states_pre'] = pack_repo_states_for_return(repo_states) + + 
desired_repo_state = module.params['state'] + names = module.params['name'] + + to_change = [] + for repo_id in names: + if repo_id not in repo_states: + module.fail_json(msg="did not find repo with ID '{0}' in dnf repolist --all --verbose".format(repo_id)) + if repo_states[repo_id] != desired_repo_state: + to_change.append(repo_id) + result['changed'] = len(to_change) > 0 + result['changed_repos'] = to_change + + if module.check_mode: + module.exit_json(**result) + + if len(to_change) > 0: + set_repo_states(module, to_change, desired_repo_state) + + repo_states_post = get_repo_states(module) + result['repo_states_post'] = pack_repo_states_for_return(repo_states_post) + + for repo_id in to_change: + if repo_states_post[repo_id] != desired_repo_state: + module.fail_json(msg="dnf config-manager failed to make '{0}' {1}".format(repo_id, desired_repo_state)) + + module.exit_json(**result) + + +if __name__ == "__main__": + main() diff --git a/plugins/modules/packaging/os/dnf_versionlock.py b/plugins/modules/dnf_versionlock.py similarity index 60% rename from plugins/modules/packaging/os/dnf_versionlock.py rename to plugins/modules/dnf_versionlock.py index fca33fd83c..e6fa546107 100644 --- a/plugins/modules/packaging/os/dnf_versionlock.py +++ b/plugins/modules/dnf_versionlock.py @@ -1,79 +1,73 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- -# Copyright: (c) 2021, Roberto Moreda -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +# Copyright (c) 2021, Roberto Moreda +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later +from __future__ import annotations -DOCUMENTATION = r''' ---- +DOCUMENTATION = r""" module: dnf_versionlock version_added: '4.0.0' short_description: Locks package versions in C(dnf) based systems description: -- Locks 
package versions using the C(versionlock) plugin in C(dnf) based - systems. This plugin takes a set of name and versions for packages and - excludes all other versions of those packages. This allows you to for example - protect packages from being updated by newer versions. The state of the - plugin that reflects locking of packages is the C(locklist). + - Locks package versions using the C(versionlock) plugin in C(dnf) based systems. This plugin takes a set of name and versions + for packages and excludes all other versions of those packages. This allows you to for example protect packages from being + updated by newer versions. The state of the plugin that reflects locking of packages is the C(locklist). +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: partial + details: + - The logics of the C(versionlock) plugin for corner cases could be confusing, so please take in account that this module + does its best to give a C(check_mode) prediction on what is going to happen. In case of doubt, check the documentation + of the plugin. + - Sometimes the module could predict changes in C(check_mode) that are not fulfilled because C(versionlock) concludes + that there is already an entry in C(locklist) that already matches. + diff_mode: + support: none options: name: description: - - Package name spec to add or exclude to or delete from the C(locklist) - using the format expected by the C(dnf repoquery) command. - - This parameter is mutually exclusive with I(state=clean). + - Package name spec to add or exclude to or delete from the C(locklist) using the format expected by the C(dnf repoquery) + command. + - This parameter is mutually exclusive with O(state=clean). type: list required: false elements: str default: [] raw: description: - - Do not resolve package name specs to NEVRAs to find specific version - to lock to. Instead the package name specs are used as they are.
This - enables locking to not yet available versions of the package. + - Do not resolve package name specs to NEVRAs to find specific version to lock to. Instead the package name specs are + used as they are. This enables locking to not yet available versions of the package. type: bool default: false state: description: - - Whether to add (C(present) or C(excluded)) to or remove (C(absent) or - C(clean)) from the C(locklist). - - C(present) will add a package name spec to the C(locklist). If there is a - installed package that matches, then only that version will be added. - Otherwise, all available package versions will be added. - - C(excluded) will add a package name spec as excluded to the - C(locklist). It means that packages represented by the package name - spec will be excluded from transaction operations. All available - package versions will be added. - - C(absent) will delete entries in the C(locklist) that match the - package name spec. - - C(clean) will delete all entries in the C(locklist). This option is - mutually exclusive with C(name). - choices: [ 'absent', 'clean', 'excluded', 'present' ] + - Whether to add (V(present) or V(excluded)) to or remove (V(absent) or V(clean)) from the C(locklist). + - V(present) adds a package name spec to the C(locklist). If there is an installed package that matches, then only that + version is added. Otherwise, all available package versions are added. + - V(excluded) adds a package name spec as excluded to the C(locklist). It means that packages represented by the package + name spec are excluded from transaction operations. All available package versions are added. + - V(absent) deletes entries in the C(locklist) that match the package name spec. + - V(clean) deletes all entries in the C(locklist). This option is mutually exclusive with O(name).
+ choices: ['absent', 'clean', 'excluded', 'present'] type: str default: present notes: - - The logics of the C(versionlock) plugin for corner cases could be - confusing, so please take in account that this module will do its best to - give a C(check_mode) prediction on what is going to happen. In case of - doubt, check the documentation of the plugin. - - Sometimes the module could predict changes in C(check_mode) that will not - be such because C(versionlock) concludes that there is already a entry in - C(locklist) that already matches. - - In an ideal world, the C(versionlock) plugin would have a dry-run option to - know for sure what is going to happen. So far we have to work with a best - guess as close as possible to the behaviour inferred from its code. - - For most of cases where you want to lock and unlock specific versions of a - package, this works fairly well. - - Supports C(check_mode). + - In an ideal world, the C(versionlock) plugin would have a dry-run option to know for sure what is going to happen. So + far we have to work with a best guess as close as possible to the behaviour inferred from its code. + - For most of cases where you want to lock and unlock specific versions of a package, this works fairly well. + - Does not work with C(dnf5). + - This module requires Python 3.6 or greater to run, which should not be a problem for most systems that use C(dnf). 
requirements: - dnf - dnf-plugin-versionlock author: - Roberto Moreda (@moreda) -''' +""" -EXAMPLES = r''' +EXAMPLES = r""" - name: Prevent installed nginx from being updated community.general.dnf_versionlock: name: nginx @@ -88,12 +82,12 @@ EXAMPLES = r''' - name: Remove lock from nginx to be updated again community.general.dnf_versionlock: - package: nginx + name: nginx state: absent - name: Exclude bind 32:9.11 from installs or updates community.general.dnf_versionlock: - package: bind-32:9.11* + name: bind-32:9.11* state: excluded - name: Keep bash package in major version 4 @@ -105,34 +99,34 @@ EXAMPLES = r''' - name: Delete all entries in the locklist of versionlock community.general.dnf_versionlock: state: clean -''' +""" -RETURN = r''' +RETURN = r""" locklist_pre: - description: Locklist before module execution. - returned: success - type: list - elements: str - sample: [ 'bash-0:4.4.20-1.el8_4.*', '!bind-32:9.11.26-4.el8_4.*' ] + description: Locklist before module execution. + returned: success + type: list + elements: str + sample: ["bash-0:4.4.20-1.el8_4.*", "!bind-32:9.11.26-4.el8_4.*"] locklist_post: - description: Locklist after module execution. - returned: success and (not check mode or state is clean) - type: list - elements: str - sample: [ 'bash-0:4.4.20-1.el8_4.*' ] + description: Locklist after module execution. + returned: success and (not check mode or state is clean) + type: list + elements: str + sample: ["bash-0:4.4.20-1.el8_4.*"] specs_toadd: - description: Package name specs meant to be added by versionlock. - returned: success - type: list - elements: str - sample: [ 'bash' ] + description: Package name specs meant to be added by versionlock. + returned: success + type: list + elements: str + sample: ["bash"] specs_todelete: - description: Package name specs meant to be deleted by versionlock. - returned: success - type: list - elements: str - sample: [ 'bind' ] -''' + description: Package name specs meant to be deleted by versionlock. 
+ returned: success + type: list + elements: str + sample: ["bind"] +""" from ansible.module_utils.basic import AnsibleModule import fnmatch @@ -142,8 +136,7 @@ import re DNF_BIN = "/usr/bin/dnf" VERSIONLOCK_CONF = "/etc/dnf/plugins/versionlock.conf" # NEVRA regex. -NEVRA_RE = re.compile(r"^(?P<name>.+)-(?P<epoch>\d+):(?P<version>.+)-" - r"(?P<release>.+)\.(?P<arch>.+)$") +NEVRA_RE = re.compile(r"^(?P<name>.+)-(?P<epoch>\d+):(?P<version>.+)-(?P<release>.+)\.(?P<arch>.+)$") def do_versionlock(module, command, patterns=None, raw=False): @@ -184,6 +177,7 @@ def match(entry, pattern): m = NEVRA_RE.match(entry) if not m: return False + # indexing a match object with [] is a Python 3.6+ construct for name in ( '%s' % m["name"], '%s.%s' % (m["name"], m["arch"]), @@ -227,6 +221,43 @@ def get_packages(module, patterns, only_installed=False): return packages_available_map_name_evrs +def get_package_mgr(): + for bin_path in (DNF_BIN,): + if os.path.exists(bin_path): + return "dnf5" if os.path.realpath(bin_path) == "/usr/bin/dnf5" else "dnf" + # fallback to dnf + return "dnf" + + +def get_package_list(module, package_mgr="dnf"): + if package_mgr == "dnf": + return do_versionlock(module, "list").split() + + package_list = [] + if package_mgr == "dnf5": + stanza_start = False + package_name = None + for line in do_versionlock(module, "list").splitlines(): + if line.startswith(("#", " ")): + continue + if line.startswith("Package name:"): + stanza_start = True + dummy, name = line.split(":", 1) + name = name.strip() + pkg_name = get_packages(module, patterns=[name]) + package_name = "%s-%s.*" % (name, pkg_name[name].pop()) + if package_name and package_name not in package_list: + package_list.append(package_name) + if line.startswith("evr"): + dummy, package_version = line.split("=", 1) + package_version = package_version.strip() + if stanza_start: + if package_name and package_name not in package_list: + package_list.append(package_name) + stanza_start = False + return package_list + + def main(): module = AnsibleModule( argument_spec=dict( @@ -245,9 
+276,10 @@ def main(): msg = "" # Check module pre-requisites. - if not os.path.exists(DNF_BIN): - module.fail_json(msg="%s was not found" % DNF_BIN) - if not os.path.exists(VERSIONLOCK_CONF): + global DNF_BIN + DNF_BIN = module.get_bin_path('dnf', True) + package_mgr = get_package_mgr() + if package_mgr == "dnf" and not os.path.exists(VERSIONLOCK_CONF): module.fail_json(msg="plugin versionlock is required") # Check incompatible options. @@ -256,7 +288,7 @@ def main(): if state != "clean" and not patterns: module.fail_json(msg="name list is required for %s state" % state) - locklist_pre = do_versionlock(module, "list").split() + locklist_pre = get_package_list(module, package_mgr=package_mgr) specs_toadd = [] specs_todelete = [] @@ -266,8 +298,7 @@ def main(): if raw: # Add raw patterns as specs to add. for p in patterns: - if ((p if state == "present" else "!" + p) - not in locklist_pre): + if (p if state == "present" else "!" + p) not in locklist_pre: specs_toadd.append(p) else: # Get available packages that match the patterns. 
@@ -291,8 +322,7 @@ def main(): for evr in packages_map_name_evrs[name]: locklist_entry = "%s-%s.*" % (name, evr) - if (locklist_entry if state == "present" - else "!%s" % locklist_entry) not in locklist_pre: + if (locklist_entry if state == "present" else "!%s" % locklist_entry) not in locklist_pre: specs_toadd.append(locklist_entry) if specs_toadd and not module.check_mode: @@ -335,7 +365,7 @@ def main(): "specs_todelete": specs_todelete } if not module.check_mode: - response["locklist_post"] = do_versionlock(module, "list").split() + response["locklist_post"] = get_package_list(module, package_mgr=package_mgr) else: if state == "clean": response["locklist_post"] = [] diff --git a/plugins/modules/net_tools/dnsimple.py b/plugins/modules/dnsimple.py similarity index 87% rename from plugins/modules/net_tools/dnsimple.py rename to plugins/modules/dnsimple.py index a4d531c76d..1e9fc8f317 100644 --- a/plugins/modules/net_tools/dnsimple.py +++ b/plugins/modules/dnsimple.py @@ -1,40 +1,45 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- # -# Copyright: Ansible Project +# Copyright Ansible Project # -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: dnsimple short_description: Interface with dnsimple.com (a DNS hosting service) description: - - "Manages domains and records via the DNSimple API, see the docs: U(http://developer.dnsimple.com/)." + - 'Manages domains and records using the DNSimple API, see the docs: U(http://developer.dnsimple.com/).' 
+extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: none options: account_email: description: - - Account email. If omitted, the environment variables C(DNSIMPLE_EMAIL) and C(DNSIMPLE_API_TOKEN) will be looked for. - - "If those aren't found, a C(.dnsimple) file will be looked for, see: U(https://github.com/mikemaccana/dnsimple-python#getting-started)." - - "C(.dnsimple) config files are only supported in dnsimple-python<2.0.0" + - Account email. If omitted, the environment variables E(DNSIMPLE_EMAIL) and E(DNSIMPLE_API_TOKEN) are looked for. + - 'If those variables are not found, a C(.dnsimple) file is looked for, see: U(https://github.com/mikemaccana/dnsimple-python#getting-started).' + - C(.dnsimple) config files are only supported in dnsimple-python<2.0.0. type: str account_api_token: description: - - Account API token. See I(account_email) for more information. + - Account API token. See O(account_email) for more information. type: str domain: description: - - Domain to work with. Can be the domain name (e.g. "mydomain.com") or the numeric ID of the domain in DNSimple. - - If omitted, a list of domains will be returned. - - If domain is present but the domain doesn't exist, it will be created. + - Domain to work with. Can be the domain name (for example V(mydomain.com)) or the numeric ID of the domain in DNSimple. + - If omitted, a list of domains is returned. + - If domain is present but the domain does not exist, it is created. type: str record: description: - - Record to add, if blank a record for the domain will be created, supports the wildcard (*). + - Record to add, if blank a record for the domain is created, supports the wildcard (*). type: str record_ids: description: @@ -44,7 +49,23 @@ options: type: description: - The type of DNS record to create. 
- choices: [ 'A', 'ALIAS', 'CNAME', 'MX', 'SPF', 'URL', 'TXT', 'NS', 'SRV', 'NAPTR', 'PTR', 'AAAA', 'SSHFP', 'HINFO', 'POOL', 'CAA' ] + choices: + - A + - ALIAS + - CNAME + - MX + - SPF + - URL + - TXT + - NS + - SRV + - NAPTR + - PTR + - AAAA + - SSHFP + - HINFO + - POOL + - CAA type: str ttl: description: @@ -62,30 +83,30 @@ options: type: int state: description: - - whether the record should exist or not. - choices: [ 'present', 'absent' ] + - Whether the record should exist or not. + choices: ['present', 'absent'] default: present type: str solo: description: - Whether the record should be the only one for that record type and record name. - - Only use with C(state) is set to C(present) on a record. + - Only use with O(state) is set to V(present) on a record. type: 'bool' - default: no + default: false sandbox: description: - Use the DNSimple sandbox environment. - Requires a dedicated account in the dnsimple sandbox environment. - Check U(https://developer.dnsimple.com/sandbox/) for more information. type: 'bool' - default: no + default: false version_added: 3.5.0 requirements: - "dnsimple >= 2.0.0" author: "Alex Coomans (@drcapulet)" -''' +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: Authenticate using email and API token and fetch all domains community.general.dnsimple: account_email: test@example.com @@ -141,9 +162,9 @@ EXAMPLES = ''' value: example.com state: absent delegate_to: localhost -''' +""" -RETURN = r"""# """ +RETURN = r"""#""" import traceback import re @@ -167,10 +188,10 @@ class DNSimpleV2(): def dnsimple_client(self): """creates a dnsimple client object""" if self.account_email and self.account_api_token: - client = Client(sandbox=self.sandbox, email=self.account_email, access_token=self.account_api_token) + client = Client(sandbox=self.sandbox, email=self.account_email, access_token=self.account_api_token, user_agent="ansible/community.general") else: msg = "Option account_email or account_api_token not provided. 
" \ - "Dnsimple authentiction with a .dnsimple config file is not " \ + "Dnsimple authentication with a .dnsimple config file is not " \ "supported with dnsimple-python>=2.0.0" raise DNSimpleException(msg) client.identity.whoami() @@ -217,24 +238,24 @@ class DNSimpleV2(): self.client.domains.delete_domain(self.account.id, domain) def get_records(self, zone, dnsimple_filter=None): - """return dns ressource records which match a specified filter""" + """return dns resource records which match a specified filter""" records_list = self._get_paginated_result(self.client.zones.list_records, account_id=self.account.id, zone=zone, filter=dnsimple_filter) return [d.__dict__ for d in records_list] def delete_record(self, domain, rid): - """delete a single dns ressource record""" + """delete a single dns resource record""" self.client.zones.delete_record(self.account.id, domain, rid) def update_record(self, domain, rid, ttl=None, priority=None): - """update a single dns ressource record""" + """update a single dns resource record""" zr = ZoneRecordUpdateInput(ttl=ttl, priority=priority) result = self.client.zones.update_record(self.account.id, str(domain), str(rid), zr).data.__dict__ return result def create_record(self, domain, name, record_type, content, ttl=None, priority=None): - """create a single dns ressource record""" + """create a single dns resource record""" zr = ZoneRecordInput(name=name, type=record_type, content=content, ttl=ttl, priority=priority) return self.client.zones.create_record(self.account.id, str(domain), zr).data.__dict__ diff --git a/plugins/modules/dnsimple_info.py b/plugins/modules/dnsimple_info.py new file mode 100644 index 0000000000..64cc4527a6 --- /dev/null +++ b/plugins/modules/dnsimple_info.py @@ -0,0 +1,328 @@ +#!/usr/bin/python + +# Copyright Edward Hilgendorf, +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ 
import annotations + + +DOCUMENTATION = r""" +module: dnsimple_info + +short_description: Pull basic info from DNSimple API + +version_added: "4.2.0" + +description: Retrieve existing records and domains from DNSimple API. + +extends_documentation_fragment: + - community.general.attributes + - community.general.attributes.info_module + +options: + name: + description: + - The domain name to retrieve info from. + - Returns all associated records for this domain if specified. + - If not specified, returns all domains associated with the account ID. + type: str + + account_id: + description: The account ID to query. + required: true + type: str + + api_key: + description: The API key to use. + required: true + type: str + + record: + description: + - The record to find. + - If specified, only this record is returned instead of all records. + required: false + type: str + + sandbox: + description: Whether or not to use sandbox environment. + required: false + default: false + type: bool + +author: + - Edward Hilgendorf (@edhilgendorf) +""" + +EXAMPLES = r""" +- name: Get all domains from an account + community.general.dnsimple_info: + account_id: "1234" + api_key: "1234" + +- name: Get all records from a domain + community.general.dnsimple_info: + name: "example.com" + account_id: "1234" + api_key: "1234" + +- name: Get all info from a matching record + community.general.dnsimple_info: + name: "example.com" + record: "subdomain" + account_id: "1234" + api_key: "1234" +""" + +RETURN = r""" +dnsimple_domain_info: + description: Returns a list of dictionaries of all domains associated with the supplied account ID. + type: list + elements: dict + returned: success when O(name) is not specified + sample: + - account_id: 1234 + created_at: '2021-10-16T21:25:42Z' + id: 123456 + last_transferred_at: + name: example.com + reverse: false + secondary: false + updated_at: '2021-11-10T20:22:50Z' + contains: + account_id: + description: The account ID. 
+ type: int + created_at: + description: When the domain entry was created. + type: str + id: + description: ID of the entry. + type: int + last_transferred_at: + description: Date the domain was transferred, or empty if not. + type: str + name: + description: Name of the record. + type: str + reverse: + description: Whether or not it is a reverse zone record. + type: bool + updated_at: + description: When the domain entry was updated. + type: str + +dnsimple_records_info: + description: Returns a list of dictionaries with all records for the domain supplied. + type: list + elements: dict + returned: success when O(name) is specified, but O(record) is not + sample: + - content: ns1.dnsimple.com admin.dnsimple.com + created_at: '2021-10-16T19:07:34Z' + id: 12345 + name: 'catheadbiscuit' + parent_id: + priority: + regions: + - global + system_record: true + ttl: 3600 + type: SOA + updated_at: '2021-11-15T23:55:51Z' + zone_id: example.com + contains: + content: + description: Content of the returned record. + type: str + created_at: + description: When the domain entry was created. + type: str + id: + description: ID of the entry. + type: int + name: + description: Name of the record. + type: str + parent_id: + description: Parent record or null. + type: int + priority: + description: Priority setting of the record. + type: str + regions: + description: List of regions where the record is available. + type: list + system_record: + description: Whether or not it is a system record. + type: bool + ttl: + description: Record TTL. + type: int + type: + description: Record type. + type: str + updated_at: + description: When the domain entry was updated. + type: str + zone_id: + description: ID of the zone that the record is associated with. + type: str +dnsimple_record_info: + description: Returns a list of dictionaries that match the record supplied. 
+ returned: success when O(name) and O(record) are specified + type: list + elements: dict + sample: + - content: 1.2.3.4 + created_at: '2021-11-15T23:55:51Z' + id: 123456 + name: catheadbiscuit + parent_id: + priority: + regions: + - global + system_record: false + ttl: 3600 + type: A + updated_at: '2021-11-15T23:55:51Z' + zone_id: example.com + contains: + content: + description: Content of the returned record. + type: str + created_at: + description: When the domain entry was created. + type: str + id: + description: ID of the entry. + type: int + name: + description: Name of the record. + type: str + parent_id: + description: Parent record or null. + type: int + priority: + description: Priority setting of the record. + type: str + regions: + description: List of regions where the record is available. + type: list + system_record: + description: Whether or not it is a system record. + type: bool + ttl: + description: Record TTL. + type: int + type: + description: Record type. + type: str + updated_at: + description: When the domain entry was updated. + type: str + zone_id: + description: ID of the zone that the record is associated with. 
+ type: str +""" + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils import deps + +with deps.declare("requests"): + from requests import Request, Session + + +def build_url(account, key, is_sandbox): + headers = {'Accept': 'application/json', + 'Authorization': 'Bearer {0}'.format(key)} + sandbox = '.sandbox' if is_sandbox else '' + url = 'https://api{sandbox}.dnsimple.com/v2/{account}'.format(sandbox=sandbox, account=account) + req = Request(url=url, headers=headers) + prepped_request = req.prepare() + return prepped_request + + +def iterate_data(module, request_object): + base_url = request_object.url + response = Session().send(request_object) + if 'pagination' not in response.json(): + module.fail_json('API Call failed, check ID, key and sandbox values') + + data = response.json()["data"] + total_pages = response.json()["pagination"]["total_pages"] + page = 1 + + while page < total_pages: + page = page + 1 + request_object.url = '{url}&page={page}'.format(url=base_url, page=page) + new_results = Session().send(request_object) + data = data + new_results.json()['data'] + + return data + + +def record_info(dnsimple_mod, req_obj): + req_obj.url, req_obj.method = req_obj.url + '/zones/' + dnsimple_mod.params["name"] + '/records?name=' + dnsimple_mod.params["record"], 'GET' + return iterate_data(dnsimple_mod, req_obj) + + +def domain_info(dnsimple_mod, req_obj): + req_obj.url, req_obj.method = req_obj.url + '/zones/' + dnsimple_mod.params["name"] + '/records?per_page=100', 'GET' + return iterate_data(dnsimple_mod, req_obj) + + +def account_info(dnsimple_mod, req_obj): + req_obj.url, req_obj.method = req_obj.url + '/zones/?per_page=100', 'GET' + return iterate_data(dnsimple_mod, req_obj) + + +def main(): + # define available arguments/parameters a user can pass to the module + fields = { + "account_id": {"required": True, "type": "str"}, + "api_key": {"required": True, "type": "str", "no_log": 
True}, + "name": {"required": False, "type": "str"}, + "record": {"required": False, "type": "str"}, + "sandbox": {"required": False, "type": "bool", "default": False} + } + + result = { + 'changed': False + } + + module = AnsibleModule( + argument_spec=fields, + supports_check_mode=True + ) + + params = module.params + req = build_url(params['account_id'], + params['api_key'], + params['sandbox']) + + deps.validate(module) + + # At minimum we need account and key + if params['account_id'] and params['api_key']: + # If we have a record return info on that record + if params['name'] and params['record']: + result['dnsimple_record_info'] = record_info(module, req) + module.exit_json(**result) + + # If we have the account only and domain, return records for the domain + elif params['name']: + result['dnsimple_records_info'] = domain_info(module, req) + module.exit_json(**result) + + # If we have the account only, return domains + else: + result['dnsimple_domain_info'] = account_info(module, req) + module.exit_json(**result) + else: + module.fail_json(msg="Need at least account_id and api_key") + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/net_tools/dnsmadeeasy.py b/plugins/modules/dnsmadeeasy.py similarity index 83% rename from plugins/modules/net_tools/dnsmadeeasy.py rename to plugins/modules/dnsmadeeasy.py index 1d708cdce0..e74e8a547b 100644 --- a/plugins/modules/net_tools/dnsmadeeasy.py +++ b/plugins/modules/dnsmadeeasy.py @@ -1,21 +1,25 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- -# Copyright: Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# Copyright Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = ''' ---- 
+DOCUMENTATION = r""" module: dnsmadeeasy -short_description: Interface with dnsmadeeasy.com (a DNS hosting service). +short_description: Interface with dnsmadeeasy.com (a DNS hosting service) description: - - > - Manages DNS records via the v2 REST API of the DNS Made Easy service. It handles records only; there is no manipulation of domains or - monitor/account support yet. See: U(https://www.dnsmadeeasy.com/integration/restapi/) + - 'Manages DNS records using the v2 REST API of the DNS Made Easy service. It handles records only; there is no manipulation + of domains or monitor/account support yet. See: U(https://www.dnsmadeeasy.com/integration/restapi/).' +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: none + diff_mode: + support: none options: account_key: description: @@ -31,8 +35,8 @@ options: domain: description: - - Domain to work with. Can be the domain name (e.g. "mydomain.com") or the numeric ID of the domain in DNS Made Easy (e.g. "839989") for faster - resolution + - Domain to work with. Can be the domain name (for example V(mydomain.com)) or the numeric ID of the domain in DNS Made + Easy (for example V(839989)) for faster resolution. required: true type: str @@ -40,55 +44,53 @@ options: description: - Decides if the sandbox API should be used. Otherwise (default) the production API of DNS Made Easy is used. type: bool - default: 'no' + default: false record_name: description: - - Record name to get/create/delete/update. If record_name is not specified; all records for the domain will be returned in "result" regardless - of the state argument. + - Record name to get/create/delete/update. If O(record_name) is not specified; all records for the domain are returned + in "result" regardless of the state argument. type: str record_type: description: - Record type. 
- choices: [ 'A', 'AAAA', 'CNAME', 'ANAME', 'HTTPRED', 'MX', 'NS', 'PTR', 'SRV', 'TXT' ] + choices: ['A', 'AAAA', 'CNAME', 'ANAME', 'HTTPRED', 'MX', 'NS', 'PTR', 'SRV', 'TXT'] type: str record_value: description: - - > - Record value. HTTPRED: <redirection URL>, MX: <priority> <target name>, NS: <name server>, PTR: <target name>, - SRV: <priority> <weight> <port> <target name>, TXT: <text value>" - - > - If record_value is not specified; no changes will be made and the record will be returned in 'result' - (in other words, this module can be used to fetch a record's current id, type, and ttl) + - 'Record value. HTTPRED: <redirection URL>, MX: <priority> <target name>, NS: <name server>, PTR: <target name>, SRV: + <priority> <weight> <port> <target name>, TXT: <text value>".' + - If O(record_value) is not specified; no changes are made and the record is returned in RV(ignore:result) (in other + words, this module can be used to fetch a record's current ID, type, and TTL). type: str record_ttl: description: - - record's "Time to live". Number of seconds the record remains cached in DNS servers. + - Record's "Time-To-Live". Number of seconds the record remains cached in DNS servers. default: 1800 type: int state: description: - - whether the record should exist or not + - Whether the record should exist or not. required: true - choices: [ 'present', 'absent' ] + choices: ['present', 'absent'] type: str validate_certs: description: - - If C(no), SSL certificates will not be validated. This should only be used - on personally controlled sites using self-signed certificates. + - If V(false), SSL certificates are not validated. This should only be used on personally controlled sites using self-signed + certificates. type: bool - default: 'yes' + default: true monitor: description: - - If C(yes), add or change the monitor. This is applicable only for A records. + - If V(true), add or change the monitor. This is applicable only for A records. type: bool - default: 'no' + default: false systemDescription: description: @@ -124,9 +126,8 @@ options: contactList: description: - - Name or id of the contact list that the monitor will notify. - - The default C('') means the Account Owner. 
- default: '' + - Name or ID of the contact list that the monitor notifies. + - The default V('') means the Account Owner. type: str httpFqdn: @@ -146,16 +147,16 @@ options: failover: description: - - If C(yes), add or change the failover. This is applicable only for A records. + - If V(true), add or change the failover. This is applicable only for A records. type: bool - default: 'no' + default: false autoFailover: description: - If true, fallback to the primary IP address is manual after a failover. - If false, fallback to the primary IP address is automatic after a failover. type: bool - default: 'no' + default: false ip1: description: @@ -185,20 +186,19 @@ options: type: str notes: - - The DNS Made Easy service requires that machines interacting with the API have the proper time and timezone set. Be sure you are within a few - seconds of actual time by using NTP. - - This module returns record(s) and monitor(s) in the "result" element when 'state' is set to 'present'. - These values can be be registered and used in your playbooks. - - Only A records can have a monitor or failover. - - To add failover, the 'failover', 'autoFailover', 'port', 'protocol', 'ip1', and 'ip2' options are required. - - To add monitor, the 'monitor', 'port', 'protocol', 'maxEmails', 'systemDescription', and 'ip1' options are required. - - The monitor and the failover will share 'port', 'protocol', and 'ip1' options. - -requirements: [ hashlib, hmac ] + - The DNS Made Easy service requires that machines interacting with the API have the proper time and timezone set. Be sure + you are within a few seconds of actual time by using NTP. + - This module returns record(s) and monitor(s) in the RV(ignore:result) element when O(state=present). These values can + be registered and used in your playbooks. + - Only A records can have a O(monitor) or O(failover). + - To add failover, the O(failover), O(autoFailover), O(port), O(protocol), O(ip1), and O(ip2) options are required. 
+ - To add monitor, the O(monitor), O(port), O(protocol), O(maxEmails), O(systemDescription), and O(ip1) options are required. + - The options O(monitor) and O(failover) share O(port), O(protocol), and O(ip1) options. +requirements: [hashlib, hmac] author: "Brice Burgess (@briceburg)" -''' +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: Fetch my.com domain records community.general.dnsmadeeasy: account_key: key @@ -253,7 +253,7 @@ EXAMPLES = ''' record_name: test record_type: A record_value: 127.0.0.1 - failover: True + failover: true ip1: 127.0.0.2 ip2: 127.0.0.3 @@ -266,7 +266,7 @@ EXAMPLES = ''' record_name: test record_type: A record_value: 127.0.0.1 - failover: True + failover: true ip1: 127.0.0.2 ip2: 127.0.0.3 ip3: 127.0.0.4 @@ -282,10 +282,10 @@ EXAMPLES = ''' record_name: test record_type: A record_value: 127.0.0.1 - monitor: yes + monitor: true ip1: 127.0.0.2 - protocol: HTTP # default - port: 80 # default + protocol: HTTP # default + port: 80 # default maxEmails: 1 systemDescription: Monitor Test A record contactList: my contact list @@ -299,13 +299,13 @@ EXAMPLES = ''' record_name: test record_type: A record_value: 127.0.0.1 - monitor: yes + monitor: true ip1: 127.0.0.2 - protocol: HTTP # default - port: 80 # default + protocol: HTTP # default + port: 80 # default maxEmails: 1 systemDescription: Monitor Test A record - contactList: 1174 # contact list id + contactList: 1174 # contact list id httpFqdn: http://my.com httpFile: example httpQueryString: some string @@ -319,10 +319,10 @@ EXAMPLES = ''' record_name: test record_type: A record_value: 127.0.0.1 - failover: True + failover: true ip1: 127.0.0.2 ip2: 127.0.0.3 - monitor: yes + monitor: true protocol: HTTPS port: 443 maxEmails: 1 @@ -338,7 +338,7 @@ EXAMPLES = ''' record_name: test record_type: A record_value: 127.0.0.1 - failover: no + failover: false - name: Remove a monitor community.general.dnsmadeeasy: @@ -349,8 +349,8 @@ EXAMPLES = ''' record_name: test record_type: A record_value: 127.0.0.1 - 
monitor: no -''' + monitor: false +""" # ============================================ # DNSMadeEasy module specific support methods. @@ -361,11 +361,10 @@ import hashlib import hmac import locale from time import strftime, gmtime +from urllib.parse import urlencode from ansible.module_utils.basic import AnsibleModule from ansible.module_utils.urls import fetch_url -from ansible.module_utils.six.moves.urllib.parse import urlencode -from ansible.module_utils.six import string_types class DME2(object): @@ -415,7 +414,7 @@ class DME2(object): def query(self, resource, method, data=None): url = self.baseurl + resource - if data and not isinstance(data, string_types): + if data and not isinstance(data, str): data = urlencode(data) response, info = fetch_url(self.module, url, data=data, method=method, headers=self._headers()) @@ -484,7 +483,7 @@ class DME2(object): return self.query(self.record_url, 'GET')['data'] def _instMap(self, type): - # @TODO cache this call so it's executed only once per ansible execution + # @TODO cache this call so it is executed only once per ansible execution map = {} results = {} @@ -502,15 +501,15 @@ class DME2(object): return json.dumps(data, separators=(',', ':')) def createRecord(self, data): - # @TODO update the cache w/ resultant record + id when impleneted + # @TODO update the cache w/ resultant record + id when implemented return self.query(self.record_url, 'POST', data) def updateRecord(self, record_id, data): - # @TODO update the cache w/ resultant record + id when impleneted + # @TODO update the cache w/ resultant record + id when implemented return self.query(self.record_url + '/' + str(record_id), 'PUT', data) def deleteRecord(self, record_id): - # @TODO remove record from the cache when impleneted + # @TODO remove record from the cache when implemented return self.query(self.record_url + '/' + str(record_id), 'DELETE') def getMonitor(self, record_id): @@ -551,28 +550,28 @@ def main(): domain=dict(required=True), 
sandbox=dict(default=False, type='bool'), state=dict(required=True, choices=['present', 'absent']), - record_name=dict(required=False), - record_type=dict(required=False, choices=[ + record_name=dict(), + record_type=dict(choices=[ 'A', 'AAAA', 'CNAME', 'ANAME', 'HTTPRED', 'MX', 'NS', 'PTR', 'SRV', 'TXT']), - record_value=dict(required=False), - record_ttl=dict(required=False, default=1800, type='int'), + record_value=dict(), + record_ttl=dict(default=1800, type='int'), monitor=dict(default=False, type='bool'), systemDescription=dict(default=''), maxEmails=dict(default=1, type='int'), protocol=dict(default='HTTP', choices=['TCP', 'UDP', 'HTTP', 'DNS', 'SMTP', 'HTTPS']), port=dict(default=80, type='int'), sensitivity=dict(default='Medium', choices=['Low', 'Medium', 'High']), - contactList=dict(default=None), - httpFqdn=dict(required=False), - httpFile=dict(required=False), - httpQueryString=dict(required=False), + contactList=dict(), + httpFqdn=dict(), + httpFile=dict(), + httpQueryString=dict(), failover=dict(default=False, type='bool'), autoFailover=dict(default=False, type='bool'), - ip1=dict(required=False), - ip2=dict(required=False), - ip3=dict(required=False), - ip4=dict(required=False), - ip5=dict(required=False), + ip1=dict(), + ip2=dict(), + ip3=dict(), + ip4=dict(), + ip5=dict(), validate_certs=dict(default=True, type='bool'), ), required_together=[ diff --git a/plugins/modules/system/dpkg_divert.py b/plugins/modules/dpkg_divert.py similarity index 76% rename from plugins/modules/system/dpkg_divert.py rename to plugins/modules/dpkg_divert.py index 709d35b865..7f37a47de4 100644 --- a/plugins/modules/system/dpkg_divert.py +++ b/plugins/modules/dpkg_divert.py @@ -1,90 +1,82 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- -# Copyright: (c) 2017-2020, Yann Amar -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# Copyright (c) 2017-2020, Yann Amar +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt 
or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = r''' ---- +DOCUMENTATION = r""" module: dpkg_divert short_description: Override a debian package's version of a file version_added: '0.2.0' author: - quidame (@quidame) description: - - A diversion is for C(dpkg) the knowledge that only a given package - (or the local administrator) is allowed to install a file at a given - location. Other packages shipping their own version of this file will - be forced to I(divert) it, i.e. to install it at another location. It - allows one to keep changes in a file provided by a debian package by - preventing its overwrite at package upgrade. - - This module manages diversions of debian packages files using the - C(dpkg-divert) commandline tool. It can either create or remove a - diversion for a given file, but also update an existing diversion - to modify its I(holder) and/or its I(divert) location. + - A diversion is for C(dpkg) the knowledge that only a given package (or the local administrator) is allowed to install + a file at a given location. Other packages shipping their own version of this file are forced to O(divert) it, that is + to install it at another location. It allows one to keep changes in a file provided by a debian package by preventing + it being overwritten on package upgrade. + - This module manages diversions of debian packages files using the C(dpkg-divert) commandline tool. It can either create + or remove a diversion for a given file, but also update an existing diversion to modify its O(holder) and/or its O(divert) + location. +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: full options: path: description: - - The original and absolute path of the file to be diverted or - undiverted. 
This path is unique, i.e. it is not possible to get - two diversions for the same I(path). + - The original and absolute path of the file to be diverted or undiverted. This path is unique, in other words it is + not possible to get two diversions for the same O(path). required: true type: path state: description: - - When I(state=absent), remove the diversion of the specified - I(path); when I(state=present), create the diversion if it does - not exist, or update its package I(holder) or I(divert) location, - if it already exists. + - When O(state=absent), remove the diversion of the specified O(path); when O(state=present), create the diversion if + it does not exist, or update its package O(holder) or O(divert) location, if it already exists. type: str default: present choices: [absent, present] holder: description: - - The name of the package whose copy of file is not diverted, also - known as the diversion holder or the package the diversion belongs - to. - - The actual package does not have to be installed or even to exist - for its name to be valid. If not specified, the diversion is hold - by 'LOCAL', that is reserved by/for dpkg for local diversions. - - This parameter is ignored when I(state=absent). + - The name of the package whose copy of file is not diverted, also known as the diversion holder or the package the + diversion belongs to. + - The actual package does not have to be installed or even to exist for its name to be valid. If not specified, the + diversion is hold by 'LOCAL', that is reserved by/for dpkg for local diversions. + - This parameter is ignored when O(state=absent). type: str divert: description: - - The location where the versions of file will be diverted. + - The location where the versions of file are diverted. - Default is to add suffix C(.distrib) to the file path. - - This parameter is ignored when I(state=absent). + - This parameter is ignored when O(state=absent). 
type: path rename: description: - - Actually move the file aside (when I(state=present)) or back (when - I(state=absent)), but only when changing the state of the diversion. - This parameter has no effect when attempting to add a diversion that - already exists or when removing an unexisting one. - - Unless I(force=true), renaming fails if the destination file already - exists (this lock being a dpkg-divert feature, and bypassing it being - a module feature). + - Actually move the file aside (when O(state=present)) or back (when O(state=absent)), but only when changing the state + of the diversion. This parameter has no effect when attempting to add a diversion that already exists or when removing + an unexisting one. + - Unless O(force=true), renaming fails if the destination file already exists (this lock being a dpkg-divert feature, + and bypassing it being a module feature). type: bool - default: no + default: false force: description: - - When I(rename=true) and I(force=true), renaming is performed even if - the target of the renaming exists, i.e. the existing contents of the - file at this location will be lost. - - This parameter is ignored when I(rename=false). + - When O(rename=true) and O(force=true), renaming is performed even if the target of the renaming exists, in other words + the existing contents of the file at this location are lost. + - This parameter is ignored when O(rename=false). type: bool - default: no -notes: - - This module supports I(check_mode) and I(diff). 
+ default: false requirements: - dpkg-divert >= 1.15.0 (Debian family) -''' +""" -EXAMPLES = r''' +EXAMPLES = r""" - name: Divert /usr/bin/busybox to /usr/bin/busybox.distrib and keep file in place community.general.dpkg_divert: path: /usr/bin/busybox @@ -98,37 +90,29 @@ EXAMPLES = r''' community.general.dpkg_divert: path: /usr/bin/busybox divert: /usr/bin/busybox.dpkg-divert - rename: yes + rename: true - name: Remove the busybox diversion and move the diverted file back community.general.dpkg_divert: path: /usr/bin/busybox state: absent - rename: yes - force: yes -''' + rename: true + force: true +""" -RETURN = r''' +RETURN = r""" commands: description: The dpkg-divert commands ran internally by the module. type: list returned: on_success elements: str - sample: |- - [ - "/usr/bin/dpkg-divert --no-rename --remove /etc/foobarrc", - "/usr/bin/dpkg-divert --package ansible --no-rename --add /etc/foobarrc" - ] + sample: "/usr/bin/dpkg-divert --no-rename --remove /etc/foobarrc" messages: description: The dpkg-divert relevant messages (stdout or stderr). type: list returned: on_success elements: str - sample: |- - [ - "Removing 'local diversion of /etc/foobarrc to /etc/foobarrc.distrib'", - "Adding 'diversion of /etc/foobarrc to /etc/foobarrc.distrib by ansible'" - ] + sample: "Removing 'local diversion of /etc/foobarrc to /etc/foobarrc.distrib'" diversion: description: The status of the diversion after task execution. type: dict @@ -146,14 +130,14 @@ diversion: state: description: The state of the diversion. 
type: str - sample: |- + sample: { "divert": "/etc/foobarrc.distrib", "holder": "LOCAL", - "path": "/etc/foobarrc" + "path": "/etc/foobarrc", "state": "present" } -''' +""" import re @@ -180,11 +164,11 @@ def main(): module = AnsibleModule( argument_spec=dict( path=dict(required=True, type='path'), - state=dict(required=False, type='str', default='present', choices=['absent', 'present']), - holder=dict(required=False, type='str'), - divert=dict(required=False, type='path'), - rename=dict(required=False, type='bool', default=False), - force=dict(required=False, type='bool', default=False), + state=dict(type='str', default='present', choices=['absent', 'present']), + holder=dict(type='str'), + divert=dict(type='path'), + rename=dict(type='bool', default=False), + force=dict(type='bool', default=False), ), supports_check_mode=True, ) diff --git a/plugins/modules/packaging/language/easy_install.py b/plugins/modules/easy_install.py similarity index 67% rename from plugins/modules/packaging/language/easy_install.py rename to plugins/modules/easy_install.py index 5e1d7930b5..d533da899f 100644 --- a/plugins/modules/packaging/language/easy_install.py +++ b/plugins/modules/easy_install.py @@ -1,72 +1,68 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- -# (c) 2012, Matt Wright -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# Copyright (c) 2012, Matt Wright +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: easy_install short_description: Installs Python libraries description: - - Installs Python libraries, optionally in a I(virtualenv) + - Installs Python libraries, optionally in a C(virtualenv). 
+extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: none options: name: type: str description: - - A Python library name + - A Python library name. required: true virtualenv: type: str description: - - an optional I(virtualenv) directory path to install into. If the - I(virtualenv) does not exist, it is created automatically + - An optional O(virtualenv) directory path to install into. If the O(virtualenv) does not exist, it is created automatically. virtualenv_site_packages: description: - - Whether the virtual environment will inherit packages from the - global site-packages directory. Note that if this setting is - changed on an already existing virtual environment it will not - have any effect, the environment must be deleted and newly - created. + - Whether the virtual environment inherits packages from the global site-packages directory. Note that this setting + has no effect on an already existing virtual environment, so if you want to change it, the environment must be deleted + and newly created. type: bool - default: 'no' + default: false virtualenv_command: type: str description: - - The command to create the virtual environment with. For example - C(pyvenv), C(virtualenv), C(virtualenv2). + - The command to create the virtual environment with. For example V(pyvenv), V(virtualenv), V(virtualenv2). default: virtualenv executable: type: str description: - - The explicit executable or a pathname to the executable to be used to - run easy_install for a specific version of Python installed in the - system. For example C(easy_install-3.3), if there are both Python 2.7 - and 3.3 installations in the system and you want to run easy_install - for the Python 3.3 installation. + - The explicit executable or a pathname to the executable to be used to run easy_install for a specific version of Python + installed in the system. 
For example V(easy_install-3.3), if there are both Python 2.7 and 3.3 installations in the + system and you want to run easy_install for the Python 3.3 installation. default: easy_install state: type: str description: - - The desired state of the library. C(latest) ensures that the latest version is installed. + - The desired state of the library. V(latest) ensures that the latest version is installed. choices: [present, latest] default: present notes: - - Please note that the C(easy_install) module can only install Python - libraries. Thus this module is not able to remove libraries. It is - generally recommended to use the M(ansible.builtin.pip) module which you can first install - using M(community.general.easy_install). - - Also note that I(virtualenv) must be installed on the remote host if the - C(virtualenv) parameter is specified. -requirements: [ "virtualenv" ] + - Please note that the C(easy_install) module can only install Python libraries. Thus this module is not able to remove + libraries. It is generally recommended to use the M(ansible.builtin.pip) module which you can first install using M(community.general.easy_install). + - Also note that C(virtualenv) must be installed on the remote host if the O(virtualenv) parameter is specified. 
+requirements: ["virtualenv"] author: "Matt Wright (@mattupstate)" -''' +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: Install or update pip community.general.easy_install: name: pip @@ -76,7 +72,13 @@ EXAMPLES = ''' community.general.easy_install: name: bottle virtualenv: /webapps/myapp/venv -''' + +- name: Install a python package using pyvenv as the virtualenv tool + community.general.easy_install: + name: package_name + virtualenv: /opt/myenv + virtualenv_command: pyvenv +""" import os import os.path @@ -85,7 +87,7 @@ from ansible.module_utils.basic import AnsibleModule def install_package(module, name, easy_install, executable_arguments): - cmd = '%s %s %s' % (easy_install, ' '.join(executable_arguments), name) + cmd = [easy_install] + executable_arguments + [name] rc, out, err = module.run_command(cmd) return rc, out, err @@ -129,14 +131,13 @@ def _get_easy_install(module, env=None, executable=None): def main(): arg_spec = dict( name=dict(required=True), - state=dict(required=False, - default='present', + state=dict(default='present', choices=['present', 'latest'], type='str'), - virtualenv=dict(default=None, required=False), + virtualenv=dict(), virtualenv_site_packages=dict(default=False, type='bool'), - virtualenv_command=dict(default='virtualenv', required=False), - executable=dict(default='easy_install', required=False), + virtualenv_command=dict(default='virtualenv'), + executable=dict(default='easy_install'), ) module = AnsibleModule(argument_spec=arg_spec, supports_check_mode=True) diff --git a/plugins/modules/web_infrastructure/ejabberd_user.py b/plugins/modules/ejabberd_user.py similarity index 58% rename from plugins/modules/web_infrastructure/ejabberd_user.py rename to plugins/modules/ejabberd_user.py index e6cdd72b5e..d60a5d4f4a 100644 --- a/plugins/modules/web_infrastructure/ejabberd_user.py +++ b/plugins/modules/ejabberd_user.py @@ -1,57 +1,56 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- # # Copyright (C) 2013, Peter Sprygada -# GNU General 
Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: ejabberd_user author: "Peter Sprygada (@privateip)" short_description: Manages users for ejabberd servers requirements: - - ejabberd with mod_admin_extra + - ejabberd with mod_admin_extra description: - - This module provides user management for ejabberd servers + - This module provides user management for ejabberd servers. +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: none options: - username: - type: str - description: - - the name of the user to manage - required: true - host: - type: str - description: - - the ejabberd host associated with this username - required: true - password: - type: str - description: - - the password to assign to the username - required: false - logging: - description: - - enables or disables the local syslog facility for this module - required: false - default: false - type: bool - state: - type: str - description: - - describe the desired state of the user to be managed - required: false - default: 'present' - choices: [ 'present', 'absent' ] + username: + type: str + description: + - The name of the user to manage. + required: true + host: + type: str + description: + - The ejabberd host associated with this username. + required: true + password: + type: str + description: + - The password to assign to the username. + required: false + state: + type: str + description: + - Describe the desired state of the user to be managed. 
+ required: false + default: 'present' + choices: ['present', 'absent'] notes: - - Password parameter is required for state == present only - - Passwords must be stored in clear text for this release - - The ejabberd configuration file must include mod_admin_extra as a module. -''' -EXAMPLES = ''' + - Password parameter is required for O(state=present) only. + - Passwords must be stored in clear text for this release. + - The ejabberd configuration file must include mod_admin_extra as a module. +""" +EXAMPLES = r""" # Example playbook entries using the ejabberd_user module to manage users state. - name: Create a user if it does not exist @@ -65,11 +64,10 @@ EXAMPLES = ''' username: test host: server state: absent -''' - -import syslog +""" from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.cmd_runner import CmdRunner, cmd_runner_fmt class EjabberdUser(object): @@ -77,16 +75,26 @@ class EjabberdUser(object): object manages user creation and deletion using ejabberdctl. The following commands are currently supported: * ejabberdctl register - * ejabberdctl deregister + * ejabberdctl unregister """ def __init__(self, module): self.module = module - self.logging = module.params.get('logging') self.state = module.params.get('state') self.host = module.params.get('host') self.user = module.params.get('username') self.pwd = module.params.get('password') + self.runner = CmdRunner( + module, + command="ejabberdctl", + arg_formats=dict( + cmd=cmd_runner_fmt.as_list(), + host=cmd_runner_fmt.as_list(), + user=cmd_runner_fmt.as_list(), + pwd=cmd_runner_fmt.as_list(), + ), + check_rc=False, + ) @property def changed(self): @@ -94,7 +102,7 @@ class EjabberdUser(object): changed. 
It will return True if the user does not match the supplied credentials and False if it does not """ - return self.run_command('check_password', [self.user, self.host, self.pwd]) + return self.run_command('check_password', 'user host pwd', (lambda rc, out, err: bool(rc))) @property def exists(self): @@ -102,37 +110,42 @@ class EjabberdUser(object): host specified. If the user exists True is returned, otherwise False is returned """ - return self.run_command('check_account', [self.user, self.host]) + return self.run_command('check_account', 'user host', (lambda rc, out, err: not bool(rc))) def log(self, entry): - """ This method will log information to the local syslog facility """ - if self.logging: - syslog.openlog('ansible-%s' % self.module._name) - syslog.syslog(syslog.LOG_NOTICE, entry) + """ This method does nothing """ + pass - def run_command(self, cmd, options): + def run_command(self, cmd, options, process=None): """ This method will run the any command specified and return the returns using the Ansible common module """ - cmd = [self.module.get_bin_path('ejabberdctl'), cmd] + options - self.log('command: %s' % " ".join(cmd)) - return self.module.run_command(cmd) + def _proc(*a): + return a + + if process is None: + process = _proc + + with self.runner("cmd " + options, output_process=process) as ctx: + res = ctx.run(cmd=cmd, host=self.host, user=self.user, pwd=self.pwd) + self.log('command: %s' % " ".join(ctx.run_info['cmd'])) + return res def update(self): """ The update method will update the credentials for the user provided """ - return self.run_command('change_password', [self.user, self.host, self.pwd]) + return self.run_command('change_password', 'user host pwd') def create(self): """ The create method will create a new user on the host with the password provided """ - return self.run_command('register', [self.user, self.host, self.pwd]) + return self.run_command('register', 'user host pwd') def delete(self): """ The delete method will delete the 
user from the host """ - return self.run_command('unregister', [self.user, self.host]) + return self.run_command('unregister', 'user host') def main(): @@ -142,7 +155,6 @@ def main(): username=dict(required=True, type='str'), password=dict(type='str', no_log=True), state=dict(default='present', choices=['present', 'absent']), - logging=dict(default=False, type='bool') # deprecate in favour of c.g.syslogger? ), required_if=[ ('state', 'present', ['password']), diff --git a/plugins/modules/database/misc/elasticsearch_plugin.py b/plugins/modules/elasticsearch_plugin.py similarity index 61% rename from plugins/modules/database/misc/elasticsearch_plugin.py rename to plugins/modules/elasticsearch_plugin.py index bc7df931b6..7d49ebded1 100644 --- a/plugins/modules/database/misc/elasticsearch_plugin.py +++ b/plugins/modules/elasticsearch_plugin.py @@ -1,89 +1,91 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- -# (c) 2015, Mathew Davies -# (c) 2017, Sam Doran -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# Copyright (c) 2015, Mathew Davies +# Copyright (c) 2017, Sam Doran +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: elasticsearch_plugin short_description: Manage Elasticsearch plugins description: - - Manages Elasticsearch plugins. + - Manages Elasticsearch plugins. author: - - Mathew Davies (@ThePixelDeveloper) - - Sam Doran (@samdoran) + - Mathew Davies (@ThePixelDeveloper) + - Sam Doran (@samdoran) +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: none options: - name: - description: - - Name of the plugin to install. 
- required: True - type: str - state: - description: - - Desired state of a plugin. - choices: ["present", "absent"] - default: present - type: str - src: - description: - - Optionally set the source location to retrieve the plugin from. This can be a file:// - URL to install from a local file, or a remote URL. If this is not set, the plugin - location is just based on the name. - - The name parameter must match the descriptor in the plugin ZIP specified. - - Is only used if the state would change, which is solely checked based on the name - parameter. If, for example, the plugin is already installed, changing this has no - effect. - - For ES 1.x use url. - required: False - type: str - url: - description: - - Set exact URL to download the plugin from (Only works for ES 1.x). - - For ES 2.x and higher, use src. - required: False - type: str - timeout: - description: - - "Timeout setting: 30s, 1m, 1h..." - - Only valid for Elasticsearch < 5.0. This option is ignored for Elasticsearch > 5.0. - default: 1m - type: str - force: - description: - - "Force batch mode when installing plugins. This is only necessary if a plugin requires additional permissions and console detection fails." - default: False - type: bool - plugin_bin: - description: - - Location of the plugin binary. If this file is not found, the default plugin binaries will be used. - - The default changed in Ansible 2.4 to None. - type: path - plugin_dir: - description: - - Your configured plugin directory specified in Elasticsearch - default: /usr/share/elasticsearch/plugins/ - type: path - proxy_host: - description: - - Proxy host to use during plugin installation - type: str - proxy_port: - description: - - Proxy port to use during plugin installation - type: str - version: - description: - - Version of the plugin to be installed. - If plugin exists with previous version, it will NOT be updated - type: str -''' + name: + description: + - Name of the plugin to install. 
+ required: true + type: str + state: + description: + - Desired state of a plugin. + choices: ["present", "absent"] + default: present + type: str + src: + description: + - Optionally set the source location to retrieve the plugin from. This can be a C(file://) URL to install from a local + file, or a remote URL. If this is not set, the plugin location is just based on the name. + - The name parameter must match the descriptor in the plugin ZIP specified. + - Is only used if the state would change, which is solely checked based on the name parameter. If, for example, the + plugin is already installed, changing this has no effect. + - For ES 1.x use O(url). + required: false + type: str + url: + description: + - Set exact URL to download the plugin from (Only works for ES 1.x). + - For ES 2.x and higher, use src. + required: false + type: str + timeout: + description: + - 'Timeout setting: V(30s), V(1m), V(1h)...' + - Only valid for Elasticsearch < 5.0. This option is ignored for Elasticsearch > 5.0. + default: 1m + type: str + force: + description: + - Force batch mode when installing plugins. This is only necessary if a plugin requires additional permissions and console + detection fails. + default: false + type: bool + plugin_bin: + description: + - Location of the plugin binary. If this file is not found, the default plugin binaries are used. + type: path + plugin_dir: + description: + - Your configured plugin directory specified in Elasticsearch. + default: /usr/share/elasticsearch/plugins/ + type: path + proxy_host: + description: + - Proxy host to use during plugin installation. + type: str + proxy_port: + description: + - Proxy port to use during plugin installation. + type: str + version: + description: + - Version of the plugin to be installed. If plugin exists with previous version, it is NOT updated. 
+ type: str +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: Install Elasticsearch Head plugin in Elasticsearch 2.x community.general.elasticsearch_plugin: name: mobz/elasticsearch-head @@ -108,8 +110,8 @@ EXAMPLES = ''' community.general.elasticsearch_plugin: name: ingest-geoip state: present - force: yes -''' + force: true +""" import os @@ -159,33 +161,38 @@ def parse_error(string): def install_plugin(module, plugin_bin, plugin_name, version, src, url, proxy_host, proxy_port, timeout, force): - cmd_args = [plugin_bin, PACKAGE_STATE_MAP["present"]] + cmd = [plugin_bin, PACKAGE_STATE_MAP["present"]] is_old_command = (os.path.basename(plugin_bin) == 'plugin') # Timeout and version are only valid for plugin, not elasticsearch-plugin if is_old_command: if timeout: - cmd_args.append("--timeout %s" % timeout) + cmd.append("--timeout") + cmd.append(timeout) if version: plugin_name = plugin_name + '/' + version - cmd_args[2] = plugin_name + cmd[2] = plugin_name if proxy_host and proxy_port: - cmd_args.append("-DproxyHost=%s -DproxyPort=%s" % (proxy_host, proxy_port)) + java_opts = ["-Dhttp.proxyHost=%s" % proxy_host, + "-Dhttp.proxyPort=%s" % proxy_port, + "-Dhttps.proxyHost=%s" % proxy_host, + "-Dhttps.proxyPort=%s" % proxy_port] + module.run_command_environ_update = dict(CLI_JAVA_OPTS=" ".join(java_opts), # Elasticsearch 8.x + ES_JAVA_OPTS=" ".join(java_opts)) # Older Elasticsearch versions # Legacy ES 1.x if url: - cmd_args.append("--url %s" % url) + cmd.append("--url") + cmd.append(url) if force: - cmd_args.append("--batch") + cmd.append("--batch") if src: - cmd_args.append(src) + cmd.append(src) else: - cmd_args.append(plugin_name) - - cmd = " ".join(cmd_args) + cmd.append(plugin_name) if module.check_mode: rc, out, err = 0, "check mode", "" @@ -200,9 +207,7 @@ def install_plugin(module, plugin_bin, plugin_name, version, src, url, proxy_hos def remove_plugin(module, plugin_bin, plugin_name): - cmd_args = [plugin_bin, PACKAGE_STATE_MAP["absent"], 
parse_plugin_repo(plugin_name)] - - cmd = " ".join(cmd_args) + cmd = [plugin_bin, PACKAGE_STATE_MAP["absent"], parse_plugin_repo(plugin_name)] if module.check_mode: rc, out, err = 0, "check mode", "" @@ -231,8 +236,8 @@ def get_plugin_bin(module, plugin_bin=None): # Get separate lists of dirs and binary names from the full paths to the # plugin binaries. - plugin_dirs = list(set([os.path.dirname(x) for x in bin_paths])) - plugin_bins = list(set([os.path.basename(x) for x in bin_paths])) + plugin_dirs = list(set(os.path.dirname(x) for x in bin_paths)) + plugin_bins = list(set(os.path.basename(x) for x in bin_paths)) # Check for the binary names in the default system paths as well as the path # specified in the module arguments. @@ -252,15 +257,15 @@ def main(): argument_spec=dict( name=dict(required=True), state=dict(default="present", choices=list(PACKAGE_STATE_MAP.keys())), - src=dict(default=None), - url=dict(default=None), + src=dict(), + url=dict(), timeout=dict(default="1m"), force=dict(type='bool', default=False), plugin_bin=dict(type="path"), plugin_dir=dict(default="/usr/share/elasticsearch/plugins/", type="path"), - proxy_host=dict(default=None), - proxy_port=dict(default=None), - version=dict(default=None) + proxy_host=dict(), + proxy_port=dict(), + version=dict() ), mutually_exclusive=[("src", "url")], supports_check_mode=True diff --git a/plugins/modules/storage/emc/emc_vnx_sg_member.py b/plugins/modules/emc_vnx_sg_member.py similarity index 78% rename from plugins/modules/storage/emc/emc_vnx_sg_member.py rename to plugins/modules/emc_vnx_sg_member.py index 20977687fc..fce2c59c32 100644 --- a/plugins/modules/storage/emc/emc_vnx_sg_member.py +++ b/plugins/modules/emc_vnx_sg_member.py @@ -1,56 +1,57 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- # # Copyright (c) 2018, Luca 'remix_tj' Lorenzetto # -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# GNU General Public License v3.0+ (see 
LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later # -from __future__ import (absolute_import, division, print_function) +from __future__ import annotations -__metaclass__ = type - - -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: emc_vnx_sg_member short_description: Manage storage group member on EMC VNX description: - - "This module manages the members of an existing storage group." - + - This module manages the members of an existing storage group. extends_documentation_fragment: -- community.general.emc.emc_vnx + - community.general.emc.emc_vnx + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: none options: - name: - description: - - Name of the Storage group to manage. - required: true - type: str - lunid: - description: - - Lun id to be added. - required: true - type: int - state: - description: - - Indicates the desired lunid state. - - C(present) ensures specified lunid is present in the Storage Group. - - C(absent) ensures specified lunid is absent from Storage Group. - default: present - choices: [ "present", "absent"] - type: str + name: + description: + - Name of the Storage group to manage. + required: true + type: str + lunid: + description: + - LUN ID to be added. + required: true + type: int + state: + description: + - Indicates the desired lunid state. + - V(present) ensures specified O(lunid) is present in the Storage Group. + - V(absent) ensures specified O(lunid) is absent from Storage Group. 
+ default: present + choices: ["present", "absent"] + type: str author: - - Luca 'remix_tj' Lorenzetto (@remixtj) -''' + - Luca 'remix_tj' Lorenzetto (@remixtj) +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: Add lun to storage group community.general.emc_vnx_sg_member: name: sg01 @@ -68,14 +69,14 @@ EXAMPLES = ''' sp_password: sysadmin lunid: 100 state: absent -''' +""" -RETURN = ''' +RETURN = r""" hluid: - description: LUNID that hosts attached to the storage group will see. - type: int - returned: success -''' + description: LUNID visible to hosts attached to the storage group. + type: int + returned: success +""" import traceback diff --git a/plugins/modules/clustering/etcd3.py b/plugins/modules/etcd3.py similarity index 68% rename from plugins/modules/clustering/etcd3.py rename to plugins/modules/etcd3.py index 6a09513364..397bb1d767 100644 --- a/plugins/modules/clustering/etcd3.py +++ b/plugins/modules/etcd3.py @@ -1,84 +1,89 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- # -# (c) 2018, Jean-Philippe Evrard -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# Copyright (c) 2018, Jean-Philippe Evrard +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: etcd3 -short_description: "Set or delete key value pairs from an etcd3 cluster" +short_description: Set or delete key value pairs from an etcd3 cluster requirements: - etcd3 description: - - Sets or deletes values in etcd3 cluster using its v3 api. - - Needs python etcd3 lib to work + - Sets or deletes values in etcd3 cluster using its v3 API. + - Needs python etcd3 lib to work. 
+extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: none options: - key: - type: str - description: - - the key where the information is stored in the cluster - required: true - value: - type: str - description: - - the information stored - required: true - host: - type: str - description: - - the IP address of the cluster - default: 'localhost' - port: - type: int - description: - - the port number used to connect to the cluster - default: 2379 - state: - type: str - description: - - the state of the value for the key. - - can be present or absent - required: true - choices: [ present, absent ] - user: - type: str - description: - - The etcd user to authenticate with. - password: - type: str - description: - - The password to use for authentication. - - Required if I(user) is defined. - ca_cert: - type: path - description: - - The Certificate Authority to use to verify the etcd host. - - Required if I(client_cert) and I(client_key) are defined. - client_cert: - type: path - description: - - PEM formatted certificate chain file to be used for SSL client authentication. - - Required if I(client_key) is defined. - client_key: - type: path - description: - - PEM formatted file that contains your private key to be used for SSL client authentication. - - Required if I(client_cert) is defined. - timeout: - type: int - description: - - The socket level timeout in seconds. + key: + type: str + description: + - The key where the information is stored in the cluster. + required: true + value: + type: str + description: + - The information stored. + required: true + host: + type: str + description: + - The IP address of the cluster. + default: 'localhost' + port: + type: int + description: + - The port number used to connect to the cluster. + default: 2379 + state: + type: str + description: + - The state of the value for the key. + - Can be present or absent. 
+ required: true + choices: [present, absent] + user: + type: str + description: + - The etcd user to authenticate with. + password: + type: str + description: + - The password to use for authentication. + - Required if O(user) is defined. + ca_cert: + type: path + description: + - The Certificate Authority to use to verify the etcd host. + - Required if O(client_cert) and O(client_key) are defined. + client_cert: + type: path + description: + - PEM formatted certificate chain file to be used for SSL client authentication. + - Required if O(client_key) is defined. + client_key: + type: path + description: + - PEM formatted file that contains your private key to be used for SSL client authentication. + - Required if O(client_cert) is defined. + timeout: + type: int + description: + - The socket level timeout in seconds. author: - - Jean-Philippe Evrard (@evrardjp) - - Victor Fauth (@vfauth) -''' + - Jean-Philippe Evrard (@evrardjp) + - Victor Fauth (@vfauth) +""" -EXAMPLES = """ +EXAMPLES = r""" - name: Store a value "bar" under the key "foo" for a cluster located "http://localhost:2379" community.general.etcd3: key: "foo" @@ -106,16 +111,16 @@ EXAMPLES = """ client_key: "/etc/ssl/private/key.pem" """ -RETURN = ''' +RETURN = r""" key: - description: The key that was queried - returned: always - type: str + description: The key that was queried. + returned: always + type: str old_value: - description: The previous value in the cluster - returned: always - type: str -''' + description: The previous value in the cluster. 
+ returned: always + type: str +""" import traceback @@ -126,6 +131,7 @@ from ansible.module_utils.common.text.converters import to_native try: import etcd3 HAS_ETCD = True + ETCD_IMP_ERR = None except ImportError: ETCD_IMP_ERR = traceback.format_exc() HAS_ETCD = False @@ -184,13 +190,8 @@ def run_module(): allowed_keys = ['host', 'port', 'ca_cert', 'cert_cert', 'cert_key', 'timeout', 'user', 'password'] - # TODO(evrardjp): Move this back to a dict comprehension when python 2.7 is - # the minimum supported version - # client_params = {key: value for key, value in module.params.items() if key in allowed_keys} - client_params = dict() - for key, value in module.params.items(): - if key in allowed_keys: - client_params[key] = value + + client_params = {key: value for key, value in module.params.items() if key in allowed_keys} try: etcd = etcd3.client(**client_params) except Exception as exp: diff --git a/plugins/modules/facter_facts.py b/plugins/modules/facter_facts.py new file mode 100644 index 0000000000..8ef5d7776b --- /dev/null +++ b/plugins/modules/facter_facts.py @@ -0,0 +1,86 @@ +#!/usr/bin/python + +# Copyright (c) 2023, Alexei Znamensky +# Copyright (c) 2012, Michael DeHaan +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + +DOCUMENTATION = r""" +module: facter_facts +short_description: Runs the discovery program C(facter) on the remote system and return Ansible facts +version_added: 8.0.0 +description: + - Runs the C(facter) discovery program (U(https://github.com/puppetlabs/facter)) on the remote system, returning Ansible + facts from the JSON data that can be useful for inventory purposes. 
+extends_documentation_fragment: + - community.general.attributes + - community.general.attributes.facts + - community.general.attributes.facts_module +options: + arguments: + description: + - Specifies arguments for facter. + type: list + elements: str +requirements: + - facter + - ruby-json +author: + - Ansible Core Team + - Michael DeHaan +""" + +EXAMPLES = r""" +- name: Execute facter no arguments + community.general.facter_facts: + +- name: Execute facter with arguments + community.general.facter_facts: + arguments: + - -p + - system_uptime + - timezone + - is_virtual +""" + +RETURN = r""" +ansible_facts: + description: Dictionary with one key C(facter). + returned: always + type: dict + contains: + facter: + description: Dictionary containing facts discovered in the remote system. + returned: always + type: dict +""" + +import json + +from ansible.module_utils.basic import AnsibleModule + + +def main(): + module = AnsibleModule( + argument_spec=dict( + arguments=dict(type='list', elements='str'), + ), + supports_check_mode=True, + ) + + facter_path = module.get_bin_path( + 'facter', + opt_dirs=['/opt/puppetlabs/bin']) + + cmd = [facter_path, "--json"] + if module.params['arguments']: + cmd += module.params['arguments'] + + rc, out, err = module.run_command(cmd, check_rc=True) + module.exit_json(ansible_facts=dict(facter=json.loads(out))) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/files/ini_file.py b/plugins/modules/files/ini_file.py deleted file mode 100644 index 79d373f3a7..0000000000 --- a/plugins/modules/files/ini_file.py +++ /dev/null @@ -1,483 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Copyright: (c) 2012, Jan-Piet Mens -# Copyright: (c) 2015, Ales Nosek -# Copyright: (c) 2017, Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = r''' ---- -module: 
ini_file -short_description: Tweak settings in INI files -extends_documentation_fragment: files -description: - - Manage (add, remove, change) individual settings in an INI-style file without having - to manage the file as a whole with, say, M(ansible.builtin.template) or M(ansible.builtin.assemble). - - Adds missing sections if they don't exist. - - Before Ansible 2.0, comments are discarded when the source file is read, and therefore will not show up in the destination file. - - Since Ansible 2.3, this module adds missing ending newlines to files to keep in line with the POSIX standard, even when - no other modifications need to be applied. -options: - path: - description: - - Path to the INI-style file; this file is created if required. - - Before Ansible 2.3 this option was only usable as I(dest). - type: path - required: true - aliases: [ dest ] - section: - description: - - Section name in INI file. This is added if C(state=present) automatically when - a single value is being set. - - If left empty or set to C(null), the I(option) will be placed before the first I(section). - - Using C(null) is also required if the config format does not support sections. - type: str - required: true - option: - description: - - If set (required for changing a I(value)), this is the name of the option. - - May be omitted if adding/removing a whole I(section). - type: str - value: - description: - - The string value to be associated with an I(option). - - May be omitted when removing an I(option). - - Mutually exclusive with I(values). - - I(value=v) is equivalent to I(values=[v]). - type: str - values: - description: - - The string value to be associated with an I(option). - - May be omitted when removing an I(option). - - Mutually exclusive with I(value). - - I(value=v) is equivalent to I(values=[v]). 
- type: list - elements: str - version_added: 3.6.0 - backup: - description: - - Create a backup file including the timestamp information so you can get - the original file back if you somehow clobbered it incorrectly. - type: bool - default: no - state: - description: - - If set to C(absent) and I(exclusive) set to C(yes) all matching I(option) lines are removed. - - If set to C(absent) and I(exclusive) set to C(no) the specified C(option=value) lines are removed, - but the other I(option)s with the same name are not touched. - - If set to C(present) and I(exclusive) set to C(no) the specified C(option=values) lines are added, - but the other I(option)s with the same name are not touched. - - If set to C(present) and I(exclusive) set to C(yes) all given C(option=values) lines will be - added and the other I(option)s with the same name are removed. - type: str - choices: [ absent, present ] - default: present - exclusive: - description: - - If set to C(yes) (default), all matching I(option) lines are removed when I(state=absent), - or replaced when I(state=present). - - If set to C(no), only the specified I(value(s)) are added when I(state=present), - or removed when I(state=absent), and existing ones are not modified. - type: bool - default: yes - version_added: 3.6.0 - no_extra_spaces: - description: - - Do not insert spaces before and after '=' symbol. - type: bool - default: no - create: - description: - - If set to C(no), the module will fail if the file does not already exist. - - By default it will create the file if it is missing. - type: bool - default: yes - allow_no_value: - description: - - Allow option without value and without '=' symbol. - type: bool - default: no -notes: - - While it is possible to add an I(option) without specifying a I(value), this makes no sense. - - As of Ansible 2.3, the I(dest) option has been changed to I(path) as default, but I(dest) still works as well. 
- - As of community.general 3.2.0, UTF-8 BOM markers are discarded when reading files. -author: - - Jan-Piet Mens (@jpmens) - - Ales Nosek (@noseka1) -''' - -EXAMPLES = r''' -# Before Ansible 2.3, option 'dest' was used instead of 'path' -- name: Ensure "fav=lemonade is in section "[drinks]" in specified file - community.general.ini_file: - path: /etc/conf - section: drinks - option: fav - value: lemonade - mode: '0600' - backup: yes - -- name: Ensure "temperature=cold is in section "[drinks]" in specified file - community.general.ini_file: - path: /etc/anotherconf - section: drinks - option: temperature - value: cold - backup: yes - -- name: Add "beverage=lemon juice" is in section "[drinks]" in specified file - community.general.ini_file: - path: /etc/conf - section: drinks - option: beverage - value: lemon juice - mode: '0600' - state: present - exclusive: no - -- name: Ensure multiple values "beverage=coke" and "beverage=pepsi" are in section "[drinks]" in specified file - community.general.ini_file: - path: /etc/conf - section: drinks - option: beverage - values: - - coke - - pepsi - mode: '0600' - state: present -''' - -import io -import os -import re -import tempfile -import traceback - -from ansible.module_utils.basic import AnsibleModule -from ansible.module_utils.common.text.converters import to_bytes, to_text - - -def match_opt(option, line): - option = re.escape(option) - return re.match('[#;]?( |\t)*(%s)( |\t)*(=|$)( |\t)*(.*)' % option, line) - - -def match_active_opt(option, line): - option = re.escape(option) - return re.match('( |\t)*(%s)( |\t)*(=|$)( |\t)*(.*)' % option, line) - - -def update_section_line(changed, section_lines, index, changed_lines, newline, msg): - option_changed = section_lines[index] != newline - changed = changed or option_changed - if option_changed: - msg = 'option changed' - section_lines[index] = newline - changed_lines[index] = 1 - return (changed, msg) - - -def do_ini(module, filename, section=None, option=None, 
values=None, - state='present', exclusive=True, backup=False, no_extra_spaces=False, - create=True, allow_no_value=False): - - if section is not None: - section = to_text(section) - if option is not None: - option = to_text(option) - - # deduplicate entries in values - values_unique = [] - [values_unique.append(to_text(value)) for value in values if value not in values_unique and value is not None] - values = values_unique - - diff = dict( - before='', - after='', - before_header='%s (content)' % filename, - after_header='%s (content)' % filename, - ) - - if not os.path.exists(filename): - if not create: - module.fail_json(rc=257, msg='Destination %s does not exist!' % filename) - destpath = os.path.dirname(filename) - if not os.path.exists(destpath) and not module.check_mode: - os.makedirs(destpath) - ini_lines = [] - else: - with io.open(filename, 'r', encoding="utf-8-sig") as ini_file: - ini_lines = [to_text(line) for line in ini_file.readlines()] - - if module._diff: - diff['before'] = u''.join(ini_lines) - - changed = False - - # ini file could be empty - if not ini_lines: - ini_lines.append(u'\n') - - # last line of file may not contain a trailing newline - if ini_lines[-1] == u"" or ini_lines[-1][-1] != u'\n': - ini_lines[-1] += u'\n' - changed = True - - # append fake section lines to simplify the logic - # At top: - # Fake random section to do not match any other in the file - # Using commit hash as fake section name - fake_section_name = u"ad01e11446efb704fcdbdb21f2c43757423d91c5" - - # Insert it at the beginning - ini_lines.insert(0, u'[%s]' % fake_section_name) - - # At bottom: - ini_lines.append(u'[') - - # If no section is defined, fake section is used - if not section: - section = fake_section_name - - within_section = not section - section_start = section_end = 0 - msg = 'OK' - if no_extra_spaces: - assignment_format = u'%s=%s\n' - else: - assignment_format = u'%s = %s\n' - - option_no_value_present = False - - non_blank_non_comment_pattern = 
re.compile(to_text(r'^[ \t]*([#;].*)?$')) - - before = after = [] - section_lines = [] - - for index, line in enumerate(ini_lines): - # find start and end of section - if line.startswith(u'[%s]' % section): - within_section = True - section_start = index - elif line.startswith(u'['): - if within_section: - section_end = index - break - - before = ini_lines[0:section_start] - section_lines = ini_lines[section_start:section_end] - after = ini_lines[section_end:len(ini_lines)] - - # Keep track of changed section_lines - changed_lines = [0] * len(section_lines) - - # handling multiple instances of option=value when state is 'present' with/without exclusive is a bit complex - # - # 1. edit all lines where we have a option=value pair with a matching value in values[] - # 2. edit all the remaing lines where we have a matching option - # 3. delete remaining lines where we have a matching option - # 4. insert missing option line(s) at the end of the section - - if state == 'present' and option: - for index, line in enumerate(section_lines): - if match_opt(option, line): - match = match_opt(option, line) - if values and match.group(6) in values: - matched_value = match.group(6) - if not matched_value and allow_no_value: - # replace existing option with no value line(s) - newline = u'%s\n' % option - option_no_value_present = True - else: - # replace existing option=value line(s) - newline = assignment_format % (option, matched_value) - (changed, msg) = update_section_line(changed, section_lines, index, changed_lines, newline, msg) - values.remove(matched_value) - elif not values and allow_no_value: - # replace existing option with no value line(s) - newline = u'%s\n' % option - (changed, msg) = update_section_line(changed, section_lines, index, changed_lines, newline, msg) - option_no_value_present = True - break - - if state == 'present' and exclusive and not allow_no_value: - # override option with no value to option with value if not allow_no_value - if len(values) > 0: - 
for index, line in enumerate(section_lines): - if not changed_lines[index] and match_active_opt(option, section_lines[index]): - newline = assignment_format % (option, values.pop(0)) - (changed, msg) = update_section_line(changed, section_lines, index, changed_lines, newline, msg) - if len(values) == 0: - break - # remove all remaining option occurrences from the rest of the section - for index in range(len(section_lines) - 1, 0, -1): - if not changed_lines[index] and match_active_opt(option, section_lines[index]): - del section_lines[index] - del changed_lines[index] - changed = True - msg = 'option changed' - - if state == 'present': - # insert missing option line(s) at the end of the section - for index in range(len(section_lines), 0, -1): - # search backwards for previous non-blank or non-comment line - if not non_blank_non_comment_pattern.match(section_lines[index - 1]): - if option and values: - # insert option line(s) - for element in values[::-1]: - # items are added backwards, so traverse the list backwards to not confuse the user - # otherwise some of their options might appear in reverse order for whatever fancy reason ¯\_(ツ)_/¯ - if element is not None: - # insert option=value line - section_lines.insert(index, assignment_format % (option, element)) - msg = 'option added' - changed = True - elif element is None and allow_no_value: - # insert option with no value line - section_lines.insert(index, u'%s\n' % option) - msg = 'option added' - changed = True - elif option and not values and allow_no_value and not option_no_value_present: - # insert option with no value line(s) - section_lines.insert(index, u'%s\n' % option) - msg = 'option added' - changed = True - break - - if state == 'absent': - if option: - if exclusive: - # delete all option line(s) with given option and ignore value - new_section_lines = [line for line in section_lines if not (match_active_opt(option, line))] - if section_lines != new_section_lines: - changed = True - msg = 'option 
changed' - section_lines = new_section_lines - elif not exclusive and len(values) > 0: - # delete specified option=value line(s) - new_section_lines = [i for i in section_lines if not (match_active_opt(option, i) and match_active_opt(option, i).group(6) in values)] - if section_lines != new_section_lines: - changed = True - msg = 'option changed' - section_lines = new_section_lines - else: - # drop the entire section - if section_lines: - section_lines = [] - msg = 'section removed' - changed = True - - # reassemble the ini_lines after manipulation - ini_lines = before + section_lines + after - - # remove the fake section line - del ini_lines[0] - del ini_lines[-1:] - - if not within_section and state == 'present': - ini_lines.append(u'[%s]\n' % section) - msg = 'section and option added' - if option and values: - for value in values: - ini_lines.append(assignment_format % (option, value)) - elif option and not values and allow_no_value: - ini_lines.append(u'%s\n' % option) - else: - msg = 'only section added' - changed = True - - if module._diff: - diff['after'] = u''.join(ini_lines) - - backup_file = None - if changed and not module.check_mode: - if backup: - backup_file = module.backup_local(filename) - - encoded_ini_lines = [to_bytes(line) for line in ini_lines] - try: - tmpfd, tmpfile = tempfile.mkstemp(dir=module.tmpdir) - f = os.fdopen(tmpfd, 'wb') - f.writelines(encoded_ini_lines) - f.close() - except IOError: - module.fail_json(msg="Unable to create temporary file %s", traceback=traceback.format_exc()) - - try: - module.atomic_move(tmpfile, filename) - except IOError: - module.ansible.fail_json(msg='Unable to move temporary \ - file %s to %s, IOError' % (tmpfile, filename), traceback=traceback.format_exc()) - - return (changed, backup_file, diff, msg) - - -def main(): - - module = AnsibleModule( - argument_spec=dict( - path=dict(type='path', required=True, aliases=['dest']), - section=dict(type='str', required=True), - option=dict(type='str'), - 
value=dict(type='str'), - values=dict(type='list', elements='str'), - backup=dict(type='bool', default=False), - state=dict(type='str', default='present', choices=['absent', 'present']), - exclusive=dict(type='bool', default=True), - no_extra_spaces=dict(type='bool', default=False), - allow_no_value=dict(type='bool', default=False), - create=dict(type='bool', default=True) - ), - mutually_exclusive=[ - ['value', 'values'] - ], - add_file_common_args=True, - supports_check_mode=True, - ) - - path = module.params['path'] - section = module.params['section'] - option = module.params['option'] - value = module.params['value'] - values = module.params['values'] - state = module.params['state'] - exclusive = module.params['exclusive'] - backup = module.params['backup'] - no_extra_spaces = module.params['no_extra_spaces'] - allow_no_value = module.params['allow_no_value'] - create = module.params['create'] - - if state == 'present' and not allow_no_value and value is None and not values: - module.fail_json(msg="Parameter 'value(s)' must be defined if state=present and allow_no_value=False.") - - if value is not None: - values = [value] - elif values is None: - values = [] - - (changed, backup_file, diff, msg) = do_ini(module, path, section, option, values, state, exclusive, backup, no_extra_spaces, create, allow_no_value) - - if not module.check_mode and os.path.exists(path): - file_args = module.load_file_common_arguments(module.params) - changed = module.set_fs_attributes_if_different(file_args, changed) - - results = dict( - changed=changed, - diff=diff, - msg=msg, - path=path, - ) - if backup_file is not None: - results['backup_file'] = backup_file - - # Mission complete - module.exit_json(**results) - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/files/sapcar_extract.py b/plugins/modules/files/sapcar_extract.py deleted file mode 100644 index 8463703c1e..0000000000 --- a/plugins/modules/files/sapcar_extract.py +++ /dev/null @@ -1,220 +0,0 @@ 
-#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Copyright: (c) 2021, Rainer Leber -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - -DOCUMENTATION = ''' ---- -module: sapcar_extract -short_description: Manages SAP SAPCAR archives -version_added: "3.2.0" -description: - - Provides support for unpacking C(sar)/C(car) files with the SAPCAR binary from SAP and pulling - information back into Ansible. -options: - path: - description: The path to the SAR/CAR file. - type: path - required: true - dest: - description: - - The destination where SAPCAR extracts the SAR file. Missing folders will be created. - If this parameter is not provided it will unpack in the same folder as the SAR file. - type: path - binary_path: - description: - - The path to the SAPCAR binary, for example, C(/home/dummy/sapcar) or C(https://myserver/SAPCAR). - If this parameter is not provided the module will look in C(PATH). - type: path - signature: - description: - - If C(true) the signature will be extracted. - default: false - type: bool - security_library: - description: - - The path to the security library, for example, C(/usr/sap/hostctrl/exe/libsapcrytp.so), for signature operations. - type: path - manifest: - description: - - The name of the manifest. - default: "SIGNATURE.SMF" - type: str - remove: - description: - - If C(true) the SAR/CAR file will be removed. B(This should be used with caution!) - default: false - type: bool -author: - - Rainer Leber (@RainerLeber) -notes: - - Always returns C(changed=true) in C(check_mode). 
-''' - -EXAMPLES = """ -- name: Extract SAR file - community.general.sapcar_extract: - path: "~/source/hana.sar" - -- name: Extract SAR file with destination - community.general.sapcar_extract: - path: "~/source/hana.sar" - dest: "~/test/" - -- name: Extract SAR file with destination and download from webserver can be a fileshare as well - community.general.sapcar_extract: - path: "~/source/hana.sar" - dest: "~/dest/" - binary_path: "https://myserver/SAPCAR" - -- name: Extract SAR file and delete SAR after extract - community.general.sapcar_extract: - path: "~/source/hana.sar" - remove: true - -- name: Extract SAR file with manifest - community.general.sapcar_extract: - path: "~/source/hana.sar" - signature: true - -- name: Extract SAR file with manifest and rename it - community.general.sapcar_extract: - path: "~/source/hana.sar" - manifest: "MyNewSignature.SMF" - signature: true -""" - -import os -from tempfile import NamedTemporaryFile -from ansible.module_utils.basic import AnsibleModule -from ansible.module_utils.urls import open_url -from ansible.module_utils.common.text.converters import to_native - - -def get_list_of_files(dir_name): - # create a list of file and directories - # names in the given directory - list_of_file = os.listdir(dir_name) - allFiles = list() - # Iterate over all the entries - for entry in list_of_file: - # Create full path - fullPath = os.path.join(dir_name, entry) - # If entry is a directory then get the list of files in this directory - if os.path.isdir(fullPath): - allFiles = allFiles + [fullPath] - allFiles = allFiles + get_list_of_files(fullPath) - else: - allFiles.append(fullPath) - return allFiles - - -def download_SAPCAR(binary_path, module): - bin_path = None - # download sapcar binary if url is provided otherwise path is returned - if binary_path is not None: - if binary_path.startswith('https://') or binary_path.startswith('http://'): - random_file = NamedTemporaryFile(delete=False) - with open_url(binary_path) as response: 
- with random_file as out_file: - data = response.read() - out_file.write(data) - os.chmod(out_file.name, 0o700) - bin_path = out_file.name - module.add_cleanup_file(bin_path) - else: - bin_path = binary_path - return bin_path - - -def check_if_present(command, path, dest, signature, manifest, module): - # manipuliating output from SAR file for compare with already extracted files - iter_command = [command, '-tvf', path] - sar_out = module.run_command(iter_command)[1] - sar_raw = sar_out.split("\n")[1:] - if dest[-1] != "/": - dest = dest + "/" - sar_files = [dest + x.split(" ")[-1] for x in sar_raw if x] - # remove any SIGNATURE.SMF from list because it will not unpacked if signature is false - if not signature: - sar_files = [item for item in sar_files if '.SMF' not in item] - # if signature is renamed manipulate files in list of sar file for compare. - if manifest != "SIGNATURE.SMF": - sar_files = [item for item in sar_files if '.SMF' not in item] - sar_files = sar_files + [manifest] - # get extracted files if present - files_extracted = get_list_of_files(dest) - # compare extracted files with files in sar file - present = all(elem in files_extracted for elem in sar_files) - return present - - -def main(): - module = AnsibleModule( - argument_spec=dict( - path=dict(type='path', required=True), - dest=dict(type='path'), - binary_path=dict(type='path'), - signature=dict(type='bool', default=False), - security_library=dict(type='path'), - manifest=dict(type='str', default="SIGNATURE.SMF"), - remove=dict(type='bool', default=False), - ), - supports_check_mode=True, - ) - rc, out, err = [0, "", ""] - params = module.params - check_mode = module.check_mode - - path = params['path'] - dest = params['dest'] - signature = params['signature'] - security_library = params['security_library'] - manifest = params['manifest'] - remove = params['remove'] - - bin_path = download_SAPCAR(params['binary_path'], module) - - if dest is None: - dest_head_tail = os.path.split(path) - 
dest = dest_head_tail[0] + '/' - else: - if not os.path.exists(dest): - os.makedirs(dest, 0o755) - - if bin_path is not None: - command = [module.get_bin_path(bin_path, required=True)] - else: - try: - command = [module.get_bin_path('sapcar', required=True)] - except Exception as e: - module.fail_json(msg='Failed to find SAPCAR at the expected path or URL "{0}". Please check whether it is available: {1}' - .format(bin_path, to_native(e))) - - present = check_if_present(command[0], path, dest, signature, manifest, module) - - if not present: - command.extend(['-xvf', path, '-R', dest]) - if security_library: - command.extend(['-L', security_library]) - if signature: - command.extend(['-manifest', manifest]) - if not check_mode: - (rc, out, err) = module.run_command(command, check_rc=True) - changed = True - else: - changed = False - out = "allready unpacked" - - if remove: - os.remove(path) - - module.exit_json(changed=changed, message=rc, stdout=out, - stderr=err, command=' '.join(command)) - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/files/filesize.py b/plugins/modules/filesize.py similarity index 85% rename from plugins/modules/files/filesize.py rename to plugins/modules/filesize.py index 83edbe58ae..b0ef189143 100644 --- a/plugins/modules/files/filesize.py +++ b/plugins/modules/filesize.py @@ -1,29 +1,31 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- -# Copyright: (c) 2021, quidame -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# Copyright (c) 2021, quidame +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = r''' ---- +DOCUMENTATION = r""" module: filesize short_description: Create a file with a given size, or resize it if it exists description: 
- - This module is a simple wrapper around C(dd) to create, extend or truncate - a file, given its size. It can be used to manage swap files (that require - contiguous blocks) or alternatively, huge sparse files. - + - This module is a simple wrapper around C(dd) to create, extend or truncate a file, given its size. It can be used to manage + swap files (that require contiguous blocks) or alternatively, huge sparse files. author: - quidame (@quidame) version_added: "3.0.0" +attributes: + check_mode: + support: full + diff_mode: + support: full + options: path: description: @@ -33,73 +35,58 @@ options: size: description: - Requested size of the file. - - The value is a number (either C(int) or C(float)) optionally followed - by a multiplicative suffix, that can be one of C(B) (bytes), C(KB) or - C(kB) (= 1000B), C(MB) or C(mB) (= 1000kB), C(GB) or C(gB) (= 1000MB), - and so on for C(T), C(P), C(E), C(Z) and C(Y); or alternatively one of - C(K), C(k) or C(KiB) (= 1024B); C(M), C(m) or C(MiB) (= 1024KiB); - C(G), C(g) or C(GiB) (= 1024MiB); and so on. - - If the multiplicative suffix is not provided, the value is treated as - an integer number of blocks of I(blocksize) bytes each (float values - are rounded to the closest integer). - - When the I(size) value is equal to the current file size, does nothing. - - When the I(size) value is bigger than the current file size, bytes from - I(source) (if I(sparse) is not C(false)) are appended to the file - without truncating it, in other words, without modifying the existing - bytes of the file. - - When the I(size) value is smaller than the current file size, it is - truncated to the requested value without modifying bytes before this - value. - - That means that a file of any arbitrary size can be grown to any other - arbitrary size, and then resized down to its initial size without - modifying its initial content. 
+ - The value is a number (either C(int) or C(float)) optionally followed by a multiplicative suffix, that can be one + of V(B) (bytes), V(KB) or V(kB) (= 1000B), V(MB) or V(mB) (= 1000kB), V(GB) or V(gB) (= 1000MB), and so on for V(T), + V(P), V(E), V(Z) and V(Y); or alternatively one of V(K), V(k) or V(KiB) (= 1024B); V(M), V(m) or V(MiB) (= 1024KiB); + V(G), V(g) or V(GiB) (= 1024MiB); and so on. + - If the multiplicative suffix is not provided, the value is treated as an integer number of blocks of O(blocksize) + bytes each (float values are rounded to the closest integer). + - When the O(size) value is equal to the current file size, does nothing. + - When the O(size) value is bigger than the current file size, bytes from O(source) (if O(sparse) is not V(false)) are + appended to the file without truncating it, in other words, without modifying the existing bytes of the file. + - When the O(size) value is smaller than the current file size, it is truncated to the requested value without modifying + bytes before this value. + - That means that a file of any arbitrary size can be grown to any other arbitrary size, and then resized down to its + initial size without modifying its initial content. type: raw required: true blocksize: description: - Size of blocks, in bytes if not followed by a multiplicative suffix. - - The numeric value (before the unit) C(MUST) be an integer (or a C(float) - if it equals an integer). - - If not set, the size of blocks is guessed from the OS and commonly - results in C(512) or C(4096) bytes, that is used internally by the - module or when I(size) has no unit. + - The numeric value (before the unit) B(MUST) be an integer (or a C(float) if it equals an integer). + - If not set, the size of blocks is guessed from the OS and commonly results in V(512) or V(4096) bytes, that is used + internally by the module or when O(size) has no unit. type: raw source: description: - Device or file that provides input data to provision the file. 
- - This parameter is ignored when I(sparse=true). + - This parameter is ignored when O(sparse=true). type: path default: /dev/zero force: description: - - Whether or not to overwrite the file if it exists, in other words, to - truncate it from 0. When C(true), the module is not idempotent, that - means it always reports I(changed=true). - - I(force=true) and I(sparse=true) are mutually exclusive. + - Whether or not to overwrite the file if it exists, in other words, to truncate it from 0. When V(true), the module + is not idempotent, that means it always reports C(changed=true). + - O(force=true) and O(sparse=true) are mutually exclusive. type: bool default: false sparse: description: - Whether or not the file to create should be a sparse file. - - This option is effective only on newly created files, or when growing a - file, only for the bytes to append. + - This option is effective only on newly created files, or when growing a file, only for the bytes to append. - This option is not supported on OSes or filesystems not supporting sparse files. - - I(force=true) and I(sparse=true) are mutually exclusive. + - O(force=true) and O(sparse=true) are mutually exclusive. type: bool default: false unsafe_writes: description: - - This option is silently ignored. This module always modifies file - size in-place. - -notes: - - This module supports C(check_mode) and C(diff). - + - This option is silently ignored. This module always modifies file size in-place. requirements: - dd (Data Duplicator) in PATH extends_documentation_fragment: - ansible.builtin.files + - community.general.attributes seealso: - name: dd(1) manpage for Linux @@ -133,9 +120,9 @@ seealso: - name: busybox(1) manpage for Linux description: Manual page of the GNU/Linux's busybox, that provides its own dd implementation. 
link: https://www.unix.com/man-page/linux/1/busybox -''' +""" -EXAMPLES = r''' +EXAMPLES = r""" - name: Create a file of 1G filled with null bytes community.general.filesize: path: /var/bigfile @@ -178,9 +165,9 @@ EXAMPLES = r''' mode: u=rw,go= owner: root group: root -''' +""" -RETURN = r''' +RETURN = r""" cmd: description: Command executed to create or resize the file. type: str @@ -201,7 +188,7 @@ filesize: type: int sample: 1024 bytes: - description: Size of the file, in bytes, as the product of C(blocks) and C(blocksize). + description: Size of the file, in bytes, as the product of RV(filesize.blocks) and RV(filesize.blocksize). type: int sample: 512000 iec: @@ -224,7 +211,7 @@ path: type: str sample: /var/swap0 returned: always -''' +""" import re @@ -315,7 +302,7 @@ def split_size_unit(string, isint=False): Support optional space(s) between the numeric value and the unit. """ unit = re.sub(r'(\d|\.)', r'', string).strip() - value = float(re.sub(r'%s' % unit, r'', string).strip()) + value = float(re.sub(unit, r'', string).strip()) if isint and unit in ('B', ''): if int(value) != value: raise AssertionError("invalid blocksize value: bytes require an integer value") diff --git a/plugins/modules/system/filesystem.py b/plugins/modules/filesystem.py similarity index 68% rename from plugins/modules/system/filesystem.py rename to plugins/modules/filesystem.py index 6b38c58183..1477925de3 100644 --- a/plugins/modules/system/filesystem.py +++ b/plugins/modules/filesystem.py @@ -1,16 +1,14 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- -# Copyright: (c) 2021, quidame -# Copyright: (c) 2013, Alexander Bulimov -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# Copyright (c) 2021, quidame +# Copyright (c) 2013, Alexander Bulimov +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import absolute_import, 
division, print_function -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" author: - Alexander Bulimov (@abulimov) - quidame (@quidame) @@ -18,81 +16,98 @@ module: filesystem short_description: Makes a filesystem description: - This module creates a filesystem. +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: none options: state: description: - - If C(state=present), the filesystem is created if it doesn't already - exist, that is the default behaviour if I(state) is omitted. - - If C(state=absent), filesystem signatures on I(dev) are wiped if it - contains a filesystem (as known by C(blkid)). - - When C(state=absent), all other options but I(dev) are ignored, and the - module doesn't fail if the device I(dev) doesn't actually exist. + - If O(state=present), the filesystem is created if it does not already exist, that is the default behaviour if O(state) + is omitted. + - If O(state=absent), filesystem signatures on O(dev) are wiped if it contains a filesystem (as known by C(blkid)). + - When O(state=absent), all other options but O(dev) are ignored, and the module does not fail if the device O(dev) + does not actually exist. type: str - choices: [ present, absent ] + choices: [present, absent] default: present version_added: 1.3.0 fstype: - choices: [ btrfs, ext2, ext3, ext4, ext4dev, f2fs, lvm, ocfs2, reiserfs, xfs, vfat, swap, ufs ] + choices: [bcachefs, btrfs, ext2, ext3, ext4, ext4dev, f2fs, lvm, ocfs2, reiserfs, xfs, vfat, swap, ufs] description: - - Filesystem type to be created. This option is required with - C(state=present) (or if I(state) is omitted). - - ufs support has been added in community.general 3.4.0. + - Filesystem type to be created. This option is required with O(state=present) (or if O(state) is omitted). + - V(ufs) support has been added in community.general 3.4.0. 
+ - V(bcachefs) support has been added in community.general 8.6.0. type: str aliases: [type] dev: description: - - Target path to block device (Linux) or character device (FreeBSD) or - regular file (both). - - When setting Linux-specific filesystem types on FreeBSD, this module - only works when applying to regular files, aka disk images. - - Currently C(lvm) (Linux-only) and C(ufs) (FreeBSD-only) don't support - a regular file as their target I(dev). + - Target path to block device (Linux) or character device (FreeBSD) or regular file (both). + - When setting Linux-specific filesystem types on FreeBSD, this module only works when applying to regular files, also known as + disk images. + - Currently V(lvm) (Linux-only) and V(ufs) (FreeBSD-only) do not support a regular file as their target O(dev). - Support for character devices on FreeBSD has been added in community.general 3.4.0. type: path - required: yes + required: true aliases: [device] force: description: - - If C(yes), allows to create new filesystem on devices that already has filesystem. + - If V(true), allows to create new filesystem on devices that already has filesystem. type: bool - default: 'no' + default: false resizefs: description: - - If C(yes), if the block device and filesystem size differ, grow the filesystem into the space. - - Supported for C(btrfs), C(ext2), C(ext3), C(ext4), C(ext4dev), C(f2fs), C(lvm), C(xfs), C(ufs) and C(vfat) filesystems. - Attempts to resize other filesystem types will fail. - - XFS Will only grow if mounted. Currently, the module is based on commands - from C(util-linux) package to perform operations, so resizing of XFS is - not supported on FreeBSD systems. - - vFAT will likely fail if fatresize < 1.04. + - If V(true), if the block device and filesystem size differ, grow the filesystem into the space. + - >- + Supported when O(fstype) is one of: V(bcachefs), V(btrfs), V(ext2), V(ext3), V(ext4), V(ext4dev), V(f2fs), V(lvm), V(xfs), V(ufs) and V(vfat). 
+ Attempts to resize other filesystem types fail. + - XFS only grows if mounted. Currently, the module is based on commands from C(util-linux) package to perform operations, + so resizing of XFS is not supported on FreeBSD systems. + - VFAT is likely to fail if C(fatresize < 1.04). + - Mutually exclusive with O(uuid). type: bool - default: 'no' + default: false opts: description: - - List of options to be passed to mkfs command. + - List of options to be passed to C(mkfs) command. type: str + uuid: + description: + - Set filesystem's UUID to the given value. + - The UUID options specified in O(opts) take precedence over this value. + - See xfs_admin(8) (C(xfs)), tune2fs(8) (C(ext2), C(ext3), C(ext4), C(ext4dev)) for possible values. + - For O(fstype=lvm) the value is ignored, it resets the PV UUID if set. + - Supported for O(fstype) being one of V(bcachefs), V(ext2), V(ext3), V(ext4), V(ext4dev), V(lvm), or V(xfs). + - This is B(not idempotent). Specifying this option always results in a change. + - Mutually exclusive with O(resizefs). + type: str + version_added: 7.1.0 requirements: - - Uses specific tools related to the I(fstype) for creating or resizing a - filesystem (from packages e2fsprogs, xfsprogs, dosfstools, and so on). - - Uses generic tools mostly related to the Operating System (Linux or - FreeBSD) or available on both, as C(blkid). + - Uses specific tools related to the O(fstype) for creating or resizing a filesystem (from packages e2fsprogs, xfsprogs, + dosfstools, and so on). + - Uses generic tools mostly related to the Operating System (Linux or FreeBSD) or available on both, as C(blkid). - On FreeBSD, either C(util-linux) or C(e2fsprogs) package is required. notes: - - Potential filesystems on I(dev) are checked using C(blkid). In case C(blkid) - is unable to detect a filesystem (and in case C(fstyp) on FreeBSD is also - unable to detect a filesystem), this filesystem is overwritten even if - I(force) is C(no). 
- - On FreeBSD systems, both C(e2fsprogs) and C(util-linux) packages provide - a C(blkid) command that is compatible with this module. However, these - packages conflict with each other, and only the C(util-linux) package - provides the command required to not fail when I(state=absent). - - This module supports I(check_mode). + - Potential filesystems on O(dev) are checked using C(blkid). In case C(blkid) is unable to detect a filesystem (and in + case C(fstyp) on FreeBSD is also unable to detect a filesystem), this filesystem is overwritten even if O(force=false). + - On FreeBSD systems, both C(e2fsprogs) and C(util-linux) packages provide a C(blkid) command that is compatible with this + module. However, these packages conflict with each other, and only the C(util-linux) package provides the command required + to not fail when O(state=absent). seealso: - module: community.general.filesize - module: ansible.posix.mount -''' + - name: xfs_admin(8) manpage for Linux + description: Manual page of the GNU/Linux's xfs_admin implementation. + link: https://man7.org/linux/man-pages/man8/xfs_admin.8.html + - name: tune2fs(8) manpage for Linux + description: Manual page of the GNU/Linux's tune2fs implementation. 
+ link: https://man7.org/linux/man-pages/man8/tune2fs.8.html +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: Create a ext2 filesystem on /dev/sdb1 community.general.filesystem: fstype: ext2 @@ -113,7 +128,25 @@ EXAMPLES = ''' community.general.filesystem: dev: /path/to/disk.img fstype: vfat -''' + +- name: Reset an xfs filesystem UUID on /dev/sdb1 + community.general.filesystem: + fstype: xfs + dev: /dev/sdb1 + uuid: generate + +- name: Reset an ext4 filesystem UUID on /dev/sdb1 + community.general.filesystem: + fstype: ext4 + dev: /dev/sdb1 + uuid: random + +- name: Reset an LVM filesystem (PV) UUID on /dev/sdc + community.general.filesystem: + fstype: lvm + dev: /dev/sdc + uuid: random +""" import os import platform @@ -171,10 +204,15 @@ class Filesystem(object): MKFS = None MKFS_FORCE_FLAGS = [] + MKFS_SET_UUID_OPTIONS = None + MKFS_SET_UUID_EXTRA_OPTIONS = [] INFO = None GROW = None GROW_MAX_SPACE_FLAGS = [] GROW_MOUNTPOINT_ONLY = False + CHANGE_UUID = None + CHANGE_UUID_OPTION = None + CHANGE_UUID_OPTION_HAS_ARG = True LANG_ENV = {'LANG': 'C', 'LC_ALL': 'C', 'LC_MESSAGES': 'C'} @@ -193,13 +231,19 @@ class Filesystem(object): """ raise NotImplementedError() - def create(self, opts, dev): + def create(self, opts, dev, uuid=None): if self.module.check_mode: return + if uuid and self.MKFS_SET_UUID_OPTIONS: + if not (set(self.MKFS_SET_UUID_OPTIONS) & set(opts)): + opts += [self.MKFS_SET_UUID_OPTIONS[0], uuid] + self.MKFS_SET_UUID_EXTRA_OPTIONS + mkfs = self.module.get_bin_path(self.MKFS, required=True) cmd = [mkfs] + self.MKFS_FORCE_FLAGS + opts + [str(dev)] self.module.run_command(cmd, check_rc=True) + if uuid and self.CHANGE_UUID and self.MKFS_SET_UUID_OPTIONS is None: + self.change_uuid(new_uuid=uuid, dev=dev) def wipefs(self, dev): if self.module.check_mode: @@ -248,11 +292,31 @@ class Filesystem(object): dummy, out, dummy = self.module.run_command(self.grow_cmd(grow_target), check_rc=True) return out + def change_uuid_cmd(self, new_uuid, target): + """Build and 
return the UUID change command line as list.""" + cmdline = [self.module.get_bin_path(self.CHANGE_UUID, required=True)] + if self.CHANGE_UUID_OPTION_HAS_ARG: + cmdline += [self.CHANGE_UUID_OPTION, new_uuid, target] + else: + cmdline += [self.CHANGE_UUID_OPTION, target] + return cmdline + + def change_uuid(self, new_uuid, dev): + """Change filesystem UUID. Returns stdout of used command""" + if self.module.check_mode: + self.module.exit_json(change=True, msg='Changing %s filesystem UUID on device %s' % (self.fstype, dev)) + + dummy, out, dummy = self.module.run_command(self.change_uuid_cmd(new_uuid=new_uuid, target=str(dev)), check_rc=True) + return out + class Ext(Filesystem): MKFS_FORCE_FLAGS = ['-F'] + MKFS_SET_UUID_OPTIONS = ['-U'] INFO = 'tune2fs' GROW = 'resize2fs' + CHANGE_UUID = 'tune2fs' + CHANGE_UUID_OPTION = "-U" def get_fs_size(self, dev): """Get Block count and Block size and return their product.""" @@ -291,6 +355,8 @@ class XFS(Filesystem): INFO = 'xfs_info' GROW = 'xfs_growfs' GROW_MOUNTPOINT_ONLY = True + CHANGE_UUID = "xfs_admin" + CHANGE_UUID_OPTION = "-U" def get_fs_size(self, dev): """Get bsize and blocks and return their product.""" @@ -329,6 +395,48 @@ class Reiserfs(Filesystem): MKFS_FORCE_FLAGS = ['-q'] +class Bcachefs(Filesystem): + MKFS = 'mkfs.bcachefs' + MKFS_FORCE_FLAGS = ['--force'] + MKFS_SET_UUID_OPTIONS = ['-U', '--uuid'] + INFO = 'bcachefs' + GROW = 'bcachefs' + GROW_MAX_SPACE_FLAGS = ['device', 'resize'] + + def get_fs_size(self, dev): + """Return size in bytes of filesystem on device (integer).""" + dummy, stdout, dummy = self.module.run_command([self.module.get_bin_path(self.INFO), + 'show-super', str(dev)], check_rc=True) + + for line in stdout.splitlines(): + if "Size: " in line: + parts = line.split() + unit = parts[2] + + base = None + exp = None + + units_2 = ["B", "KiB", "MiB", "GiB", "TiB", "PiB", "EiB", "ZiB", "YiB"] + units_10 = ["B", "kB", "MB", "GB", "TB", "PB", "EB", "ZB", "YB"] + + try: + exp = units_2.index(unit) + 
base = 1024 + except ValueError: + exp = units_10.index(unit) + base = 1000 + + if exp == 0: + value = int(parts[1]) + else: + value = float(parts[1]) + + if base is not None and exp is not None: + return int(value * pow(base, exp)) + + raise ValueError(repr(stdout)) + + class Btrfs(Filesystem): MKFS = 'mkfs.btrfs' INFO = 'btrfs' @@ -444,8 +552,13 @@ class VFAT(Filesystem): class LVM(Filesystem): MKFS = 'pvcreate' MKFS_FORCE_FLAGS = ['-f'] + MKFS_SET_UUID_OPTIONS = ['-u', '--uuid'] + MKFS_SET_UUID_EXTRA_OPTIONS = ['--norestorefile'] INFO = 'pvs' GROW = 'pvresize' + CHANGE_UUID = 'pvchange' + CHANGE_UUID_OPTION = '-u' + CHANGE_UUID_OPTION_HAS_ARG = False def get_fs_size(self, dev): """Get and return PV size, in bytes.""" @@ -486,6 +599,7 @@ class UFS(Filesystem): FILESYSTEMS = { + 'bcachefs': Bcachefs, 'ext2': Ext2, 'ext3': Ext3, 'ext4': Ext4, @@ -518,10 +632,14 @@ def main(): opts=dict(type='str'), force=dict(type='bool', default=False), resizefs=dict(type='bool', default=False), + uuid=dict(type='str'), ), required_if=[ ('state', 'present', ['fstype']) ], + mutually_exclusive=[ + ('resizefs', 'uuid'), + ], supports_check_mode=True, ) @@ -531,6 +649,7 @@ def main(): opts = module.params['opts'] force = module.params['force'] resizefs = module.params['resizefs'] + uuid = module.params['uuid'] mkfs_opts = [] if opts is not None: @@ -569,21 +688,30 @@ def main(): filesystem = klass(module) + if uuid and not (filesystem.CHANGE_UUID or filesystem.MKFS_SET_UUID_OPTIONS): + module.fail_json(changed=False, msg="module does not support UUID option for this filesystem (%s) yet." % fstype) + same_fs = fs and FILESYSTEMS.get(fs) == FILESYSTEMS[fstype] - if same_fs and not resizefs and not force: + if same_fs and not resizefs and not uuid and not force: module.exit_json(changed=False) - elif same_fs and resizefs: - if not filesystem.GROW: - module.fail_json(changed=False, msg="module does not support resizing %s filesystem yet." 
% fstype) + elif same_fs: + if resizefs: + if not filesystem.GROW: + module.fail_json(changed=False, msg="module does not support resizing %s filesystem yet." % fstype) - out = filesystem.grow(dev) + out = filesystem.grow(dev) - module.exit_json(changed=True, msg=out) + module.exit_json(changed=True, msg=out) + elif uuid: + + out = filesystem.change_uuid(new_uuid=uuid, dev=dev) + + module.exit_json(changed=True, msg=out) elif fs and not force: - module.fail_json(msg="'%s' is already used as %s, use force=yes to overwrite" % (dev, fs), rc=rc, err=err) + module.fail_json(msg="'%s' is already used as %s, use force=true to overwrite" % (dev, fs), rc=rc, err=err) # create fs - filesystem.create(mkfs_opts, dev) + filesystem.create(opts=mkfs_opts, dev=dev, uuid=uuid) changed = True elif fs: diff --git a/plugins/modules/packaging/os/flatpak.py b/plugins/modules/flatpak.py similarity index 54% rename from plugins/modules/packaging/os/flatpak.py rename to plugins/modules/flatpak.py index da913b5ac0..3fab8f820b 100644 --- a/plugins/modules/packaging/os/flatpak.py +++ b/plugins/modules/flatpak.py @@ -1,88 +1,90 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- -# Copyright: (c) 2017 John Kwiatkoski (@JayKayy) -# Copyright: (c) 2018 Alexander Bethke (@oolongbrothers) -# Copyright: (c) 2017 Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# Copyright (c) 2017 John Kwiatkoski (@JayKayy) +# Copyright (c) 2018 Alexander Bethke (@oolongbrothers) +# Copyright (c) 2017 Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = r''' ---- +DOCUMENTATION = r""" module: flatpak short_description: Manage flatpaks description: -- Allows users to add or remove flatpaks. 
-- See the M(community.general.flatpak_remote) module for managing flatpak remotes. + - Allows users to add or remove flatpaks. + - See the M(community.general.flatpak_remote) module for managing flatpak remotes. author: -- John Kwiatkoski (@JayKayy) -- Alexander Bethke (@oolongbrothers) + - John Kwiatkoski (@JayKayy) + - Alexander Bethke (@oolongbrothers) requirements: -- flatpak + - flatpak +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: partial + details: + - If O(state=latest), the module always returns RV(ignore:changed=true). + diff_mode: + support: none options: executable: description: - - The path to the C(flatpak) executable to use. - - By default, this module looks for the C(flatpak) executable on the path. + - The path to the C(flatpak) executable to use. + - By default, this module looks for the C(flatpak) executable on the path. type: path default: flatpak method: description: - - The installation method to use. - - Defines if the I(flatpak) is supposed to be installed globally for the whole C(system) - or only for the current C(user). + - The installation method to use. + - Defines if the C(flatpak) is supposed to be installed globally for the whole V(system) or only for the current V(user). type: str - choices: [ system, user ] + choices: [system, user] default: system name: description: - - The name of the flatpak to manage. To operate on several packages this - can accept a list of packages. - - When used with I(state=present), I(name) can be specified as a URL to a - C(flatpakref) file or the unique reverse DNS name that identifies a flatpak. - - Both C(https://) and C(http://) URLs are supported. - - When supplying a reverse DNS name, you can use the I(remote) option to specify on what remote - to look for the flatpak. An example for a reverse DNS name is C(org.gnome.gedit). - - When used with I(state=absent), it is recommended to specify the name in the reverse DNS - format. 
- - When supplying a URL with I(state=absent), the module will try to match the - installed flatpak based on the name of the flatpakref to remove it. However, there is no - guarantee that the names of the flatpakref file and the reverse DNS name of the installed - flatpak do match. + - The name of the flatpak to manage. To operate on several packages this can accept a list of packages. + - When used with O(state=present), O(name) can be specified as a URL to a C(flatpakref) file or the unique reverse DNS + name that identifies a flatpak. + - Both C(https://) and C(http://) URLs are supported. + - When supplying a reverse DNS name, you can use the O(remote) option to specify on what remote to look for the flatpak. + An example for a reverse DNS name is C(org.gnome.gedit). + - When used with O(state=absent) or O(state=latest), it is recommended to specify the name in the reverse DNS format. + - When supplying a URL with O(state=absent) or O(state=latest), the module tries to match the installed flatpak based + on the name of the flatpakref to remove or update it. However, there is no guarantee that the names of the flatpakref + file and the reverse DNS name of the installed flatpak do match. type: list elements: str required: true no_dependencies: description: - - If installing runtime dependencies should be omitted or not - - This parameter is primarily implemented for integration testing this module. - There might however be some use cases where you would want to have this, like when you are - packaging your own flatpaks. + - If installing runtime dependencies should be omitted or not. + - This parameter is primarily implemented for integration testing this module. There might however be some use cases + where you would want to have this, like when you are packaging your own flatpaks. type: bool default: false version_added: 3.2.0 remote: description: - - The flatpak remote (repository) to install the flatpak from. 
- - By default, C(flathub) is assumed, but you do need to add the flathub flatpak_remote before - you can use this. - - See the M(community.general.flatpak_remote) module for managing flatpak remotes. + - The flatpak remote (repository) to install the flatpak from. + - By default, V(flathub) is assumed, but you do need to add the flathub flatpak_remote before you can use this. + - See the M(community.general.flatpak_remote) module for managing flatpak remotes. type: str default: flathub state: description: - - Indicates the desired package state. - choices: [ absent, present ] + - Indicates the desired package state. + - The value V(latest) is supported since community.general 8.6.0. + choices: [absent, present, latest] type: str default: present -''' +""" -EXAMPLES = r''' +EXAMPLES = r""" - name: Install the spotify flatpak community.general.flatpak: - name: https://s3.amazonaws.com/alexlarsson/spotify-repo/spotify.flatpakref + name: https://s3.amazonaws.com/alexlarsson/spotify-repo/spotify.flatpakref state: present - name: Install the gedit flatpak package without dependencies (not recommended) @@ -103,6 +105,12 @@ EXAMPLES = r''' state: present remote: gnome +- name: Install GIMP using custom flatpak binary path + community.general.flatpak: + name: org.gimp.GIMP + state: present + executable: /usr/local/bin/flatpak-dev + - name: Install multiple packages community.general.flatpak: name: @@ -110,6 +118,37 @@ EXAMPLES = r''' - org.inkscape.Inkscape - org.mozilla.firefox +- name: Update the spotify flatpak + community.general.flatpak: + name: https://s3.amazonaws.com/alexlarsson/spotify-repo/spotify.flatpakref + state: latest + +- name: Update the gedit flatpak package without dependencies (not recommended) + community.general.flatpak: + name: https://git.gnome.org/browse/gnome-apps-nightly/plain/gedit.flatpakref + state: latest + no_dependencies: true + +- name: Update the gedit package from flathub for current user + community.general.flatpak: + name: 
org.gnome.gedit + state: latest + method: user + +- name: Update the Gnome Calendar flatpak from the gnome remote system-wide + community.general.flatpak: + name: org.gnome.Calendar + state: latest + remote: gnome + +- name: Update multiple packages + community.general.flatpak: + name: + - org.gimp.GIMP + - org.inkscape.Inkscape + - org.mozilla.firefox + state: latest + - name: Remove the gedit flatpak community.general.flatpak: name: org.gnome.gedit @@ -122,37 +161,18 @@ EXAMPLES = r''' - org.inkscape.Inkscape - org.mozilla.firefox state: absent -''' +""" -RETURN = r''' +RETURN = r""" command: - description: The exact flatpak command that was executed + description: The exact flatpak command that was executed. returned: When a flatpak command has been executed type: str sample: "/usr/bin/flatpak install --user --nontinteractive flathub org.gnome.Calculator" -msg: - description: Module error message - returned: failure - type: str - sample: "Executable '/usr/local/bin/flatpak' was not found on the system." 
-rc: - description: Return code from flatpak binary - returned: When a flatpak command has been executed - type: int - sample: 0 -stderr: - description: Error output from flatpak binary - returned: When a flatpak command has been executed - type: str - sample: "error: Error searching remote flathub: Can't find ref org.gnome.KDE" -stdout: - description: Output from flatpak binary - returned: When a flatpak command has been executed - type: str - sample: "org.gnome.Calendar/x86_64/stable\tcurrent\norg.gnome.gitg/x86_64/stable\tcurrent\n" -''' +""" + +from urllib.parse import urlparse -from ansible.module_utils.six.moves.urllib.parse import urlparse from ansible.module_utils.basic import AnsibleModule from ansible_collections.community.general.plugins.module_utils.version import LooseVersion @@ -162,7 +182,7 @@ OUTDATED_FLATPAK_VERSION_ERROR_MESSAGE = "Unknown option --columns=application" def install_flat(module, binary, remote, names, method, no_dependencies): """Add new flatpaks.""" - global result + global result # pylint: disable=global-variable-not-assigned uri_names = [] id_names = [] for name in names: @@ -187,9 +207,31 @@ def install_flat(module, binary, remote, names, method, no_dependencies): result['changed'] = True +def update_flat(module, binary, names, method, no_dependencies): + """Update existing flatpaks.""" + global result # pylint: disable=global-variable-not-assigned + installed_flat_names = [ + _match_installed_flat_name(module, binary, name, method) + for name in names + ] + command = [binary, "update", "--{0}".format(method)] + flatpak_version = _flatpak_version(module, binary) + if LooseVersion(flatpak_version) < LooseVersion('1.1.3'): + command += ["-y"] + else: + command += ["--noninteractive"] + if no_dependencies: + command += ["--no-deps"] + command += installed_flat_names + stdout = _flatpak_command(module, module.check_mode, command) + result["changed"] = ( + True if module.check_mode else stdout.find("Nothing to do.") == -1 + ) + + def 
uninstall_flat(module, binary, names, method): """Remove existing flatpaks.""" - global result + global result # pylint: disable=global-variable-not-assigned installed_flat_names = [ _match_installed_flat_name(module, binary, name, method) for name in names @@ -207,7 +249,7 @@ def uninstall_flat(module, binary, names, method): def flatpak_exists(module, binary, names, method): """Check if the flatpaks are installed.""" - command = [binary, "list", "--{0}".format(method), "--app"] + command = [binary, "list", "--{0}".format(method)] output = _flatpak_command(module, False, command) installed = [] not_installed = [] @@ -224,7 +266,7 @@ def _match_installed_flat_name(module, binary, name, method): # This is a difficult function, since if the user supplies a flatpakref url, # we have to rely on a naming convention: # The flatpakref file name needs to match the flatpak name - global result + global result # pylint: disable=global-variable-not-assigned parsed_name = _parse_flatpak_name(name) # Try running flatpak list with columns feature command = [binary, "list", "--{0}".format(method), "--app", "--columns=application"] @@ -248,7 +290,7 @@ def _match_installed_flat_name(module, binary, name, method): def _match_flat_using_outdated_flatpak_format(module, binary, parsed_name, method): - global result + global result # pylint: disable=global-variable-not-assigned command = [binary, "list", "--{0}".format(method), "--app", "--columns=application"] output = _flatpak_command(module, False, command) for row in output.split('\n'): @@ -257,7 +299,7 @@ def _match_flat_using_outdated_flatpak_format(module, binary, parsed_name, metho def _match_flat_using_flatpak_column_feature(module, binary, parsed_name, method): - global result + global result # pylint: disable=global-variable-not-assigned command = [binary, "list", "--{0}".format(method), "--app"] output = _flatpak_command(module, False, command) for row in output.split('\n'): @@ -265,18 +307,44 @@ def 
_match_flat_using_flatpak_column_feature(module, binary, parsed_name, method return row.split()[0] +def _is_flatpak_id(part): + # For guidelines on application IDs, refer to the following resources: + # Flatpak: + # https://docs.flatpak.org/en/latest/conventions.html#application-ids + # Flathub: + # https://docs.flathub.org/docs/for-app-authors/requirements#application-id + if '.' not in part: + return False + sections = part.split('.') + if len(sections) < 2: + return False + domain = sections[0] + if not domain.islower(): + return False + for section in sections[1:]: + if not section.isalnum(): + return False + return True + + def _parse_flatpak_name(name): if name.startswith('http://') or name.startswith('https://'): file_name = urlparse(name).path.split('/')[-1] file_name_without_extension = file_name.split('.')[0:-1] common_name = ".".join(file_name_without_extension) else: - common_name = name + parts = name.split('/') + for part in parts: + if _is_flatpak_id(part): + common_name = part + break + else: + common_name = name return common_name def _flatpak_version(module, binary): - global result + global result # pylint: disable=global-variable-not-assigned command = [binary, "--version"] output = _flatpak_command(module, False, command) version_number = output.split()[1] @@ -284,7 +352,7 @@ def _flatpak_version(module, binary): def _flatpak_command(module, noop, command, ignore_failure=False): - global result + global result # pylint: disable=global-variable-not-assigned result['command'] = ' '.join(command) if noop: result['rc'] = 0 @@ -305,7 +373,7 @@ def main(): method=dict(type='str', default='system', choices=['user', 'system']), state=dict(type='str', default='present', - choices=['absent', 'present']), + choices=['absent', 'present', 'latest']), no_dependencies=dict(type='bool', default=False), executable=dict(type='path', default='flatpak') ), @@ -329,11 +397,16 @@ def main(): if not binary: module.fail_json(msg="Executable '%s' was not found on the 
system." % executable, **result) + module.run_command_environ_update = dict(LANGUAGE='C', LC_ALL='C') + installed, not_installed = flatpak_exists(module, binary, name, method) - if state == 'present' and not_installed: - install_flat(module, binary, remote, not_installed, method, no_dependencies) - elif state == 'absent' and installed: + if state == 'absent' and installed: uninstall_flat(module, binary, installed, method) + else: + if state == 'latest' and installed: + update_flat(module, binary, installed, method, no_dependencies) + if state in ('present', 'latest') and not_installed: + install_flat(module, binary, remote, not_installed, method, no_dependencies) module.exit_json(**result) diff --git a/plugins/modules/flatpak_remote.py b/plugins/modules/flatpak_remote.py new file mode 100644 index 0000000000..891942143d --- /dev/null +++ b/plugins/modules/flatpak_remote.py @@ -0,0 +1,247 @@ +#!/usr/bin/python + +# Copyright (c) 2017 John Kwiatkoski (@JayKayy) +# Copyright (c) 2018 Alexander Bethke (@oolongbrothers) +# Copyright (c) 2017 Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + +DOCUMENTATION = r""" +module: flatpak_remote +short_description: Manage flatpak repository remotes +description: + - Allows users to add or remove flatpak remotes. + - The flatpak remotes concept is comparable to what is called repositories in other packaging formats. + - Currently, remote addition is only supported using C(flatpakrepo) file URLs. + - Existing remotes are not updated. + - See the M(community.general.flatpak) module for managing flatpaks. 
+author: + - John Kwiatkoski (@JayKayy) + - Alexander Bethke (@oolongbrothers) +requirements: + - flatpak +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: none +options: + executable: + description: + - The path to the C(flatpak) executable to use. + - By default, this module looks for the C(flatpak) executable on the path. + type: str + default: flatpak + flatpakrepo_url: + description: + - The URL to the C(flatpakrepo) file representing the repository remote to add. + - When used with O(state=present), the flatpak remote specified under the O(flatpakrepo_url) is added using the specified + installation O(method). + - When used with O(state=absent), this is not required. + - Required when O(state=present). + type: str + method: + description: + - The installation method to use. + - Defines if the C(flatpak) is supposed to be installed globally for the whole V(system) or only for the current V(user). + type: str + choices: [system, user] + default: system + name: + description: + - The desired name for the flatpak remote to be registered under on the managed host. + - When used with O(state=present), the remote is added to the managed host under the specified O(name). + - When used with O(state=absent) the remote with that name is removed. + type: str + required: true + state: + description: + - Indicates the desired package state. + type: str + choices: [absent, present] + default: present + enabled: + description: + - Indicates whether this remote is enabled. 
+ type: bool + default: true + version_added: 6.4.0 +""" + +EXAMPLES = r""" +- name: Add the Gnome flatpak remote to the system installation + community.general.flatpak_remote: + name: gnome + state: present + flatpakrepo_url: https://sdk.gnome.org/gnome-apps.flatpakrepo + +- name: Add the flathub flatpak repository remote to the user installation + community.general.flatpak_remote: + name: flathub + state: present + flatpakrepo_url: https://dl.flathub.org/repo/flathub.flatpakrepo + method: user + +- name: Remove the Gnome flatpak remote from the user installation + community.general.flatpak_remote: + name: gnome + state: absent + method: user + +- name: Remove the flathub remote from the system installation + community.general.flatpak_remote: + name: flathub + state: absent + +- name: Disable the flathub remote in the system installation + community.general.flatpak_remote: + name: flathub + state: present + enabled: false +""" + +RETURN = r""" +command: + description: The exact flatpak command that was executed. 
+ returned: When a flatpak command has been executed + type: str + sample: "/usr/bin/flatpak remote-add --system flatpak-test https://dl.flathub.org/repo/flathub.flatpakrepo" +""" + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.common.text.converters import to_bytes, to_native + + +def add_remote(module, binary, name, flatpakrepo_url, method): + """Add a new remote.""" + global result # pylint: disable=global-variable-not-assigned + command = [binary, "remote-add", "--{0}".format(method), name, flatpakrepo_url] + _flatpak_command(module, module.check_mode, command) + result['changed'] = True + + +def remove_remote(module, binary, name, method): + """Remove an existing remote.""" + global result # pylint: disable=global-variable-not-assigned + command = [binary, "remote-delete", "--{0}".format(method), "--force", name] + _flatpak_command(module, module.check_mode, command) + result['changed'] = True + + +def remote_exists(module, binary, name, method): + """Check if the remote exists.""" + command = [binary, "remote-list", "--show-disabled", "--{0}".format(method)] + # The query operation for the remote needs to be run even in check mode + output = _flatpak_command(module, False, command) + for line in output.splitlines(): + listed_remote = line.split() + if len(listed_remote) == 0: + continue + if listed_remote[0] == to_native(name): + return True + return False + + +def enable_remote(module, binary, name, method): + """Enable a remote.""" + global result # pylint: disable=global-variable-not-assigned + command = [binary, "remote-modify", "--enable", "--{0}".format(method), name] + _flatpak_command(module, module.check_mode, command) + result['changed'] = True + + +def disable_remote(module, binary, name, method): + """Disable a remote.""" + global result # pylint: disable=global-variable-not-assigned + command = [binary, "remote-modify", "--disable", "--{0}".format(method), name] + _flatpak_command(module, module.check_mode, 
command) + result['changed'] = True + + +def remote_enabled(module, binary, name, method): + """Check if the remote is enabled.""" + command = [binary, "remote-list", "--show-disabled", "--{0}".format(method)] + # The query operation for the remote needs to be run even in check mode + output = _flatpak_command(module, False, command) + for line in output.splitlines(): + listed_remote = line.split() + if len(listed_remote) == 0: + continue + if listed_remote[0] == to_native(name): + return len(listed_remote) == 1 or "disabled" not in listed_remote[1].split(",") + return False + + +def _flatpak_command(module, noop, command): + global result # pylint: disable=global-variable-not-assigned + result['command'] = ' '.join(command) + if noop: + result['rc'] = 0 + return "" + + result['rc'], result['stdout'], result['stderr'] = module.run_command( + command, check_rc=True + ) + return result['stdout'] + + +def main(): + module = AnsibleModule( + argument_spec=dict( + name=dict(type='str', required=True), + flatpakrepo_url=dict(type='str'), + method=dict(type='str', default='system', + choices=['user', 'system']), + state=dict(type='str', default="present", + choices=['absent', 'present']), + enabled=dict(type='bool', default=True), + executable=dict(type='str', default="flatpak") + ), + # This module supports check mode + supports_check_mode=True, + ) + + name = module.params['name'] + flatpakrepo_url = module.params['flatpakrepo_url'] + method = module.params['method'] + state = module.params['state'] + enabled = module.params['enabled'] + executable = module.params['executable'] + binary = module.get_bin_path(executable, None) + + if flatpakrepo_url is None: + flatpakrepo_url = '' + + global result + result = dict( + changed=False + ) + + # If the binary was not found, fail the operation + if not binary: + module.fail_json(msg="Executable '%s' was not found on the system." 
% executable, **result) + + remote_already_exists = remote_exists(module, binary, to_bytes(name), method) + + if state == 'present' and not remote_already_exists: + add_remote(module, binary, name, flatpakrepo_url, method) + elif state == 'absent' and remote_already_exists: + remove_remote(module, binary, name, method) + + if state == 'present': + remote_already_enabled = remote_enabled(module, binary, to_bytes(name), method) + + if enabled and not remote_already_enabled: + enable_remote(module, binary, name, method) + if not enabled and remote_already_enabled: + disable_remote(module, binary, name, method) + + module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/net_tools/gandi_livedns.py b/plugins/modules/gandi_livedns.py similarity index 51% rename from plugins/modules/net_tools/gandi_livedns.py rename to plugins/modules/gandi_livedns.py index 6124288511..0d6f93529d 100644 --- a/plugins/modules/net_tools/gandi_livedns.py +++ b/plugins/modules/gandi_livedns.py @@ -1,42 +1,54 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- -# Copyright: (c) 2019 Gregory Thiemonge -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# Copyright (c) 2019 Gregory Thiemonge +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = r''' ---- +DOCUMENTATION = r""" module: gandi_livedns author: -- Gregory Thiemonge (@gthiemonge) + - Gregory Thiemonge (@gthiemonge) version_added: "2.3.0" short_description: Manage Gandi LiveDNS records description: -- "Manages DNS records by the Gandi LiveDNS API, see the docs: U(https://doc.livedns.gandi.net/)." + - 'Manages DNS records by the Gandi LiveDNS API, see the docs: U(https://doc.livedns.gandi.net/).' 
+extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: none options: + personal_access_token: + description: + - Scoped API token. + - One of O(personal_access_token) and O(api_key) must be specified. + type: str + version_added: 9.0.0 api_key: description: - - Account API token. + - Account API token. + - Note that these type of keys are deprecated and might stop working at some point. Use personal access tokens instead. + - One of O(personal_access_token) and O(api_key) must be specified. type: str - required: true record: description: - - Record to add. + - Record to add. type: str required: true state: description: - - Whether the record(s) should exist or not. + - Whether the record(s) should exist or not. type: str - choices: [ absent, present ] + choices: [absent, present] default: present ttl: description: - - The TTL to give the new record. - - Required when I(state=present). + - The TTL to give the new record. + - Required when O(state=present). type: int type: description: @@ -45,29 +57,27 @@ options: required: true values: description: - - The record values. - - Required when I(state=present). + - The record values. + - Required when O(state=present). type: list elements: str domain: description: - - The name of the Domain to work with (for example, "example.com"). + - The name of the Domain to work with (for example, V(example.com)). required: true type: str -notes: -- Supports C(check_mode). 
-''' +""" -EXAMPLES = r''' +EXAMPLES = r""" - name: Create a test A record to point to 127.0.0.1 in the my.com domain community.general.gandi_livedns: domain: my.com record: test type: A values: - - 127.0.0.1 + - 127.0.0.1 ttl: 7200 - api_key: dummyapitoken + personal_access_token: dummytoken register: record - name: Create a mail CNAME record to www.my.com domain @@ -76,9 +86,9 @@ EXAMPLES = r''' type: CNAME record: mail values: - - www + - www ttl: 7200 - api_key: dummyapitoken + personal_access_token: dummytoken state: present - name: Change its TTL @@ -87,9 +97,9 @@ EXAMPLES = r''' type: CNAME record: mail values: - - www + - www ttl: 10800 - api_key: dummyapitoken + personal_access_token: dummytoken state: present - name: Delete the record @@ -97,45 +107,55 @@ EXAMPLES = r''' domain: my.com type: CNAME record: mail - api_key: dummyapitoken + personal_access_token: dummytoken state: absent -''' -RETURN = r''' +- name: Use a (deprecated) API Key + community.general.gandi_livedns: + domain: my.com + record: test + type: A + values: + - 127.0.0.1 + ttl: 7200 + api_key: dummyapikey +""" + +RETURN = r""" record: - description: A dictionary containing the record data. - returned: success, except on record deletion - type: dict - contains: - values: - description: The record content (details depend on record type). - returned: success - type: list - elements: str - sample: - - 192.0.2.91 - - 192.0.2.92 - record: - description: The record name. - returned: success - type: str - sample: www - ttl: - description: The time-to-live for the record. - returned: success - type: int - sample: 300 - type: - description: The record type. - returned: success - type: str - sample: A - domain: - description: The domain associated with the record. - returned: success - type: str - sample: my.com -''' + description: A dictionary containing the record data. 
+ returned: success, except on record deletion + type: dict + contains: + values: + description: The record content (details depend on record type). + returned: success + type: list + elements: str + sample: + - 192.0.2.91 + - 192.0.2.92 + record: + description: The record name. + returned: success + type: str + sample: www + ttl: + description: The time-to-live for the record. + returned: success + type: int + sample: 300 + type: + description: The record type. + returned: success + type: str + sample: A + domain: + description: The domain associated with the record. + returned: success + type: str + sample: my.com +""" from ansible.module_utils.basic import AnsibleModule @@ -145,7 +165,8 @@ from ansible_collections.community.general.plugins.module_utils.gandi_livedns_ap def main(): module = AnsibleModule( argument_spec=dict( - api_key=dict(type='str', required=True, no_log=True), + api_key=dict(type='str', no_log=True), + personal_access_token=dict(type='str', no_log=True), record=dict(type='str', required=True), state=dict(type='str', default='present', choices=['absent', 'present']), ttl=dict(type='int'), @@ -157,6 +178,12 @@ def main(): required_if=[ ('state', 'present', ['values', 'ttl']), ], + mutually_exclusive=[ + ('api_key', 'personal_access_token'), + ], + required_one_of=[ + ('api_key', 'personal_access_token'), + ], ) gandi_api = GandiLiveDNSAPI(module) diff --git a/plugins/modules/gconftool2.py b/plugins/modules/gconftool2.py new file mode 100644 index 0000000000..4092a8b7e6 --- /dev/null +++ b/plugins/modules/gconftool2.py @@ -0,0 +1,177 @@ +#!/usr/bin/python + +# Copyright (c) 2016, Kenneth D. Evensen +# Copyright (c) 2017, Abhijeet Kasurde +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + +DOCUMENTATION = r""" +module: gconftool2 +author: + - Kenneth D. 
Evensen (@kevensen) +short_description: Edit GNOME Configurations +description: + - This module allows for the manipulation of GNOME 2 Configuration using C(gconftool-2). Please see the gconftool-2(1) man + pages for more details. +seealso: + - name: C(gconftool-2) command manual page + description: Manual page for the command. + link: https://help.gnome.org/admin//system-admin-guide/2.32/gconf-6.html.en + +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: none +options: + key: + type: str + description: + - A GConf preference key is an element in the GConf repository that corresponds to an application preference. + required: true + value: + type: str + description: + - Preference keys typically have simple values such as strings, integers, or lists of strings and integers. This is + ignored unless O(state=present). + value_type: + type: str + description: + - The type of value being set. This is ignored unless O(state=present). + choices: [bool, float, int, string] + state: + type: str + description: + - The action to take upon the key/value. + required: true + choices: [absent, present] + config_source: + type: str + description: + - Specify a configuration source to use rather than the default path. + direct: + description: + - Access the config database directly, bypassing server. If O(direct) is specified then the O(config_source) must be + specified as well. + type: bool + default: false +""" + +EXAMPLES = r""" +- name: Change the widget font to "Serif 12" + community.general.gconftool2: + key: "/desktop/gnome/interface/font_name" + value_type: "string" + value: "Serif 12" +""" + +RETURN = r""" +key: + description: The key specified in the module parameters. + returned: success + type: str + sample: /desktop/gnome/interface/font_name +value_type: + description: The type of the value that was changed. 
+ returned: success + type: str + sample: string +value: + description: + - The value of the preference key after executing the module or V(null) if key is removed. + - From community.general 7.0.0 onwards it returns V(null) for a non-existent O(key), and returned V("") before that. + returned: success + type: str + sample: "Serif 12" +previous_value: + description: + - The value of the preference key before executing the module. + - From community.general 7.0.0 onwards it returns V(null) for a non-existent O(key), and returned V("") before that. + returned: success + type: str + sample: "Serif 12" +version: + description: Version of gconftool-2. + type: str + returned: always + sample: "3.2.6" + version_added: 10.0.0 +""" + +from ansible_collections.community.general.plugins.module_utils.module_helper import StateModuleHelper +from ansible_collections.community.general.plugins.module_utils.gconftool2 import gconftool2_runner + + +class GConftool(StateModuleHelper): + diff_params = ('value', ) + output_params = ('key', 'value_type') + facts_params = ('key', 'value_type') + facts_name = 'gconftool2' + module = dict( + argument_spec=dict( + key=dict(type='str', required=True, no_log=False), + value_type=dict(type='str', choices=['bool', 'float', 'int', 'string']), + value=dict(type='str'), + state=dict(type='str', required=True, choices=['absent', 'present']), + direct=dict(type='bool', default=False), + config_source=dict(type='str'), + ), + required_if=[ + ('state', 'present', ['value', 'value_type']), + ('direct', True, ['config_source']), + ], + supports_check_mode=True, + ) + + def __init_module__(self): + self.runner = gconftool2_runner(self.module, check_rc=True) + if not self.vars.direct and self.vars.config_source is not None: + self.do_raise('If the "config_source" is specified then "direct" must be "true"') + + with self.runner("version") as ctx: + rc, out, err = ctx.run() + self.vars.version = out.strip() + + self.vars.set('previous_value', self._get(), 
fact=True) + self.vars.set('value_type', self.vars.value_type) + self.vars.set('_value', self.vars.previous_value, output=False, change=True) + self.vars.set_meta('value', initial_value=self.vars.previous_value) + self.vars.set('playbook_value', self.vars.value, fact=True) + + def _make_process(self, fail_on_err): + def process(rc, out, err): + if err and fail_on_err: + self.do_raise('gconftool-2 failed with error:\n%s' % err.strip()) + out = out.rstrip() + self.vars.value = None if out == "" else out + return self.vars.value + return process + + def _get(self): + return self.runner("state key", output_process=self._make_process(False)).run(state="get") + + def state_absent(self): + with self.runner("state key", output_process=self._make_process(False)) as ctx: + ctx.run() + self.vars.set('run_info', ctx.run_info, verbosity=4) + self.vars.set('new_value', None, fact=True) + self.vars._value = None + + def state_present(self): + with self.runner("direct config_source value_type state key value", output_process=self._make_process(True)) as ctx: + ctx.run() + self.vars.set('run_info', ctx.run_info, verbosity=4) + self.vars.set('new_value', self._get(), fact=True) + self.vars._value = self.vars.new_value + + +def main(): + GConftool.execute() + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/gconftool2_info.py b/plugins/modules/gconftool2_info.py new file mode 100644 index 0000000000..f1047bccee --- /dev/null +++ b/plugins/modules/gconftool2_info.py @@ -0,0 +1,86 @@ +#!/usr/bin/python +# Copyright (c) 2022, Alexei Znamensky +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + +DOCUMENTATION = r""" +module: gconftool2_info +author: + - "Alexei Znamensky (@russoz)" +short_description: Retrieve GConf configurations +version_added: 5.1.0 +description: + - This module allows retrieving application preferences 
from the GConf database, with the help of C(gconftool-2). +extends_documentation_fragment: + - community.general.attributes + - community.general.attributes.info_module +options: + key: + description: + - The key name for an element in the GConf database. + type: str + required: true +seealso: + - name: C(gconftool-2) command manual page + description: Manual page for the command. + link: https://help.gnome.org/admin//system-admin-guide/2.32/gconf-6.html.en + - name: gconf repository (archived) + description: Git repository for the project. It is an archived project, so the repository is read-only. + link: https://gitlab.gnome.org/Archive/gconf +""" + +EXAMPLES = r""" +- name: Get value for a certain key in the database. + community.general.gconftool2_info: + key: /desktop/gnome/background/picture_filename + register: result +""" + +RETURN = r""" +value: + description: + - The value of the property. + returned: success + type: str + sample: Monospace 10 +version: + description: Version of gconftool-2. 
+ type: str + returned: always + sample: "3.2.6" + version_added: 10.0.0 +""" + +from ansible_collections.community.general.plugins.module_utils.module_helper import ModuleHelper +from ansible_collections.community.general.plugins.module_utils.gconftool2 import gconftool2_runner + + +class GConftoolInfo(ModuleHelper): + output_params = ['key'] + module = dict( + argument_spec=dict( + key=dict(type='str', required=True, no_log=False), + ), + supports_check_mode=True, + ) + + def __init_module__(self): + self.runner = gconftool2_runner(self.module, check_rc=True) + with self.runner("version") as ctx: + rc, out, err = ctx.run() + self.vars.version = out.strip() + + def __run__(self): + with self.runner.context(args_order=["state", "key"]) as ctx: + rc, out, err = ctx.run(state="get") + self.vars.value = None if err and not out else out.rstrip() + + +def main(): + GConftoolInfo.execute() + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/packaging/language/gem.py b/plugins/modules/gem.py similarity index 72% rename from plugins/modules/packaging/language/gem.py rename to plugins/modules/gem.py index ad48e0c7da..535e420e71 100644 --- a/plugins/modules/packaging/language/gem.py +++ b/plugins/modules/gem.py @@ -1,19 +1,24 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- -# (c) 2013, Johan Wiren -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# Copyright (c) 2013, Johan Wiren +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: gem short_description: Manage Ruby gems description: - Manage installation and uninstallation of Ruby gems. 
+extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: none options: name: type: str @@ -23,7 +28,7 @@ options: state: type: str description: - - The desired state of the gem. C(latest) ensures that the latest version is installed. + - The desired state of the gem. V(latest) ensures that the latest version is installed. required: false choices: [present, absent, latest] default: present @@ -37,49 +42,47 @@ options: - Whether to include dependencies or not. required: false type: bool - default: "yes" + default: true repository: type: str description: - - The repository from which the gem will be installed + - The repository from which the gem is installed. required: false aliases: [source] user_install: description: - - Install gem in user's local gems cache or for all users + - Install gem in user's local gems cache or for all users. required: false type: bool - default: "yes" + default: true executable: type: path description: - - Override the path to the gem executable + - Override the path to the gem executable. required: false install_dir: type: path description: - - Install the gems into a specific directory. - These gems will be independent from the global installed ones. - Specifying this requires user_install to be false. + - Install the gems into a specific directory. These gems are independent from the global installed ones. Specifying + this requires user_install to be false. required: false bindir: type: path description: - - Install executables into a specific directory. + - Install executables into a specific directory. version_added: 3.3.0 norc: type: bool + default: true description: - - Avoid loading any C(.gemrc) file. Ignored for RubyGems prior to 2.5.2. - - The current default value C(false) has been deprecated in community.general 5.0.0. - Explicitly specify the value to prevent the deprecation warning to be shown." 
- - From community.general 6.0.0 on, the default will be changed to C(true). + - Avoid loading any C(.gemrc) file. Ignored for RubyGems prior to 2.5.2. + - The default changed from V(false) to V(true) in community.general 6.0.0. version_added: 3.3.0 env_shebang: description: - Rewrite the shebang line on installed scripts to use /usr/bin/env. required: false - default: "no" + default: false type: bool version: type: str @@ -90,31 +93,31 @@ options: description: - Allow installation of pre-release versions of the gem. required: false - default: "no" + default: false type: bool include_doc: description: - Install with or without docs. required: false - default: "no" + default: false type: bool build_flags: type: str description: - - Allow adding build flags for gem compilation + - Allow adding build flags for gem compilation. required: false force: description: - - Force gem to install, bypassing dependency checks. + - Force gem to (un-)install, bypassing dependency checks. required: false - default: "no" + default: false type: bool author: - - "Ansible Core Team" - - "Johan Wiren (@johanwiren)" -''' + - "Ansible Core Team" + - "Johan Wiren (@johanwiren)" +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: Install version 1.0 of vagrant community.general.gem: name: vagrant @@ -131,7 +134,7 @@ EXAMPLES = ''' name: rake gem_source: /path/to/gems/rake-1.0.gem state: present -''' +""" import re @@ -234,9 +237,11 @@ def uninstall(module): cmd.extend(['--version', module.params['version']]) else: cmd.append('--all') - cmd.append('--executable') + cmd.append('--executable') + if module.params['force']: + cmd.append('--force') cmd.append(module.params['name']) - module.run_command(cmd, environ_update=environ, check_rc=True) + return module.run_command(cmd, environ_update=environ, check_rc=True) def install(module): @@ -288,22 +293,22 @@ def main(): module = AnsibleModule( argument_spec=dict( - executable=dict(required=False, type='path'), - gem_source=dict(required=False, 
type='path'), - include_dependencies=dict(required=False, default=True, type='bool'), + executable=dict(type='path'), + gem_source=dict(type='path'), + include_dependencies=dict(default=True, type='bool'), name=dict(required=True, type='str'), - repository=dict(required=False, aliases=['source'], type='str'), - state=dict(required=False, default='present', choices=['present', 'absent', 'latest'], type='str'), - user_install=dict(required=False, default=True, type='bool'), - install_dir=dict(required=False, type='path'), + repository=dict(aliases=['source'], type='str'), + state=dict(default='present', choices=['present', 'absent', 'latest'], type='str'), + user_install=dict(default=True, type='bool'), + install_dir=dict(type='path'), bindir=dict(type='path'), - norc=dict(type='bool'), - pre_release=dict(required=False, default=False, type='bool'), - include_doc=dict(required=False, default=False, type='bool'), - env_shebang=dict(required=False, default=False, type='bool'), - version=dict(required=False, type='str'), - build_flags=dict(required=False, type='str'), - force=dict(required=False, default=False, type='bool'), + norc=dict(type='bool', default=True), + pre_release=dict(default=False, type='bool'), + include_doc=dict(default=False, type='bool'), + env_shebang=dict(default=False, type='bool'), + version=dict(type='str'), + build_flags=dict(type='str'), + force=dict(default=False, type='bool'), ), supports_check_mode=True, mutually_exclusive=[['gem_source', 'repository'], ['gem_source', 'version']], @@ -315,12 +320,6 @@ def main(): module.fail_json(msg="Cannot maintain state=latest when installing from local source") if module.params['user_install'] and module.params['install_dir']: module.fail_json(msg="install_dir requires user_install=false") - if module.params['norc'] is None: - module.deprecate( - 'The default of the norc option has been deprecated. It will be changed to `true`' - ' in community.general 6.0.0. 
Specify an explicit value to get rid of this message', - version='6.0.0', collection_name='community.general') - module.params['norc'] = False if not module.params['gem_source']: module.params['gem_source'] = module.params['name'] @@ -333,9 +332,21 @@ def main(): changed = True elif module.params['state'] == 'absent': if exists(module): - uninstall(module) - changed = True - + command_output = uninstall(module) + if command_output is not None and exists(module): + rc, out, err = command_output + module.fail_json( + msg=( + "Failed to uninstall gem '%s': it is still present after 'gem uninstall'. " + "This usually happens with default or system gems provided by the OS, " + "which cannot be removed with the gem command." + ) % module.params['name'], + rc=rc, + stdout=out, + stderr=err + ) + else: + changed = True result = {} result['name'] = module.params['name'] result['state'] = module.params['state'] diff --git a/plugins/modules/gio_mime.py b/plugins/modules/gio_mime.py new file mode 100644 index 0000000000..a7fb3c4fcf --- /dev/null +++ b/plugins/modules/gio_mime.py @@ -0,0 +1,106 @@ +#!/usr/bin/python +# Copyright (c) 2022, Alexei Znamensky +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + +DOCUMENTATION = r""" +module: gio_mime +author: + - "Alexei Znamensky (@russoz)" +short_description: Set default handler for MIME type, for applications using Gnome GIO +version_added: 7.5.0 +description: + - This module allows configuring the default handler for a specific MIME type, to be used by applications built with the + Gnome GIO API. +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: full +options: + mime_type: + description: + - MIME type for which a default handler is set. 
+ type: str + required: true + handler: + description: + - Default handler set for the MIME type. + type: str + required: true +notes: + - This module is a thin wrapper around the C(gio mime) command (and subcommand). + - See man gio(1) for more details. +seealso: + - name: C(gio) command manual page + description: Manual page for the command. + link: https://man.archlinux.org/man/gio.1 + - name: GIO Documentation + description: Reference documentation for the GIO API.. + link: https://docs.gtk.org/gio/ +""" + +EXAMPLES = r""" +- name: Set chrome as the default handler for https + community.general.gio_mime: + mime_type: x-scheme-handler/https + handler: google-chrome.desktop + register: result +""" + +RETURN = r""" +handler: + description: + - The handler set as default. + returned: success + type: str + sample: google-chrome.desktop +version: + description: Version of gio. + type: str + returned: always + sample: "2.80.0" + version_added: 10.0.0 +""" + +from ansible_collections.community.general.plugins.module_utils.module_helper import ModuleHelper +from ansible_collections.community.general.plugins.module_utils.gio_mime import gio_mime_runner, gio_mime_get + + +class GioMime(ModuleHelper): + output_params = ['handler'] + module = dict( + argument_spec=dict( + mime_type=dict(type='str', required=True), + handler=dict(type='str', required=True), + ), + supports_check_mode=True, + ) + + def __init_module__(self): + self.runner = gio_mime_runner(self.module, check_rc=True) + with self.runner("version") as ctx: + rc, out, err = ctx.run() + self.vars.version = out.strip() + self.vars.set_meta("handler", initial_value=gio_mime_get(self.runner, self.vars.mime_type), diff=True, change=True) + + def __run__(self): + check_mode_return = (0, 'Module executed in check mode', '') + if self.vars.has_changed: + with self.runner.context(args_order="mime mime_type handler", check_mode_skip=True, check_mode_return=check_mode_return) as ctx: + rc, out, err = ctx.run() + 
self.vars.stdout = out + self.vars.stderr = err + self.vars.set("run_info", ctx.run_info, verbosity=4) + + +def main(): + GioMime.execute() + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/git_config.py b/plugins/modules/git_config.py new file mode 100644 index 0000000000..30af5b43fd --- /dev/null +++ b/plugins/modules/git_config.py @@ -0,0 +1,263 @@ +#!/usr/bin/python + +# Copyright (c) 2015, Marius Gedminas +# Copyright (c) 2016, Matthew Gamble +# +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + + +DOCUMENTATION = r""" +module: git_config +author: + - Matthew Gamble (@djmattyg007) + - Marius Gedminas (@mgedmin) +requirements: ['git'] +short_description: Update git configuration +description: + - The M(community.general.git_config) module changes git configuration by invoking C(git config). This is needed if you + do not want to use M(ansible.builtin.template) for the entire git config file (for example because you need to change + just C(user.email) in C(/etc/.git/config)). Solutions involving M(ansible.builtin.command) are cumbersome or do not work + correctly in check mode. +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: none +options: + name: + description: + - The name of the setting. + type: str + required: true + repo: + description: + - Path to a git repository for reading and writing values from a specific repo. + type: path + file: + description: + - Path to an adhoc git configuration file to be managed using the V(file) scope. + type: path + version_added: 2.0.0 + scope: + description: + - Specify which scope to read/set values from. + - This is required when setting config values. + - If this is set to V(local), you must also specify the O(repo) parameter. 
+ - If this is set to V(file), you must also specify the O(file) parameter. + - It defaults to system. + choices: ["file", "local", "global", "system"] + type: str + state: + description: + - 'Indicates the setting should be set/unset. This parameter has higher precedence than O(value) parameter: when O(state=absent) + and O(value) is defined, O(value) is discarded.' + choices: ['present', 'absent'] + default: 'present' + type: str + value: + description: + - When specifying the name of a single setting, supply a value to set that setting to the given value. + - From community.general 11.0.0 on, O(value) is required if O(state=present). To read values, use the M(community.general.git_config_info) + module instead. + type: str + add_mode: + description: + - Specify if a value should replace the existing value(s) or if the new value should be added alongside other values + with the same name. + - This option is only relevant when adding/replacing values. If O(state=absent) or values are just read out, this option + is not considered. 
+ choices: ["add", "replace-all"] + type: str + default: "replace-all" + version_added: 8.1.0 +""" + +EXAMPLES = r""" +- name: Add a setting to ~/.gitconfig + community.general.git_config: + name: alias.ci + scope: global + value: commit + +- name: Add a setting to ~/.gitconfig + community.general.git_config: + name: alias.st + scope: global + value: status + +- name: Remove a setting from ~/.gitconfig + community.general.git_config: + name: alias.ci + scope: global + state: absent + +- name: Add a setting to ~/.gitconfig + community.general.git_config: + name: core.editor + scope: global + value: vim + +- name: Add a setting system-wide + community.general.git_config: + name: alias.remotev + scope: system + value: remote -v + +- name: Add a setting to a system scope (default) + community.general.git_config: + name: alias.diffc + value: diff --cached + +- name: Add a setting to a system scope (default) + community.general.git_config: + name: color.ui + value: auto + +- name: Add several options for the same name + community.general.git_config: + name: push.pushoption + value: "{{ item }}" + add_mode: add + loop: + - merge_request.create + - merge_request.draft + +- name: Make etckeeper not complaining when it is invoked by cron + community.general.git_config: + name: user.email + repo: /etc + scope: local + value: 'root@{{ ansible_fqdn }}' +""" + +RETURN = r""" +""" + +from ansible.module_utils.basic import AnsibleModule + + +def main(): + module = AnsibleModule( + argument_spec=dict( + name=dict(type='str', required=True), + repo=dict(type='path'), + file=dict(type='path'), + add_mode=dict(type='str', default='replace-all', choices=['add', 'replace-all']), + scope=dict(type='str', choices=['file', 'local', 'global', 'system']), + state=dict(type='str', default='present', choices=['present', 'absent']), + value=dict(), + ), + required_if=[ + ('scope', 'local', ['repo']), + ('scope', 'file', ['file']), + ('state', 'present', ['value']), + ], + 
supports_check_mode=True, + ) + git_path = module.get_bin_path('git', True) + + params = module.params + # We check error message for a pattern, so we need to make sure the messages appear in the form we're expecting. + # Set the locale to C to ensure consistent messages. + module.run_command_environ_update = dict(LANG='C', LC_ALL='C', LC_MESSAGES='C', LC_CTYPE='C') + + name = params['name'] or '' + unset = params['state'] == 'absent' + new_value = params['value'] or '' + add_mode = params['add_mode'] + + if not unset and not new_value: + module.fail_json(msg="If state=present, a value must be specified. Use the community.general.git_config_info module to read a config value.") + + scope = determine_scope(params) + cwd = determine_cwd(scope, params) + + base_args = [git_path, "config", "--includes"] + + if scope == 'file': + base_args.append('-f') + base_args.append(params['file']) + elif scope: + base_args.append("--" + scope) + + list_args = list(base_args) + + list_args.append("--get-all") + list_args.append(name) + + (rc, out, err) = module.run_command(list_args, cwd=cwd, expand_user_and_vars=False) + + if rc >= 2: + # If the return code is 1, it just means the option hasn't been set yet, which is fine. + module.fail_json(rc=rc, msg=err, cmd=' '.join(list_args)) + + old_values = out.rstrip().splitlines() + + if unset and not out: + module.exit_json(changed=False, msg='no setting to unset') + elif new_value in old_values and (len(old_values) == 1 or add_mode == "add") and not unset: + module.exit_json(changed=False, msg="") + + # Until this point, the git config was just read and in case no change is needed, the module has already exited. 
+ + set_args = list(base_args) + if unset: + set_args.append("--unset-all") + set_args.append(name) + else: + set_args.append("--" + add_mode) + set_args.append(name) + set_args.append(new_value) + + if not module.check_mode: + (rc, out, err) = module.run_command(set_args, cwd=cwd, ignore_invalid_cwd=False, expand_user_and_vars=False) + if err: + module.fail_json(rc=rc, msg=err, cmd=set_args) + + if unset: + after_values = [] + elif add_mode == "add": + after_values = old_values + [new_value] + else: + after_values = [new_value] + + module.exit_json( + msg='setting changed', + diff=dict( + before_header=' '.join(set_args), + before=build_diff_value(old_values), + after_header=' '.join(set_args), + after=build_diff_value(after_values), + ), + changed=True + ) + + +def determine_scope(params): + if params['scope']: + return params['scope'] + return 'system' + + +def build_diff_value(value): + if not value: + return "\n" + if len(value) == 1: + return value[0] + "\n" + return value + + +def determine_cwd(scope, params): + if scope == 'local': + return params['repo'] + # Run from root directory to avoid accidentally picking up any local config settings + return "/" + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/git_config_info.py b/plugins/modules/git_config_info.py new file mode 100644 index 0000000000..b5a15fe94f --- /dev/null +++ b/plugins/modules/git_config_info.py @@ -0,0 +1,182 @@ +#!/usr/bin/python + +# Copyright (c) 2023, Guenther Grill +# +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + + +DOCUMENTATION = r""" +module: git_config_info +author: + - Guenther Grill (@guenhter) +version_added: 8.1.0 +requirements: ['git'] +short_description: Read git configuration +description: + - The M(community.general.git_config_info) module reads the git configuration by invoking C(git config). 
+extends_documentation_fragment: + - community.general.attributes + - community.general.attributes.info_module +options: + name: + description: + - The name of the setting to read. + - If not provided, all settings are returned as RV(config_values). + type: str + path: + description: + - Path to a git repository or file for reading values from a specific repo. + - If O(scope) is V(local), this must point to a repository to read from. + - If O(scope) is V(file), this must point to specific git config file to read from. + - Otherwise O(path) is ignored if set. + type: path + scope: + description: + - Specify which scope to read values from. + - If set to V(global), the global git config is used. O(path) is ignored. + - If set to V(system), the system git config is used. O(path) is ignored. + - If set to V(local), O(path) must be set to the repo to read from. + - If set to V(file), O(path) must be set to the config file to read from. + choices: ["global", "system", "local", "file"] + default: "system" + type: str +""" + +EXAMPLES = r""" +- name: Read a system wide config + community.general.git_config_info: + name: core.editor + register: result + +- name: Show value of core.editor + ansible.builtin.debug: + msg: "{{ result.config_value | default('(not set)', true) }}" + +- name: Read a global config from ~/.gitconfig + community.general.git_config_info: + name: alias.remotev + scope: global + +- name: Read a project specific config + community.general.git_config_info: + name: color.ui + scope: local + path: /etc + +- name: Read all global values + community.general.git_config_info: + scope: global + +- name: Read all system wide values + community.general.git_config_info: + +- name: Read all values of a specific file + community.general.git_config_info: + scope: file + path: /etc/gitconfig +""" + +RETURN = r""" +config_value: + description: >- + When O(name) is set, a string containing the value of the setting in name. If O(name) is not set, empty. 
If a config key + such as V(push.pushoption) has more then one entry, just the first one is returned here. + returned: success if O(name) is set + type: str + sample: "vim" + +config_values: + description: + - This is a dictionary mapping a git configuration setting to a list of its values. + - When O(name) is not set, all configuration settings are returned here. + - When O(name) is set, only the setting specified in O(name) is returned here. If that setting is not set, the key is + still present, and its value is an empty list. + returned: success + type: dict + sample: + core.editor: ["vim"] + color.ui: ["auto"] + push.pushoption: ["merge_request.create", "merge_request.draft"] + alias.remotev: ["remote -v"] +""" + +from ansible.module_utils.basic import AnsibleModule + + +def main(): + module = AnsibleModule( + argument_spec=dict( + name=dict(type="str"), + path=dict(type="path"), + scope=dict(type="str", default="system", choices=["global", "system", "local", "file"]), + ), + required_if=[ + ("scope", "local", ["path"]), + ("scope", "file", ["path"]), + ], + required_one_of=[], + supports_check_mode=True, + ) + + # We check error message for a pattern, so we need to make sure the messages appear in the form we're expecting. + # Set the locale to C to ensure consistent messages. + module.run_command_environ_update = dict(LANG='C', LC_ALL='C', LC_MESSAGES='C', LC_CTYPE='C') + + name = module.params["name"] + path = module.params["path"] + scope = module.params["scope"] + + run_cwd = path if scope == "local" else "/" + args = build_args(module, name, path, scope) + + (rc, out, err) = module.run_command(args, cwd=run_cwd, expand_user_and_vars=False) + + if rc == 128 and "unable to read config file" in err: + # This just means nothing has been set at the given scope + pass + elif rc >= 2: + # If the return code is 1, it just means the option hasn't been set yet, which is fine. 
+ module.fail_json(rc=rc, msg=err, cmd=" ".join(args)) + + output_lines = out.strip("\0").split("\0") if out else [] + + if name: + first_value = output_lines[0] if output_lines else "" + config_values = {name: output_lines} + module.exit_json(changed=False, msg="", config_value=first_value, config_values=config_values) + else: + config_values = text_to_dict(output_lines) + module.exit_json(changed=False, msg="", config_value="", config_values=config_values) + + +def build_args(module, name, path, scope): + git_path = module.get_bin_path("git", True) + args = [git_path, "config", "--includes", "--null", "--" + scope] + + if scope == "file": + args.append(path) + + if name: + args.extend(["--get-all", name]) + else: + args.append("--list") + + return args + + +def text_to_dict(text_lines): + config_values = {} + for value in text_lines: + k, v = value.split("\n", 1) + if k in config_values: + config_values[k].append(v) + else: + config_values[k] = [v] + return config_values + + +if __name__ == "__main__": + main() diff --git a/plugins/modules/source_control/github/github_deploy_key.py b/plugins/modules/github_deploy_key.py similarity index 77% rename from plugins/modules/source_control/github/github_deploy_key.py rename to plugins/modules/github_deploy_key.py index a90de48d42..799ee300c5 100644 --- a/plugins/modules/source_control/github/github_deploy_key.py +++ b/plugins/modules/github_deploy_key.py @@ -1,25 +1,31 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# Copyright (c) Ansible project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: github_deploy_key author: "Ali (@bincyber)" 
-short_description: Manages deploy keys for GitHub repositories. +short_description: Manages deploy keys for GitHub repositories description: - - "Adds or removes deploy keys for GitHub repositories. Supports authentication using username and password, - username and password and 2-factor authentication code (OTP), OAuth2 token, or personal access token. Admin - rights on the repository are required." + - Adds or removes deploy keys for GitHub repositories. Supports authentication using username and password, username and + password and 2-factor authentication code (OTP), OAuth2 token, or personal access token. Admin rights on the repository + are required. +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: none options: github_url: description: - - The base URL of the GitHub API + - The base URL of the GitHub API. required: false type: str version_added: '0.2.0' @@ -28,19 +34,19 @@ options: description: - The name of the individual account or organization that owns the GitHub repository. required: true - aliases: [ 'account', 'organization' ] + aliases: ['account', 'organization'] type: str repo: description: - The name of the GitHub repository. required: true - aliases: [ 'repository' ] + aliases: ['repository'] type: str name: description: - The name for the deploy key. required: true - aliases: [ 'title', 'label' ] + aliases: ['title', 'label'] type: str key: description: @@ -49,48 +55,50 @@ options: type: str read_only: description: - - If C(true), the deploy key will only be able to read repository contents. Otherwise, the deploy key will be able to read and write. + - If V(true), the deploy key is only able to read repository contents. Otherwise, the deploy key is able to read and + write. type: bool - default: 'yes' + default: true state: description: - The state of the deploy key. 
default: "present" - choices: [ "present", "absent" ] + choices: ["present", "absent"] type: str force: description: - - If C(true), forcefully adds the deploy key by deleting any existing deploy key with the same public key or title. + - If V(true), forcefully adds the deploy key by deleting any existing deploy key with the same public key or title. type: bool - default: 'no' + default: false username: description: - - The username to authenticate with. Should not be set when using personal access token + - The username to authenticate with. Should not be set when using personal access token. type: str password: description: - - The password to authenticate with. Alternatively, a personal access token can be used instead of I(username) and I(password) combination. + - The password to authenticate with. Alternatively, a personal access token can be used instead of O(username) and O(password) + combination. type: str token: description: - - The OAuth2 token or personal access token to authenticate with. Mutually exclusive with I(password). + - The OAuth2 token or personal access token to authenticate with. Mutually exclusive with O(password). type: str otp: description: - - The 6 digit One Time Password for 2-Factor Authentication. Required together with I(username) and I(password). + - The 6 digit One Time Password for 2-Factor Authentication. Required together with O(username) and O(password). type: int notes: - - "Refer to GitHub's API documentation here: https://developer.github.com/v3/repos/keys/." -''' + - "Refer to GitHub's API documentation here: https://developer.github.com/v3/repos/keys/." +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: Add a new read-only deploy key to a GitHub repository using basic authentication community.general.github_deploy_key: owner: "johndoe" repo: "example" name: "new-deploy-key" key: "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDAwXxn7kIMNWzcDfou..." 
- read_only: yes + read_only: true username: "johndoe" password: "supersecretpassword" @@ -100,7 +108,7 @@ EXAMPLES = ''' repository: "example" name: "new-deploy-key" key: "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDAwXxn7kIMNWzcDfou..." - force: yes + force: true username: "johndoe" password: "supersecretpassword" state: absent @@ -111,7 +119,7 @@ EXAMPLES = ''' repository: "example" name: "new-deploy-key" key: "{{ lookup('file', '~/.ssh/github.pub') }}" - force: yes + force: true token: "ABAQDAwXxn7kIMNWzcDfo..." - name: Re-add a deploy key to a GitHub repository but with a different name @@ -140,36 +148,36 @@ EXAMPLES = ''' repo: "example" name: "new-deploy-key" key: "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDAwXxn7kIMNWzcDfou..." - read_only: yes + read_only: true username: "janedoe" password: "supersecretpassword" -''' +""" -RETURN = ''' +RETURN = r""" msg: - description: the status message describing what occurred - returned: always - type: str - sample: "Deploy key added successfully" + description: The status message describing what occurred. + returned: always + type: str + sample: "Deploy key added successfully" http_status_code: - description: the HTTP status code returned by the GitHub API - returned: failed - type: int - sample: 400 + description: The HTTP status code returned by the GitHub API. + returned: failed + type: int + sample: 400 error: - description: the error message returned by the GitHub API - returned: failed - type: str - sample: "key is already in use" + description: The error message returned by the GitHub API. + returned: failed + type: str + sample: "key is already in use" id: - description: the key identifier assigned by GitHub for the deploy key - returned: changed - type: int - sample: 24381901 -''' + description: The key identifier assigned by GitHub for the deploy key. 
+ returned: changed + type: int + sample: 24381901 +""" from ansible.module_utils.basic import AnsibleModule from ansible.module_utils.urls import fetch_url @@ -218,7 +226,7 @@ class GithubDeployKey(object): yield self.module.from_json(resp.read()) links = {} - for x, y in findall(r'<([^>]+)>;\s*rel="(\w+)"', info["link"]): + for x, y in findall(r'<([^>]+)>;\s*rel="(\w+)"', info.get("link", '')): links[y] = x url = links.get('next') @@ -249,7 +257,12 @@ class GithubDeployKey(object): key_id = response_body["id"] self.module.exit_json(changed=True, msg="Deploy key successfully added", id=key_id) elif status_code == 422: - self.module.exit_json(changed=False, msg="Deploy key already exists") + # there might be multiple reasons for a 422 + # so we must check if the reason is that the key already exists + if self.get_existing_key(): + self.module.exit_json(changed=False, msg="Deploy key already exists") + else: + self.handle_error(method="POST", info=info) else: self.handle_error(method="POST", info=info) @@ -269,6 +282,8 @@ class GithubDeployKey(object): body = info.get('body') if body: err = self.module.from_json(body)['message'] + else: + err = None if status_code == 401: self.module.fail_json(msg="Failed to connect to {0} due to invalid credentials".format(self.github_url), http_status_code=status_code, error=err) @@ -286,18 +301,18 @@ class GithubDeployKey(object): def main(): module = AnsibleModule( argument_spec=dict( - github_url=dict(required=False, type='str', default="https://api.github.com"), + github_url=dict(type='str', default="https://api.github.com"), owner=dict(required=True, type='str', aliases=['account', 'organization']), repo=dict(required=True, type='str', aliases=['repository']), name=dict(required=True, type='str', aliases=['title', 'label']), key=dict(required=True, type='str', no_log=False), - read_only=dict(required=False, type='bool', default=True), + read_only=dict(type='bool', default=True), state=dict(default='present', 
choices=['present', 'absent']), - force=dict(required=False, type='bool', default=False), - username=dict(required=False, type='str'), - password=dict(required=False, type='str', no_log=True), - otp=dict(required=False, type='int', no_log=True), - token=dict(required=False, type='str', no_log=True) + force=dict(type='bool', default=False), + username=dict(type='str'), + password=dict(type='str', no_log=True), + otp=dict(type='int', no_log=True), + token=dict(type='str', no_log=True) ), mutually_exclusive=[ ['password', 'token'] diff --git a/plugins/modules/source_control/github/github_issue.py b/plugins/modules/github_issue.py similarity index 74% rename from plugins/modules/source_control/github/github_issue.py rename to plugins/modules/github_issue.py index 4add29f341..2923917eec 100644 --- a/plugins/modules/source_control/github/github_issue.py +++ b/plugins/modules/github_issue.py @@ -1,19 +1,25 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- -# Copyright: (c) 2017-18, Abhijeet Kasurde +# Copyright (c) 2017-18, Abhijeet Kasurde # -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = ''' +DOCUMENTATION = r""" module: github_issue -short_description: View GitHub issue. +short_description: View GitHub issue description: - - View GitHub issue for a given repository and organization. + - View GitHub issue for a given repository and organization. 
+extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: none options: repo: description: @@ -32,24 +38,24 @@ options: type: int action: description: - - Get various details about issue depending upon action specified. + - Get various details about issue depending upon action specified. default: 'get_status' choices: - - 'get_status' + - get_status type: str author: - - Abhijeet Kasurde (@Akasurde) -''' + - Abhijeet Kasurde (@Akasurde) +""" -RETURN = ''' +RETURN = r""" issue_status: - description: State of the GitHub issue - type: str - returned: success - sample: open, closed -''' + description: State of the GitHub issue. + type: str + returned: success + sample: open, closed +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: Check if GitHub issue is closed or not community.general.github_issue: organization: ansible @@ -62,7 +68,7 @@ EXAMPLES = ''' ansible.builtin.debug: msg: Do something when issue 23642 is open when: r.issue_status == 'open' -''' +""" import json diff --git a/plugins/modules/source_control/github/github_key.py b/plugins/modules/github_key.py similarity index 62% rename from plugins/modules/source_control/github/github_key.py rename to plugins/modules/github_key.py index 2afbe29aa1..957d130774 100644 --- a/plugins/modules/source_control/github/github_key.py +++ b/plugins/modules/github_key.py @@ -1,18 +1,25 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- -# Copyright: Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# Copyright Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = ''' +DOCUMENTATION = r""" module: github_key -short_description: Manage GitHub 
access keys. +short_description: Manage GitHub access keys description: - - Creates, removes, or updates GitHub access keys. + - Creates, removes, or updates GitHub access keys. + - Works with both GitHub.com and GitHub Enterprise Server installations. +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: none options: token: description: @@ -21,12 +28,12 @@ options: type: str name: description: - - SSH key name + - SSH key name. required: true type: str pubkey: description: - - SSH public key value. Required when C(state=present). + - SSH public key value. Required when O(state=present). type: str state: description: @@ -36,34 +43,64 @@ options: type: str force: description: - - The default is C(yes), which will replace the existing remote key - if it's different than C(pubkey). If C(no), the key will only be - set if no key with the given C(name) exists. + - The default is V(true), which replaces the existing remote key if it is different than O(pubkey). If V(false), the + key is only set if no key with the given O(name) exists. type: bool - default: 'yes' + default: true + api_url: + description: + - URL to the GitHub API if not using github.com but your own GitHub Enterprise instance. + type: str + default: 'https://api.github.com' + version_added: "11.0.0" author: Robert Estelle (@erydo) -''' +""" -RETURN = ''' +RETURN = r""" deleted_keys: - description: An array of key objects that were deleted. Only present on state=absent - type: list - returned: When state=absent - sample: [{'id': 0, 'key': 'BASE64 encoded key', 'url': 'http://example.com/github key', 'created_at': 'YYYY-MM-DDTHH:MM:SZ', 'read_only': False}] + description: An array of key objects that were deleted. Only present on state=absent. 
+ type: list + returned: When state=absent + sample: + [ + { + "id": 0, + "key": "BASE64 encoded key", + "url": "http://example.com/github key", + "created_at": "YYYY-MM-DDTHH:MM:SZ", + "read_only": false + } + ] matching_keys: - description: An array of keys matching the specified name. Only present on state=present - type: list - returned: When state=present - sample: [{'id': 0, 'key': 'BASE64 encoded key', 'url': 'http://example.com/github key', 'created_at': 'YYYY-MM-DDTHH:MM:SZ', 'read_only': False}] + description: An array of keys matching the specified name. Only present on state=present. + type: list + returned: When state=present + sample: + [ + { + "id": 0, + "key": "BASE64 encoded key", + "url": "http://example.com/github key", + "created_at": "YYYY-MM-DDTHH:MM:SZ", + "read_only": false + } + ] key: - description: Metadata about the key just created. Only present on state=present - type: dict - returned: success - sample: {'id': 0, 'key': 'BASE64 encoded key', 'url': 'http://example.com/github key', 'created_at': 'YYYY-MM-DDTHH:MM:SZ', 'read_only': False} -''' + description: Metadata about the key just created. Only present on state=present. 
+ type: dict + returned: success + sample: + { + "id": 0, + "key": "BASE64 encoded key", + "url": "http://example.com/github key", + "created_at": "YYYY-MM-DDTHH:MM:SZ", + "read_only": false + } +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: Read SSH public key to authorize ansible.builtin.shell: cat /home/foo/.ssh/id_rsa.pub register: ssh_pub_key @@ -74,17 +111,33 @@ EXAMPLES = ''' name: Access Key for Some Machine token: '{{ github_access_token }}' pubkey: '{{ ssh_pub_key.stdout }}' -''' +# Alternatively, a single task can be used reading a key from a file on the controller +- name: Authorize key with GitHub + community.general.github_key: + name: Access Key for Some Machine + token: '{{ github_access_token }}' + pubkey: "{{ lookup('ansible.builtin.file', '/home/foo/.ssh/id_rsa.pub') }}" +# GitHub Enterprise Server usage +- name: Authorize key with GitHub Enterprise + community.general.github_key: + name: Access Key for Some Machine + token: '{{ github_enterprise_token }}' + pubkey: "{{ lookup('ansible.builtin.file', '/home/foo/.ssh/id_rsa.pub') }}" + api_url: 'https://github.company.com/api/v3' +""" + +import datetime import json import re from ansible.module_utils.basic import AnsibleModule from ansible.module_utils.urls import fetch_url - -API_BASE = 'https://api.github.com' +from ansible_collections.community.general.plugins.module_utils.datetime import ( + now, +) class GitHubResponse(object): @@ -106,9 +159,10 @@ class GitHubResponse(object): class GitHubSession(object): - def __init__(self, module, token): + def __init__(self, module, token, api_url): self.module = module self.token = token + self.api_url = api_url.rstrip('/') def request(self, method, url, data=None): headers = { @@ -126,7 +180,7 @@ class GitHubSession(object): def get_all_keys(session): - url = API_BASE + '/user/keys' + url = session.api_url + '/user/keys' result = [] while url: r = session.request('GET', url) @@ -137,21 +191,20 @@ def get_all_keys(session): def create_key(session, name, 
pubkey, check_mode): if check_mode: - from datetime import datetime - now = datetime.utcnow() + now_t = now() return { 'id': 0, 'key': pubkey, 'title': name, 'url': 'http://example.com/CHECK_MODE_GITHUB_KEY', - 'created_at': datetime.strftime(now, '%Y-%m-%dT%H:%M:%SZ'), + 'created_at': datetime.datetime.strftime(now_t, '%Y-%m-%dT%H:%M:%SZ'), 'read_only': False, 'verified': False } else: return session.request( 'POST', - API_BASE + '/user/keys', + session.api_url + '/user/keys', data=json.dumps({'title': name, 'key': pubkey})).json() @@ -160,7 +213,7 @@ def delete_keys(session, to_delete, check_mode): return for key in to_delete: - session.request('DELETE', API_BASE + '/user/keys/%s' % key["id"]) + session.request('DELETE', session.api_url + '/user/keys/%s' % key["id"]) def ensure_key_absent(session, name, check_mode): @@ -208,6 +261,7 @@ def main(): 'pubkey': {}, 'state': {'choices': ['present', 'absent'], 'default': 'present'}, 'force': {'default': True, 'type': 'bool'}, + 'api_url': {'default': 'https://api.github.com', 'type': 'str'}, } module = AnsibleModule( argument_spec=argument_spec, @@ -219,6 +273,7 @@ def main(): state = module.params['state'] force = module.params['force'] pubkey = module.params.get('pubkey') + api_url = module.params.get('api_url') if pubkey: pubkey_parts = pubkey.split(' ') @@ -228,7 +283,7 @@ def main(): elif state == 'present': module.fail_json(msg='"pubkey" is required when state=present') - session = GitHubSession(module, token) + session = GitHubSession(module, token, api_url) if state == 'present': result = ensure_key_present(module, session, name, pubkey, force=force, check_mode=module.check_mode) diff --git a/plugins/modules/source_control/github/github_release.py b/plugins/modules/github_release.py similarity index 58% rename from plugins/modules/source_control/github/github_release.py rename to plugins/modules/github_release.py index 654dce5f98..933b9c8bd1 100644 --- a/plugins/modules/source_control/github/github_release.py 
+++ b/plugins/modules/github_release.py @@ -1,78 +1,83 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- # -# Copyright: Ansible Team -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# Copyright Ansible Team +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: github_release short_description: Interact with GitHub Releases description: - - Fetch metadata about GitHub Releases + - Fetch metadata about GitHub Releases. +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: none options: - token: - description: - - GitHub Personal Access Token for authenticating. Mutually exclusive with C(password). - type: str - user: - description: - - The GitHub account that owns the repository - type: str - required: true - password: - description: - - The GitHub account password for the user. Mutually exclusive with C(token). - type: str - repo: - description: - - Repository name - type: str - required: true - action: - description: - - Action to perform - type: str - required: true - choices: [ 'latest_release', 'create_release' ] - tag: - description: - - Tag name when creating a release. Required when using action is set to C(create_release). - type: str - target: - description: - - Target of release when creating a release - type: str - name: - description: - - Name of release when creating a release - type: str - body: - description: - - Description of the release when creating a release - type: str - draft: - description: - - Sets if the release is a draft or not. 
(boolean) - type: 'bool' - default: 'no' - prerelease: - description: - - Sets if the release is a prerelease or not. (boolean) - type: bool - default: 'no' + token: + description: + - GitHub Personal Access Token for authenticating. Mutually exclusive with O(password). + type: str + user: + description: + - The GitHub account that owns the repository. + type: str + required: true + password: + description: + - The GitHub account password for the user. Mutually exclusive with O(token). + type: str + repo: + description: + - Repository name. + type: str + required: true + action: + description: + - Action to perform. + type: str + required: true + choices: ['latest_release', 'create_release'] + tag: + description: + - Tag name when creating a release. Required when using O(action=create_release). + type: str + target: + description: + - Target of release when creating a release. + type: str + name: + description: + - Name of release when creating a release. + type: str + body: + description: + - Description of the release when creating a release. + type: str + draft: + description: + - Sets if the release is a draft or not. (boolean). + type: bool + default: false + prerelease: + description: + - Sets if the release is a prerelease or not. (boolean). + type: bool + default: false author: - - "Adrian Moisey (@adrianmoisey)" + - "Adrian Moisey (@adrianmoisey)" requirements: - - "github3.py >= 1.0.0a3" -''' + - "github3.py >= 1.0.0a3" +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: Get latest release of a public repository community.general.github_release: user: ansible @@ -86,7 +91,7 @@ EXAMPLES = ''' repo: testrepo action: latest_release -- name: Get latest release of test repo using username and password. Ansible 2.4. 
+- name: Get latest release of test repo using username and password community.general.github_release: user: testuser password: secret123 @@ -103,25 +108,15 @@ EXAMPLES = ''' target: master name: My Release body: Some description +""" -''' - -RETURN = ''' -create_release: - description: - - Version of the created release - - "For Ansible version 2.5 and later, if specified release version already exists, then State is unchanged" - - "For Ansible versions prior to 2.5, if specified release version already exists, then State is skipped" - type: str - returned: success - sample: 1.1.0 - -latest_release: - description: Version of the latest release - type: str - returned: success - sample: 1.1.0 -''' +RETURN = r""" +tag: + description: Version of the created/latest release. + type: str + returned: success + sample: 1.1.0 +""" import traceback @@ -185,13 +180,29 @@ def main(): else: gh_obj = github3.GitHub() - # test if we're actually logged in - if password or login_token: + # GitHub's token formats: + # - ghp_ - Personal access token (classic) + # - github_pat_ - Fine-grained personal access token + # - gho_ - OAuth access token + # - ghu_ - User access token for a GitHub App + # - ghs_ - Installation access token for a GitHub App + # - ghr_ - Refresh token for a GitHub App + # + # References: + # https://docs.github.com/en/authentication/keeping-your-account-and-data-secure/about-authentication-to-github#githubs-token-formats + # + # Test if we're actually logged in, but skip this check for some token prefixes + SKIPPED_TOKEN_PREFIXES = ['ghs_'] + if password or (login_token and not any(login_token.startswith(prefix) for prefix in SKIPPED_TOKEN_PREFIXES)): gh_obj.me() except github3.exceptions.AuthenticationFailed as e: module.fail_json(msg='Failed to connect to GitHub: %s' % to_native(e), details="Please check username and password or token " "for repository %s" % repo) + except github3.exceptions.GitHubError as e: + module.fail_json(msg='GitHub API error: %s' % 
to_native(e), + details="Please check username and password or token " + "for repository %s" % repo) repository = gh_obj.repository(user, repo) diff --git a/plugins/modules/source_control/github/github_repo.py b/plugins/modules/github_repo.py similarity index 68% rename from plugins/modules/source_control/github/github_repo.py rename to plugins/modules/github_repo.py index 1446e4abe9..601bea71fd 100644 --- a/plugins/modules/source_control/github/github_repo.py +++ b/plugins/modules/github_repo.py @@ -1,108 +1,109 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- -# Copyright: (c) 2021, Álvaro Torres Cogollo -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# Copyright (c) 2021, Álvaro Torres Cogollo +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: github_repo short_description: Manage your repositories on Github version_added: 2.2.0 description: -- Manages Github repositories using PyGithub library. -- Authentication can be done with I(access_token) or with I(username) and I(password). + - Manages Github repositories using PyGithub library. + - Authentication can be done with O(access_token) or with O(username) and O(password). +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: none options: username: description: - - Username used for authentication. - - This is only needed when not using I(access_token). + - Username used for authentication. + - This is only needed when not using O(access_token). type: str required: false password: description: - - Password used for authentication. - - This is only needed when not using I(access_token). 
+ - Password used for authentication. + - This is only needed when not using O(access_token). type: str required: false access_token: description: - - Token parameter for authentication. - - This is only needed when not using I(username) and I(password). + - Token parameter for authentication. + - This is only needed when not using O(username) and O(password). type: str required: false name: description: - - Repository name. + - Repository name. type: str required: true description: description: - - Description for the repository. - - Defaults to empty if I(force_defaults=true), which is the default in this module. - - Defaults to empty if I(force_defaults=false) when creating a new repository. - - This is only used when I(state) is C(present). + - Description for the repository. + - Defaults to empty if O(force_defaults=true), which is the default in this module. + - Defaults to empty if O(force_defaults=false) when creating a new repository. + - This is only used when O(state) is V(present). type: str required: false private: description: - - Whether the repository should be private or not. - - Defaults to C(false) if I(force_defaults=true), which is the default in this module. - - Defaults to C(false) if I(force_defaults=false) when creating a new repository. - - This is only used when I(state) is C(present). + - Whether the repository should be private or not. + - Defaults to V(false) if O(force_defaults=true), which is the default in this module. + - Defaults to V(false) if O(force_defaults=false) when creating a new repository. + - This is only used when O(state=present). type: bool required: false state: description: - - Whether the repository should exist or not. + - Whether the repository should exist or not. type: str default: present - choices: [ absent, present ] + choices: [absent, present] required: false organization: description: - - Organization for the repository. 
- - When I(state) is C(present), the repository will be created in the current user profile. + - Organization for the repository. + - When O(state=present), the repository is created in the current user profile. type: str required: false api_url: description: - - URL to the GitHub API if not using github.com but you own instance. + - URL to the GitHub API if not using github.com but you own instance. type: str default: 'https://api.github.com' version_added: "3.5.0" force_defaults: description: - - Overwrite current I(description) and I(private) attributes with defaults if set to C(true), which currently is the default. - - The default for this option will be deprecated in a future version of this collection, and eventually change to C(false). + - If V(true), overwrite current O(description) and O(private) attributes with defaults. + - V(true) is deprecated for this option and will not be allowed starting in community.general 13.0.0. V(false) will be the default value then. type: bool - default: true required: false version_added: 4.1.0 requirements: -- PyGithub>=1.54 + - PyGithub>=1.54 notes: -- For Python 3, PyGithub>=1.54 should be used. -- "For Python 3.5, PyGithub==1.54 should be used. More information: U(https://pygithub.readthedocs.io/en/latest/changes.html#version-1-54-november-30-2020)." -- "For Python 2.7, PyGithub==1.45 should be used. More information: U(https://pygithub.readthedocs.io/en/latest/changes.html#version-1-45-december-29-2019)." -- Supports C(check_mode). + - For Python 3, PyGithub>=1.54 should be used. 
author: -- Álvaro Torres Cogollo (@atorrescogollo) -''' + - Álvaro Torres Cogollo (@atorrescogollo) +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: Create a Github repository community.general.github_repo: access_token: mytoken organization: MyOrganization name: myrepo description: "Just for fun" - private: yes + private: true state: present - force_defaults: no + force_defaults: false register: result - name: Delete the repository @@ -113,18 +114,17 @@ EXAMPLES = ''' name: myrepo state: absent register: result -''' +""" -RETURN = ''' +RETURN = r""" repo: description: Repository information as JSON. See U(https://docs.github.com/en/rest/reference/repos#get-a-repository). - returned: success and I(state) is C(present) + returned: success and O(state=present) type: dict -''' +""" import traceback from ansible.module_utils.basic import AnsibleModule, missing_required_lib -import sys GITHUB_IMP_ERR = None try: @@ -240,13 +240,13 @@ def main(): password=dict(type='str', no_log=True), access_token=dict(type='str', no_log=True), name=dict(type='str', required=True), - state=dict(type='str', required=False, default="present", + state=dict(type='str', default="present", choices=["present", "absent"]), - organization=dict(type='str', required=False, default=None), + organization=dict(type='str', ), private=dict(type='bool'), description=dict(type='str'), - api_url=dict(type='str', required=False, default='https://api.github.com'), - force_defaults=dict(type='bool', default=True), + api_url=dict(type='str', default='https://api.github.com'), + force_defaults=dict(type='bool'), ) module = AnsibleModule( argument_spec=module_args, @@ -256,6 +256,11 @@ def main(): mutually_exclusive=[('username', 'access_token')] ) + if module.params['force_defaults'] is None: + module.deprecate("'force_defaults=true' is deprecated and will not be allowed in community.general 13.0.0, use 'force_defaults=false' instead", + version="13.0.0", collection_name="community.general") + 
module.params['force_defaults'] = True + if not HAS_GITHUB_PACKAGE: module.fail_json(msg=missing_required_lib( "PyGithub"), exception=GITHUB_IMP_ERR) diff --git a/plugins/modules/source_control/github/github_webhook.py b/plugins/modules/github_webhook.py similarity index 75% rename from plugins/modules/source_control/github/github_webhook.py rename to plugins/modules/github_webhook.py index fcb6f8d06f..867bfc380e 100644 --- a/plugins/modules/source_control/github/github_webhook.py +++ b/plugins/modules/github_webhook.py @@ -1,39 +1,44 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- # -# Copyright: (c) 2018, Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# Copyright (c) 2018, Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: github_webhook short_description: Manage GitHub webhooks description: - - "Create and delete GitHub webhooks" + - Create and delete GitHub webhooks. requirements: - "PyGithub >= 1.3.5" +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: none + diff_mode: + support: none options: repository: description: - - Full name of the repository to configure a hook for + - Full name of the repository to configure a hook for. type: str required: true aliases: - repo url: description: - - URL to which payloads will be delivered + - URL to which payloads are delivered. type: str required: true content_type: description: - - The media type used to serialize the payloads + - The media type used to serialize the payloads. 
type: str required: false - choices: [ form, json ] + choices: [form, json] default: form secret: description: @@ -42,61 +47,57 @@ options: required: false insecure_ssl: description: - - > - Flag to indicate that GitHub should skip SSL verification when calling - the hook. + - Flag to indicate that GitHub should skip SSL verification when calling the hook. required: false type: bool default: false events: description: - - > - A list of GitHub events the hook is triggered for. Events are listed at - U(https://developer.github.com/v3/activity/events/types/). Required - unless C(state) is C(absent) + - A list of GitHub events the hook is triggered for. Events are listed at U(https://developer.github.com/v3/activity/events/types/). + Required unless O(state=absent). required: false type: list elements: str active: description: - - Whether or not the hook is active + - Whether or not the hook is active. required: false type: bool default: true state: description: - - Whether the hook should be present or absent + - Whether the hook should be present or absent. type: str required: false - choices: [ absent, present ] + choices: [absent, present] default: present user: description: - - User to authenticate to GitHub as + - User to authenticate to GitHub as. type: str required: true password: description: - - Password to authenticate to GitHub with + - Password to authenticate to GitHub with. type: str required: false token: description: - - Token to authenticate to GitHub with + - Token to authenticate to GitHub with. type: str required: false github_url: description: - - Base URL of the GitHub API + - Base URL of the GitHub API. type: str required: false default: https://api.github.com author: - "Chris St. 
Pierre (@stpierre)" -''' +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: Create a new webhook that triggers on push (password auth) community.general.github_webhook: repository: ansible/ansible @@ -112,7 +113,7 @@ EXAMPLES = ''' url: https://jenkins.example.com/ghprbhook/ content_type: json secret: "{{ github_shared_secret }}" - insecure_ssl: True + insecure_ssl: true events: - issue_comment - pull_request @@ -127,16 +128,15 @@ EXAMPLES = ''' state: absent user: "{{ github_user }}" password: "{{ github_password }}" -''' +""" -RETURN = ''' ---- +RETURN = r""" hook_id: - description: The GitHub ID of the hook created/updated + description: The GitHub ID of the hook created/updated. returned: when state is 'present' type: int sample: 6206 -''' +""" import traceback @@ -153,13 +153,18 @@ from ansible.module_utils.common.text.converters import to_native def _create_hook_config(module): - return { + hook_config = { "url": module.params["url"], "content_type": module.params["content_type"], - "secret": module.params.get("secret"), "insecure_ssl": "1" if module.params["insecure_ssl"] else "0" } + secret = module.params.get("secret") + if secret: + hook_config["secret"] = secret + + return hook_config + def create_hook(repo, module): config = _create_hook_config(module) @@ -201,25 +206,16 @@ def main(): argument_spec=dict( repository=dict(type='str', required=True, aliases=['repo']), url=dict(type='str', required=True), - content_type=dict( - type='str', - choices=('json', 'form'), - required=False, - default='form'), - secret=dict(type='str', required=False, no_log=True), - insecure_ssl=dict(type='bool', required=False, default=False), - events=dict(type='list', elements='str', required=False), - active=dict(type='bool', required=False, default=True), - state=dict( - type='str', - required=False, - choices=('absent', 'present'), - default='present'), + content_type=dict(type='str', choices=('json', 'form'), default='form'), + secret=dict(type='str', no_log=True), + 
insecure_ssl=dict(type='bool', default=False), + events=dict(type='list', elements='str', ), + active=dict(type='bool', default=True), + state=dict(type='str', choices=('absent', 'present'), default='present'), user=dict(type='str', required=True), - password=dict(type='str', required=False, no_log=True), - token=dict(type='str', required=False, no_log=True), - github_url=dict( - type='str', required=False, default="https://api.github.com")), + password=dict(type='str', no_log=True), + token=dict(type='str', no_log=True), + github_url=dict(type='str', default="https://api.github.com")), mutually_exclusive=(('password', 'token'),), required_one_of=(("password", "token"),), required_if=(("state", "present", ("events",)),), diff --git a/plugins/modules/source_control/github/github_webhook_info.py b/plugins/modules/github_webhook_info.py similarity index 75% rename from plugins/modules/source_control/github/github_webhook_info.py rename to plugins/modules/github_webhook_info.py index 98a7516e75..30b3e719f3 100644 --- a/plugins/modules/source_control/github/github_webhook_info.py +++ b/plugins/modules/github_webhook_info.py @@ -1,56 +1,56 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- # -# Copyright: (c) 2018, Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# Copyright (c) 2018, Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: github_webhook_info short_description: Query information about GitHub webhooks description: - - "Query information about GitHub webhooks" - - This module was called C(github_webhook_facts) before Ansible 2.9. The usage did not change. + - Query information about GitHub webhooks. 
requirements: - "PyGithub >= 1.3.5" +extends_documentation_fragment: + - community.general.attributes + - community.general.attributes.info_module options: repository: description: - - Full name of the repository to configure a hook for + - Full name of the repository to configure a hook for. type: str required: true aliases: - repo user: description: - - User to authenticate to GitHub as + - User to authenticate to GitHub as. type: str required: true password: description: - - Password to authenticate to GitHub with + - Password to authenticate to GitHub with. type: str required: false token: description: - - Token to authenticate to GitHub with + - Token to authenticate to GitHub with. type: str required: false github_url: description: - - Base URL of the github api + - Base URL of the GitHub API. type: str required: false default: https://api.github.com author: - "Chris St. Pierre (@stpierre)" -''' +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: List hooks for a repository (password auth) community.general.github_webhook_info: repository: ansible/ansible @@ -65,24 +65,27 @@ EXAMPLES = ''' token: "{{ github_user_api_token }}" github_url: https://github.example.com/api/v3/ register: myrepo_webhooks -''' +""" -RETURN = ''' ---- +RETURN = r""" hooks: - description: A list of hooks that exist for the repo + description: A list of hooks that exist for the repo. 
returned: always type: list - sample: > - [{"has_shared_secret": true, - "url": "https://jenkins.example.com/ghprbhook/", - "events": ["issue_comment", "pull_request"], - "insecure_ssl": "1", - "content_type": "json", - "active": true, - "id": 6206, - "last_response": {"status": "active", "message": "OK", "code": 200}}] -''' + elements: dict + sample: + - has_shared_secret: true + url: https://jenkins.example.com/ghprbhook/ + events: [issue_comment, pull_request] + insecure_ssl: "1" + content_type: json + active: true + id: 6206 + last_response: + status: active + message: OK + code: 200 +""" import traceback @@ -119,10 +122,10 @@ def main(): argument_spec=dict( repository=dict(type='str', required=True, aliases=["repo"]), user=dict(type='str', required=True), - password=dict(type='str', required=False, no_log=True), - token=dict(type='str', required=False, no_log=True), + password=dict(type='str', no_log=True), + token=dict(type='str', no_log=True), github_url=dict( - type='str', required=False, default="https://api.github.com")), + type='str', default="https://api.github.com")), mutually_exclusive=(('password', 'token'), ), required_one_of=(("password", "token"), ), supports_check_mode=True) diff --git a/plugins/modules/source_control/gitlab/gitlab_branch.py b/plugins/modules/gitlab_branch.py similarity index 84% rename from plugins/modules/source_control/gitlab/gitlab_branch.py rename to plugins/modules/gitlab_branch.py index 8707e6453a..514300a924 100644 --- a/plugins/modules/source_control/gitlab/gitlab_branch.py +++ b/plugins/modules/gitlab_branch.py @@ -1,12 +1,11 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- -# Copyright: (c) 2021, Werner Dijkerman (ikben@werner-dijkerman.nl) -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import absolute_import, division, print_function -__metaclass__ = type +# Copyright (c) 2021, Werner Dijkerman (ikben@werner-dijkerman.nl) +# GNU General Public License v3.0+ 
(see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later +from __future__ import annotations -DOCUMENTATION = ''' +DOCUMENTATION = r""" module: gitlab_branch short_description: Create or delete a branch version_added: 4.2.0 @@ -15,11 +14,17 @@ description: author: - paytroff (@paytroff) requirements: - - python >= 2.7 - python-gitlab >= 2.3.0 extends_documentation_fragment: - community.general.auth_basic - community.general.gitlab + - community.general.attributes + +attributes: + check_mode: + support: none + diff_mode: + support: none options: state: @@ -41,12 +46,12 @@ options: ref_branch: description: - Reference branch to create from. - - This must be specified if I(state=present). + - This must be specified if O(state=present). type: str -''' +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: Create branch branch2 from main community.general.gitlab_branch: api_url: https://gitlab.com @@ -63,28 +68,20 @@ EXAMPLES = ''' project: "group1/project1" branch: branch2 state: absent +""" -''' - -RETURN = ''' -''' +RETURN = r""" +""" import traceback -from ansible.module_utils.basic import AnsibleModule, missing_required_lib +from ansible.module_utils.basic import AnsibleModule from ansible.module_utils.api import basic_auth_argument_spec from ansible_collections.community.general.plugins.module_utils.version import LooseVersion - -GITLAB_IMP_ERR = None -try: - import gitlab - HAS_GITLAB_PACKAGE = True -except Exception: - GITLAB_IMP_ERR = traceback.format_exc() - HAS_GITLAB_PACKAGE = False - -from ansible_collections.community.general.plugins.module_utils.gitlab import auth_argument_spec, gitlab_authentication +from ansible_collections.community.general.plugins.module_utils.gitlab import ( + auth_argument_spec, gitlab_authentication, gitlab +) class GitlabBranch(object): @@ -119,7 +116,7 @@ def main(): argument_spec.update( project=dict(type='str', required=True), branch=dict(type='str', required=True), - 
ref_branch=dict(type='str', required=False), + ref_branch=dict(type='str'), state=dict(type='str', default="present", choices=["absent", "present"]), ) @@ -144,20 +141,19 @@ def main(): supports_check_mode=False ) + # check prerequisites and connect to gitlab server + gitlab_instance = gitlab_authentication(module) + project = module.params['project'] branch = module.params['branch'] ref_branch = module.params['ref_branch'] state = module.params['state'] - if not HAS_GITLAB_PACKAGE: - module.fail_json(msg=missing_required_lib("python-gitlab"), exception=GITLAB_IMP_ERR) - gitlab_version = gitlab.__version__ if LooseVersion(gitlab_version) < LooseVersion('2.3.0'): module.fail_json(msg="community.general.gitlab_proteched_branch requires python-gitlab Python module >= 2.3.0 (installed version: [%s])." " Please upgrade python-gitlab to version 2.3.0 or above." % gitlab_version) - gitlab_instance = gitlab_authentication(module) this_gitlab = GitlabBranch(module=module, project=project, gitlab_instance=gitlab_instance) this_branch = this_gitlab.get_branch(branch) diff --git a/plugins/modules/source_control/gitlab/gitlab_deploy_key.py b/plugins/modules/gitlab_deploy_key.py similarity index 81% rename from plugins/modules/source_control/gitlab/gitlab_deploy_key.py rename to plugins/modules/gitlab_deploy_key.py index 5746186ca5..9252341863 100644 --- a/plugins/modules/source_control/gitlab/gitlab_deploy_key.py +++ b/plugins/modules/gitlab_deploy_key.py @@ -1,34 +1,39 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- -# Copyright: (c) 2019, Guillaume Martinez (lunik@tiwabbit.fr) -# Copyright: (c) 2018, Marcus Watkins +# Copyright (c) 2019, Guillaume Martinez (lunik@tiwabbit.fr) +# Copyright (c) 2018, Marcus Watkins # Based on code: -# Copyright: (c) 2013, Phillip Gentry -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# Copyright (c) 2013, Phillip Gentry +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or 
https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = ''' +DOCUMENTATION = r""" module: gitlab_deploy_key -short_description: Manages GitLab project deploy keys. +short_description: Manages GitLab project deploy keys description: - - Adds, updates and removes project deploy keys + - Adds, updates and removes project deploy keys. author: - Marcus Watkins (@marwatk) - Guillaume Martinez (@Lunik) requirements: - - python >= 2.7 - python-gitlab python module extends_documentation_fragment: - community.general.auth_basic - community.general.gitlab + - community.general.attributes + +attributes: + check_mode: + support: full + diff_mode: + support: none options: project: description: - - Id or Full path of project in the form of group/name. + - ID or Full path of project in the form of group/name. required: true type: str title: @@ -38,24 +43,24 @@ options: type: str key: description: - - Deploy key + - Deploy key. required: true type: str can_push: description: - Whether this key can push to the project. type: bool - default: no + default: false state: description: - - When C(present) the deploy key added to the project if it doesn't exist. - - When C(absent) it will be removed from the project if it exists. + - When V(present) the deploy key is added to the project if it does not exist. + - When V(absent) it is removed from the project if it exists. 
default: present type: str - choices: [ "present", "absent" ] -''' + choices: ["present", "absent"] +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: "Adding a project deploy key" community.general.gitlab_deploy_key: api_url: https://gitlab.example.com/ @@ -72,7 +77,7 @@ EXAMPLES = ''' project: "my_group/my_project" title: "Jenkins CI" state: present - can_push: yes + can_push: true - name: "Remove the previous deploy key from the project" community.general.gitlab_deploy_key: @@ -81,49 +86,39 @@ EXAMPLES = ''' project: "my_group/my_project" state: absent key: "ssh-rsa AAAAB3NzaC1yc2EAAAABJQAAAIEAiPWx6WM4lhHNedGfBpPJNPpZ7yKu+dnn1SJejgt4596k6YjzGGphH2TUxwKzxcKDKKezwkpfnxPkSMkuEspGRt/aZZ9w..." +""" -''' - -RETURN = ''' +RETURN = r""" msg: - description: Success or failure message + description: Success or failure message. returned: always type: str sample: "Success" result: - description: json parsed response from the server + description: JSON-parsed response from the server. returned: always type: dict error: - description: the error message returned by the GitLab API + description: The error message returned by the GitLab API. returned: failed type: str sample: "400: key is already in use" deploy_key: - description: API object + description: API object. 
returned: always type: dict -''' - -import re -import traceback - -GITLAB_IMP_ERR = None -try: - import gitlab - HAS_GITLAB_PACKAGE = True -except Exception: - GITLAB_IMP_ERR = traceback.format_exc() - HAS_GITLAB_PACKAGE = False +""" from ansible.module_utils.api import basic_auth_argument_spec -from ansible.module_utils.basic import AnsibleModule, missing_required_lib +from ansible.module_utils.basic import AnsibleModule from ansible.module_utils.common.text.converters import to_native -from ansible_collections.community.general.plugins.module_utils.gitlab import auth_argument_spec, find_project, gitlab_authentication +from ansible_collections.community.general.plugins.module_utils.gitlab import ( + auth_argument_spec, find_project, gitlab_authentication, gitlab, list_all_kwargs +) class GitLabDeployKey(object): @@ -159,6 +154,7 @@ class GitLabDeployKey(object): changed = True else: changed, deploy_key = self.update_deploy_key(self.deploy_key_object, { + 'title': key_title, 'can_push': options['can_push']}) self.deploy_key_object = deploy_key @@ -197,9 +193,9 @@ class GitLabDeployKey(object): changed = False for arg_key, arg_value in arguments.items(): - if arguments[arg_key] is not None: - if getattr(deploy_key, arg_key) != arguments[arg_key]: - setattr(deploy_key, arg_key, arguments[arg_key]) + if arg_value is not None: + if getattr(deploy_key, arg_key) != arg_value: + setattr(deploy_key, arg_key, arg_value) changed = True return (changed, deploy_key) @@ -209,9 +205,8 @@ class GitLabDeployKey(object): @param key_title Title of the key ''' def find_deploy_key(self, project, key_title): - deploy_keys = project.keys.list(all=True) - for deploy_key in deploy_keys: - if (deploy_key.title == key_title): + for deploy_key in project.keys.list(**list_all_kwargs): + if deploy_key.title == key_title: return deploy_key ''' @@ -262,17 +257,15 @@ def main(): supports_check_mode=True, ) + # check prerequisites and connect to gitlab server + gitlab_instance = 
gitlab_authentication(module) + state = module.params['state'] project_identifier = module.params['project'] key_title = module.params['title'] key_keyfile = module.params['key'] key_can_push = module.params['can_push'] - if not HAS_GITLAB_PACKAGE: - module.fail_json(msg=missing_required_lib("python-gitlab"), exception=GITLAB_IMP_ERR) - - gitlab_instance = gitlab_authentication(module) - gitlab_deploy_key = GitLabDeployKey(module, gitlab_instance) project = find_project(gitlab_instance, project_identifier) diff --git a/plugins/modules/source_control/gitlab/gitlab_group.py b/plugins/modules/gitlab_group.py similarity index 51% rename from plugins/modules/source_control/gitlab/gitlab_group.py rename to plugins/modules/gitlab_group.py index 1c4a0c9b27..6356ce2e2c 100644 --- a/plugins/modules/source_control/gitlab/gitlab_group.py +++ b/plugins/modules/gitlab_group.py @@ -1,106 +1,192 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- -# Copyright: (c) 2019, Guillaume Martinez (lunik@tiwabbit.fr) -# Copyright: (c) 2015, Werner Dijkerman (ikben@werner-dijkerman.nl) -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# Copyright (c) 2019, Guillaume Martinez (lunik@tiwabbit.fr) +# Copyright (c) 2015, Werner Dijkerman (ikben@werner-dijkerman.nl) +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: gitlab_group short_description: Creates/updates/deletes GitLab Groups description: - - When the group does not exist in GitLab, it will be created. - - When the group does exist and state=absent, the group will be deleted. + - When the group does not exist in GitLab, it is created. + - When the group does exist and O(state=absent), the group is deleted. 
author: - Werner Dijkerman (@dj-wasabi) - Guillaume Martinez (@Lunik) requirements: - - python >= 2.7 - python-gitlab python module extends_documentation_fragment: - community.general.auth_basic - community.general.gitlab + - community.general.attributes + +attributes: + check_mode: + support: full + diff_mode: + support: none options: - name: - description: - - Name of the group you want to create. - required: true - type: str - path: - description: - - The path of the group you want to create, this will be api_url/group_path - - If not supplied, the group_name will be used. - type: str - description: - description: - - A description for the group. - type: str - state: - description: - - create or delete group. - - Possible values are present and absent. - default: present - type: str - choices: ["present", "absent"] - parent: - description: - - Allow to create subgroups - - Id or Full path of parent group in the form of group/name - type: str - visibility: - description: - - Default visibility of the group - choices: ["private", "internal", "public"] - default: private - type: str - project_creation_level: - description: - - Determine if developers can create projects in the group. - choices: ["developer", "maintainer", "noone"] - type: str - version_added: 3.7.0 auto_devops_enabled: description: - Default to Auto DevOps pipeline for all projects within this group. type: bool version_added: 3.7.0 - subgroup_creation_level: - description: - - Allowed to create subgroups. - choices: ["maintainer", "owner"] - type: str - version_added: 3.7.0 - require_two_factor_authentication: - description: - - Require all users in this group to setup two-factor authentication. - type: bool - version_added: 3.7.0 avatar_path: description: - Absolute path image to configure avatar. File size should not exceed 200 kb. - This option is only used on creation, not for updates. 
type: path version_added: 4.2.0 -''' + default_branch: + description: + - All merge requests and commits are made against this branch unless you specify a different one. + type: str + version_added: 9.5.0 + description: + description: + - A description for the group. + type: str + enabled_git_access_protocol: + description: + - V(all) means SSH and HTTP(S) is enabled. + - V(ssh) means only SSH is enabled. + - V(http) means only HTTP(S) is enabled. + - Only available for top level groups. + choices: ["all", "ssh", "http"] + type: str + version_added: 9.5.0 + force_delete: + description: + - Force delete group even if projects in it. + - Used only when O(state=absent). + type: bool + default: false + version_added: 7.5.0 + lfs_enabled: + description: + - Projects in this group can use Git LFS. + type: bool + version_added: 9.5.0 + lock_duo_features_enabled: + description: + - Enforce GitLab Duo features for all subgroups. + - Only available for top level groups. + type: bool + version_added: 9.5.0 + membership_lock: + description: + - Users cannot be added to projects in this group. + type: bool + version_added: 9.5.0 + mentions_disabled: + description: + - Group mentions are disabled. + type: bool + version_added: 9.5.0 + name: + description: + - Name of the group you want to create. + required: true + type: str + parent: + description: + - Allow to create subgroups. + - ID or Full path of parent group in the form of group/name. + type: str + path: + description: + - The path of the group you want to create, this is O(api_url)/O(path). + - If not supplied, O(name) is used. + type: str + prevent_forking_outside_group: + description: + - Prevent forking outside of the group. + type: bool + version_added: 9.5.0 + prevent_sharing_groups_outside_hierarchy: + description: + - Members cannot invite groups outside of this group and its subgroups. + - Only available for top level groups. 
+ type: bool + version_added: 9.5.0 + project_creation_level: + description: + - Determine if developers can create projects in the group. + choices: ["developer", "maintainer", "noone"] + type: str + version_added: 3.7.0 + request_access_enabled: + description: + - Users can request access (if visibility is public or internal). + type: bool + version_added: 9.5.0 + service_access_tokens_expiration_enforced: + description: + - Service account token expiration. + - Changes do not affect existing token expiration dates. + - Only available for top level groups. + type: bool + version_added: 9.5.0 + share_with_group_lock: + description: + - Projects cannot be shared with other groups. + type: bool + version_added: 9.5.0 + require_two_factor_authentication: + description: + - Require all users in this group to setup two-factor authentication. + type: bool + version_added: 3.7.0 + state: + description: + - Create or delete group. + - Possible values are present and absent. + default: present + type: str + choices: ["present", "absent"] + subgroup_creation_level: + description: + - Allowed to create subgroups. + choices: ["maintainer", "owner"] + type: str + version_added: 3.7.0 + two_factor_grace_period: + description: + - Delay 2FA enforcement (hours). + type: str + version_added: 9.5.0 + visibility: + description: + - Default visibility of the group. + choices: ["private", "internal", "public"] + default: private + type: str + wiki_access_level: + description: + - V(enabled) means everyone can access the wiki. + - V(private) means only members of this group can access the wiki. + - V(disabled) means group-level wiki is disabled. 
+ choices: ["enabled", "private", "disabled"] + type: str + version_added: 9.5.0 +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: "Delete GitLab Group" community.general.gitlab_group: api_url: https://gitlab.example.com/ api_token: "{{ access_token }}" - validate_certs: False name: my_first_group state: absent - name: "Create GitLab Group" community.general.gitlab_group: api_url: https://gitlab.example.com/ - validate_certs: True + validate_certs: true api_username: dj-wasabi api_password: "MySecretPassword" name: my_first_group @@ -111,7 +197,7 @@ EXAMPLES = ''' - name: "Create GitLab SubGroup" community.general.gitlab_group: api_url: https://gitlab.example.com/ - validate_certs: True + validate_certs: true api_username: dj-wasabi api_password: "MySecretPassword" name: my_first_group @@ -123,7 +209,7 @@ EXAMPLES = ''' - name: "Create GitLab Group for SubGroups only" community.general.gitlab_group: api_url: https://gitlab.example.com/ - validate_certs: True + validate_certs: true api_username: dj-wasabi api_password: "MySecretPassword" name: my_main_group @@ -132,47 +218,39 @@ EXAMPLES = ''' project_creation_level: noone auto_devops_enabled: false subgroup_creation_level: maintainer -''' +""" -RETURN = ''' +RETURN = r""" msg: - description: Success or failure message + description: Success or failure message. returned: always type: str sample: "Success" result: - description: json parsed response from the server + description: JSON-parsed response from the server. returned: always type: dict error: - description: the error message returned by the GitLab API + description: The error message returned by the GitLab API. returned: failed type: str sample: "400: path is already in use" group: - description: API object + description: API object. 
returned: always type: dict -''' - -import traceback - -GITLAB_IMP_ERR = None -try: - import gitlab - HAS_GITLAB_PACKAGE = True -except Exception: - GITLAB_IMP_ERR = traceback.format_exc() - HAS_GITLAB_PACKAGE = False +""" from ansible.module_utils.api import basic_auth_argument_spec -from ansible.module_utils.basic import AnsibleModule, missing_required_lib +from ansible.module_utils.basic import AnsibleModule from ansible.module_utils.common.text.converters import to_native -from ansible_collections.community.general.plugins.module_utils.gitlab import auth_argument_spec, find_group, gitlab_authentication +from ansible_collections.community.general.plugins.module_utils.gitlab import ( + auth_argument_spec, find_group, gitlab_authentication, gitlab +) class GitLabGroup(object): @@ -197,23 +275,38 @@ class GitLabGroup(object): def create_or_update_group(self, name, parent, options): changed = False + payload = { + 'auto_devops_enabled': options['auto_devops_enabled'], + 'default_branch': options['default_branch'], + 'description': options['description'], + 'lfs_enabled': options['lfs_enabled'], + 'membership_lock': options['membership_lock'], + 'mentions_disabled': options['mentions_disabled'], + 'name': name, + 'path': options['path'], + 'prevent_forking_outside_group': options['prevent_forking_outside_group'], + 'project_creation_level': options['project_creation_level'], + 'request_access_enabled': options['request_access_enabled'], + 'require_two_factor_authentication': options['require_two_factor_authentication'], + 'share_with_group_lock': options['share_with_group_lock'], + 'subgroup_creation_level': options['subgroup_creation_level'], + 'visibility': options['visibility'], + 'wiki_access_level': options['wiki_access_level'], + } + if options.get('enabled_git_access_protocol') and parent is None: + payload['enabled_git_access_protocol'] = options['enabled_git_access_protocol'] + if options.get('lock_duo_features_enabled') and parent is None: + 
payload['lock_duo_features_enabled'] = options['lock_duo_features_enabled'] + if options.get('prevent_sharing_groups_outside_hierarchy') and parent is None: + payload['prevent_sharing_groups_outside_hierarchy'] = options['prevent_sharing_groups_outside_hierarchy'] + if options.get('service_access_tokens_expiration_enforced') and parent is None: + payload['service_access_tokens_expiration_enforced'] = options['service_access_tokens_expiration_enforced'] + if options.get('two_factor_grace_period'): + payload['two_factor_grace_period'] = int(options['two_factor_grace_period']) + # Because we have already call userExists in main() if self.group_object is None: - parent_id = self.get_group_id(parent) - - payload = { - 'name': name, - 'path': options['path'], - 'parent_id': parent_id, - 'visibility': options['visibility'], - 'project_creation_level': options['project_creation_level'], - 'auto_devops_enabled': options['auto_devops_enabled'], - 'subgroup_creation_level': options['subgroup_creation_level'], - } - if options.get('description'): - payload['description'] = options['description'] - if options.get('require_two_factor_authentication'): - payload['require_two_factor_authentication'] = options['require_two_factor_authentication'] + payload['parent_id'] = self.get_group_id(parent) group = self.create_group(payload) # add avatar to group @@ -224,15 +317,7 @@ class GitLabGroup(object): self._module.fail_json(msg='Cannot open {0}: {1}'.format(options['avatar_path'], e)) changed = True else: - changed, group = self.update_group(self.group_object, { - 'name': name, - 'description': options['description'], - 'visibility': options['visibility'], - 'project_creation_level': options['project_creation_level'], - 'auto_devops_enabled': options['auto_devops_enabled'], - 'subgroup_creation_level': options['subgroup_creation_level'], - 'require_two_factor_authentication': options['require_two_factor_authentication'], - }) + changed, group = self.update_group(self.group_object, 
payload) self.group_object = group if changed: @@ -255,7 +340,10 @@ class GitLabGroup(object): return True try: - group = self._gitlab.groups.create(arguments) + # Filter out None values + filtered = {arg_key: arg_value for arg_key, arg_value in arguments.items() if arg_value is not None} + + group = self._gitlab.groups.create(filtered) except (gitlab.exceptions.GitlabCreateError) as e: self._module.fail_json(msg="Failed to create group: %s " % to_native(e)) @@ -269,19 +357,25 @@ class GitLabGroup(object): changed = False for arg_key, arg_value in arguments.items(): - if arguments[arg_key] is not None: - if getattr(group, arg_key) != arguments[arg_key]: - setattr(group, arg_key, arguments[arg_key]) + if arg_value is not None: + if getattr(group, arg_key) != arg_value: + setattr(group, arg_key, arg_value) changed = True return (changed, group) - def delete_group(self): + ''' + @param force To delete even if projects inside + ''' + def delete_group(self, force=False): group = self.group_object - if len(group.projects.list(all=False)) >= 1: + if not force and len(group.projects.list(all=False)) >= 1: self._module.fail_json( - msg="There are still projects in this group. These needs to be moved or deleted before this group can be removed.") + msg=("There are still projects in this group. " + "These needs to be moved or deleted before this group can be removed. " + "Use 'force_delete' to 'true' to force deletion of existing projects.") + ) else: if self._module.check_mode: return True @@ -292,7 +386,7 @@ class GitLabGroup(object): self._module.fail_json(msg="Failed to delete group: %s " % to_native(e)) ''' - @param name Name of the groupe + @param name Name of the group @param full_path Complete path of the Group including parent group path. 
/ ''' def exists_group(self, project_identifier): @@ -308,27 +402,41 @@ def main(): argument_spec = basic_auth_argument_spec() argument_spec.update(auth_argument_spec()) argument_spec.update(dict( - name=dict(type='str', required=True), - path=dict(type='str'), - description=dict(type='str'), - state=dict(type='str', default="present", choices=["absent", "present"]), - parent=dict(type='str'), - visibility=dict(type='str', default="private", choices=["internal", "private", "public"]), - project_creation_level=dict(type='str', choices=['developer', 'maintainer', 'noone']), auto_devops_enabled=dict(type='bool'), - subgroup_creation_level=dict(type='str', choices=['maintainer', 'owner']), - require_two_factor_authentication=dict(type='bool'), avatar_path=dict(type='path'), + default_branch=dict(type='str'), + description=dict(type='str'), + enabled_git_access_protocol=dict(type='str', choices=['all', 'ssh', 'http']), + force_delete=dict(type='bool', default=False), + lfs_enabled=dict(type='bool'), + lock_duo_features_enabled=dict(type='bool'), + membership_lock=dict(type='bool'), + mentions_disabled=dict(type='bool'), + name=dict(type='str', required=True), + parent=dict(type='str'), + path=dict(type='str'), + prevent_forking_outside_group=dict(type='bool'), + prevent_sharing_groups_outside_hierarchy=dict(type='bool'), + project_creation_level=dict(type='str', choices=['developer', 'maintainer', 'noone']), + request_access_enabled=dict(type='bool'), + require_two_factor_authentication=dict(type='bool'), + service_access_tokens_expiration_enforced=dict(type='bool'), + share_with_group_lock=dict(type='bool'), + state=dict(type='str', default="present", choices=["absent", "present"]), + subgroup_creation_level=dict(type='str', choices=['maintainer', 'owner']), + two_factor_grace_period=dict(type='str'), + visibility=dict(type='str', default="private", choices=["internal", "private", "public"]), + wiki_access_level=dict(type='str', choices=['enabled', 'private', 
'disabled']), )) module = AnsibleModule( argument_spec=argument_spec, mutually_exclusive=[ - ['api_username', 'api_token'], - ['api_username', 'api_oauth_token'], - ['api_username', 'api_job_token'], - ['api_token', 'api_oauth_token'], ['api_token', 'api_job_token'], + ['api_token', 'api_oauth_token'], + ['api_username', 'api_job_token'], + ['api_username', 'api_oauth_token'], + ['api_username', 'api_token'], ], required_together=[ ['api_username', 'api_password'], @@ -339,22 +447,34 @@ def main(): supports_check_mode=True, ) + # check prerequisites and connect to gitlab server + gitlab_instance = gitlab_authentication(module) + + auto_devops_enabled = module.params['auto_devops_enabled'] + avatar_path = module.params['avatar_path'] + default_branch = module.params['default_branch'] + description = module.params['description'] + enabled_git_access_protocol = module.params['enabled_git_access_protocol'] + force_delete = module.params['force_delete'] group_name = module.params['name'] group_path = module.params['path'] - description = module.params['description'] - state = module.params['state'] - parent_identifier = module.params['parent'] group_visibility = module.params['visibility'] + lfs_enabled = module.params['lfs_enabled'] + lock_duo_features_enabled = module.params['lock_duo_features_enabled'] + membership_lock = module.params['membership_lock'] + mentions_disabled = module.params['mentions_disabled'] + parent_identifier = module.params['parent'] + prevent_forking_outside_group = module.params['prevent_forking_outside_group'] + prevent_sharing_groups_outside_hierarchy = module.params['prevent_sharing_groups_outside_hierarchy'] project_creation_level = module.params['project_creation_level'] - auto_devops_enabled = module.params['auto_devops_enabled'] - subgroup_creation_level = module.params['subgroup_creation_level'] + request_access_enabled = module.params['request_access_enabled'] require_two_factor_authentication = 
module.params['require_two_factor_authentication'] - avatar_path = module.params['avatar_path'] - - if not HAS_GITLAB_PACKAGE: - module.fail_json(msg=missing_required_lib("python-gitlab"), exception=GITLAB_IMP_ERR) - - gitlab_instance = gitlab_authentication(module) + service_access_tokens_expiration_enforced = module.params['service_access_tokens_expiration_enforced'] + share_with_group_lock = module.params['share_with_group_lock'] + state = module.params['state'] + subgroup_creation_level = module.params['subgroup_creation_level'] + two_factor_grace_period = module.params['two_factor_grace_period'] + wiki_access_level = module.params['wiki_access_level'] # Define default group_path based on group_name if group_path is None: @@ -366,7 +486,7 @@ def main(): if parent_identifier: parent_group = find_group(gitlab_instance, parent_identifier) if not parent_group: - module.fail_json(msg="Failed create GitLab group: Parent group doesn't exists") + module.fail_json(msg="Failed to create GitLab group: Parent group doesn't exist") group_exists = gitlab_group.exists_group(parent_group.full_path + '/' + group_path) else: @@ -374,21 +494,34 @@ def main(): if state == 'absent': if group_exists: - gitlab_group.delete_group() + gitlab_group.delete_group(force=force_delete) module.exit_json(changed=True, msg="Successfully deleted group %s" % group_name) else: - module.exit_json(changed=False, msg="Group deleted or does not exists") + module.exit_json(changed=False, msg="Group deleted or does not exist") if state == 'present': if gitlab_group.create_or_update_group(group_name, parent_group, { - "path": group_path, - "description": description, - "visibility": group_visibility, - "project_creation_level": project_creation_level, "auto_devops_enabled": auto_devops_enabled, - "subgroup_creation_level": subgroup_creation_level, - "require_two_factor_authentication": require_two_factor_authentication, "avatar_path": avatar_path, + "default_branch": default_branch, + "description": 
description, + "enabled_git_access_protocol": enabled_git_access_protocol, + "lfs_enabled": lfs_enabled, + "lock_duo_features_enabled": lock_duo_features_enabled, + "membership_lock": membership_lock, + "mentions_disabled": mentions_disabled, + "path": group_path, + "prevent_forking_outside_group": prevent_forking_outside_group, + "prevent_sharing_groups_outside_hierarchy": prevent_sharing_groups_outside_hierarchy, + "project_creation_level": project_creation_level, + "request_access_enabled": request_access_enabled, + "require_two_factor_authentication": require_two_factor_authentication, + "service_access_tokens_expiration_enforced": service_access_tokens_expiration_enforced, + "share_with_group_lock": share_with_group_lock, + "subgroup_creation_level": subgroup_creation_level, + "two_factor_grace_period": two_factor_grace_period, + "visibility": group_visibility, + "wiki_access_level": wiki_access_level, }): module.exit_json(changed=True, msg="Successfully created or updated the group %s" % group_name, group=gitlab_group.group_object._attrs) else: diff --git a/plugins/modules/gitlab_group_access_token.py b/plugins/modules/gitlab_group_access_token.py new file mode 100644 index 0000000000..59afc74bea --- /dev/null +++ b/plugins/modules/gitlab_group_access_token.py @@ -0,0 +1,339 @@ +#!/usr/bin/python + +# Copyright (c) 2024, Zoran Krleza (zoran.krleza@true-north.hr) +# Based on code: +# Copyright (c) 2019, Guillaume Martinez (lunik@tiwabbit.fr) +# Copyright (c) 2018, Marcus Watkins +# Copyright (c) 2013, Phillip Gentry +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + +DOCUMENTATION = r""" +module: gitlab_group_access_token +short_description: Manages GitLab group access tokens +version_added: 8.4.0 +description: + - Creates and revokes group access tokens. 
+author: + - Zoran Krleza (@pixslx) +requirements: + - python-gitlab >= 3.1.0 +extends_documentation_fragment: + - community.general.auth_basic + - community.general.gitlab + - community.general.attributes +notes: + - Access tokens can not be changed. If a parameter needs to be changed, an acceess token has to be recreated. Whether tokens + are recreated or not is controlled by the O(recreate) option, which defaults to V(never). + - Token string is contained in the result only when access token is created or recreated. It can not be fetched afterwards. + - Token matching is done by comparing O(name) option. +attributes: + check_mode: + support: full + diff_mode: + support: none + +options: + group: + description: + - ID or full path of group in the form of group/subgroup. + required: true + type: str + name: + description: + - Access token's name. + required: true + type: str + scopes: + description: + - Scope of the access token. + - The values V(read_virtual_registry), V(write_virtual_registry), V(manage_runner), and V(self_rotate) were added in community.general 11.3.0. + required: true + type: list + elements: str + aliases: ["scope"] + choices: + - api + - read_api + - read_registry + - write_registry + - read_virtual_registry + - write_virtual_registry + - read_repository + - write_repository + - create_runner + - manage_runner + - ai_features + - k8s_proxy + - self_rotate + access_level: + description: + - Access level of the access token. + - The value V(planner) was added in community.general 11.3.0. + type: str + default: maintainer + choices: ["guest", "planner", "reporter", "developer", "maintainer", "owner"] + expires_at: + description: + - Expiration date of the access token in C(YYYY-MM-DD) format. + - Make sure to quote this value in YAML to ensure it is kept as a string and not interpreted as a YAML date. + type: str + required: true + recreate: + description: + - Whether the access token is recreated if it already exists. 
+ - When V(never) the token is never recreated. + - When V(always) the token is always recreated. + - When V(state_change) the token is recreated if there is a difference between desired state and actual state. + type: str + choices: ["never", "always", "state_change"] + default: never + state: + description: + - When V(present) the access token is added to the group if it does not exist. + - When V(absent) it is removed from the group if it exists. + default: present + type: str + choices: ["present", "absent"] +""" + +EXAMPLES = r""" +- name: "Creating a group access token" + community.general.gitlab_group_access_token: + api_url: https://gitlab.example.com/ + api_token: "somegitlabapitoken" + group: "my_group/my_subgroup" + name: "group_token" + expires_at: "2024-12-31" + access_level: developer + scopes: + - api + - read_api + - read_repository + - write_repository + state: present + +- name: "Revoking a group access token" + community.general.gitlab_group_access_token: + api_url: https://gitlab.example.com/ + api_token: "somegitlabapitoken" + group: "my_group/my_group" + name: "group_token" + expires_at: "2024-12-31" + scopes: + - api + - read_api + - read_repository + - write_repository + state: absent + +- name: "Change (recreate) existing token if its actual state is different than desired state" + community.general.gitlab_group_access_token: + api_url: https://gitlab.example.com/ + api_token: "somegitlabapitoken" + group: "my_group/my_group" + name: "group_token" + expires_at: "2024-12-31" + scopes: + - api + - read_api + - read_repository + - write_repository + recreate: state_change + state: present +""" + +RETURN = r""" +access_token: + description: + - API object. + - Only contains the value of the token if the token was created or recreated. 
+ returned: success and O(state=present) + type: dict +""" + +from datetime import datetime + +from ansible.module_utils.api import basic_auth_argument_spec +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.common.text.converters import to_native + +from ansible_collections.community.general.plugins.module_utils.gitlab import ( + auth_argument_spec, find_group, gitlab_authentication, gitlab +) + +ACCESS_LEVELS = dict(guest=10, planner=15, reporter=20, developer=30, maintainer=40, owner=50) + + +class GitLabGroupAccessToken(object): + def __init__(self, module, gitlab_instance): + self._module = module + self._gitlab = gitlab_instance + self.access_token_object = None + + ''' + @param project Project Object + @param group Group Object + @param arguments Attributes of the access_token + ''' + def create_access_token(self, group, arguments): + changed = False + if self._module.check_mode: + return True + + try: + self.access_token_object = group.access_tokens.create(arguments) + changed = True + except (gitlab.exceptions.GitlabCreateError) as e: + self._module.fail_json(msg="Failed to create access token: %s " % to_native(e)) + + return changed + + ''' + @param project Project object + @param group Group Object + @param name of the access token + ''' + def find_access_token(self, group, name): + access_tokens = [x for x in group.access_tokens.list(all=True) if not getattr(x, 'revoked', False)] + for access_token in access_tokens: + if access_token.name == name: + self.access_token_object = access_token + return False + return False + + def revoke_access_token(self): + if self._module.check_mode: + return True + + changed = False + try: + self.access_token_object.delete() + changed = True + except (gitlab.exceptions.GitlabCreateError) as e: + self._module.fail_json(msg="Failed to revoke access token: %s " % to_native(e)) + + return changed + + def access_tokens_equal(self): + if self.access_token_object.name != 
self._module.params['name']: + return False + if self.access_token_object.scopes != self._module.params['scopes']: + return False + if self.access_token_object.access_level != ACCESS_LEVELS[self._module.params['access_level']]: + return False + if self.access_token_object.expires_at != self._module.params['expires_at']: + return False + return True + + +def main(): + argument_spec = basic_auth_argument_spec() + argument_spec.update(auth_argument_spec()) + argument_spec.update(dict( + state=dict(type='str', default="present", choices=["absent", "present"]), + group=dict(type='str', required=True), + name=dict(type='str', required=True), + scopes=dict(type='list', + required=True, + aliases=['scope'], + elements='str', + choices=['api', + 'read_api', + 'read_registry', + 'write_registry', + 'read_virtual_registry', + 'write_virtual_registry', + 'read_repository', + 'write_repository', + 'create_runner', + 'manage_runner', + 'ai_features', + 'k8s_proxy', + 'self_rotate']), + access_level=dict(type='str', default='maintainer', choices=['guest', 'planner', 'reporter', 'developer', 'maintainer', 'owner']), + expires_at=dict(type='str', required=True), + recreate=dict(type='str', default='never', choices=['never', 'always', 'state_change']) + )) + + module = AnsibleModule( + argument_spec=argument_spec, + mutually_exclusive=[ + ['api_username', 'api_token'], + ['api_username', 'api_oauth_token'], + ['api_username', 'api_job_token'], + ['api_token', 'api_oauth_token'], + ['api_token', 'api_job_token'] + ], + required_together=[ + ['api_username', 'api_password'] + ], + required_one_of=[ + ['api_username', 'api_token', 'api_oauth_token', 'api_job_token'] + ], + supports_check_mode=True + ) + + state = module.params['state'] + group_identifier = module.params['group'] + name = module.params['name'] + scopes = module.params['scopes'] + access_level_str = module.params['access_level'] + expires_at = module.params['expires_at'] + recreate = module.params['recreate'] + + 
access_level = ACCESS_LEVELS[access_level_str] + + try: + datetime.strptime(expires_at, '%Y-%m-%d') + except ValueError: + module.fail_json(msg="Argument expires_at is not in required format YYYY-MM-DD") + + gitlab_instance = gitlab_authentication(module) + + gitlab_access_token = GitLabGroupAccessToken(module, gitlab_instance) + + group = find_group(gitlab_instance, group_identifier) + if group is None: + module.fail_json(msg="Failed to create access token: group %s does not exists" % group_identifier) + + gitlab_access_token_exists = False + gitlab_access_token.find_access_token(group, name) + if gitlab_access_token.access_token_object is not None: + gitlab_access_token_exists = True + + if state == 'absent': + if gitlab_access_token_exists: + gitlab_access_token.revoke_access_token() + module.exit_json(changed=True, msg="Successfully deleted access token %s" % name) + else: + module.exit_json(changed=False, msg="Access token does not exists") + + if state == 'present': + if gitlab_access_token_exists: + if gitlab_access_token.access_tokens_equal(): + if recreate == 'always': + gitlab_access_token.revoke_access_token() + gitlab_access_token.create_access_token(group, {'name': name, 'scopes': scopes, 'access_level': access_level, 'expires_at': expires_at}) + module.exit_json(changed=True, msg="Successfully recreated access token", access_token=gitlab_access_token.access_token_object._attrs) + else: + module.exit_json(changed=False, msg="Access token already exists", access_token=gitlab_access_token.access_token_object._attrs) + else: + if recreate == 'never': + module.fail_json(msg="Access token already exists and its state is different. 
It can not be updated without recreating.") + else: + gitlab_access_token.revoke_access_token() + gitlab_access_token.create_access_token(group, {'name': name, 'scopes': scopes, 'access_level': access_level, 'expires_at': expires_at}) + module.exit_json(changed=True, msg="Successfully recreated access token", access_token=gitlab_access_token.access_token_object._attrs) + else: + gitlab_access_token.create_access_token(group, {'name': name, 'scopes': scopes, 'access_level': access_level, 'expires_at': expires_at}) + if module.check_mode: + module.exit_json(changed=True, msg="Successfully created access token", access_token={}) + else: + module.exit_json(changed=True, msg="Successfully created access token", access_token=gitlab_access_token.access_token_object._attrs) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/source_control/gitlab/gitlab_group_members.py b/plugins/modules/gitlab_group_members.py similarity index 87% rename from plugins/modules/source_control/gitlab/gitlab_group_members.py rename to plugins/modules/gitlab_group_members.py index 31f835dd08..b101cb4e43 100644 --- a/plugins/modules/source_control/gitlab/gitlab_group_members.py +++ b/plugins/modules/gitlab_group_members.py @@ -1,14 +1,12 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- -# Copyright: (c) 2020, Zainab Alsaffar -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# Copyright (c) 2020, Zainab Alsaffar +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = r''' ---- +DOCUMENTATION = r""" module: gitlab_group_members short_description: Manage group members on GitLab Server description: @@ -21,33 +19,40 @@ requirements: extends_documentation_fragment: - community.general.auth_basic - 
community.general.gitlab + - community.general.attributes + +attributes: + check_mode: + support: full + diff_mode: + support: none options: gitlab_group: description: - The C(full_path) of the GitLab group the member is added to/removed from. - - Setting this to C(name) or C(path) is deprecated and will be removed in community.general 6.0.0. Use C(full_path) instead. + - Setting this to C(name) or C(path) has been disallowed since community.general 6.0.0. Use C(full_path) instead. required: true type: str gitlab_user: description: - A username or a list of usernames to add to/remove from the GitLab group. - - Mutually exclusive with I(gitlab_users_access). + - Mutually exclusive with O(gitlab_users_access). type: list elements: str access_level: description: - The access level for the user. - - Required if I(state=present), user state is set to present. - - Mutually exclusive with I(gitlab_users_access). + - Required if O(state=present), user state is set to present. + - Mutually exclusive with O(gitlab_users_access). type: str choices: ['guest', 'reporter', 'developer', 'maintainer', 'owner'] gitlab_users_access: description: - Provide a list of user to access level mappings. - Every dictionary in this list specifies a user (by username) and the access level the user should have. - - Mutually exclusive with I(gitlab_user) and I(access_level). - - Use together with I(purge_users) to remove all users not specified here from the group. + - Mutually exclusive with O(gitlab_user) and O(access_level). + - Use together with O(purge_users) to remove all users not specified here from the group. type: list elements: dict suboptions: @@ -58,7 +63,7 @@ options: access_level: description: - The access level for the user. - - Required if I(state=present), user state is set to present. + - Required if O(state=present), user state is set to present. 
type: str choices: ['guest', 'reporter', 'developer', 'maintainer', 'owner'] required: true @@ -66,25 +71,23 @@ options: state: description: - State of the member in the group. - - On C(present), it adds a user to a GitLab group. - - On C(absent), it removes a user from a GitLab group. + - On V(present), it adds a user to a GitLab group. + - On V(absent), it removes a user from a GitLab group. choices: ['present', 'absent'] default: 'present' type: str purge_users: description: - - Adds/remove users of the given access_level to match the given I(gitlab_user)/I(gitlab_users_access) list. - If omitted do not purge orphaned members. - - Is only used when I(state=present). + - Adds/remove users of the given access_level to match the given O(gitlab_user)/O(gitlab_users_access) list. If omitted + do not purge orphaned members. + - Is only used when O(state=present). type: list elements: str choices: ['guest', 'reporter', 'developer', 'maintainer', 'owner'] version_added: 3.6.0 -notes: - - Supports C(check_mode). 
-''' +""" -EXAMPLES = r''' +EXAMPLES = r""" - name: Add a user to a GitLab Group community.general.gitlab_group_members: api_url: 'https://gitlab.example.com' @@ -146,23 +149,16 @@ EXAMPLES = r''' - name: user2 access_level: maintainer state: absent -''' +""" -RETURN = r''' # ''' +RETURN = r""" # """ from ansible.module_utils.api import basic_auth_argument_spec -from ansible.module_utils.basic import AnsibleModule, missing_required_lib +from ansible.module_utils.basic import AnsibleModule -from ansible_collections.community.general.plugins.module_utils.gitlab import auth_argument_spec, gitlab_authentication - -import traceback - -try: - import gitlab - HAS_PY_GITLAB = True -except ImportError: - GITLAB_IMP_ERR = traceback.format_exc() - HAS_PY_GITLAB = False +from ansible_collections.community.general.plugins.module_utils.gitlab import ( + auth_argument_spec, gitlab_authentication, gitlab, list_all_kwargs +) class GitLabGroup(object): @@ -172,22 +168,20 @@ class GitLabGroup(object): # get user id if the user exists def get_user_id(self, gitlab_user): - user_exists = self._gitlab.users.list(username=gitlab_user, all=True) - if user_exists: - return user_exists[0].id + return next( + (u.id for u in self._gitlab.users.list(username=gitlab_user, **list_all_kwargs)), + None + ) # get group id if group exists def get_group_id(self, gitlab_group): - groups = self._gitlab.groups.list(search=gitlab_group, all=True) - for group in groups: - if group.full_path == gitlab_group: - return group.id - for group in groups: - if group.path == gitlab_group or group.name == gitlab_group: - self._module.deprecate( - msg="Setting 'gitlab_group' to 'name' or 'path' is deprecated. 
Use 'full_path' instead", - version="6.0.0", collection_name="community.general") - return group.id + return next( + ( + g.id for g in self._gitlab.groups.list(search=gitlab_group, **list_all_kwargs) + if g.full_path == gitlab_group + ), + None + ) # get all members in a group def get_members_in_a_group(self, gitlab_group_id): @@ -281,15 +275,15 @@ def main(): supports_check_mode=True, ) - if not HAS_PY_GITLAB: - module.fail_json(msg=missing_required_lib('python-gitlab', url='https://python-gitlab.readthedocs.io/en/stable/'), exception=GITLAB_IMP_ERR) + # check prerequisites and connect to gitlab server + gl = gitlab_authentication(module) access_level_int = { - 'guest': gitlab.GUEST_ACCESS, - 'reporter': gitlab.REPORTER_ACCESS, - 'developer': gitlab.DEVELOPER_ACCESS, - 'maintainer': gitlab.MAINTAINER_ACCESS, - 'owner': gitlab.OWNER_ACCESS, + 'guest': gitlab.const.GUEST_ACCESS, + 'reporter': gitlab.const.REPORTER_ACCESS, + 'developer': gitlab.const.DEVELOPER_ACCESS, + 'maintainer': gitlab.const.MAINTAINER_ACCESS, + 'owner': gitlab.const.OWNER_ACCESS, } gitlab_group = module.params['gitlab_group'] @@ -300,9 +294,6 @@ def main(): if purge_users: purge_users = [access_level_int[level] for level in purge_users] - # connect to gitlab server - gl = gitlab_authentication(module) - group = GitLabGroup(module, gl) gitlab_group_id = group.get_group_id(gitlab_group) diff --git a/plugins/modules/source_control/gitlab/gitlab_group_variable.py b/plugins/modules/gitlab_group_variable.py similarity index 67% rename from plugins/modules/source_control/gitlab/gitlab_group_variable.py rename to plugins/modules/gitlab_group_variable.py index 9be3a3ab39..c505547d87 100644 --- a/plugins/modules/source_control/gitlab/gitlab_group_variable.py +++ b/plugins/modules/gitlab_group_variable.py @@ -1,30 +1,36 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- -# Copyright: (c) 2020, Florent Madiot (scodeman@scode.io) +# Copyright (c) 2020, Florent Madiot (scodeman@scode.io) # Based on code: -# 
Copyright: (c) 2019, Markus Bergholz (markuman@gmail.com) -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import absolute_import, division, print_function -__metaclass__ = type +# Copyright (c) 2019, Markus Bergholz (markuman@gmail.com) +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later +from __future__ import annotations -DOCUMENTATION = r''' +DOCUMENTATION = r""" module: gitlab_group_variable short_description: Creates, updates, or deletes GitLab groups variables version_added: 1.2.0 description: - Creates a group variable if it does not exist. - - When a group variable does exist, its value will be updated when the values are different. - - Variables which are untouched in the playbook, but are not untouched in the GitLab group, - they stay untouched (I(purge) is C(false)) or will be deleted (I(purge) is C(true)). + - When a group variable does exist and is not hidden, its value is updated when the values are different. + When a group variable does exist and is hidden, its value is updated. In this case, the module is B(not idempotent). + - Variables which are untouched in the playbook, but are not untouched in the GitLab group, they stay untouched (O(purge=false)) + or are deleted (O(purge=true)). author: - Florent Madiot (@scodeman) requirements: - - python >= 2.7 - python-gitlab python module extends_documentation_fragment: - community.general.auth_basic - community.general.gitlab + - community.general.attributes + +attributes: + check_mode: + support: full + diff_mode: + support: none options: state: @@ -40,29 +46,31 @@ options: type: str purge: description: - - When set to C(true), delete all variables which are not untouched in the task. + - When set to V(true), delete all variables which are not untouched in the task. 
default: false type: bool vars: description: - - When the list element is a simple key-value pair, set masked and protected to false. - - When the list element is a dict with the keys I(value), I(masked) and I(protected), the user can - have full control about whether a value should be masked, protected or both. + - When the list element is a simple key-value pair, C(masked), C(hidden), C(raw), and C(protected) are set to V(false). + - When the list element is a dict with the keys C(value), C(masked), C(hidden), C(raw), and C(protected), the user can have full + control about whether a value should be masked, hidden, raw, protected, or a combination. - Support for group variables requires GitLab >= 9.5. - Support for environment_scope requires GitLab Premium >= 13.11. - Support for protected values requires GitLab >= 9.3. - Support for masked values requires GitLab >= 11.10. - - A I(value) must be a string or a number. - - Field I(variable_type) must be a string with either C(env_var), which is the default, or C(file). - - When a value is masked, it must be in Base64 and have a length of at least 8 characters. - See GitLab documentation on acceptable values for a masked variable (U(https://docs.gitlab.com/ce/ci/variables/#masked-variables)). + - Support for hidden values requires GitLab >= 17.4, and was added in community.general 11.3.0. + - Support for raw values requires GitLab >= 15.7. + - A C(value) must be a string or a number. + - Field C(variable_type) must be a string with either V(env_var), which is the default, or V(file). + - When a value is masked, it must be in Base64 and have a length of at least 8 characters. See GitLab documentation + on acceptable values for a masked variable (U(https://docs.gitlab.com/ce/ci/variables/#masked-variables)). default: {} type: dict variables: version_added: 4.5.0 description: - A list of dictionaries that represents CI/CD variables. 
- - This modules works internal with this sructure, even if the older I(vars) parameter is used. + - This modules works internal with this structure, even if the older O(vars) parameter is used. default: [] type: list elements: dict @@ -75,35 +83,54 @@ options: value: description: - The variable value. - - Required when I(state=present). + - Required when O(state=present). type: str + description: + description: + - A description for the variable. + - Support for descriptions requires GitLab >= 16.2. + type: str + version_added: '11.4.0' masked: description: - - Wether variable value is masked or not. + - Whether variable value is masked or not. type: bool default: false + hidden: + description: + - Whether variable value is hidden or not. + - Implies C(masked). + - Support for hidden values requires GitLab >= 17.4. + type: bool + default: false + version_added: '11.3.0' protected: description: - - Wether variable value is protected or not. + - Whether variable value is protected or not. type: bool default: false + raw: + description: + - Whether variable value is raw or not. + - Support for raw values requires GitLab >= 15.7. + type: bool + default: false + version_added: '7.4.0' variable_type: description: - - Wether a variable is an environment variable (C(env_var)) or a file (C(file)). + - Whether a variable is an environment variable (V(env_var)) or a file (V(file)). type: str - choices: [ "env_var", "file" ] + choices: ["env_var", "file"] default: env_var environment_scope: description: - The scope for the variable. type: str default: '*' -notes: -- Supports I(check_mode). 
-''' +""" -EXAMPLES = r''' +EXAMPLES = r""" - name: Set or update some CI/CD variables community.general.gitlab_group_variable: api_url: https://gitlab.com @@ -120,6 +147,38 @@ EXAMPLES = r''' variable_type: env_var environment_scope: production +- name: Set or update some CI/CD variables with raw value + community.general.gitlab_group_variable: + api_url: https://gitlab.com + api_token: secret_access_token + group: scodeman/testgroup/ + purge: false + vars: + ACCESS_KEY_ID: abc123 + SECRET_ACCESS_KEY: + value: 3214cbad + masked: true + protected: true + raw: true + variable_type: env_var + environment_scope: '*' + +- name: Set or update some CI/CD variables with expandable value + community.general.gitlab_group_variable: + api_url: https://gitlab.com + api_token: secret_access_token + group: scodeman/testgroup/ + purge: false + vars: + ACCESS_KEY_ID: abc123 + SECRET_ACCESS_KEY: + value: '$MY_OTHER_VARIABLE' + masked: true + protected: true + raw: false + variable_type: env_var + environment_scope: '*' + - name: Delete one variable community.general.gitlab_group_variable: api_url: https://gitlab.com @@ -128,9 +187,9 @@ EXAMPLES = r''' state: absent vars: ACCESS_KEY_ID: abc123 -''' +""" -RETURN = r''' +RETURN = r""" group_variable: description: Four lists of the variablenames which were added, updated, removed or exist. returned: always @@ -140,77 +199,30 @@ group_variable: description: A list of variables which were created. returned: always type: list - sample: "['ACCESS_KEY_ID', 'SECRET_ACCESS_KEY']" + sample: ["ACCESS_KEY_ID", "SECRET_ACCESS_KEY"] untouched: description: A list of variables which exist. returned: always type: list - sample: "['ACCESS_KEY_ID', 'SECRET_ACCESS_KEY']" + sample: ["ACCESS_KEY_ID", "SECRET_ACCESS_KEY"] removed: description: A list of variables which were deleted. 
returned: always type: list - sample: "['ACCESS_KEY_ID', 'SECRET_ACCESS_KEY']" + sample: ["ACCESS_KEY_ID", "SECRET_ACCESS_KEY"] updated: description: A list of variables whose values were changed. returned: always type: list - sample: "['ACCESS_KEY_ID', 'SECRET_ACCESS_KEY']" -''' + sample: ["ACCESS_KEY_ID", "SECRET_ACCESS_KEY"] +""" -import traceback -from ansible.module_utils.basic import AnsibleModule, missing_required_lib +from ansible.module_utils.basic import AnsibleModule from ansible.module_utils.api import basic_auth_argument_spec -from ansible.module_utils.six import string_types -from ansible.module_utils.six import integer_types - -GITLAB_IMP_ERR = None -try: - import gitlab - HAS_GITLAB_PACKAGE = True -except Exception: - GITLAB_IMP_ERR = traceback.format_exc() - HAS_GITLAB_PACKAGE = False - -from ansible_collections.community.general.plugins.module_utils.gitlab import auth_argument_spec, gitlab_authentication - - -def vars_to_variables(vars, module): - # transform old vars to new variables structure - variables = list() - for item, value in vars.items(): - if (isinstance(value, string_types) or - isinstance(value, (integer_types, float))): - variables.append( - { - "name": item, - "value": str(value), - "masked": False, - "protected": False, - "variable_type": "env_var", - } - ) - - elif isinstance(value, dict): - new_item = {"name": item, "value": value.get('value')} - - new_item = { - "name": item, - "value": value.get('value'), - "masked": value.get('masked'), - "protected": value.get('protected'), - "variable_type": value.get('variable_type'), - } - - if value.get('environment_scope'): - new_item['environment_scope'] = value.get('environment_scope') - - variables.append(new_item) - - else: - module.fail_json(msg="value must be of type string, integer, float or dict") - - return variables +from ansible_collections.community.general.plugins.module_utils.gitlab import ( + auth_argument_spec, gitlab_authentication, filter_returned_variables, 
vars_to_variables, + list_all_kwargs +) class GitlabGroupVariables(object): @@ -224,14 +236,7 @@ class GitlabGroupVariables(object): return self.repo.groups.get(group_name) def list_all_group_variables(self): - page_nb = 1 - variables = [] - vars_page = self.group.variables.list(page=page_nb) - while len(vars_page) > 0: - variables += vars_page - page_nb += 1 - vars_page = self.group.variables.list(page=page_nb) - return variables + return list(self.group.variables.list(**list_all_kwargs)) def create_variable(self, var_obj): if self._module.check_mode: @@ -239,8 +244,11 @@ class GitlabGroupVariables(object): var = { "key": var_obj.get('key'), "value": var_obj.get('value'), + "description": var_obj.get('description'), "masked": var_obj.get('masked'), + "masked_and_hidden": var_obj.get('hidden'), "protected": var_obj.get('protected'), + "raw": var_obj.get('raw'), "variable_type": var_obj.get('variable_type'), } if var_obj.get('environment_scope') is not None: @@ -302,19 +310,19 @@ def native_python_main(this_gitlab, purge, requested_variables, state, module): before = [x.attributes for x in gitlab_keys] gitlab_keys = this_gitlab.list_all_group_variables() - existing_variables = [x.attributes for x in gitlab_keys] - - # preprocessing:filter out and enrich before compare - for item in existing_variables: - item.pop('group_id') + existing_variables = filter_returned_variables(gitlab_keys) for item in requested_variables: item['key'] = item.pop('name') item['value'] = str(item.get('value')) if item.get('protected') is None: item['protected'] = False + if item.get('raw') is None: + item['raw'] = False if item.get('masked') is None: item['masked'] = False + if item.get('hidden') is None: + item['hidden'] = False if item.get('environment_scope') is None: item['environment_scope'] = '*' if item.get('variable_type') is None: @@ -337,9 +345,7 @@ def native_python_main(this_gitlab, purge, requested_variables, state, module): if purge: # refetch and filter gitlab_keys = 
this_gitlab.list_all_group_variables() - existing_variables = [x.attributes for x in gitlab_keys] - for item in existing_variables: - item.pop('group_id') + existing_variables = filter_returned_variables(gitlab_keys) remove = [x for x in existing_variables if x not in requested_variables] for item in remove: @@ -347,14 +353,13 @@ def native_python_main(this_gitlab, purge, requested_variables, state, module): return_value['removed'].append(item) elif state == 'absent': - # value does not matter on removing variables. - # key and environment scope are sufficient - for item in existing_variables: - item.pop('value') - item.pop('variable_type') - for item in requested_variables: - item.pop('value') - item.pop('variable_type') + # value, type, and description do not matter on removing variables. + keys_ignored_on_deletion = ['value', 'variable_type', 'description'] + for key in keys_ignored_on_deletion: + for item in existing_variables: + item.pop(key) + for item in requested_variables: + item.pop(key) if not purge: remove_requested = [x for x in requested_variables if x in existing_variables] @@ -384,13 +389,18 @@ def main(): argument_spec.update(auth_argument_spec()) argument_spec.update( group=dict(type='str', required=True), - purge=dict(type='bool', required=False, default=False), - vars=dict(type='dict', required=False, default=dict(), no_log=True), - variables=dict(type='list', elements='dict', required=False, default=list(), options=dict( + purge=dict(type='bool', default=False), + vars=dict(type='dict', default=dict(), no_log=True), + # please mind whenever changing the variables dict to also change module_utils/gitlab.py's + # KNOWN dict in filter_returned_variables or bad evil will happen + variables=dict(type='list', elements='dict', default=list(), options=dict( name=dict(type='str', required=True), value=dict(type='str', no_log=True), + description=dict(type='str'), masked=dict(type='bool', default=False), + hidden=dict(type='bool', default=False), 
protected=dict(type='bool', default=False), + raw=dict(type='bool', default=False), environment_scope=dict(type='str', default='*'), variable_type=dict(type='str', default='env_var', choices=["env_var", "file"]) )), @@ -416,8 +426,8 @@ def main(): supports_check_mode=True ) - if not HAS_GITLAB_PACKAGE: - module.fail_json(msg=missing_required_lib("python-gitlab"), exception=GITLAB_IMP_ERR) + # check prerequisites and connect to gitlab server + gitlab_instance = gitlab_authentication(module) purge = module.params['purge'] var_list = module.params['vars'] @@ -432,8 +442,6 @@ def main(): if any(x['value'] is None for x in variables): module.fail_json(msg='value parameter is required in state present') - gitlab_instance = gitlab_authentication(module) - this_gitlab = GitlabGroupVariables(module=module, gitlab_instance=gitlab_instance) changed, raw_return_value, before, after = native_python_main(this_gitlab, purge, variables, state, module) diff --git a/plugins/modules/source_control/gitlab/gitlab_hook.py b/plugins/modules/gitlab_hook.py similarity index 79% rename from plugins/modules/source_control/gitlab/gitlab_hook.py rename to plugins/modules/gitlab_hook.py index 8a850b1c9e..46997c5f62 100644 --- a/plugins/modules/source_control/gitlab/gitlab_hook.py +++ b/plugins/modules/gitlab_hook.py @@ -1,110 +1,120 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- -# Copyright: (c) 2019, Guillaume Martinez (lunik@tiwabbit.fr) -# Copyright: (c) 2018, Marcus Watkins +# Copyright (c) 2019, Guillaume Martinez (lunik@tiwabbit.fr) +# Copyright (c) 2018, Marcus Watkins # Based on code: -# Copyright: (c) 2013, Phillip Gentry -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# Copyright (c) 2013, Phillip Gentry +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import absolute_import, division, print_function 
-__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: gitlab_hook -short_description: Manages GitLab project hooks. +short_description: Manages GitLab project hooks description: - - Adds, updates and removes project hook + - Adds, updates and removes project hook. author: - Marcus Watkins (@marwatk) - Guillaume Martinez (@Lunik) requirements: - - python >= 2.7 - python-gitlab python module extends_documentation_fragment: - community.general.auth_basic - community.general.gitlab + - community.general.attributes + +attributes: + check_mode: + support: full + diff_mode: + support: none options: project: description: - - Id or Full path of the project in the form of group/name. + - ID or Full path of the project in the form of group/name. required: true type: str hook_url: description: - - The url that you want GitLab to post to, this is used as the primary key for updates and deletion. + - The URL that you want GitLab to post to, this is used as the primary key for updates and deletion. required: true type: str state: description: - - When C(present) the hook will be updated to match the input or created if it doesn't exist. - - When C(absent) hook will be deleted if it exists. + - When V(present) the hook is updated to match the input or created if it does not exist. + - When V(absent) hook is deleted if it exists. default: present type: str - choices: [ "present", "absent" ] + choices: ["present", "absent"] push_events: description: - Trigger hook on push events. type: bool - default: yes + default: true push_events_branch_filter: description: - - Branch name of wildcard to trigger hook on push events + - Branch name of wildcard to trigger hook on push events. type: str version_added: '0.2.0' + default: '' issues_events: description: - Trigger hook on issues events. type: bool - default: no + default: false merge_requests_events: description: - Trigger hook on merge requests events. 
type: bool - default: no + default: false tag_push_events: description: - Trigger hook on tag push events. type: bool - default: no + default: false note_events: description: - Trigger hook on note events or when someone adds a comment. type: bool - default: no + default: false job_events: description: - Trigger hook on job events. type: bool - default: no + default: false pipeline_events: description: - Trigger hook on pipeline events. type: bool - default: no + default: false wiki_page_events: description: - Trigger hook on wiki events. type: bool - default: no + default: false + releases_events: + description: + - Trigger hook on release events. + type: bool + version_added: '8.4.0' hook_validate_certs: description: - - Whether GitLab will do SSL verification when triggering the hook. + - Whether GitLab performs SSL verification when triggering the hook. type: bool - default: no - aliases: [ enable_ssl_verification ] + default: false + aliases: [enable_ssl_verification] token: description: - Secret token to validate hook messages at the receiver. - - If this is present it will always result in a change as it cannot be retrieved from GitLab. - - Will show up in the X-GitLab-Token HTTP request header. + - If this is present it always results in a change as it cannot be retrieved from GitLab. + - It shows up in the C(X-GitLab-Token) HTTP request header. 
required: false type: str -''' +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: "Adding a project hook" community.general.gitlab_hook: api_url: https://gitlab.example.com/ @@ -112,9 +122,8 @@ EXAMPLES = ''' project: "my_group/my_project" hook_url: "https://my-ci-server.example.com/gitlab-hook" state: present - push_events: yes - tag_push_events: yes - hook_validate_certs: no + push_events: true + tag_push_events: true token: "my-super-secret-token-that-my-ci-server-will-check" - name: "Delete the previous hook" @@ -132,48 +141,38 @@ EXAMPLES = ''' project: 10 hook_url: "https://my-ci-server.example.com/gitlab-hook" state: absent -''' +""" -RETURN = ''' +RETURN = r""" msg: - description: Success or failure message + description: Success or failure message. returned: always type: str sample: "Success" result: - description: json parsed response from the server + description: JSON parsed response from the server. returned: always type: dict error: - description: the error message returned by the GitLab API + description: The error message returned by the GitLab API. returned: failed type: str sample: "400: path is already in use" hook: - description: API object + description: API object. 
returned: always type: dict -''' - -import re -import traceback - -GITLAB_IMP_ERR = None -try: - import gitlab - HAS_GITLAB_PACKAGE = True -except Exception: - GITLAB_IMP_ERR = traceback.format_exc() - HAS_GITLAB_PACKAGE = False +""" from ansible.module_utils.api import basic_auth_argument_spec -from ansible.module_utils.basic import AnsibleModule, missing_required_lib -from ansible.module_utils.common.text.converters import to_native +from ansible.module_utils.basic import AnsibleModule -from ansible_collections.community.general.plugins.module_utils.gitlab import auth_argument_spec, find_project, gitlab_authentication +from ansible_collections.community.general.plugins.module_utils.gitlab import ( + auth_argument_spec, find_project, gitlab_authentication, list_all_kwargs +) class GitLabHook(object): @@ -204,6 +203,7 @@ class GitLabHook(object): 'job_events': options['job_events'], 'pipeline_events': options['pipeline_events'], 'wiki_page_events': options['wiki_page_events'], + 'releases_events': options['releases_events'], 'enable_ssl_verification': options['enable_ssl_verification'], 'token': options['token'], }) @@ -219,6 +219,7 @@ class GitLabHook(object): 'job_events': options['job_events'], 'pipeline_events': options['pipeline_events'], 'wiki_page_events': options['wiki_page_events'], + 'releases_events': options['releases_events'], 'enable_ssl_verification': options['enable_ssl_verification'], 'token': options['token'], }) @@ -232,9 +233,8 @@ class GitLabHook(object): hook.save() except Exception as e: self._module.fail_json(msg="Failed to update hook: %s " % e) - return True - else: - return False + + return changed ''' @param project Project Object @@ -256,9 +256,9 @@ class GitLabHook(object): changed = False for arg_key, arg_value in arguments.items(): - if arguments[arg_key] is not None: - if getattr(hook, arg_key, None) != arguments[arg_key]: - setattr(hook, arg_key, arguments[arg_key]) + if arg_value is not None: + if getattr(hook, arg_key, None) != 
arg_value: + setattr(hook, arg_key, arg_value) changed = True return (changed, hook) @@ -268,9 +268,8 @@ class GitLabHook(object): @param hook_url Url to call on event ''' def find_hook(self, project, hook_url): - hooks = project.hooks.list(all=True) - for hook in hooks: - if (hook.url == hook_url): + for hook in project.hooks.list(**list_all_kwargs): + if hook.url == hook_url: return hook ''' @@ -286,10 +285,8 @@ class GitLabHook(object): return False def delete_hook(self): - if self._module.check_mode: - return True - - return self.hook_object.delete() + if not self._module.check_mode: + self.hook_object.delete() def main(): @@ -308,6 +305,7 @@ def main(): job_events=dict(type='bool', default=False), pipeline_events=dict(type='bool', default=False), wiki_page_events=dict(type='bool', default=False), + releases_events=dict(type='bool'), hook_validate_certs=dict(type='bool', default=False, aliases=['enable_ssl_verification']), token=dict(type='str', no_log=True), )) @@ -330,6 +328,9 @@ def main(): supports_check_mode=True, ) + # check prerequisites and connect to gitlab server + gitlab_instance = gitlab_authentication(module) + state = module.params['state'] project_identifier = module.params['project'] hook_url = module.params['hook_url'] @@ -342,14 +343,10 @@ def main(): job_events = module.params['job_events'] pipeline_events = module.params['pipeline_events'] wiki_page_events = module.params['wiki_page_events'] + releases_events = module.params['releases_events'] enable_ssl_verification = module.params['hook_validate_certs'] hook_token = module.params['token'] - if not HAS_GITLAB_PACKAGE: - module.fail_json(msg=missing_required_lib("python-gitlab"), exception=GITLAB_IMP_ERR) - - gitlab_instance = gitlab_authentication(module) - gitlab_hook = GitLabHook(module, gitlab_instance) project = find_project(gitlab_instance, project_identifier) @@ -377,6 +374,7 @@ def main(): "job_events": job_events, "pipeline_events": pipeline_events, "wiki_page_events": 
wiki_page_events, + "releases_events": releases_events, "enable_ssl_verification": enable_ssl_verification, "token": hook_token, }): diff --git a/plugins/modules/gitlab_instance_variable.py b/plugins/modules/gitlab_instance_variable.py new file mode 100644 index 0000000000..c7075f7454 --- /dev/null +++ b/plugins/modules/gitlab_instance_variable.py @@ -0,0 +1,376 @@ +#!/usr/bin/python + +# Copyright (c) 2023, Benedikt Braunger (bebr@adm.ku.dk) +# Based on code: +# Copyright (c) 2020, Florent Madiot (scodeman@scode.io) +# Copyright (c) 2019, Markus Bergholz (markuman@gmail.com) +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later +from __future__ import annotations + +DOCUMENTATION = r""" +module: gitlab_instance_variable +short_description: Creates, updates, or deletes GitLab instance variables +version_added: 7.1.0 +description: + - Creates a instance variable if it does not exist. + - When a instance variable does exist, its value is updated if the values are different. + - Support for instance variables requires GitLab >= 13.0. + - Variables which are not mentioned in the modules options, but are present on the GitLab instance, either stay (O(purge=false)) + or are deleted (O(purge=true)). +author: + - Benedikt Braunger (@benibr) +requirements: + - python-gitlab python module +extends_documentation_fragment: + - community.general.auth_basic + - community.general.gitlab + - community.general.attributes + +attributes: + check_mode: + support: full + diff_mode: + support: none + +options: + state: + description: + - Create or delete instance variable. + default: present + type: str + choices: ["present", "absent"] + purge: + description: + - When set to V(true), delete all variables which are not mentioned in the task. + default: false + type: bool + variables: + description: + - A list of dictionaries that represents CI/CD variables. 
+ default: [] + type: list + elements: dict + suboptions: + name: + description: + - The name of the variable. + type: str + required: true + value: + description: + - The variable value. + - Required when O(state=present). + type: str + description: + description: + - A description for the variable. + - Support for descriptions requires GitLab >= 16.8. + type: str + version_added: '11.4.0' + masked: + description: + - Whether variable value is masked or not. + type: bool + default: false + protected: + description: + - Whether variable value is protected or not. + type: bool + default: false + raw: + description: + - Whether variable value is raw or not. + - Support for raw values requires GitLab >= 15.7. + type: bool + default: false + version_added: 10.2.0 + variable_type: + description: + - Whether a variable is an environment variable (V(env_var)) or a file (V(file)). + type: str + choices: ["env_var", "file"] + default: env_var +""" + + +EXAMPLES = r""" +- name: Set or update some CI/CD variables + community.general.gitlab_instance_variable: + api_url: https://gitlab.com + api_token: secret_access_token + purge: false + variables: + - name: ACCESS_KEY_ID + value: abc1312cba + - name: SECRET_ACCESS_KEY + value: 1337 + masked: true + protected: true + variable_type: env_var + +- name: Delete one variable + community.general.gitlab_instance_variable: + api_url: https://gitlab.com + api_token: secret_access_token + state: absent + variables: + - name: ACCESS_KEY_ID +""" + +RETURN = r""" +instance_variable: + description: Four lists of the variablenames which were added, updated, removed or exist. + returned: always + type: dict + contains: + added: + description: A list of variables which were created. + returned: always + type: list + sample: ["ACCESS_KEY_ID", "SECRET_ACCESS_KEY"] + untouched: + description: A list of variables which exist. 
+ returned: always + type: list + sample: ["ACCESS_KEY_ID", "SECRET_ACCESS_KEY"] + removed: + description: A list of variables which were deleted. + returned: always + type: list + sample: ["ACCESS_KEY_ID", "SECRET_ACCESS_KEY"] + updated: + description: A list pre-existing variables whose values have been set. + returned: always + type: list + sample: ["ACCESS_KEY_ID", "SECRET_ACCESS_KEY"] +""" + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.api import basic_auth_argument_spec +from ansible_collections.community.general.plugins.module_utils.gitlab import ( + auth_argument_spec, gitlab_authentication, filter_returned_variables, + list_all_kwargs +) + + +class GitlabInstanceVariables(object): + + def __init__(self, module, gitlab_instance): + self.instance = gitlab_instance + self._module = module + + def list_all_instance_variables(self): + return list(self.instance.variables.list(**list_all_kwargs)) + + def create_variable(self, var_obj): + if self._module.check_mode: + return True + var = { + "key": var_obj.get('key'), + "value": var_obj.get('value'), + "description": var_obj.get('description'), + "masked": var_obj.get('masked'), + "protected": var_obj.get('protected'), + "raw": var_obj.get('raw'), + "variable_type": var_obj.get('variable_type'), + } + + self.instance.variables.create(var) + return True + + def update_variable(self, var_obj): + if self._module.check_mode: + return True + self.delete_variable(var_obj) + self.create_variable(var_obj) + return True + + def delete_variable(self, var_obj): + if self._module.check_mode: + return True + self.instance.variables.delete(var_obj.get('key')) + return True + + +def compare(requested_variables, existing_variables, state): + # we need to do this, because it was determined in a previous version - more or less buggy + # basically it is not necessary and might results in more/other bugs! + # but it is required and only relevant for check mode!! 
+ # logic represents state 'present' when not purge. all other can be derived from that + # untouched => equal in both + # updated => name and scope are equal + # added => name and scope does not exist + untouched = list() + updated = list() + added = list() + + if state == 'present': + existing_key_scope_vars = list() + for item in existing_variables: + existing_key_scope_vars.append({'key': item.get('key')}) + + for var in requested_variables: + if var in existing_variables: + untouched.append(var) + else: + compare_item = {'key': var.get('name')} + if compare_item in existing_key_scope_vars: + updated.append(var) + else: + added.append(var) + + return untouched, updated, added + + +def native_python_main(this_gitlab, purge, requested_variables, state, module): + + change = False + return_value = dict(added=list(), updated=list(), removed=list(), untouched=list()) + + gitlab_keys = this_gitlab.list_all_instance_variables() + before = [x.attributes for x in gitlab_keys] + + existing_variables = filter_returned_variables(gitlab_keys) + + for item in requested_variables: + item['key'] = item.pop('name') + item['value'] = str(item.get('value')) + if item.get('protected') is None: + item['protected'] = False + if item.get('masked') is None: + item['masked'] = False + if item.get('raw') is None: + item['raw'] = False + if item.get('variable_type') is None: + item['variable_type'] = 'env_var' + + if module.check_mode: + untouched, updated, added = compare(requested_variables, existing_variables, state) + + if state == 'present': + add_or_update = [x for x in requested_variables if x not in existing_variables] + for item in add_or_update: + try: + if this_gitlab.create_variable(item): + return_value['added'].append(item) + + except Exception: + if this_gitlab.update_variable(item): + return_value['updated'].append(item) + + if purge: + # refetch and filter + gitlab_keys = this_gitlab.list_all_instance_variables() + existing_variables = 
filter_returned_variables(gitlab_keys) + + remove = [x for x in existing_variables if x not in requested_variables] + for item in remove: + if this_gitlab.delete_variable(item): + return_value['removed'].append(item) + + elif state == 'absent': + # value, type, and description do not matter on removing variables. + keys_ignored_on_deletion = ['value', 'variable_type', 'description'] + for key in keys_ignored_on_deletion: + for item in existing_variables: + item.pop(key) + for item in requested_variables: + item.pop(key) + + if not purge: + remove_requested = [x for x in requested_variables if x in existing_variables] + for item in remove_requested: + if this_gitlab.delete_variable(item): + return_value['removed'].append(item) + + else: + for item in existing_variables: + if this_gitlab.delete_variable(item): + return_value['removed'].append(item) + + if module.check_mode: + return_value = dict(added=added, updated=updated, removed=return_value['removed'], untouched=untouched) + + if len(return_value['added'] + return_value['removed'] + return_value['updated']) > 0: + change = True + + gitlab_keys = this_gitlab.list_all_instance_variables() + after = [x.attributes for x in gitlab_keys] + + return change, return_value, before, after + + +def main(): + argument_spec = basic_auth_argument_spec() + argument_spec.update(auth_argument_spec()) + argument_spec.update( + purge=dict(type='bool', default=False), + variables=dict(type='list', elements='dict', default=list(), options=dict( + name=dict(type='str', required=True), + value=dict(type='str', no_log=True), + description=dict(type='str'), + masked=dict(type='bool', default=False), + protected=dict(type='bool', default=False), + raw=dict(type='bool', default=False), + variable_type=dict(type='str', default='env_var', choices=["env_var", "file"]) + )), + state=dict(type='str', default="present", choices=["absent", "present"]), + ) + + module = AnsibleModule( + argument_spec=argument_spec, + mutually_exclusive=[ + 
['api_username', 'api_token'], + ['api_username', 'api_oauth_token'], + ['api_username', 'api_job_token'], + ['api_token', 'api_oauth_token'], + ['api_token', 'api_job_token'], + ], + required_together=[ + ['api_username', 'api_password'], + ], + required_one_of=[ + ['api_username', 'api_token', 'api_oauth_token', 'api_job_token'] + ], + supports_check_mode=True + ) + + # check prerequisites and connect to gitlab server + gitlab_instance = gitlab_authentication(module) + + purge = module.params['purge'] + state = module.params['state'] + + variables = module.params['variables'] + + if state == 'present': + if any(x['value'] is None for x in variables): + module.fail_json(msg='value parameter is required in state present') + + this_gitlab = GitlabInstanceVariables(module=module, gitlab_instance=gitlab_instance) + + changed, raw_return_value, before, after = native_python_main(this_gitlab, purge, variables, state, module) + + # postprocessing + for item in after: + item['name'] = item.pop('key') + for item in before: + item['name'] = item.pop('key') + + untouched_key_name = 'key' + if not module.check_mode: + untouched_key_name = 'name' + raw_return_value['untouched'] = [x for x in before if x in after] + + added = [x.get('key') for x in raw_return_value['added']] + updated = [x.get('key') for x in raw_return_value['updated']] + removed = [x.get('key') for x in raw_return_value['removed']] + untouched = [x.get(untouched_key_name) for x in raw_return_value['untouched']] + return_value = dict(added=added, updated=updated, removed=removed, untouched=untouched) + + module.exit_json(changed=changed, instance_variable=return_value) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/gitlab_issue.py b/plugins/modules/gitlab_issue.py new file mode 100644 index 0000000000..aab9f2a346 --- /dev/null +++ b/plugins/modules/gitlab_issue.py @@ -0,0 +1,400 @@ +#!/usr/bin/python + +# Copyright (c) 2023, Ondrej Zvara (ozvara1@gmail.com) +# Based on code: +# Copyright 
(c) 2021, Lennert Mertens (lennert@nubera.be) +# Copyright (c) 2021, Werner Dijkerman (ikben@werner-dijkerman.nl) +# Copyright (c) 2015, Werner Dijkerman (ikben@werner-dijkerman.nl) +# Copyright (c) 2019, Guillaume Martinez (lunik@tiwabbit.fr) +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later +from __future__ import annotations + +DOCUMENTATION = r""" +module: gitlab_issue +short_description: Create, update, or delete GitLab issues +version_added: '8.1.0' +description: + - Creates an issue if it does not exist. + - When an issue does exist, it is updated if the provided parameters are different. + - When an issue does exist and O(state=absent), the issue is deleted. + - When multiple issues are detected, the task fails. + - Existing issues are matched based on O(title) and O(state_filter) filters. +author: + - zvaraondrej (@zvaraondrej) +requirements: + - python-gitlab >= 2.3.0 +extends_documentation_fragment: + - community.general.auth_basic + - community.general.gitlab + - community.general.attributes + +attributes: + check_mode: + support: full + diff_mode: + support: none + +options: + assignee_ids: + description: + - A list of assignee usernames omitting V(@) character. + - Set to an empty array to unassign all assignees. + type: list + elements: str + description: + description: + - A description of the issue. + - Gets overridden by a content of file specified at O(description_path), if found. + type: str + description_path: + description: + - A path of file containing issue's description. + - Accepts MarkDown formatted files. + type: path + issue_type: + description: + - Type of the issue. + default: issue + type: str + choices: ["issue", "incident", "test_case"] + labels: + description: + - A list of label names. + - Set to an empty array to remove all labels. 
+ type: list + elements: str + milestone_search: + description: + - The name of the milestone. + - Set to empty string to unassign milestone. + type: str + milestone_group_id: + description: + - The path or numeric ID of the group hosting desired milestone. + type: str + project: + description: + - The path or name of the project. + required: true + type: str + state: + description: + - Create or delete issue. + default: present + type: str + choices: ["present", "absent"] + state_filter: + description: + - Filter specifying state of issues while searching. + type: str + choices: ["opened", "closed"] + default: opened + title: + description: + - A title for the issue. The title is used as a unique identifier to ensure idempotency. + type: str + required: true +""" + + +EXAMPLES = r""" +- name: Create Issue + community.general.gitlab_issue: + api_url: https://gitlab.com + api_token: secret_access_token + project: "group1/project1" + title: "Ansible demo Issue" + description: "Demo Issue description" + labels: + - Ansible + - Demo + assignee_ids: + - testassignee + state_filter: "opened" + state: present + +- name: Delete Issue + community.general.gitlab_issue: + api_url: https://gitlab.com + api_token: secret_access_token + project: "group1/project1" + title: "Ansible demo Issue" + state_filter: "opened" + state: absent +""" + +RETURN = r""" +msg: + description: Success or failure message. + returned: always + type: str + sample: "Success" + +issue: + description: API object. 
+ returned: success + type: dict +""" + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.api import basic_auth_argument_spec +from ansible.module_utils.common.text.converters import to_native, to_text + +from ansible_collections.community.general.plugins.module_utils.gitlab import ( + auth_argument_spec, gitlab_authentication, gitlab, find_project, find_group +) + + +class GitlabIssue(object): + + def __init__(self, module, project, gitlab_instance): + self._gitlab = gitlab_instance + self._module = module + self.project = project + + ''' + @param milestone_id Title of the milestone + ''' + def get_milestone(self, milestone_id, group): + milestones = [] + try: + milestones = group.milestones.list(search=milestone_id) + except gitlab.exceptions.GitlabGetError as e: + self._module.fail_json(msg="Failed to list the Milestones: %s" % to_native(e)) + + if len(milestones) > 1: + self._module.fail_json(msg="Multiple Milestones matched search criteria.") + if len(milestones) < 1: + self._module.fail_json(msg="No Milestones matched search criteria.") + if len(milestones) == 1: + try: + return group.milestones.get(id=milestones[0].id) + except gitlab.exceptions.GitlabGetError as e: + self._module.fail_json(msg="Failed to get the Milestones: %s" % to_native(e)) + + ''' + @param title Title of the Issue + @param state_filter Issue's state to filter on + ''' + def get_issue(self, title, state_filter): + issues = [] + try: + issues = self.project.issues.list(query_parameters={"search": title, "in": "title", "state": state_filter}) + except gitlab.exceptions.GitlabGetError as e: + self._module.fail_json(msg="Failed to list the Issues: %s" % to_native(e)) + + if len(issues) > 1: + self._module.fail_json(msg="Multiple Issues matched search criteria.") + if len(issues) == 1: + try: + return self.project.issues.get(id=issues[0].iid) + except gitlab.exceptions.GitlabGetError as e: + self._module.fail_json(msg="Failed to get the Issue: %s" % 
to_native(e)) + + ''' + @param username Name of the user + ''' + def get_user(self, username): + users = [] + try: + users = [user for user in self.project.users.list(username=username, all=True) if user.username == username] + except gitlab.exceptions.GitlabGetError as e: + self._module.fail_json(msg="Failed to list the users: %s" % to_native(e)) + + if len(users) > 1: + self._module.fail_json(msg="Multiple Users matched search criteria.") + elif len(users) < 1: + self._module.fail_json(msg="No User matched search criteria.") + else: + return users[0] + + ''' + @param users List of usernames + ''' + def get_user_ids(self, users): + return [self.get_user(user).id for user in users] + + ''' + @param options Options of the Issue + ''' + def create_issue(self, options): + if self._module.check_mode: + self._module.exit_json(changed=True, msg="Successfully created Issue '%s'." % options["title"]) + + try: + return self.project.issues.create(options) + except gitlab.exceptions.GitlabCreateError as e: + self._module.fail_json(msg="Failed to create Issue: %s " % to_native(e)) + + ''' + @param issue Issue object to delete + ''' + def delete_issue(self, issue): + if self._module.check_mode: + self._module.exit_json(changed=True, msg="Successfully deleted Issue '%s'." % issue["title"]) + + try: + return issue.delete() + except gitlab.exceptions.GitlabDeleteError as e: + self._module.fail_json(msg="Failed to delete Issue: '%s'." % to_native(e)) + + ''' + @param issue Issue object to update + @param options Options of the Issue + ''' + def update_issue(self, issue, options): + if self._module.check_mode: + self._module.exit_json(changed=True, msg="Successfully updated Issue '%s'." % issue["title"]) + + try: + return self.project.issues.update(issue.iid, options) + except gitlab.exceptions.GitlabUpdateError as e: + self._module.fail_json(msg="Failed to update Issue %s." 
% to_native(e)) + + ''' + @param issue Issue object to evaluate + @param options New options to update Issue with + ''' + def issue_has_changed(self, issue, options): + for key, value in options.items(): + if value is not None: + + if key == 'milestone_id': + old_milestone = getattr(issue, 'milestone')['id'] if getattr(issue, 'milestone') else "" + if value != old_milestone: + return True + elif key == 'assignee_ids': + if value != sorted([user["id"] for user in getattr(issue, 'assignees')]): + return True + + elif key == 'labels': + if value != sorted(getattr(issue, key)): + return True + + elif getattr(issue, key) != value: + return True + + return False + + +def main(): + argument_spec = basic_auth_argument_spec() + argument_spec.update(auth_argument_spec()) + argument_spec.update( + assignee_ids=dict(type='list', elements='str'), + description=dict(type='str'), + description_path=dict(type='path'), + issue_type=dict(type='str', default='issue', choices=["issue", "incident", "test_case"]), + labels=dict(type='list', elements='str'), + milestone_search=dict(type='str'), + milestone_group_id=dict(type='str'), + project=dict(type='str', required=True), + state=dict(type='str', default="present", choices=["absent", "present"]), + state_filter=dict(type='str', default="opened", choices=["opened", "closed"]), + title=dict(type='str', required=True), + ) + + module = AnsibleModule( + argument_spec=argument_spec, + mutually_exclusive=[ + ['api_username', 'api_token'], + ['api_username', 'api_oauth_token'], + ['api_username', 'api_job_token'], + ['api_token', 'api_oauth_token'], + ['api_token', 'api_job_token'], + ['description', 'description_path'], + ], + required_together=[ + ['api_username', 'api_password'], + ['milestone_search', 'milestone_group_id'], + ], + required_one_of=[ + ['api_username', 'api_token', 'api_oauth_token', 'api_job_token'] + ], + supports_check_mode=True + ) + + assignee_ids = module.params['assignee_ids'] + description = 
module.params['description'] + description_path = module.params['description_path'] + issue_type = module.params['issue_type'] + labels = module.params['labels'] + milestone_id = module.params['milestone_search'] + milestone_group_id = module.params['milestone_group_id'] + project = module.params['project'] + state = module.params['state'] + state_filter = module.params['state_filter'] + title = module.params['title'] + + # check prerequisites and connect to gitlab server + gitlab_instance = gitlab_authentication(module, min_version='2.3.0') + + this_project = find_project(gitlab_instance, project) + if this_project is None: + module.fail_json(msg="Failed to get the project: %s" % project) + + this_gitlab = GitlabIssue(module=module, project=this_project, gitlab_instance=gitlab_instance) + + if milestone_id and milestone_group_id: + this_group = find_group(gitlab_instance, milestone_group_id) + if this_group is None: + module.fail_json(msg="Failed to get the group: %s" % milestone_group_id) + + milestone_id = this_gitlab.get_milestone(milestone_id, this_group).id + + this_issue = this_gitlab.get_issue(title, state_filter) + + if state == "present": + if description_path: + try: + with open(description_path, 'rb') as f: + description = to_text(f.read(), errors='surrogate_or_strict') + except IOError as e: + module.fail_json(msg='Cannot open {0}: {1}'.format(description_path, e)) + + # sorting necessary in order to properly detect changes, as we don't want to get false positive + # results due to differences in ids ordering; + assignee_ids = sorted(this_gitlab.get_user_ids(assignee_ids)) if assignee_ids else assignee_ids + labels = sorted(labels) if labels else labels + + options = { + "title": title, + "description": description, + "labels": labels, + "issue_type": issue_type, + "milestone_id": milestone_id, + "assignee_ids": assignee_ids, + } + + if not this_issue: + issue = this_gitlab.create_issue(options) + module.exit_json( + changed=True, msg="Created Issue 
'{t}'.".format(t=title), + issue=issue.asdict() + ) + else: + if this_gitlab.issue_has_changed(this_issue, options): + issue = this_gitlab.update_issue(this_issue, options) + module.exit_json( + changed=True, msg="Updated Issue '{t}'.".format(t=title), + issue=issue + ) + else: + module.exit_json( + changed=False, msg="Issue '{t}' already exists".format(t=title), + issue=this_issue.asdict() + ) + elif state == "absent": + if not this_issue: + module.exit_json(changed=False, msg="Issue '{t}' does not exist or has already been deleted.".format(t=title)) + else: + issue = this_gitlab.delete_issue(this_issue) + module.exit_json( + changed=True, msg="Issue '{t}' deleted.".format(t=title), + issue=issue + ) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/gitlab_label.py b/plugins/modules/gitlab_label.py new file mode 100644 index 0000000000..5b6d80e20c --- /dev/null +++ b/plugins/modules/gitlab_label.py @@ -0,0 +1,492 @@ +#!/usr/bin/python + +# Copyright (c) 2023, Gabriele Pongelli (gabriele.pongelli@gmail.com) +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later +from __future__ import annotations + +DOCUMENTATION = r""" +module: gitlab_label +short_description: Creates/updates/deletes GitLab Labels belonging to project or group +version_added: 8.3.0 +description: + - When a label does not exist, it is created. + - When a label does exist, its value is updated when the values are different. + - Labels can be purged. +author: + - "Gabriele Pongelli (@gpongelli)" +requirements: + - python-gitlab python module +extends_documentation_fragment: + - community.general.auth_basic + - community.general.gitlab + - community.general.attributes + +attributes: + check_mode: + support: full + diff_mode: + support: none + +options: + state: + description: + - Create or delete project or group label. 
+ default: present + type: str + choices: ["present", "absent"] + purge: + description: + - When set to V(true), delete all labels which are not mentioned in the task. + default: false + type: bool + required: false + project: + description: + - The path and name of the project. Either this or O(group) is required. + required: false + type: str + group: + description: + - The path of the group. Either this or O(project) is required. + required: false + type: str + labels: + description: + - A list of dictionaries that represents gitlab project's or group's labels. + type: list + elements: dict + required: false + default: [] + suboptions: + name: + description: + - The name of the label. + type: str + required: true + color: + description: + - The color of the label. + - Required when O(state=present). + type: str + priority: + description: + - Integer value to give priority to the label. + type: int + required: false + default: + description: + description: + - Label's description. + type: str + default: + new_name: + description: + - Optional field to change label's name. 
+ type: str + default: +""" + + +EXAMPLES = r""" +# same project's task can be executed for group +- name: Create one Label + community.general.gitlab_label: + api_url: https://gitlab.com + api_token: secret_access_token + project: "group1/project1" + labels: + - name: label_one + color: "#123456" + state: present + +- name: Create many group labels + community.general.gitlab_label: + api_url: https://gitlab.com + api_token: secret_access_token + group: "group1" + labels: + - name: label_one + color: "#123456" + description: this is a label + priority: 20 + - name: label_two + color: "#554422" + state: present + +- name: Create many project labels + community.general.gitlab_label: + api_url: https://gitlab.com + api_token: secret_access_token + project: "group1/project1" + labels: + - name: label_one + color: "#123456" + description: this is a label + priority: 20 + - name: label_two + color: "#554422" + state: present + +- name: Set or update some labels + community.general.gitlab_label: + api_url: https://gitlab.com + api_token: secret_access_token + project: "group1/project1" + labels: + - name: label_one + color: "#224488" + state: present + +- name: Add label in check mode + community.general.gitlab_label: + api_url: https://gitlab.com + api_token: secret_access_token + project: "group1/project1" + labels: + - name: label_one + color: "#224488" + check_mode: true + +- name: Delete Label + community.general.gitlab_label: + api_url: https://gitlab.com + api_token: secret_access_token + project: "group1/project1" + labels: + - name: label_one + state: absent + +- name: Change Label name + community.general.gitlab_label: + api_url: https://gitlab.com + api_token: secret_access_token + project: "group1/project1" + labels: + - name: label_one + new_name: label_two + state: absent + +- name: Purge all labels + community.general.gitlab_label: + api_url: https://gitlab.com + api_token: secret_access_token + project: "group1/project1" + purge: true + +- name: Delete 
many labels + community.general.gitlab_label: + api_url: https://gitlab.com + api_token: secret_access_token + project: "group1/project1" + state: absent + labels: + - name: label-abc123 + - name: label-two +""" + +RETURN = r""" +labels: + description: Four lists of the labels which were added, updated, removed or exist. + returned: success + type: dict + contains: + added: + description: A list of labels which were created. + returned: always + type: list + sample: ["abcd", "label-one"] + untouched: + description: A list of labels which exist. + returned: always + type: list + sample: ["defg", "new-label"] + removed: + description: A list of labels which were deleted. + returned: always + type: list + sample: ["defg", "new-label"] + updated: + description: A list pre-existing labels whose values have been set. + returned: always + type: list + sample: ["defg", "new-label"] +labels_obj: + description: API object. + returned: success + type: dict +""" + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.api import basic_auth_argument_spec + +from ansible_collections.community.general.plugins.module_utils.gitlab import ( + auth_argument_spec, gitlab_authentication, ensure_gitlab_package, find_group, find_project +) + + +class GitlabLabels(object): + + def __init__(self, module, gitlab_instance, group_id, project_id): + self._gitlab = gitlab_instance + self.gitlab_object = group_id if group_id else project_id + self.is_group_label = True if group_id else False + self._module = module + + def list_all_labels(self): + page_nb = 1 + labels = [] + vars_page = self.gitlab_object.labels.list(page=page_nb) + while len(vars_page) > 0: + labels += vars_page + page_nb += 1 + vars_page = self.gitlab_object.labels.list(page=page_nb) + return labels + + def create_label(self, var_obj): + if self._module.check_mode: + return True, True + + var = { + "name": var_obj.get('name'), + "color": var_obj.get('color'), + } + + if var_obj.get('description') is 
not None: + var["description"] = var_obj.get('description') + + if var_obj.get('priority') is not None: + var["priority"] = var_obj.get('priority') + + _obj = self.gitlab_object.labels.create(var) + return True, _obj.asdict() + + def update_label(self, var_obj): + if self._module.check_mode: + return True, True + _label = self.gitlab_object.labels.get(var_obj.get('name')) + + if var_obj.get('new_name') is not None: + _label.new_name = var_obj.get('new_name') + + if var_obj.get('description') is not None: + _label.description = var_obj.get('description') + if var_obj.get('priority') is not None: + _label.priority = var_obj.get('priority') + if var_obj.get('color') is not None: + _label.color = var_obj.get('color') + + # save returns None + _label.save() + return True, _label.asdict() + + def delete_label(self, var_obj): + if self._module.check_mode: + return True, True + _label = self.gitlab_object.labels.get(var_obj.get('name')) + # delete returns None + _label.delete() + return True, _label.asdict() + + +def compare(requested_labels, existing_labels, state): + # we need to do this, because it was determined in a previous version - more or less buggy + # basically it is not necessary and might result in more/other bugs! + # but it is required and only relevant for check mode!! + # logic represents state 'present' when not purge. 
all other can be derived from that + # untouched => equal in both + # updated => name and scope are equal + # added => name and scope does not exist + untouched = list() + updated = list() + added = list() + + if state == 'present': + _existing_labels = list() + for item in existing_labels: + _existing_labels.append({'name': item.get('name')}) + + for var in requested_labels: + if var in existing_labels: + untouched.append(var) + else: + compare_item = {'name': var.get('name')} + if compare_item in _existing_labels: + updated.append(var) + else: + added.append(var) + + return untouched, updated, added + + +def native_python_main(this_gitlab, purge, requested_labels, state, module): + change = False + return_value = dict(added=[], updated=[], removed=[], untouched=[]) + return_obj = dict(added=[], updated=[], removed=[]) + + labels_before = [x.asdict() for x in this_gitlab.list_all_labels()] + + # filter out and enrich before compare + for item in requested_labels: + # add defaults when not present + if item.get('description') is None: + item['description'] = "" + if item.get('new_name') is None: + item['new_name'] = None + if item.get('priority') is None: + item['priority'] = None + + # group label does not have priority, removing for comparison + if this_gitlab.is_group_label: + item.pop('priority') + + for item in labels_before: + # remove field only from server + item.pop('id') + item.pop('description_html') + item.pop('text_color') + item.pop('subscribed') + # field present only when it is a project's label + if 'is_project_label' in item: + item.pop('is_project_label') + item['new_name'] = None + + if state == 'present': + add_or_update = [x for x in requested_labels if x not in labels_before] + for item in add_or_update: + try: + _rv, _obj = this_gitlab.create_label(item) + if _rv: + return_value['added'].append(item) + return_obj['added'].append(_obj) + except Exception: + # create raises exception with following error message when label already exists + 
_rv, _obj = this_gitlab.update_label(item) + if _rv: + return_value['updated'].append(item) + return_obj['updated'].append(_obj) + + if purge: + # re-fetch + _labels = this_gitlab.list_all_labels() + + for item in labels_before: + _rv, _obj = this_gitlab.delete_label(item) + if _rv: + return_value['removed'].append(item) + return_obj['removed'].append(_obj) + + elif state == 'absent': + if not purge: + _label_names_requested = [x['name'] for x in requested_labels] + remove_requested = [x for x in labels_before if x['name'] in _label_names_requested] + for item in remove_requested: + _rv, _obj = this_gitlab.delete_label(item) + if _rv: + return_value['removed'].append(item) + return_obj['removed'].append(_obj) + else: + for item in labels_before: + _rv, _obj = this_gitlab.delete_label(item) + if _rv: + return_value['removed'].append(item) + return_obj['removed'].append(_obj) + + if module.check_mode: + _untouched, _updated, _added = compare(requested_labels, labels_before, state) + return_value = dict(added=_added, updated=_updated, removed=return_value['removed'], untouched=_untouched) + + if any(return_value[x] for x in ['added', 'removed', 'updated']): + change = True + + labels_after = [x.asdict() for x in this_gitlab.list_all_labels()] + + return change, return_value, labels_before, labels_after, return_obj + + +def main(): + argument_spec = basic_auth_argument_spec() + argument_spec.update(auth_argument_spec()) + argument_spec.update( + project=dict(type='str'), + group=dict(type='str'), + purge=dict(type='bool', default=False), + labels=dict(type='list', elements='dict', default=list(), + options=dict( + name=dict(type='str', required=True), + color=dict(type='str'), + description=dict(type='str'), + priority=dict(type='int'), + new_name=dict(type='str')) + ), + state=dict(type='str', default="present", choices=["absent", "present"]), + ) + + module = AnsibleModule( + argument_spec=argument_spec, + mutually_exclusive=[ + ['api_username', 'api_token'], + 
['api_username', 'api_oauth_token'], + ['api_username', 'api_job_token'], + ['api_token', 'api_oauth_token'], + ['api_token', 'api_job_token'], + ['project', 'group'], + ], + required_together=[ + ['api_username', 'api_password'], + ], + required_one_of=[ + ['api_username', 'api_token', 'api_oauth_token', 'api_job_token'], + ['project', 'group'] + ], + supports_check_mode=True + ) + ensure_gitlab_package(module) + + gitlab_project = module.params['project'] + gitlab_group = module.params['group'] + purge = module.params['purge'] + label_list = module.params['labels'] + state = module.params['state'] + + gitlab_instance = gitlab_authentication(module, min_version='3.2.0') + + # find_project can return None, but the other must exist + gitlab_project_id = find_project(gitlab_instance, gitlab_project) + + # find_group can return None, but the other must exist + gitlab_group_id = find_group(gitlab_instance, gitlab_group) + + # if both not found, module must exist + if not gitlab_project_id and not gitlab_group_id: + if gitlab_project and not gitlab_project_id: + module.fail_json(msg="project '%s' not found." % gitlab_project) + if gitlab_group and not gitlab_group_id: + module.fail_json(msg="group '%s' not found." 
% gitlab_group) + + this_gitlab = GitlabLabels(module=module, gitlab_instance=gitlab_instance, group_id=gitlab_group_id, + project_id=gitlab_project_id) + + if state == 'present': + _existing_labels = [x.asdict()['name'] for x in this_gitlab.list_all_labels()] + + # color is mandatory when creating label, but it is optional when changing name or updating other fields + if any(x['color'] is None and x['new_name'] is None and x['name'] not in _existing_labels for x in label_list): + module.fail_json(msg='color parameter is required for new labels') + + change, raw_return_value, before, after, _obj = native_python_main(this_gitlab, purge, label_list, state, module) + + if not module.check_mode: + raw_return_value['untouched'] = [x for x in before if x in after] + + added = [x.get('name') for x in raw_return_value['added']] + updated = [x.get('name') for x in raw_return_value['updated']] + removed = [x.get('name') for x in raw_return_value['removed']] + untouched = [x.get('name') for x in raw_return_value['untouched']] + return_value = dict(added=added, updated=updated, removed=removed, untouched=untouched) + + module.exit_json(changed=change, labels=return_value, labels_obj=_obj) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/gitlab_merge_request.py b/plugins/modules/gitlab_merge_request.py new file mode 100644 index 0000000000..83000a8ac1 --- /dev/null +++ b/plugins/modules/gitlab_merge_request.py @@ -0,0 +1,413 @@ +#!/usr/bin/python + +# Copyright (c) 2023, Ondrej Zvara (ozvara1@gmail.com) +# Based on code: +# Copyright (c) 2021, Lennert Mertens (lennert@nubera.be) +# Copyright (c) 2021, Werner Dijkerman (ikben@werner-dijkerman.nl) +# Copyright (c) 2015, Werner Dijkerman (ikben@werner-dijkerman.nl) +# Copyright (c) 2019, Guillaume Martinez (lunik@tiwabbit.fr) +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later +from __future__ import 
annotations + +DOCUMENTATION = r""" +module: gitlab_merge_request +short_description: Create, update, or delete GitLab merge requests +version_added: 7.1.0 +description: + - Creates a merge request if it does not exist. + - When a single merge request does exist, it is updated if the provided parameters are different. + - When a single merge request does exist and O(state=absent), the merge request is deleted. + - When multiple merge requests are detected, the task fails. + - Existing merge requests are matched based on O(title), O(source_branch), O(target_branch), and O(state_filter) filters. +author: + - zvaraondrej (@zvaraondrej) +requirements: + - python-gitlab >= 2.3.0 +extends_documentation_fragment: + - community.general.auth_basic + - community.general.gitlab + - community.general.attributes + +attributes: + check_mode: + support: full + diff_mode: + support: none + +options: + state: + description: + - Create or delete merge request. + default: present + type: str + choices: ["present", "absent"] + project: + description: + - The path or name of the project. + required: true + type: str + source_branch: + description: + - Merge request's source branch. + - Ignored while updating existing merge request. + required: true + type: str + target_branch: + description: + - Merge request's target branch. + required: true + type: str + title: + description: + - A title for the merge request. + type: str + required: true + description: + description: + - A description for the merge request. + - Gets overridden by a content of file specified at O(description_path), if found. + type: str + description_path: + description: + - A path of file containing merge request's description. + - Accepts MarkDown formatted files. + type: path + labels: + description: + - Comma separated list of label names. + type: str + default: "" + remove_source_branch: + description: + - Flag indicating if a merge request should remove the source branch when merging. 
+ type: bool + default: false + state_filter: + description: + - Filter specifying state of merge requests while searching. + type: str + choices: ["opened", "closed", "locked", "merged"] + default: opened + assignee_ids: + description: + - Comma separated list of assignees usernames omitting V(@) character. + - Set to empty string to unassign all assignees. + type: str + reviewer_ids: + description: + - Comma separated list of reviewers usernames omitting V(@) character. + - Set to empty string to unassign all reviewers. + type: str +""" + + +EXAMPLES = r""" +- name: Create Merge Request from branch1 to branch2 + community.general.gitlab_merge_request: + api_url: https://gitlab.com + api_token: secret_access_token + project: "group1/project1" + source_branch: branch1 + target_branch: branch2 + title: "Ansible demo MR" + description: "Demo MR description" + labels: "Ansible,Demo" + state_filter: "opened" + remove_source_branch: true + state: present + +- name: Delete Merge Request from branch1 to branch2 + community.general.gitlab_merge_request: + api_url: https://gitlab.com + api_token: secret_access_token + project: "group1/project1" + source_branch: branch1 + target_branch: branch2 + title: "Ansible demo MR" + state_filter: "opened" + state: absent +""" + +RETURN = r""" +msg: + description: Success or failure message. + returned: always + type: str + sample: "Success" + +mr: + description: API object. 
+ returned: success + type: dict +""" + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.api import basic_auth_argument_spec +from ansible.module_utils.common.text.converters import to_native, to_text + +from ansible_collections.community.general.plugins.module_utils.version import LooseVersion +from ansible_collections.community.general.plugins.module_utils.gitlab import ( + auth_argument_spec, gitlab_authentication, gitlab, find_project +) + + +class GitlabMergeRequest(object): + + def __init__(self, module, project, gitlab_instance): + self._gitlab = gitlab_instance + self._module = module + self.project = project + + ''' + @param branch Name of the branch + ''' + def get_branch(self, branch): + try: + return self.project.branches.get(branch) + except gitlab.exceptions.GitlabGetError as e: + self._module.fail_json(msg="Failed to get the branch: %s" % to_native(e)) + + ''' + @param title Title of the Merge Request + @param source_branch Merge Request's source branch + @param target_branch Merge Request's target branch + @param state_filter Merge Request's state to filter on + ''' + def get_mr(self, title, source_branch, target_branch, state_filter): + mrs = [] + try: + mrs = self.project.mergerequests.list(search=title, source_branch=source_branch, target_branch=target_branch, state=state_filter) + except gitlab.exceptions.GitlabGetError as e: + self._module.fail_json(msg="Failed to list the Merge Request: %s" % to_native(e)) + + if len(mrs) > 1: + self._module.fail_json(msg="Multiple Merge Requests matched search criteria.") + if len(mrs) == 1: + try: + return self.project.mergerequests.get(id=mrs[0].iid) + except gitlab.exceptions.GitlabGetError as e: + self._module.fail_json(msg="Failed to get the Merge Request: %s" % to_native(e)) + + ''' + @param username Name of the user + ''' + def get_user(self, username): + users = [] + try: + users = [user for user in self.project.users.list(username=username, all=True) if user.username 
== username] + except gitlab.exceptions.GitlabGetError as e: + self._module.fail_json(msg="Failed to list the users: %s" % to_native(e)) + + if len(users) > 1: + self._module.fail_json(msg="Multiple Users matched search criteria.") + elif len(users) < 1: + self._module.fail_json(msg="No User matched search criteria.") + else: + return users[0] + + ''' + @param users List of usernames + ''' + def get_user_ids(self, users): + return [self.get_user(user).id for user in users] + + ''' + @param options Options of the Merge Request + ''' + def create_mr(self, options): + if self._module.check_mode: + self._module.exit_json(changed=True, msg="Successfully created the Merge Request %s" % options["title"]) + + try: + return self.project.mergerequests.create(options) + except gitlab.exceptions.GitlabCreateError as e: + self._module.fail_json(msg="Failed to create Merge Request: %s " % to_native(e)) + + ''' + @param mr Merge Request object to delete + ''' + def delete_mr(self, mr): + if self._module.check_mode: + self._module.exit_json(changed=True, msg="Successfully deleted the Merge Request %s" % mr["title"]) + + try: + return mr.delete() + except gitlab.exceptions.GitlabDeleteError as e: + self._module.fail_json(msg="Failed to delete Merge Request: %s " % to_native(e)) + + ''' + @param mr Merge Request object to update + ''' + def update_mr(self, mr, options): + if self._module.check_mode: + self._module.exit_json(changed=True, msg="Successfully updated the Merge Request %s" % mr["title"]) + + try: + return self.project.mergerequests.update(mr.iid, options) + except gitlab.exceptions.GitlabUpdateError as e: + self._module.fail_json(msg="Failed to update Merge Request: %s " % to_native(e)) + + ''' + @param mr Merge Request object to evaluate + @param options New options to update MR with + ''' + def mr_has_changed(self, mr, options): + for key, value in options.items(): + if value is not None: + # see https://gitlab.com/gitlab-org/gitlab-foss/-/issues/27355 + if key == 
'remove_source_branch': + key = 'force_remove_source_branch' + + if key == 'assignee_ids': + if value != sorted([user["id"] for user in getattr(mr, 'assignees')]): + return True + + elif key == 'reviewer_ids': + if value != sorted([user["id"] for user in getattr(mr, 'reviewers')]): + return True + + elif key == 'labels': + if value != sorted(getattr(mr, key)): + return True + + elif getattr(mr, key) != value: + return True + + return False + + +def main(): + argument_spec = basic_auth_argument_spec() + argument_spec.update(auth_argument_spec()) + argument_spec.update( + project=dict(type='str', required=True), + source_branch=dict(type='str', required=True), + target_branch=dict(type='str', required=True), + title=dict(type='str', required=True), + description=dict(type='str'), + labels=dict(type='str', default=""), + description_path=dict(type='path'), + remove_source_branch=dict(type='bool', default=False), + state_filter=dict(type='str', default="opened", choices=["opened", "closed", "locked", "merged"]), + assignee_ids=dict(type='str'), + reviewer_ids=dict(type='str'), + state=dict(type='str', default="present", choices=["absent", "present"]), + ) + + module = AnsibleModule( + argument_spec=argument_spec, + mutually_exclusive=[ + ['api_username', 'api_token'], + ['api_username', 'api_oauth_token'], + ['api_username', 'api_job_token'], + ['api_token', 'api_oauth_token'], + ['api_token', 'api_job_token'], + ['description', 'description_path'], + ], + required_together=[ + ['api_username', 'api_password'], + ], + required_one_of=[ + ['api_username', 'api_token', 'api_oauth_token', 'api_job_token'] + ], + required_if=[ + ['state', 'present', ['source_branch', 'target_branch', 'title'], True], + ['state', 'absent', ['source_branch', 'target_branch', 'title'], True], + ], + supports_check_mode=True + ) + + # check prerequisites and connect to gitlab server + gitlab_instance = gitlab_authentication(module) + + project = module.params['project'] + source_branch = 
module.params['source_branch'] + target_branch = module.params['target_branch'] + title = module.params['title'] + description = module.params['description'] + labels = module.params['labels'] + description_path = module.params['description_path'] + remove_source_branch = module.params['remove_source_branch'] + state_filter = module.params['state_filter'] + assignee_ids = module.params['assignee_ids'] + reviewer_ids = module.params['reviewer_ids'] + state = module.params['state'] + + gitlab_version = gitlab.__version__ + if LooseVersion(gitlab_version) < LooseVersion('2.3.0'): + module.fail_json(msg="community.general.gitlab_merge_request requires python-gitlab Python module >= 2.3.0 (installed version: [%s])." + " Please upgrade python-gitlab to version 2.3.0 or above." % gitlab_version) + + this_project = find_project(gitlab_instance, project) + if this_project is None: + module.fail_json(msg="Failed to get the project: %s" % project) + + this_gitlab = GitlabMergeRequest(module=module, project=this_project, gitlab_instance=gitlab_instance) + + r_source_branch = this_gitlab.get_branch(source_branch) + if not r_source_branch: + module.fail_json(msg="Source branch {b} not exist.".format(b=r_source_branch)) + + r_target_branch = this_gitlab.get_branch(target_branch) + if not r_target_branch: + module.fail_json(msg="Destination branch {b} not exist.".format(b=r_target_branch)) + + this_mr = this_gitlab.get_mr(title, source_branch, target_branch, state_filter) + + if state == "present": + if description_path: + try: + with open(description_path, 'rb') as f: + description = to_text(f.read(), errors='surrogate_or_strict') + except IOError as e: + module.fail_json(msg='Cannot open {0}: {1}'.format(description_path, e)) + + # sorting necessary in order to properly detect changes, as we don't want to get false positive + # results due to differences in ids ordering; see `mr_has_changed()` + assignee_ids = sorted(this_gitlab.get_user_ids(assignee_ids.split(","))) if 
assignee_ids else [] + reviewer_ids = sorted(this_gitlab.get_user_ids(reviewer_ids.split(","))) if reviewer_ids else [] + labels = sorted(labels.split(",")) if labels else [] + + options = { + "target_branch": target_branch, + "title": title, + "description": description, + "labels": labels, + "remove_source_branch": remove_source_branch, + "reviewer_ids": reviewer_ids, + "assignee_ids": assignee_ids, + } + + if not this_mr: + options["source_branch"] = source_branch + + mr = this_gitlab.create_mr(options) + module.exit_json( + changed=True, msg="Created the Merge Request {t} from branch {s} to branch {d}.".format(t=title, d=target_branch, s=source_branch), + mr=mr.asdict() + ) + else: + if this_gitlab.mr_has_changed(this_mr, options): + mr = this_gitlab.update_mr(this_mr, options) + module.exit_json( + changed=True, msg="Merge Request {t} from branch {s} to branch {d} updated.".format(t=title, d=target_branch, s=source_branch), + mr=mr + ) + else: + module.exit_json( + changed=False, msg="Merge Request {t} from branch {s} to branch {d} already exist".format(t=title, d=target_branch, s=source_branch), + mr=this_mr.asdict() + ) + elif this_mr and state == "absent": + mr = this_gitlab.delete_mr(this_mr) + module.exit_json( + changed=True, msg="Merge Request {t} from branch {s} to branch {d} deleted.".format(t=title, d=target_branch, s=source_branch), + mr=mr + ) + else: + module.exit_json(changed=False, msg="No changes are needed.", mr=this_mr.asdict()) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/gitlab_milestone.py b/plugins/modules/gitlab_milestone.py new file mode 100644 index 0000000000..bb4992117c --- /dev/null +++ b/plugins/modules/gitlab_milestone.py @@ -0,0 +1,486 @@ +#!/usr/bin/python + +# Copyright (c) 2023, Gabriele Pongelli (gabriele.pongelli@gmail.com) +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later +from __future__ 
import annotations + +DOCUMENTATION = r""" +module: gitlab_milestone +short_description: Creates/updates/deletes GitLab Milestones belonging to project or group +version_added: 8.3.0 +description: + - When a milestone does not exist, it is created. + - When a milestone does exist, its value is updated when the values are different. + - Milestones can be purged. +author: + - "Gabriele Pongelli (@gpongelli)" +requirements: + - python-gitlab python module +extends_documentation_fragment: + - community.general.auth_basic + - community.general.gitlab + - community.general.attributes + +attributes: + check_mode: + support: full + diff_mode: + support: none + +options: + state: + description: + - Create or delete milestone. + default: present + type: str + choices: ["present", "absent"] + purge: + description: + - When set to V(true), delete all milestone which are not mentioned in the task. + default: false + type: bool + required: false + project: + description: + - The path and name of the project. Either this or O(group) is required. + required: false + type: str + group: + description: + - The path of the group. Either this or O(project) is required. + required: false + type: str + milestones: + description: + - A list of dictionaries that represents gitlab project's or group's milestones. + type: list + elements: dict + required: false + default: [] + suboptions: + title: + description: + - The name of the milestone. + type: str + required: true + due_date: + description: + - Milestone due date in YYYY-MM-DD format. + type: str + required: false + default: null + start_date: + description: + - Milestone start date in YYYY-MM-DD format. + type: str + required: false + default: null + description: + description: + - Milestone's description. 
+ type: str + default: null +""" + + +EXAMPLES = r""" +# same project's task can be executed for group +- name: Create one milestone + community.general.gitlab_milestone: + api_url: https://gitlab.com + api_token: secret_access_token + project: "group1/project1" + milestones: + - title: milestone_one + start_date: "2024-01-04" + state: present + +- name: Create many group milestones + community.general.gitlab_milestone: + api_url: https://gitlab.com + api_token: secret_access_token + group: "group1" + milestones: + - title: milestone_one + start_date: "2024-01-04" + description: this is a milestone + due_date: "2024-02-04" + - title: milestone_two + state: present + +- name: Create many project milestones + community.general.gitlab_milestone: + api_url: https://gitlab.com + api_token: secret_access_token + project: "group1/project1" + milestones: + - title: milestone_one + start_date: "2024-01-04" + description: this is a milestone + due_date: "2024-02-04" + - title: milestone_two + state: present + +- name: Set or update some milestones + community.general.gitlab_milestone: + api_url: https://gitlab.com + api_token: secret_access_token + project: "group1/project1" + milestones: + - title: milestone_one + start_date: "2024-05-04" + state: present + +- name: Add milestone in check mode + community.general.gitlab_milestone: + api_url: https://gitlab.com + api_token: secret_access_token + project: "group1/project1" + milestones: + - title: milestone_one + start_date: "2024-05-04" + check_mode: true + +- name: Delete milestone + community.general.gitlab_milestone: + api_url: https://gitlab.com + api_token: secret_access_token + project: "group1/project1" + milestones: + - title: milestone_one + state: absent + +- name: Purge all milestones + community.general.gitlab_milestone: + api_url: https://gitlab.com + api_token: secret_access_token + project: "group1/project1" + purge: true + +- name: Delete many milestones + community.general.gitlab_milestone: + api_url: 
https://gitlab.com + api_token: secret_access_token + project: "group1/project1" + state: absent + milestones: + - title: milestone-abc123 + - title: milestone-two +""" + +RETURN = r""" +milestones: + description: Four lists of the milestones which were added, updated, removed or exist. + returned: success + type: dict + contains: + added: + description: A list of milestones which were created. + returned: always + type: list + sample: ["abcd", "milestone-one"] + untouched: + description: A list of milestones which exist. + returned: always + type: list + sample: ["defg", "new-milestone"] + removed: + description: A list of milestones which were deleted. + returned: always + type: list + sample: ["defg", "new-milestone"] + updated: + description: A list pre-existing milestones whose values have been set. + returned: always + type: list + sample: ["defg", "new-milestone"] +milestones_obj: + description: API object. + returned: success + type: dict +""" + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.api import basic_auth_argument_spec + +from ansible_collections.community.general.plugins.module_utils.gitlab import ( + auth_argument_spec, gitlab_authentication, ensure_gitlab_package, find_group, find_project +) +from datetime import datetime + + +class GitlabMilestones(object): + + def __init__(self, module, gitlab_instance, group_id, project_id): + self._gitlab = gitlab_instance + self.gitlab_object = group_id if group_id else project_id + self.is_group_milestone = True if group_id else False + self._module = module + + def list_all_milestones(self): + page_nb = 1 + milestones = [] + vars_page = self.gitlab_object.milestones.list(page=page_nb) + while len(vars_page) > 0: + milestones += vars_page + page_nb += 1 + vars_page = self.gitlab_object.milestones.list(page=page_nb) + return milestones + + def create_milestone(self, var_obj): + if self._module.check_mode: + return True, True + + var = { + "title": var_obj.get('title'), + } + 
+ if var_obj.get('description') is not None: + var["description"] = var_obj.get('description') + + if var_obj.get('start_date') is not None: + var["start_date"] = self.check_date(var_obj.get('start_date')) + + if var_obj.get('due_date') is not None: + var["due_date"] = self.check_date(var_obj.get('due_date')) + + _obj = self.gitlab_object.milestones.create(var) + return True, _obj.asdict() + + def update_milestone(self, var_obj): + if self._module.check_mode: + return True, True + _milestone = self.gitlab_object.milestones.get(self.get_milestone_id(var_obj.get('title'))) + + if var_obj.get('description') is not None: + _milestone.description = var_obj.get('description') + + if var_obj.get('start_date') is not None: + _milestone.start_date = var_obj.get('start_date') + + if var_obj.get('due_date') is not None: + _milestone.due_date = var_obj.get('due_date') + + # save returns None + _milestone.save() + return True, _milestone.asdict() + + def get_milestone_id(self, _title): + _milestone_list = self.gitlab_object.milestones.list() + _found = [x for x in _milestone_list if x.title == _title] + if _found: + return _found[0].id + else: + self._module.fail_json(msg="milestone '%s' not found." % _title) + + def check_date(self, _date): + try: + datetime.strptime(_date, '%Y-%m-%d') + except ValueError: + self._module.fail_json(msg="milestone's date '%s' not in correct format." % _date) + return _date + + def delete_milestone(self, var_obj): + if self._module.check_mode: + return True, True + _milestone = self.gitlab_object.milestones.get(self.get_milestone_id(var_obj.get('title'))) + # delete returns None + _milestone.delete() + return True, _milestone.asdict() + + +def compare(requested_milestones, existing_milestones, state): + # we need to do this, because it was determined in a previous version - more or less buggy + # basically it is not necessary and might result in more/other bugs! + # but it is required and only relevant for check mode!! 
+ # logic represents state 'present' when not purge. all other can be derived from that + # untouched => equal in both + # updated => title are equal + # added => title does not exist + untouched = list() + updated = list() + added = list() + + if state == 'present': + _existing_milestones = list() + for item in existing_milestones: + _existing_milestones.append({'title': item.get('title')}) + + for var in requested_milestones: + if var in existing_milestones: + untouched.append(var) + else: + compare_item = {'title': var.get('title')} + if compare_item in _existing_milestones: + updated.append(var) + else: + added.append(var) + + return untouched, updated, added + + +def native_python_main(this_gitlab, purge, requested_milestones, state, module): + change = False + return_value = dict(added=[], updated=[], removed=[], untouched=[]) + return_obj = dict(added=[], updated=[], removed=[]) + + milestones_before = [x.asdict() for x in this_gitlab.list_all_milestones()] + + # filter out and enrich before compare + for item in requested_milestones: + # add defaults when not present + if item.get('description') is None: + item['description'] = "" + if item.get('due_date') is None: + item['due_date'] = None + if item.get('start_date') is None: + item['start_date'] = None + + for item in milestones_before: + # remove field only from server + item.pop('id') + item.pop('iid') + item.pop('created_at') + item.pop('expired') + item.pop('state') + item.pop('updated_at') + item.pop('web_url') + # group milestone has group_id, while project has project_id + if 'group_id' in item: + item.pop('group_id') + if 'project_id' in item: + item.pop('project_id') + + if state == 'present': + add_or_update = [x for x in requested_milestones if x not in milestones_before] + for item in add_or_update: + try: + _rv, _obj = this_gitlab.create_milestone(item) + if _rv: + return_value['added'].append(item) + return_obj['added'].append(_obj) + except Exception: + # create raises exception with 
following error message when milestone already exists + _rv, _obj = this_gitlab.update_milestone(item) + if _rv: + return_value['updated'].append(item) + return_obj['updated'].append(_obj) + + if purge: + # re-fetch + _milestones = this_gitlab.list_all_milestones() + + for item in milestones_before: + _rv, _obj = this_gitlab.delete_milestone(item) + if _rv: + return_value['removed'].append(item) + return_obj['removed'].append(_obj) + + elif state == 'absent': + if not purge: + _milestone_titles_requested = [x['title'] for x in requested_milestones] + remove_requested = [x for x in milestones_before if x['title'] in _milestone_titles_requested] + for item in remove_requested: + _rv, _obj = this_gitlab.delete_milestone(item) + if _rv: + return_value['removed'].append(item) + return_obj['removed'].append(_obj) + else: + for item in milestones_before: + _rv, _obj = this_gitlab.delete_milestone(item) + if _rv: + return_value['removed'].append(item) + return_obj['removed'].append(_obj) + + if module.check_mode: + _untouched, _updated, _added = compare(requested_milestones, milestones_before, state) + return_value = dict(added=_added, updated=_updated, removed=return_value['removed'], untouched=_untouched) + + if any(return_value[x] for x in ['added', 'removed', 'updated']): + change = True + + milestones_after = [x.asdict() for x in this_gitlab.list_all_milestones()] + + return change, return_value, milestones_before, milestones_after, return_obj + + +def main(): + argument_spec = basic_auth_argument_spec() + argument_spec.update(auth_argument_spec()) + argument_spec.update( + project=dict(type='str'), + group=dict(type='str'), + purge=dict(type='bool', default=False), + milestones=dict(type='list', elements='dict', default=[], + options=dict( + title=dict(type='str', required=True), + description=dict(type='str'), + due_date=dict(type='str'), + start_date=dict(type='str')) + ), + state=dict(type='str', default="present", choices=["absent", "present"]), + ) + + module = 
AnsibleModule( + argument_spec=argument_spec, + mutually_exclusive=[ + ['api_username', 'api_token'], + ['api_username', 'api_oauth_token'], + ['api_username', 'api_job_token'], + ['api_token', 'api_oauth_token'], + ['api_token', 'api_job_token'], + ['project', 'group'], + ], + required_together=[ + ['api_username', 'api_password'], + ], + required_one_of=[ + ['api_username', 'api_token', 'api_oauth_token', 'api_job_token'], + ['project', 'group'] + ], + supports_check_mode=True + ) + ensure_gitlab_package(module) + + gitlab_project = module.params['project'] + gitlab_group = module.params['group'] + purge = module.params['purge'] + milestone_list = module.params['milestones'] + state = module.params['state'] + + gitlab_instance = gitlab_authentication(module, min_version='3.2.0') + + # find_project can return None, but the other must exist + gitlab_project_id = find_project(gitlab_instance, gitlab_project) + + # find_group can return None, but the other must exist + gitlab_group_id = find_group(gitlab_instance, gitlab_group) + + # if both not found, module must exist + if not gitlab_project_id and not gitlab_group_id: + if gitlab_project and not gitlab_project_id: + module.fail_json(msg="project '%s' not found." % gitlab_project) + if gitlab_group and not gitlab_group_id: + module.fail_json(msg="group '%s' not found." 
% gitlab_group) + + this_gitlab = GitlabMilestones(module=module, gitlab_instance=gitlab_instance, group_id=gitlab_group_id, + project_id=gitlab_project_id) + + change, raw_return_value, before, after, _obj = native_python_main(this_gitlab, purge, milestone_list, state, + module) + + if not module.check_mode: + raw_return_value['untouched'] = [x for x in before if x in after] + + added = [x.get('title') for x in raw_return_value['added']] + updated = [x.get('title') for x in raw_return_value['updated']] + removed = [x.get('title') for x in raw_return_value['removed']] + untouched = [x.get('title') for x in raw_return_value['untouched']] + return_value = dict(added=added, updated=updated, removed=removed, untouched=untouched) + + module.exit_json(changed=change, milestones=return_value, milestones_obj=_obj) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/source_control/gitlab/gitlab_project.py b/plugins/modules/gitlab_project.py similarity index 51% rename from plugins/modules/source_control/gitlab/gitlab_project.py rename to plugins/modules/gitlab_project.py index c151837e69..b745fe9424 100644 --- a/plugins/modules/source_control/gitlab/gitlab_project.py +++ b/plugins/modules/gitlab_project.py @@ -1,128 +1,226 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- -# Copyright: (c) 2019, Guillaume Martinez (lunik@tiwabbit.fr) -# Copyright: (c) 2015, Werner Dijkerman (ikben@werner-dijkerman.nl) -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# Copyright (c) 2019, Guillaume Martinez (lunik@tiwabbit.fr) +# Copyright (c) 2015, Werner Dijkerman (ikben@werner-dijkerman.nl) +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = r''' ---- +DOCUMENTATION = r""" module: 
gitlab_project short_description: Creates/updates/deletes GitLab Projects description: - - When the project does not exist in GitLab, it will be created. - - When the project does exists and I(state=absent), the project will be deleted. - - When changes are made to the project, the project will be updated. + - When the project does not exist in GitLab, it is created. + - When the project does exist and O(state=absent), the project is deleted. + - When changes are made to the project, the project is updated. author: - Werner Dijkerman (@dj-wasabi) - Guillaume Martinez (@Lunik) requirements: - - python >= 2.7 - python-gitlab python module extends_documentation_fragment: - community.general.auth_basic - community.general.gitlab + - community.general.attributes + +attributes: + check_mode: + support: full + diff_mode: + support: none options: - group: - description: - - Id or the full path of the group of which this projects belongs to. - type: str - name: - description: - - The name of the project. - required: true - type: str - path: - description: - - The path of the project you want to create, this will be server_url//path. - - If not supplied, name will be used. - type: str - description: - description: - - An description for the project. - type: str - initialize_with_readme: - description: - - Will initialize the project with a default C(README.md). - - Is only used when the project is created, and ignored otherwise. - type: bool - default: false - version_added: "4.0.0" - issues_enabled: - description: - - Whether you want to create issues or not. - - Possible values are true and false. - type: bool - default: yes - merge_requests_enabled: - description: - - If merge requests can be made or not. - - Possible values are true and false. - type: bool - default: yes - wiki_enabled: - description: - - If an wiki for this project should be available or not. 
- type: bool - default: yes - snippets_enabled: - description: - - If creating snippets should be available or not. - type: bool - default: yes - visibility: - description: - - C(private) Project access must be granted explicitly for each user. - - C(internal) The project can be cloned by any logged in user. - - C(public) The project can be cloned without any authentication. - default: private - type: str - choices: ["private", "internal", "public"] - aliases: - - visibility_level - import_url: - description: - - Git repository which will be imported into gitlab. - - GitLab server needs read access to this git repository. - required: false - type: str - state: - description: - - Create or delete project. - - Possible values are present and absent. - default: present - type: str - choices: ["present", "absent"] - merge_method: - description: - - What requirements are placed upon merges. - - Possible values are C(merge), C(rebase_merge) merge commit with semi-linear history, C(ff) fast-forward merges only. - type: str - choices: ["ff", "merge", "rebase_merge"] - default: merge - version_added: "1.0.0" - lfs_enabled: - description: - - Enable Git large file systems to manages large files such - as audio, video, and graphics files. - type: bool - required: false - default: false - version_added: "2.0.0" - username: - description: - - Used to create a personal project under a user's name. - type: str - version_added: "3.3.0" allow_merge_on_skipped_pipeline: description: - Allow merge when skipped pipelines exist. type: bool version_added: "3.4.0" + avatar_path: + description: + - Absolute path image to configure avatar. File size should not exceed 200 kb. + - This option is only used on creation, not for updates. + type: path + version_added: "4.2.0" + build_timeout: + description: + - Maximum number of seconds a CI job can run. + - If not specified on creation, GitLab imposes a default value. 
+ type: int + version_added: "10.6.0" + builds_access_level: + description: + - V(private) means that repository CI/CD is allowed only to project members. + - V(disabled) means that repository CI/CD is disabled. + - V(enabled) means that repository CI/CD is enabled. + type: str + choices: ["private", "disabled", "enabled"] + version_added: "6.2.0" + ci_config_path: + description: + - Custom path to the CI configuration file for this project. + type: str + version_added: "3.7.0" + container_expiration_policy: + description: + - Project cleanup policy for its container registry. + type: dict + suboptions: + cadence: + description: + - How often cleanup should be run. + type: str + choices: ["1d", "7d", "14d", "1month", "3month"] + enabled: + description: + - Enable the cleanup policy. + type: bool + keep_n: + description: + - Number of tags kept per image name. + - V(0) clears the field. + type: int + choices: [0, 1, 5, 10, 25, 50, 100] + older_than: + description: + - Destroy tags older than this. + - V(0d) clears the field. + type: str + choices: ["0d", "7d", "14d", "30d", "90d"] + name_regex: + description: + - Destroy tags matching this regular expression. + type: str + name_regex_keep: + description: + - Keep tags matching this regular expression. + type: str + version_added: "9.3.0" + container_registry_access_level: + description: + - V(private) means that container registry is allowed only to project members. + - V(disabled) means that container registry is disabled. + - V(enabled) means that container registry is enabled. + type: str + choices: ["private", "disabled", "enabled"] + version_added: "6.2.0" + default_branch: + description: + - The default branch name for this project. + - For project creation, this option requires O(initialize_with_readme=true). + - For project update, the branch must exist. + - Supports project's default branch update since community.general 8.0.0. 
+ type: str + version_added: "4.2.0" + description: + description: + - An description for the project. + type: str + environments_access_level: + description: + - V(private) means that deployment to environment is allowed only to project members. + - V(disabled) means that deployment to environment is disabled. + - V(enabled) means that deployment to environment is enabled. + type: str + choices: ["private", "disabled", "enabled"] + version_added: "6.4.0" + feature_flags_access_level: + description: + - V(private) means that feature rollout is allowed only to project members. + - V(disabled) means that feature rollout is disabled. + - V(enabled) means that feature rollout is enabled. + type: str + choices: ["private", "disabled", "enabled"] + version_added: "6.4.0" + forking_access_level: + description: + - V(private) means that repository forks is allowed only to project members. + - V(disabled) means that repository forks are disabled. + - V(enabled) means that repository forks are enabled. + type: str + choices: ["private", "disabled", "enabled"] + version_added: "6.2.0" + group: + description: + - ID or the full path of the group of which this projects belongs to. + type: str + import_url: + description: + - Git repository which is imported into gitlab. + - GitLab server needs read access to this git repository. + required: false + type: str + infrastructure_access_level: + description: + - V(private) means that configuring infrastructure is allowed only to project members. + - V(disabled) means that configuring infrastructure is disabled. + - V(enabled) means that configuring infrastructure is enabled. + type: str + choices: ["private", "disabled", "enabled"] + version_added: "6.4.0" + initialize_with_readme: + description: + - Initializes the project with a default C(README.md). + - Is only used when the project is created, and ignored otherwise. 
+ type: bool + default: false + version_added: "4.0.0" + issues_access_level: + description: + - V(private) means that accessing issues tab is allowed only to project members. + - V(disabled) means that accessing issues tab is disabled. + - V(enabled) means that accessing issues tab is enabled. + - O(issues_access_level) and O(issues_enabled) are mutually exclusive. + type: str + choices: ["private", "disabled", "enabled"] + version_added: "9.4.0" + issues_enabled: + description: + - Whether you want to create issues or not. + - O(issues_access_level) and O(issues_enabled) are mutually exclusive. + type: bool + default: true + lfs_enabled: + description: + - Enable Git large file systems to manages large files such as audio, video, and graphics files. + type: bool + required: false + default: false + version_added: "2.0.0" + merge_method: + description: + - What requirements are placed upon merges. + - Possible values are V(merge), V(rebase_merge) merge commit with semi-linear history, V(ff) fast-forward merges only. + type: str + choices: ["ff", "merge", "rebase_merge"] + default: merge + version_added: "1.0.0" + merge_requests_enabled: + description: + - If merge requests can be made or not. + type: bool + default: true + model_registry_access_level: + description: + - V(private) means that accessing model registry tab is allowed only to project members. + - V(disabled) means that accessing model registry tab is disabled. + - V(enabled) means that accessing model registry tab is enabled. + type: str + choices: ["private", "disabled", "enabled"] + version_added: "9.3.0" + monitor_access_level: + description: + - V(private) means that monitoring health is allowed only to project members. + - V(disabled) means that monitoring health is disabled. + - V(enabled) means that monitoring health is enabled. + type: str + choices: ["private", "disabled", "enabled"] + version_added: "6.4.0" + name: + description: + - The name of the project. 
+ required: true + type: str only_allow_merge_if_all_discussions_are_resolved: description: - All discussions on a merge request (MR) have to be resolved. @@ -138,42 +236,106 @@ options: - Enable GitLab package repository. type: bool version_added: "3.4.0" + pages_access_level: + description: + - V(private) means that accessing pages tab is allowed only to project members. + - V(disabled) means that accessing pages tab is disabled. + - V(enabled) means that accessing pages tab is enabled. + type: str + choices: ["private", "disabled", "enabled"] + version_added: "9.3.0" + path: + description: + - The path of the project you want to create, this is server_url/O(group)/O(path). + - If not supplied, O(name) is used. + type: str + releases_access_level: + description: + - V(private) means that accessing release is allowed only to project members. + - V(disabled) means that accessing release is disabled. + - V(enabled) means that accessing release is enabled. + type: str + choices: ["private", "disabled", "enabled"] + version_added: "6.4.0" remove_source_branch_after_merge: description: - Remove the source branch after merge. type: bool version_added: "3.4.0" + repository_access_level: + description: + - V(private) means that accessing repository is allowed only to project members. + - V(disabled) means that accessing repository is disabled. + - V(enabled) means that accessing repository is enabled. + type: str + choices: ["private", "disabled", "enabled"] + version_added: "9.3.0" + security_and_compliance_access_level: + description: + - V(private) means that accessing security and complicance tab is allowed only to project members. + - V(disabled) means that accessing security and complicance tab is disabled. + - V(enabled) means that accessing security and complicance tab is enabled. + type: str + choices: ["private", "disabled", "enabled"] + version_added: "6.4.0" + service_desk_enabled: + description: + - Enable Service Desk. 
+ type: bool + version_added: "9.3.0" + shared_runners_enabled: + description: + - Enable shared runners for this project. + type: bool + version_added: "3.7.0" + snippets_enabled: + description: + - If creating snippets should be available or not. + type: bool + default: true squash_option: description: - Squash commits when merging. type: str choices: ["never", "always", "default_off", "default_on"] version_added: "3.4.0" - ci_config_path: + state: description: - - Custom path to the CI configuration file for this project. + - Create or delete project. + - Possible values are present and absent. + default: present type: str - version_added: "3.7.0" - shared_runners_enabled: + choices: ["present", "absent"] + topics: description: - - Enable shared runners for this project. + - A topic or list of topics to be assigned to a project. + - It is compatible with old GitLab server releases (versions before 14, correspond to C(tag_list)). + type: list + elements: str + version_added: "6.6.0" + username: + description: + - Used to create a personal project under a user's name. + type: str + version_added: "3.3.0" + visibility: + description: + - V(private) Project access must be granted explicitly for each user. + - V(internal) The project can be cloned by any logged in user. + - V(public) The project can be cloned without any authentication. + default: private + type: str + choices: ["private", "internal", "public"] + aliases: + - visibility_level + wiki_enabled: + description: + - If an wiki for this project should be available or not. type: bool - version_added: "3.7.0" - avatar_path: - description: - - Absolute path image to configure avatar. File size should not exceed 200 kb. - - This option is only used on creation, not for updates. - type: path - version_added: "4.2.0" - default_branch: - description: - - Default branch name for a new project. - - This option is only used on creation, not for updates. This is also only used if I(initialize_with_readme=true). 
- type: str - version_added: "4.2.0" -''' + default: true +""" -EXAMPLES = r''' +EXAMPLES = r""" - name: Create GitLab Project community.general.gitlab_project: api_url: https://gitlab.example.com/ @@ -185,7 +347,6 @@ EXAMPLES = r''' community.general.gitlab_project: api_url: https://gitlab.example.com/ api_token: "{{ access_token }}" - validate_certs: False name: my_first_project state: absent delegate_to: localhost @@ -193,15 +354,15 @@ EXAMPLES = r''' - name: Create GitLab Project in group Ansible community.general.gitlab_project: api_url: https://gitlab.example.com/ - validate_certs: True + validate_certs: true api_username: dj-wasabi api_password: "MySecretPassword" name: my_first_project group: ansible - issues_enabled: False + issues_enabled: false merge_method: rebase_merge - wiki_enabled: True - snippets_enabled: True + wiki_enabled: true + snippets_enabled: true import_url: http://git.example.com/example/lab.git initialize_with_readme: true state: present @@ -219,9 +380,9 @@ EXAMPLES = r''' api_password: "{{ initial_root_password }}" name: my_second_project group: "10481470" -''' +""" -RETURN = r''' +RETURN = r""" msg: description: Success or failure message. returned: always @@ -229,12 +390,12 @@ msg: sample: "Success" result: - description: json parsed response from the server. + description: JSON-parsed response from the server. returned: always type: dict error: - description: the error message returned by the GitLab API. + description: The error message returned by the GitLab API. returned: failed type: str sample: "400: path is already in use" @@ -243,23 +404,18 @@ project: description: API object. 
returned: always type: dict -''' +""" -import traceback - -GITLAB_IMP_ERR = None -try: - import gitlab - HAS_GITLAB_PACKAGE = True -except Exception: - GITLAB_IMP_ERR = traceback.format_exc() - HAS_GITLAB_PACKAGE = False from ansible.module_utils.api import basic_auth_argument_spec -from ansible.module_utils.basic import AnsibleModule, missing_required_lib +from ansible.module_utils.basic import AnsibleModule from ansible.module_utils.common.text.converters import to_native -from ansible_collections.community.general.plugins.module_utils.gitlab import auth_argument_spec, find_group, find_project, gitlab_authentication +from ansible_collections.community.general.plugins.module_utils.gitlab import ( + auth_argument_spec, find_group, find_project, gitlab_authentication, gitlab +) + +from ansible_collections.community.general.plugins.module_utils.version import LooseVersion class GitLabProject(object): @@ -273,29 +429,55 @@ class GitLabProject(object): @param namespace Namespace Object (User or Group) @param options Options of the project ''' - def create_or_update_project(self, project_name, namespace, options): + def create_or_update_project(self, module, project_name, namespace, options): changed = False project_options = { - 'name': project_name, - 'description': options['description'], - 'issues_enabled': options['issues_enabled'], - 'merge_requests_enabled': options['merge_requests_enabled'], - 'merge_method': options['merge_method'], - 'wiki_enabled': options['wiki_enabled'], - 'snippets_enabled': options['snippets_enabled'], - 'visibility': options['visibility'], - 'lfs_enabled': options['lfs_enabled'], 'allow_merge_on_skipped_pipeline': options['allow_merge_on_skipped_pipeline'], + 'builds_access_level': options['builds_access_level'], + 'build_timeout': options['build_timeout'], + 'ci_config_path': options['ci_config_path'], + 'container_expiration_policy': options['container_expiration_policy'], + 'container_registry_access_level': 
options['container_registry_access_level'], + 'description': options['description'], + 'environments_access_level': options['environments_access_level'], + 'feature_flags_access_level': options['feature_flags_access_level'], + 'forking_access_level': options['forking_access_level'], + 'infrastructure_access_level': options['infrastructure_access_level'], + 'issues_access_level': options['issues_access_level'], + 'issues_enabled': options['issues_enabled'], + 'lfs_enabled': options['lfs_enabled'], + 'merge_method': options['merge_method'], + 'merge_requests_enabled': options['merge_requests_enabled'], + 'model_registry_access_level': options['model_registry_access_level'], + 'monitor_access_level': options['monitor_access_level'], + 'name': project_name, 'only_allow_merge_if_all_discussions_are_resolved': options['only_allow_merge_if_all_discussions_are_resolved'], 'only_allow_merge_if_pipeline_succeeds': options['only_allow_merge_if_pipeline_succeeds'], 'packages_enabled': options['packages_enabled'], + 'pages_access_level': options['pages_access_level'], + 'releases_access_level': options['releases_access_level'], 'remove_source_branch_after_merge': options['remove_source_branch_after_merge'], - 'squash_option': options['squash_option'], - 'ci_config_path': options['ci_config_path'], + 'repository_access_level': options['repository_access_level'], + 'security_and_compliance_access_level': options['security_and_compliance_access_level'], + 'service_desk_enabled': options['service_desk_enabled'], 'shared_runners_enabled': options['shared_runners_enabled'], + 'snippets_enabled': options['snippets_enabled'], + 'squash_option': options['squash_option'], + 'visibility': options['visibility'], + 'wiki_enabled': options['wiki_enabled'], } + + # topics was introduced on gitlab >=14 and replace tag_list. We get current gitlab version + # and check if less than 14. 
If yes we use tag_list instead topics + if LooseVersion(self._gitlab.version()[0]) < LooseVersion("14"): + project_options['tag_list'] = options['topics'] + else: + project_options['topics'] = options['topics'] + # Because we have already call userExists in main() if self.project_object is None: + if options['default_branch'] and not options['initialize_with_readme']: + module.fail_json(msg="Param default_branch needs param initialize_with_readme set to true") project_options.update({ 'path': options['path'], 'import_url': options['import_url'], @@ -317,6 +499,8 @@ class GitLabProject(object): changed = True else: + if options['default_branch']: + project_options['default_branch'] = options['default_branch'] changed, project = self.update_project(self.project_object, project_options) self.project_object = project @@ -327,7 +511,7 @@ class GitLabProject(object): try: project.save() except Exception as e: - self._module.fail_json(msg="Failed update project: %s " % e) + self._module.fail_json(msg="Failed to update project: %s " % e) return True return False @@ -340,6 +524,8 @@ class GitLabProject(object): return True arguments['namespace_id'] = namespace.id + if 'container_expiration_policy' in arguments: + arguments['container_expiration_policy_attributes'] = arguments['container_expiration_policy'] try: project = self._gitlab.projects.create(arguments) except (gitlab.exceptions.GitlabCreateError) as e: @@ -351,11 +537,7 @@ class GitLabProject(object): @param arguments Attributes of the project ''' def get_options_with_value(self, arguments): - ret_arguments = dict() - for arg_key, arg_value in arguments.items(): - if arguments[arg_key] is not None: - ret_arguments[arg_key] = arg_value - + ret_arguments = {k: v for k, v in arguments.items() if v is not None} return ret_arguments ''' @@ -366,9 +548,22 @@ class GitLabProject(object): changed = False for arg_key, arg_value in arguments.items(): - if arguments[arg_key] is not None: - if getattr(project, arg_key) != 
arguments[arg_key]: - setattr(project, arg_key, arguments[arg_key]) + if arg_value is not None: + if getattr(project, arg_key, None) != arg_value: + if arg_key == 'container_expiration_policy': + old_val = getattr(project, arg_key, {}) + final_val = {key: value for key, value in arg_value.items() if value is not None} + + if final_val.get('older_than') == '0d': + final_val['older_than'] = None + if final_val.get('keep_n') == 0: + final_val['keep_n'] = None + + if all(old_val.get(key) == value for key, value in final_val.items()): + continue + setattr(project, 'container_expiration_policy_attributes', final_val) + else: + setattr(project, arg_key, arg_value) changed = True return (changed, project) @@ -398,31 +593,55 @@ def main(): argument_spec = basic_auth_argument_spec() argument_spec.update(auth_argument_spec()) argument_spec.update(dict( - group=dict(type='str'), - name=dict(type='str', required=True), - path=dict(type='str'), - description=dict(type='str'), - initialize_with_readme=dict(type='bool', default=False), - default_branch=dict(type='str'), - issues_enabled=dict(type='bool', default=True), - merge_requests_enabled=dict(type='bool', default=True), - merge_method=dict(type='str', default='merge', choices=["merge", "rebase_merge", "ff"]), - wiki_enabled=dict(type='bool', default=True), - snippets_enabled=dict(default=True, type='bool'), - visibility=dict(type='str', default="private", choices=["internal", "private", "public"], aliases=["visibility_level"]), - import_url=dict(type='str'), - state=dict(type='str', default="present", choices=["absent", "present"]), - lfs_enabled=dict(default=False, type='bool'), - username=dict(type='str'), allow_merge_on_skipped_pipeline=dict(type='bool'), + avatar_path=dict(type='path'), + builds_access_level=dict(type='str', choices=['private', 'disabled', 'enabled']), + build_timeout=dict(type='int'), + ci_config_path=dict(type='str'), + container_expiration_policy=dict(type='dict', options=dict( + 
cadence=dict(type='str', choices=["1d", "7d", "14d", "1month", "3month"]), + enabled=dict(type='bool'), + keep_n=dict(type='int', choices=[0, 1, 5, 10, 25, 50, 100]), + older_than=dict(type='str', choices=["0d", "7d", "14d", "30d", "90d"]), + name_regex=dict(type='str'), + name_regex_keep=dict(type='str'), + )), + container_registry_access_level=dict(type='str', choices=['private', 'disabled', 'enabled']), + default_branch=dict(type='str'), + description=dict(type='str'), + environments_access_level=dict(type='str', choices=['private', 'disabled', 'enabled']), + feature_flags_access_level=dict(type='str', choices=['private', 'disabled', 'enabled']), + forking_access_level=dict(type='str', choices=['private', 'disabled', 'enabled']), + group=dict(type='str'), + import_url=dict(type='str'), + infrastructure_access_level=dict(type='str', choices=['private', 'disabled', 'enabled']), + initialize_with_readme=dict(type='bool', default=False), + issues_access_level=dict(type='str', choices=['private', 'disabled', 'enabled']), + issues_enabled=dict(type='bool', default=True), + lfs_enabled=dict(default=False, type='bool'), + merge_method=dict(type='str', default='merge', choices=["merge", "rebase_merge", "ff"]), + merge_requests_enabled=dict(type='bool', default=True), + model_registry_access_level=dict(type='str', choices=['private', 'disabled', 'enabled']), + monitor_access_level=dict(type='str', choices=['private', 'disabled', 'enabled']), + name=dict(type='str', required=True), only_allow_merge_if_all_discussions_are_resolved=dict(type='bool'), only_allow_merge_if_pipeline_succeeds=dict(type='bool'), packages_enabled=dict(type='bool'), + pages_access_level=dict(type='str', choices=['private', 'disabled', 'enabled']), + path=dict(type='str'), + releases_access_level=dict(type='str', choices=['private', 'disabled', 'enabled']), remove_source_branch_after_merge=dict(type='bool'), - squash_option=dict(type='str', choices=['never', 'always', 'default_off', 'default_on']), - 
ci_config_path=dict(type='str'), + repository_access_level=dict(type='str', choices=['private', 'disabled', 'enabled']), + security_and_compliance_access_level=dict(type='str', choices=['private', 'disabled', 'enabled']), + service_desk_enabled=dict(type='bool'), shared_runners_enabled=dict(type='bool'), - avatar_path=dict(type='path'), + snippets_enabled=dict(default=True, type='bool'), + squash_option=dict(type='str', choices=['never', 'always', 'default_off', 'default_on']), + state=dict(type='str', default="present", choices=["absent", "present"]), + topics=dict(type='list', elements='str'), + username=dict(type='str'), + visibility=dict(type='str', default="private", choices=["internal", "private", "public"], aliases=["visibility_level"]), + wiki_enabled=dict(type='bool', default=True), )) module = AnsibleModule( @@ -434,6 +653,7 @@ def main(): ['api_token', 'api_oauth_token'], ['api_token', 'api_job_token'], ['group', 'username'], + ['issues_access_level', 'issues_enabled'], ], required_together=[ ['api_username', 'api_password'], @@ -444,39 +664,51 @@ def main(): supports_check_mode=True, ) - group_identifier = module.params['group'] - project_name = module.params['name'] - project_path = module.params['path'] - project_description = module.params['description'] - initialize_with_readme = module.params['initialize_with_readme'] - issues_enabled = module.params['issues_enabled'] - merge_requests_enabled = module.params['merge_requests_enabled'] - merge_method = module.params['merge_method'] - wiki_enabled = module.params['wiki_enabled'] - snippets_enabled = module.params['snippets_enabled'] - visibility = module.params['visibility'] - import_url = module.params['import_url'] - state = module.params['state'] - lfs_enabled = module.params['lfs_enabled'] - username = module.params['username'] + # check prerequisites and connect to gitlab server + gitlab_instance = gitlab_authentication(module) + allow_merge_on_skipped_pipeline = 
module.params['allow_merge_on_skipped_pipeline'] + avatar_path = module.params['avatar_path'] + builds_access_level = module.params['builds_access_level'] + build_timeout = module.params['build_timeout'] + ci_config_path = module.params['ci_config_path'] + container_expiration_policy = module.params['container_expiration_policy'] + container_registry_access_level = module.params['container_registry_access_level'] + default_branch = module.params['default_branch'] + environments_access_level = module.params['environments_access_level'] + feature_flags_access_level = module.params['feature_flags_access_level'] + forking_access_level = module.params['forking_access_level'] + group_identifier = module.params['group'] + import_url = module.params['import_url'] + infrastructure_access_level = module.params['infrastructure_access_level'] + initialize_with_readme = module.params['initialize_with_readme'] + issues_access_level = module.params['issues_access_level'] + issues_enabled = module.params['issues_enabled'] + lfs_enabled = module.params['lfs_enabled'] + merge_method = module.params['merge_method'] + merge_requests_enabled = module.params['merge_requests_enabled'] + model_registry_access_level = module.params['model_registry_access_level'] + monitor_access_level = module.params['monitor_access_level'] only_allow_merge_if_all_discussions_are_resolved = module.params['only_allow_merge_if_all_discussions_are_resolved'] only_allow_merge_if_pipeline_succeeds = module.params['only_allow_merge_if_pipeline_succeeds'] packages_enabled = module.params['packages_enabled'] + pages_access_level = module.params['pages_access_level'] + project_description = module.params['description'] + project_name = module.params['name'] + project_path = module.params['path'] + releases_access_level = module.params['releases_access_level'] remove_source_branch_after_merge = module.params['remove_source_branch_after_merge'] - squash_option = module.params['squash_option'] - ci_config_path = 
module.params['ci_config_path'] + repository_access_level = module.params['repository_access_level'] + security_and_compliance_access_level = module.params['security_and_compliance_access_level'] + service_desk_enabled = module.params['service_desk_enabled'] shared_runners_enabled = module.params['shared_runners_enabled'] - avatar_path = module.params['avatar_path'] - default_branch = module.params['default_branch'] - - if default_branch and not initialize_with_readme: - module.fail_json(msg="Param default_branch need param initialize_with_readme set to true") - - if not HAS_GITLAB_PACKAGE: - module.fail_json(msg=missing_required_lib("python-gitlab"), exception=GITLAB_IMP_ERR) - - gitlab_instance = gitlab_authentication(module) + snippets_enabled = module.params['snippets_enabled'] + squash_option = module.params['squash_option'] + state = module.params['state'] + topics = module.params['topics'] + username = module.params['username'] + visibility = module.params['visibility'] + wiki_enabled = module.params['wiki_enabled'] # Set project_path to project_name if it is empty. 
if project_path is None: @@ -489,7 +721,7 @@ def main(): if group_identifier: group = find_group(gitlab_instance, group_identifier) if group is None: - module.fail_json(msg="Failed to create project: group %s doesn't exists" % group_identifier) + module.fail_json(msg="Failed to create project: group %s doesn't exist" % group_identifier) namespace_id = group.id else: @@ -515,32 +747,49 @@ def main(): if project_exists: gitlab_project.delete_project() module.exit_json(changed=True, msg="Successfully deleted project %s" % project_name) - module.exit_json(changed=False, msg="Project deleted or does not exists") + module.exit_json(changed=False, msg="Project deleted or does not exist") if state == 'present': - if gitlab_project.create_or_update_project(project_name, namespace, { - "path": project_path, - "description": project_description, - "initialize_with_readme": initialize_with_readme, - "default_branch": default_branch, - "issues_enabled": issues_enabled, - "merge_requests_enabled": merge_requests_enabled, - "merge_method": merge_method, - "wiki_enabled": wiki_enabled, - "snippets_enabled": snippets_enabled, - "visibility": visibility, - "import_url": import_url, - "lfs_enabled": lfs_enabled, + if gitlab_project.create_or_update_project(module, project_name, namespace, { "allow_merge_on_skipped_pipeline": allow_merge_on_skipped_pipeline, + "avatar_path": avatar_path, + "builds_access_level": builds_access_level, + "build_timeout": build_timeout, + "ci_config_path": ci_config_path, + "container_expiration_policy": container_expiration_policy, + "container_registry_access_level": container_registry_access_level, + "default_branch": default_branch, + "description": project_description, + "environments_access_level": environments_access_level, + "feature_flags_access_level": feature_flags_access_level, + "forking_access_level": forking_access_level, + "import_url": import_url, + "infrastructure_access_level": infrastructure_access_level, + "initialize_with_readme": 
initialize_with_readme, + "issues_access_level": issues_access_level, + "issues_enabled": issues_enabled, + "lfs_enabled": lfs_enabled, + "merge_method": merge_method, + "merge_requests_enabled": merge_requests_enabled, + "model_registry_access_level": model_registry_access_level, + "monitor_access_level": monitor_access_level, "only_allow_merge_if_all_discussions_are_resolved": only_allow_merge_if_all_discussions_are_resolved, "only_allow_merge_if_pipeline_succeeds": only_allow_merge_if_pipeline_succeeds, "packages_enabled": packages_enabled, + "pages_access_level": pages_access_level, + "path": project_path, + "releases_access_level": releases_access_level, "remove_source_branch_after_merge": remove_source_branch_after_merge, - "squash_option": squash_option, - "ci_config_path": ci_config_path, + "repository_access_level": repository_access_level, + "security_and_compliance_access_level": security_and_compliance_access_level, + "service_desk_enabled": service_desk_enabled, "shared_runners_enabled": shared_runners_enabled, - "avatar_path": avatar_path, + "snippets_enabled": snippets_enabled, + "squash_option": squash_option, + "topics": topics, + "visibility": visibility, + "wiki_enabled": wiki_enabled, }): module.exit_json(changed=True, msg="Successfully created or updated the project %s" % project_name, project=gitlab_project.project_object._attrs) diff --git a/plugins/modules/gitlab_project_access_token.py b/plugins/modules/gitlab_project_access_token.py new file mode 100644 index 0000000000..27e3b07129 --- /dev/null +++ b/plugins/modules/gitlab_project_access_token.py @@ -0,0 +1,333 @@ +#!/usr/bin/python + +# Copyright (c) 2024, Zoran Krleza (zoran.krleza@true-north.hr) +# Based on code: +# Copyright (c) 2019, Guillaume Martinez (lunik@tiwabbit.fr) +# Copyright (c) 2018, Marcus Watkins +# Copyright (c) 2013, Phillip Gentry +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# 
SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + +DOCUMENTATION = r""" +module: gitlab_project_access_token +short_description: Manages GitLab project access tokens +version_added: 8.4.0 +description: + - Creates and revokes project access tokens. +author: + - Zoran Krleza (@pixslx) +requirements: + - python-gitlab >= 3.1.0 +extends_documentation_fragment: + - community.general.auth_basic + - community.general.gitlab + - community.general.attributes +notes: + - Access tokens can not be changed. If a parameter needs to be changed, an acceess token has to be recreated. Whether tokens + are recreated or not is controlled by the O(recreate) option, which defaults to V(never). + - Token string is contained in the result only when access token is created or recreated. It can not be fetched afterwards. + - Token matching is done by comparing O(name) option. +attributes: + check_mode: + support: full + diff_mode: + support: none + +options: + project: + description: + - ID or full path of project in the form of group/name. + required: true + type: str + name: + description: + - Access token's name. + required: true + type: str + scopes: + description: + - Scope of the access token. + - The values V(manage_runner) and V(self_rotate) were added in community.general 11.3.0. + required: true + type: list + elements: str + aliases: ["scope"] + choices: + - api + - read_api + - read_registry + - write_registry + - read_repository + - write_repository + - create_runner + - manage_runner + - ai_features + - k8s_proxy + - self_rotate + access_level: + description: + - Access level of the access token. + - The value V(planner) was added in community.general 11.3.0. + type: str + default: maintainer + choices: ["guest", "planner", "reporter", "developer", "maintainer", "owner"] + expires_at: + description: + - Expiration date of the access token in C(YYYY-MM-DD) format. 
+ - Make sure to quote this value in YAML to ensure it is kept as a string and not interpreted as a YAML date. + type: str + required: true + recreate: + description: + - Whether the access token is recreated if it already exists. + - When V(never) the token is never recreated. + - When V(always) the token is always recreated. + - When V(state_change) the token is recreated if there is a difference between desired state and actual state. + type: str + choices: ["never", "always", "state_change"] + default: never + state: + description: + - When V(present) the access token is added to the project if it does not exist. + - When V(absent) it is removed from the project if it exists. + default: present + type: str + choices: ["present", "absent"] +""" + +EXAMPLES = r""" +- name: "Creating a project access token" + community.general.gitlab_project_access_token: + api_url: https://gitlab.example.com/ + api_token: "somegitlabapitoken" + project: "my_group/my_project" + name: "project_token" + expires_at: "2024-12-31" + access_level: developer + scopes: + - api + - read_api + - read_repository + - write_repository + state: present + +- name: "Revoking a project access token" + community.general.gitlab_project_access_token: + api_url: https://gitlab.example.com/ + api_token: "somegitlabapitoken" + project: "my_group/my_project" + name: "project_token" + expires_at: "2024-12-31" + scopes: + - api + - read_api + - read_repository + - write_repository + state: absent + +- name: "Change (recreate) existing token if its actual state is different than desired state" + community.general.gitlab_project_access_token: + api_url: https://gitlab.example.com/ + api_token: "somegitlabapitoken" + project: "my_group/my_project" + name: "project_token" + expires_at: "2024-12-31" + scopes: + - api + - read_api + - read_repository + - write_repository + recreate: state_change + state: present +""" + +RETURN = r""" +access_token: + description: + - API object. 
+ - Only contains the value of the token if the token was created or recreated. + returned: success and O(state=present) + type: dict +""" + +from datetime import datetime + +from ansible.module_utils.api import basic_auth_argument_spec +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.common.text.converters import to_native + +from ansible_collections.community.general.plugins.module_utils.gitlab import ( + auth_argument_spec, find_project, gitlab_authentication, gitlab +) + +ACCESS_LEVELS = dict(guest=10, planner=15, reporter=20, developer=30, maintainer=40, owner=50) + + +class GitLabProjectAccessToken(object): + def __init__(self, module, gitlab_instance): + self._module = module + self._gitlab = gitlab_instance + self.access_token_object = None + + ''' + @param project Project Object + @param arguments Attributes of the access_token + ''' + def create_access_token(self, project, arguments): + changed = False + if self._module.check_mode: + return True + + try: + self.access_token_object = project.access_tokens.create(arguments) + changed = True + except (gitlab.exceptions.GitlabCreateError) as e: + self._module.fail_json(msg="Failed to create access token: %s " % to_native(e)) + + return changed + + ''' + @param project Project object + @param name of the access token + ''' + def find_access_token(self, project, name): + access_tokens = [x for x in project.access_tokens.list(all=True) if not getattr(x, 'revoked', False)] + for access_token in access_tokens: + if access_token.name == name: + self.access_token_object = access_token + return False + return False + + def revoke_access_token(self): + if self._module.check_mode: + return True + + changed = False + try: + self.access_token_object.delete() + changed = True + except (gitlab.exceptions.GitlabCreateError) as e: + self._module.fail_json(msg="Failed to revoke access token: %s " % to_native(e)) + + return changed + + def access_tokens_equal(self): + if 
self.access_token_object.name != self._module.params['name']: + return False + if self.access_token_object.scopes != self._module.params['scopes']: + return False + if self.access_token_object.access_level != ACCESS_LEVELS[self._module.params['access_level']]: + return False + if self.access_token_object.expires_at != self._module.params['expires_at']: + return False + return True + + +def main(): + argument_spec = basic_auth_argument_spec() + argument_spec.update(auth_argument_spec()) + argument_spec.update(dict( + state=dict(type='str', default="present", choices=["absent", "present"]), + project=dict(type='str', required=True), + name=dict(type='str', required=True), + scopes=dict(type='list', + required=True, + aliases=['scope'], + elements='str', + choices=['api', + 'read_api', + 'read_registry', + 'write_registry', + 'read_repository', + 'write_repository', + 'create_runner', + 'manage_runner', + 'ai_features', + 'k8s_proxy', + 'self_rotate']), + access_level=dict(type='str', default='maintainer', choices=['guest', 'planner', 'reporter', 'developer', 'maintainer', 'owner']), + expires_at=dict(type='str', required=True), + recreate=dict(type='str', default='never', choices=['never', 'always', 'state_change']) + )) + + module = AnsibleModule( + argument_spec=argument_spec, + mutually_exclusive=[ + ['api_username', 'api_token'], + ['api_username', 'api_oauth_token'], + ['api_username', 'api_job_token'], + ['api_token', 'api_oauth_token'], + ['api_token', 'api_job_token'] + ], + required_together=[ + ['api_username', 'api_password'] + ], + required_one_of=[ + ['api_username', 'api_token', 'api_oauth_token', 'api_job_token'] + ], + supports_check_mode=True + ) + + state = module.params['state'] + project_identifier = module.params['project'] + name = module.params['name'] + scopes = module.params['scopes'] + access_level_str = module.params['access_level'] + expires_at = module.params['expires_at'] + recreate = module.params['recreate'] + + access_level = 
ACCESS_LEVELS[access_level_str] + + try: + datetime.strptime(expires_at, '%Y-%m-%d') + except ValueError: + module.fail_json(msg="Argument expires_at is not in required format YYYY-MM-DD") + + gitlab_instance = gitlab_authentication(module) + + gitlab_access_token = GitLabProjectAccessToken(module, gitlab_instance) + + project = find_project(gitlab_instance, project_identifier) + if project is None: + module.fail_json(msg="Failed to create access token: project %s does not exists" % project_identifier) + + gitlab_access_token_exists = False + gitlab_access_token.find_access_token(project, name) + if gitlab_access_token.access_token_object is not None: + gitlab_access_token_exists = True + + if state == 'absent': + if gitlab_access_token_exists: + gitlab_access_token.revoke_access_token() + module.exit_json(changed=True, msg="Successfully deleted access token %s" % name) + else: + module.exit_json(changed=False, msg="Access token does not exists") + + if state == 'present': + if gitlab_access_token_exists: + if gitlab_access_token.access_tokens_equal(): + if recreate == 'always': + gitlab_access_token.revoke_access_token() + gitlab_access_token.create_access_token(project, {'name': name, 'scopes': scopes, 'access_level': access_level, 'expires_at': expires_at}) + module.exit_json(changed=True, msg="Successfully recreated access token", access_token=gitlab_access_token.access_token_object._attrs) + else: + module.exit_json(changed=False, msg="Access token already exists", access_token=gitlab_access_token.access_token_object._attrs) + else: + if recreate == 'never': + module.fail_json(msg="Access token already exists and its state is different. 
It can not be updated without recreating.") + else: + gitlab_access_token.revoke_access_token() + gitlab_access_token.create_access_token(project, {'name': name, 'scopes': scopes, 'access_level': access_level, 'expires_at': expires_at}) + module.exit_json(changed=True, msg="Successfully recreated access token", access_token=gitlab_access_token.access_token_object._attrs) + else: + gitlab_access_token.create_access_token(project, {'name': name, 'scopes': scopes, 'access_level': access_level, 'expires_at': expires_at}) + if module.check_mode: + module.exit_json(changed=True, msg="Successfully created access token", access_token={}) + else: + module.exit_json(changed=True, msg="Successfully created access token", access_token=gitlab_access_token.access_token_object._attrs) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/gitlab_project_badge.py b/plugins/modules/gitlab_project_badge.py new file mode 100644 index 0000000000..8d81765f99 --- /dev/null +++ b/plugins/modules/gitlab_project_badge.py @@ -0,0 +1,212 @@ +#!/usr/bin/python + +# Copyright (c) 2022, Guillaume MARTINEZ (lunik@tiwabbit.fr) +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + +DOCUMENTATION = r""" +module: gitlab_project_badge +short_description: Manage project badges on GitLab Server +version_added: 6.1.0 +description: + - This module allows to add and remove badges to/from a project. +author: Guillaume MARTINEZ (@Lunik) +requirements: + - C(owner) or C(maintainer) rights to project on the GitLab server +extends_documentation_fragment: + - community.general.auth_basic + - community.general.gitlab + - community.general.attributes + +attributes: + check_mode: + support: full + diff_mode: + support: none + +options: + project: + description: + - The name (or full path) of the GitLab project the badge is added to/removed from. 
+ required: true + type: str + + state: + description: + - State of the badge in the project. + - On V(present), it adds a badge to a GitLab project. + - On V(absent), it removes a badge from a GitLab project. + choices: ['present', 'absent'] + default: 'present' + type: str + + link_url: + description: + - The URL associated with the badge. + required: true + type: str + + image_url: + description: + - The image URL of the badge. + - A badge is identified by this URL. + required: true + type: str +""" + +EXAMPLES = r""" +- name: Add a badge to a GitLab Project + community.general.gitlab_project_badge: + api_url: 'https://example.gitlab.com' + api_token: 'Your-Private-Token' + project: projectname + state: present + link_url: 'https://example.gitlab.com/%{project_path}' + image_url: 'https://example.gitlab.com/%{project_path}/badges/%{default_branch}/pipeline.svg' + +- name: Remove a badge from a GitLab Project + community.general.gitlab_project_badge: + api_url: 'https://example.gitlab.com' + api_token: 'Your-Private-Token' + project: projectname + state: absent + link_url: 'https://example.gitlab.com/%{project_path}' + image_url: 'https://example.gitlab.com/%{project_path}/badges/%{default_branch}/pipeline.svg' +""" + +RETURN = r""" +badge: + description: The badge information. 
+ returned: when O(state=present) + type: dict + sample: + id: 1 + link_url: 'http://example.com/ci_status.svg?project=%{project_path}&ref=%{default_branch}' + image_url: 'https://shields.io/my/badge' + rendered_link_url: 'http://example.com/ci_status.svg?project=example-org/example-project&ref=master' + rendered_image_url: 'https://shields.io/my/badge' + kind: project +""" + +from ansible.module_utils.api import basic_auth_argument_spec +from ansible.module_utils.basic import AnsibleModule + +from ansible_collections.community.general.plugins.module_utils.gitlab import ( + auth_argument_spec, gitlab_authentication, find_project, list_all_kwargs +) + + +def present_strategy(module, gl, project, wished_badge): + changed = False + + existing_badge = None + for badge in project.badges.list(**list_all_kwargs): + if badge.image_url == wished_badge["image_url"]: + existing_badge = badge + break + + if not existing_badge: + changed = True + if module.check_mode: + return changed, {"status": "A project badge would be created."} + + badge = project.badges.create(wished_badge) + return changed, badge.attributes + + if existing_badge.link_url != wished_badge["link_url"]: + changed = True + existing_badge.link_url = wished_badge["link_url"] + + if changed: + if module.check_mode: + return changed, {"status": "Project badge attributes would be changed."} + + existing_badge.save() + + return changed, existing_badge.attributes + + +def absent_strategy(module, gl, project, wished_badge): + changed = False + + existing_badge = None + for badge in project.badges.list(**list_all_kwargs): + if badge.image_url == wished_badge["image_url"]: + existing_badge = badge + break + + if not existing_badge: + return changed, None + + changed = True + if module.check_mode: + return changed, {"status": "Project badge would be destroyed."} + + existing_badge.delete() + + return changed, None + + +state_strategy = { + "present": present_strategy, + "absent": absent_strategy +} + + +def 
core(module): + # check prerequisites and connect to gitlab server + gl = gitlab_authentication(module) + + gitlab_project = module.params['project'] + state = module.params['state'] + + project = find_project(gl, gitlab_project) + # project doesn't exist + if not project: + module.fail_json(msg="project '%s' not found." % gitlab_project) + + wished_badge = { + "link_url": module.params["link_url"], + "image_url": module.params["image_url"], + } + + changed, summary = state_strategy[state](module=module, gl=gl, project=project, wished_badge=wished_badge) + + module.exit_json(changed=changed, badge=summary) + + +def main(): + argument_spec = basic_auth_argument_spec() + argument_spec.update(auth_argument_spec()) + argument_spec.update(dict( + project=dict(type='str', required=True), + state=dict(type='str', default='present', choices=['present', 'absent']), + link_url=dict(type='str', required=True), + image_url=dict(type='str', required=True), + )) + + module = AnsibleModule( + argument_spec=argument_spec, + mutually_exclusive=[ + ['api_username', 'api_token'], + ['api_username', 'api_oauth_token'], + ['api_username', 'api_job_token'], + ['api_token', 'api_oauth_token'], + ['api_token', 'api_job_token'], + ], + required_together=[ + ['api_username', 'api_password'], + ], + required_one_of=[ + ['api_username', 'api_token', 'api_oauth_token', 'api_job_token'], + ], + supports_check_mode=True, + ) + + core(module) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/source_control/gitlab/gitlab_project_members.py b/plugins/modules/gitlab_project_members.py similarity index 89% rename from plugins/modules/source_control/gitlab/gitlab_project_members.py rename to plugins/modules/gitlab_project_members.py index 699c6a5867..c496d4aae5 100644 --- a/plugins/modules/source_control/gitlab/gitlab_project_members.py +++ b/plugins/modules/gitlab_project_members.py @@ -1,15 +1,13 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- -# Copyright: (c) 2021, Sergey 
Mikhaltsov -# Copyright: (c) 2020, Zainab Alsaffar -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# Copyright (c) 2021, Sergey Mikhaltsov +# Copyright (c) 2020, Zainab Alsaffar +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = r''' ---- +DOCUMENTATION = r""" module: gitlab_project_members short_description: Manage project members on GitLab Server version_added: 2.2.0 @@ -24,6 +22,13 @@ requirements: extends_documentation_fragment: - community.general.auth_basic - community.general.gitlab + - community.general.attributes + +attributes: + check_mode: + support: full + diff_mode: + support: none options: project: @@ -34,21 +39,22 @@ options: gitlab_user: description: - A username or a list of usernames to add to/remove from the GitLab project. - - Mutually exclusive with I(gitlab_users_access). + - Mutually exclusive with O(gitlab_users_access). type: list elements: str access_level: description: - The access level for the user. - - Required if I(state=present), user state is set to present. + - Required if O(state=present), user state is set to present. + - V(owner) was added in community.general 10.6.0. type: str - choices: ['guest', 'reporter', 'developer', 'maintainer'] + choices: ['guest', 'reporter', 'developer', 'maintainer', 'owner'] gitlab_users_access: description: - Provide a list of user to access level mappings. - Every dictionary in this list specifies a user (by username) and the access level the user should have. - - Mutually exclusive with I(gitlab_user) and I(access_level). - - Use together with I(purge_users) to remove all users not specified here from the project. + - Mutually exclusive with O(gitlab_user) and O(access_level). 
+ - Use together with O(purge_users) to remove all users not specified here from the project. type: list elements: dict suboptions: @@ -59,38 +65,38 @@ options: access_level: description: - The access level for the user. - - Required if I(state=present), user state is set to present. + - Required if O(state=present), user state is set to present. + - V(owner) was added in community.general 10.6.0. type: str - choices: ['guest', 'reporter', 'developer', 'maintainer'] + choices: ['guest', 'reporter', 'developer', 'maintainer', 'owner'] required: true version_added: 3.7.0 state: description: - State of the member in the project. - - On C(present), it adds a user to a GitLab project. - - On C(absent), it removes a user from a GitLab project. + - On V(present), it adds a user to a GitLab project. + - On V(absent), it removes a user from a GitLab project. choices: ['present', 'absent'] default: 'present' type: str purge_users: description: - - Adds/remove users of the given access_level to match the given I(gitlab_user)/I(gitlab_users_access) list. - If omitted do not purge orphaned members. - - Is only used when I(state=present). + - Adds/remove users of the given access_level to match the given O(gitlab_user)/O(gitlab_users_access) list. If omitted + do not purge orphaned members. + - Is only used when O(state=present). + - V(owner) was added in community.general 10.6.0. type: list elements: str - choices: ['guest', 'reporter', 'developer', 'maintainer'] + choices: ['guest', 'reporter', 'developer', 'maintainer', 'owner'] version_added: 3.7.0 -notes: - - Supports C(check_mode). 
-''' +""" -EXAMPLES = r''' +EXAMPLES = r""" - name: Add a user to a GitLab Project community.general.gitlab_project_members: api_url: 'https://gitlab.example.com' api_token: 'Your-Private-Token' - validate_certs: True + validate_certs: true project: projectname gitlab_user: username access_level: developer @@ -100,7 +106,6 @@ EXAMPLES = r''' community.general.gitlab_project_members: api_url: 'https://gitlab.example.com' api_token: 'Your-Private-Token' - validate_certs: False project: projectname gitlab_user: username state: absent @@ -135,7 +140,7 @@ EXAMPLES = r''' project: projectname gitlab_user: username access_level: developer - pruge_users: developer + purge_users: developer state: present - name: Remove a list of Users with Dedicated Access Levels to A GitLab project @@ -149,23 +154,16 @@ EXAMPLES = r''' - name: user2 access_level: maintainer state: absent -''' +""" -RETURN = r''' # ''' +RETURN = r""" # """ from ansible.module_utils.api import basic_auth_argument_spec -from ansible.module_utils.basic import AnsibleModule, missing_required_lib +from ansible.module_utils.basic import AnsibleModule -from ansible_collections.community.general.plugins.module_utils.gitlab import auth_argument_spec, gitlab_authentication - -import traceback - -try: - import gitlab - HAS_PY_GITLAB = True -except ImportError: - GITLAB_IMP_ERR = traceback.format_exc() - HAS_PY_GITLAB = False +from ansible_collections.community.general.plugins.module_utils.gitlab import ( + auth_argument_spec, gitlab_authentication, gitlab +) class GitLabProjectMembers(object): @@ -242,16 +240,16 @@ def main(): project=dict(type='str', required=True), gitlab_user=dict(type='list', elements='str'), state=dict(type='str', default='present', choices=['present', 'absent']), - access_level=dict(type='str', choices=['guest', 'reporter', 'developer', 'maintainer']), + access_level=dict(type='str', choices=['guest', 'reporter', 'developer', 'maintainer', 'owner']), purge_users=dict(type='list', elements='str', 
choices=[ - 'guest', 'reporter', 'developer', 'maintainer']), + 'guest', 'reporter', 'developer', 'maintainer', 'owner']), gitlab_users_access=dict( type='list', elements='dict', options=dict( name=dict(type='str', required=True), access_level=dict(type='str', choices=[ - 'guest', 'reporter', 'developer', 'maintainer'], required=True), + 'guest', 'reporter', 'developer', 'maintainer', 'owner'], required=True), ) ), )) @@ -281,14 +279,15 @@ def main(): supports_check_mode=True, ) - if not HAS_PY_GITLAB: - module.fail_json(msg=missing_required_lib('python-gitlab', url='https://python-gitlab.readthedocs.io/en/stable/'), exception=GITLAB_IMP_ERR) + # check prerequisites and connect to gitlab server + gl = gitlab_authentication(module) access_level_int = { - 'guest': gitlab.GUEST_ACCESS, - 'reporter': gitlab.REPORTER_ACCESS, - 'developer': gitlab.DEVELOPER_ACCESS, - 'maintainer': gitlab.MAINTAINER_ACCESS, + 'guest': gitlab.const.GUEST_ACCESS, + 'reporter': gitlab.const.REPORTER_ACCESS, + 'developer': gitlab.const.DEVELOPER_ACCESS, + 'maintainer': gitlab.const.MAINTAINER_ACCESS, + 'owner': gitlab.const.OWNER_ACCESS, } gitlab_project = module.params['project'] @@ -299,9 +298,6 @@ def main(): if purge_users: purge_users = [access_level_int[level] for level in purge_users] - # connect to gitlab server - gl = gitlab_authentication(module) - project = GitLabProjectMembers(module, gl) gitlab_project_id = project.get_project(gitlab_project) diff --git a/plugins/modules/source_control/gitlab/gitlab_project_variable.py b/plugins/modules/gitlab_project_variable.py similarity index 64% rename from plugins/modules/source_control/gitlab/gitlab_project_variable.py rename to plugins/modules/gitlab_project_variable.py index f9b8d7b6e1..cf8dd47524 100644 --- a/plugins/modules/source_control/gitlab/gitlab_project_variable.py +++ b/plugins/modules/gitlab_project_variable.py @@ -1,27 +1,33 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- -# Copyright: (c) 2019, Markus Bergholz 
(markuman@gmail.com) -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import absolute_import, division, print_function -__metaclass__ = type +# Copyright (c) 2019, Markus Bergholz (markuman@gmail.com) +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later +from __future__ import annotations -DOCUMENTATION = ''' +DOCUMENTATION = r""" module: gitlab_project_variable short_description: Creates/updates/deletes GitLab Projects Variables description: - - When a project variable does not exist, it will be created. - - When a project variable does exist, its value will be updated when the values are different. - - Variables which are untouched in the playbook, but are not untouched in the GitLab project, - they stay untouched (I(purge) is C(false)) or will be deleted (I(purge) is C(true)). + - When a project variable does not exist, it is created. + - When a project variable does exist and is not hidden, its value is updated when the values are different. + When a project variable does exist and is hidden, its value is updated. In this case, the module is B(not idempotent). + - Variables which are untouched in the playbook, but are not untouched in the GitLab project, they stay untouched (O(purge=false)) + or are deleted (O(purge=true)). author: - "Markus Bergholz (@markuman)" requirements: - - python >= 2.7 - python-gitlab python module extends_documentation_fragment: - community.general.auth_basic - community.general.gitlab + - community.general.attributes + +attributes: + check_mode: + support: full + diff_mode: + support: none options: state: @@ -38,30 +44,32 @@ options: type: str purge: description: - - When set to true, all variables which are not untouched in the task will be deleted. + - When set to V(true), all variables which are not untouched in the task are deleted. 
default: false type: bool vars: description: - - When the list element is a simple key-value pair, masked and protected will be set to false. - - When the list element is a dict with the keys I(value), I(masked) and I(protected), the user can - have full control about whether a value should be masked, protected or both. + - When the list element is a simple key-value pair, C(masked), C(hidden), C(raw), and C(protected) are set to V(false). + - When the list element is a dict with the keys C(value), C(masked), C(hidden), C(raw), and C(protected), the user can have full + control about whether a value should be masked, hidden, raw, protected, or a combination. - Support for protected values requires GitLab >= 9.3. - Support for masked values requires GitLab >= 11.10. + - Support for hidden values requires GitLab >= 17.4, and was added in community.general 11.3.0. + - Support for raw values requires GitLab >= 15.7. - Support for environment_scope requires GitLab Premium >= 13.11. - Support for variable_type requires GitLab >= 11.11. - - A I(value) must be a string or a number. - - Field I(variable_type) must be a string with either C(env_var), which is the default, or C(file). - - Field I(environment_scope) must be a string defined by scope environment. - - When a value is masked, it must be in Base64 and have a length of at least 8 characters. - See GitLab documentation on acceptable values for a masked variable (https://docs.gitlab.com/ce/ci/variables/#masked-variables). + - A C(value) must be a string or a number. + - Field C(variable_type) must be a string with either V(env_var), which is the default, or V(file). + - Field C(environment_scope) must be a string defined by scope environment. + - When a value is masked, it must be in Base64 and have a length of at least 8 characters. See GitLab documentation + on acceptable values for a masked variable (https://docs.gitlab.com/ce/ci/variables/#masked-variables). 
default: {} type: dict variables: version_added: 4.4.0 description: - A list of dictionaries that represents CI/CD variables. - - This module works internal with this structure, even if the older I(vars) parameter is used. + - This module works internal with this structure, even if the older O(vars) parameter is used. default: [] type: list elements: dict @@ -74,37 +82,58 @@ options: value: description: - The variable value. - - Required when I(state=present). + - Required when O(state=present). type: str + description: + description: + - A description for the variable. + - Support for descriptions requires GitLab >= 16.2. + type: str + version_added: '11.4.0' masked: description: - - Wether variable value is masked or not. + - Whether variable value is masked or not. - Support for masked values requires GitLab >= 11.10. type: bool default: false + hidden: + description: + - Whether variable value is hidden or not. + - Implies C(masked). + - Support for hidden values requires GitLab >= 17.4. + type: bool + default: false + version_added: '11.3.0' protected: description: - - Wether variable value is protected or not. + - Whether variable value is protected or not. - Support for protected values requires GitLab >= 9.3. type: bool default: false + raw: + description: + - Whether variable value is raw or not. + - Support for raw values requires GitLab >= 15.7. + type: bool + default: false + version_added: '7.4.0' variable_type: description: - - Wether a variable is an environment variable (C(env_var)) or a file (C(file)). - - Support for I(variable_type) requires GitLab >= 11.11. + - Whether a variable is an environment variable (V(env_var)) or a file (V(file)). + - Support for O(variables[].variable_type) requires GitLab >= 11.11. type: str choices: ["env_var", "file"] default: env_var environment_scope: description: - The scope for the variable. - - Support for I(environment_scope) requires GitLab Premium >= 13.11. 
+ - Support for O(variables[].environment_scope) requires GitLab Premium >= 13.11. type: str default: '*' -''' +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: Set or update some CI/CD variables community.general.gitlab_project_variable: api_url: https://gitlab.com @@ -116,8 +145,8 @@ EXAMPLES = ''' value: abc123 - name: SECRET_ACCESS_KEY value: dassgrfaeui8989 - masked: yes - protected: yes + masked: true + protected: true environment_scope: production - name: Set or update some CI/CD variables @@ -135,6 +164,38 @@ EXAMPLES = ''' variable_type: env_var environment_scope: '*' +- name: Set or update some CI/CD variables with raw value + community.general.gitlab_project_variable: + api_url: https://gitlab.com + api_token: secret_access_token + project: markuman/dotfiles + purge: false + vars: + ACCESS_KEY_ID: abc123 + SECRET_ACCESS_KEY: + value: 3214cbad + masked: true + protected: true + raw: true + variable_type: env_var + environment_scope: '*' + +- name: Set or update some CI/CD variables with expandable value + community.general.gitlab_project_variable: + api_url: https://gitlab.com + api_token: secret_access_token + project: markuman/dotfiles + purge: false + vars: + ACCESS_KEY_ID: abc123 + SECRET_ACCESS_KEY: + value: '$MY_OTHER_VARIABLE' + masked: true + protected: true + raw: false + variable_type: env_var + environment_scope: '*' + - name: Delete one variable community.general.gitlab_project_variable: api_url: https://gitlab.com @@ -143,9 +204,9 @@ EXAMPLES = ''' state: absent vars: ACCESS_KEY_ID: abc123 -''' +""" -RETURN = ''' +RETURN = r""" project_variable: description: Four lists of the variablenames which were added, updated, removed or exist. returned: always @@ -155,77 +216,32 @@ project_variable: description: A list of variables which were created. returned: always type: list - sample: "['ACCESS_KEY_ID', 'SECRET_ACCESS_KEY']" + sample: ["ACCESS_KEY_ID", "SECRET_ACCESS_KEY"] untouched: description: A list of variables which exist. 
returned: always type: list - sample: "['ACCESS_KEY_ID', 'SECRET_ACCESS_KEY']" + sample: ["ACCESS_KEY_ID", "SECRET_ACCESS_KEY"] removed: description: A list of variables which were deleted. returned: always type: list - sample: "['ACCESS_KEY_ID', 'SECRET_ACCESS_KEY']" + sample: ["ACCESS_KEY_ID", "SECRET_ACCESS_KEY"] updated: description: A list of variables whose values were changed. returned: always type: list - sample: "['ACCESS_KEY_ID', 'SECRET_ACCESS_KEY']" -''' + sample: ["ACCESS_KEY_ID", "SECRET_ACCESS_KEY"] +""" -import traceback -from ansible.module_utils.basic import AnsibleModule, missing_required_lib -from ansible.module_utils.common.text.converters import to_native +from ansible.module_utils.basic import AnsibleModule from ansible.module_utils.api import basic_auth_argument_spec -from ansible.module_utils.six import string_types -from ansible.module_utils.six import integer_types - -GITLAB_IMP_ERR = None -try: - import gitlab - HAS_GITLAB_PACKAGE = True -except Exception: - GITLAB_IMP_ERR = traceback.format_exc() - HAS_GITLAB_PACKAGE = False - -from ansible_collections.community.general.plugins.module_utils.gitlab import auth_argument_spec, gitlab_authentication -def vars_to_variables(vars, module): - # transform old vars to new variables structure - variables = list() - for item, value in vars.items(): - if (isinstance(value, string_types) or - isinstance(value, (integer_types, float))): - variables.append( - { - "name": item, - "value": str(value), - "masked": False, - "protected": False, - "variable_type": "env_var", - } - ) - - elif isinstance(value, dict): - - new_item = { - "name": item, - "value": value.get('value'), - "masked": value.get('masked'), - "protected": value.get('protected'), - "variable_type": value.get('variable_type'), - } - - if value.get('environment_scope'): - new_item['environment_scope'] = value.get('environment_scope') - - variables.append(new_item) - - else: - module.fail_json(msg="value must be of type string, integer, 
float or dict") - - return variables +from ansible_collections.community.general.plugins.module_utils.gitlab import ( + auth_argument_spec, gitlab_authentication, filter_returned_variables, vars_to_variables, + list_all_kwargs +) class GitlabProjectVariables(object): @@ -239,23 +255,21 @@ class GitlabProjectVariables(object): return self.repo.projects.get(project_name) def list_all_project_variables(self): - page_nb = 1 - variables = [] - vars_page = self.project.variables.list(page=page_nb) - while len(vars_page) > 0: - variables += vars_page - page_nb += 1 - vars_page = self.project.variables.list(page=page_nb) - return variables + return list(self.project.variables.list(**list_all_kwargs)) def create_variable(self, var_obj): if self._module.check_mode: return True var = { - "key": var_obj.get('key'), "value": var_obj.get('value'), - "masked": var_obj.get('masked'), "protected": var_obj.get('protected'), - "variable_type": var_obj.get('variable_type') + "key": var_obj.get('key'), + "value": var_obj.get('value'), + "description": var_obj.get('description'), + "masked": var_obj.get('masked'), + "masked_and_hidden": var_obj.get('hidden'), + "protected": var_obj.get('protected'), + "raw": var_obj.get('raw'), + "variable_type": var_obj.get('variable_type'), } if var_obj.get('environment_scope') is not None: @@ -311,25 +325,26 @@ def compare(requested_variables, existing_variables, state): def native_python_main(this_gitlab, purge, requested_variables, state, module): change = False - return_value = dict(added=list(), updated=list(), removed=list(), untouched=list()) + return_value = dict(added=[], updated=[], removed=[], untouched=[]) gitlab_keys = this_gitlab.list_all_project_variables() before = [x.attributes for x in gitlab_keys] gitlab_keys = this_gitlab.list_all_project_variables() - existing_variables = [x.attributes for x in gitlab_keys] - - # preprocessing:filter out and enrich before compare - for item in existing_variables: - item.pop('project_id') + 
existing_variables = filter_returned_variables(gitlab_keys) + # filter out and enrich before compare for item in requested_variables: item['key'] = item.pop('name') item['value'] = str(item.get('value')) if item.get('protected') is None: item['protected'] = False + if item.get('raw') is None: + item['raw'] = False if item.get('masked') is None: item['masked'] = False + if item.get('hidden') is None: + item['hidden'] = False if item.get('environment_scope') is None: item['environment_scope'] = '*' if item.get('variable_type') is None: @@ -352,9 +367,7 @@ def native_python_main(this_gitlab, purge, requested_variables, state, module): if purge: # refetch and filter gitlab_keys = this_gitlab.list_all_project_variables() - existing_variables = [x.attributes for x in gitlab_keys] - for item in existing_variables: - item.pop('project_id') + existing_variables = filter_returned_variables(gitlab_keys) remove = [x for x in existing_variables if x not in requested_variables] for item in remove: @@ -362,14 +375,13 @@ def native_python_main(this_gitlab, purge, requested_variables, state, module): return_value['removed'].append(item) elif state == 'absent': - # value does not matter on removing variables. - # key and environment scope are sufficient - for item in existing_variables: - item.pop('value') - item.pop('variable_type') - for item in requested_variables: - item.pop('value') - item.pop('variable_type') + # value, type, and description do not matter on removing variables. 
+ keys_ignored_on_deletion = ['value', 'variable_type', 'description'] + for key in keys_ignored_on_deletion: + for item in existing_variables: + item.pop(key) + for item in requested_variables: + item.pop(key) if not purge: remove_requested = [x for x in requested_variables if x in existing_variables] @@ -385,7 +397,7 @@ def native_python_main(this_gitlab, purge, requested_variables, state, module): if module.check_mode: return_value = dict(added=added, updated=updated, removed=return_value['removed'], untouched=untouched) - if return_value['added'] or return_value['removed'] or return_value['updated']: + if any(return_value[x] for x in ['added', 'removed', 'updated']): change = True gitlab_keys = this_gitlab.list_all_project_variables() @@ -399,15 +411,20 @@ def main(): argument_spec.update(auth_argument_spec()) argument_spec.update( project=dict(type='str', required=True), - purge=dict(type='bool', required=False, default=False), - vars=dict(type='dict', required=False, default=dict(), no_log=True), - variables=dict(type='list', elements='dict', required=False, default=list(), options=dict( + purge=dict(type='bool', default=False), + vars=dict(type='dict', default=dict(), no_log=True), + # please mind whenever changing the variables dict to also change module_utils/gitlab.py's + # KNOWN dict in filter_returned_variables or bad evil will happen + variables=dict(type='list', elements='dict', default=list(), options=dict( name=dict(type='str', required=True), value=dict(type='str', no_log=True), + description=dict(type='str'), masked=dict(type='bool', default=False), + hidden=dict(type='bool', default=False), protected=dict(type='bool', default=False), + raw=dict(type='bool', default=False), environment_scope=dict(type='str', default='*'), - variable_type=dict(type='str', default='env_var', choices=["env_var", "file"]) + variable_type=dict(type='str', default='env_var', choices=["env_var", "file"]), )), state=dict(type='str', default="present", choices=["absent", 
"present"]), ) @@ -431,8 +448,8 @@ def main(): supports_check_mode=True ) - if not HAS_GITLAB_PACKAGE: - module.fail_json(msg=missing_required_lib("python-gitlab"), exception=GITLAB_IMP_ERR) + # check prerequisites and connect to gitlab server + gitlab_instance = gitlab_authentication(module) purge = module.params['purge'] var_list = module.params['vars'] @@ -445,9 +462,7 @@ def main(): if state == 'present': if any(x['value'] is None for x in variables): - module.fail_json(msg='value parameter is required in state present') - - gitlab_instance = gitlab_authentication(module) + module.fail_json(msg='value parameter is required for all variables in state present') this_gitlab = GitlabProjectVariables(module=module, gitlab_instance=gitlab_instance) diff --git a/plugins/modules/source_control/gitlab/gitlab_protected_branch.py b/plugins/modules/gitlab_protected_branch.py similarity index 51% rename from plugins/modules/source_control/gitlab/gitlab_protected_branch.py rename to plugins/modules/gitlab_protected_branch.py index fe8e98a3f3..c779736cc6 100644 --- a/plugins/modules/source_control/gitlab/gitlab_protected_branch.py +++ b/plugins/modules/gitlab_protected_branch.py @@ -1,30 +1,35 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- -# Copyright: (c) 2021, Werner Dijkerman (ikben@werner-dijkerman.nl) -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import absolute_import, division, print_function -__metaclass__ = type +# Copyright (c) 2021, Werner Dijkerman (ikben@werner-dijkerman.nl) +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later +from __future__ import annotations -DOCUMENTATION = ''' +DOCUMENTATION = r""" module: gitlab_protected_branch -short_description: (un)Marking existing branches for protection +short_description: Manage protection of existing branches version_added: 3.4.0 description: - 
(un)Marking existing branches for protection. author: - "Werner Dijkerman (@dj-wasabi)" requirements: - - python >= 2.7 - python-gitlab >= 2.3.0 extends_documentation_fragment: - community.general.auth_basic - community.general.gitlab + - community.general.attributes + +attributes: + check_mode: + support: full + diff_mode: + support: none options: state: description: - - Create or delete proteced branch. + - Create or delete protected branch. default: present type: str choices: ["present", "absent"] @@ -36,7 +41,7 @@ options: name: description: - The name of the branch that needs to be protected. - - Can make use a wildcard charachter for like C(production/*) or just have C(main) or C(develop) as value. + - Can make use a wildcard character for like V(production/*) or just have V(main) or V(develop) as value. required: true type: str merge_access_levels: @@ -51,10 +56,20 @@ options: default: maintainer type: str choices: ["maintainer", "developer", "nobody"] -''' + allow_force_push: + description: + - Whether or not to allow force pushes to the protected branch. + type: bool + version_added: '11.3.0' + code_owner_approval_required: + description: + - Whether or not to require code owner approval to push. 
+ type: bool + version_added: '11.3.0' +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: Create protected branch on main community.general.gitlab_protected_branch: api_url: https://gitlab.com @@ -63,28 +78,19 @@ EXAMPLES = ''' name: main merge_access_levels: maintainer push_access_level: nobody +""" -''' +RETURN = r""" +""" -RETURN = ''' -''' - -import traceback - -from ansible.module_utils.basic import AnsibleModule, missing_required_lib +from ansible.module_utils.basic import AnsibleModule from ansible.module_utils.api import basic_auth_argument_spec from ansible_collections.community.general.plugins.module_utils.version import LooseVersion -GITLAB_IMP_ERR = None -try: - import gitlab - HAS_GITLAB_PACKAGE = True -except Exception: - GITLAB_IMP_ERR = traceback.format_exc() - HAS_GITLAB_PACKAGE = False - -from ansible_collections.community.general.plugins.module_utils.gitlab import auth_argument_spec, gitlab_authentication +from ansible_collections.community.general.plugins.module_utils.gitlab import ( + auth_argument_spec, gitlab_authentication, gitlab +) class GitlabProtectedBranch(object): @@ -94,9 +100,9 @@ class GitlabProtectedBranch(object): self._module = module self.project = self.get_project(project) self.ACCESS_LEVEL = { - 'nobody': gitlab.NO_ACCESS, - 'developer': gitlab.DEVELOPER_ACCESS, - 'maintainer': gitlab.MAINTAINER_ACCESS + 'nobody': gitlab.const.NO_ACCESS, + 'developer': gitlab.const.DEVELOPER_ACCESS, + 'maintainer': gitlab.const.MAINTAINER_ACCESS } def get_project(self, project_name): @@ -108,27 +114,43 @@ class GitlabProtectedBranch(object): except Exception as e: return False - def create_protected_branch(self, name, merge_access_levels, push_access_level): - if self._module.check_mode: - return True - merge = self.ACCESS_LEVEL[merge_access_levels] - push = self.ACCESS_LEVEL[push_access_level] - self.project.protectedbranches.create({ + def create_or_update_protected_branch(self, name, options): + protected_branch_options = { 'name': name, - 
'merge_access_level': merge, - 'push_access_level': push - }) + 'allow_force_push': options['allow_force_push'], + 'code_owner_approval_required': options['code_owner_approval_required'], + } + protected_branch = self.protected_branch_exist(name=name) + changed = False + if protected_branch and self.can_update(protected_branch, options): + for arg_key, arg_value in protected_branch_options.items(): + if arg_value is not None: + if getattr(protected_branch, arg_key) != arg_value: + setattr(protected_branch, arg_key, arg_value) + changed = True + if changed and not self._module.check_mode: + protected_branch.save() + else: + # Set immutable options only on (re)creation + protected_branch_options['merge_access_level'] = options['merge_access_levels'] + protected_branch_options['push_access_level'] = options['push_access_level'] + if protected_branch: + # Exists, but couldn't update. So, delete first + self.delete_protected_branch(name) + if not self._module.check_mode: + self.project.protectedbranches.create(protected_branch_options) + changed = True - def compare_protected_branch(self, name, merge_access_levels, push_access_level): - configured_merge = self.ACCESS_LEVEL[merge_access_levels] - configured_push = self.ACCESS_LEVEL[push_access_level] - current = self.protected_branch_exist(name=name) - current_merge = current.merge_access_levels[0]['access_level'] - current_push = current.push_access_levels[0]['access_level'] - if current: - if current.name == name and current_merge == configured_merge and current_push == configured_push: - return True - return False + return changed + + def can_update(self, protected_branch, options): + # these keys are not set on update the same way they are on creation + configured_merge = options['merge_access_levels'] + configured_push = options['push_access_level'] + current_merge = protected_branch.merge_access_levels[0]['access_level'] + current_push = protected_branch.push_access_levels[0]['access_level'] + return 
((configured_merge is None or current_merge == configured_merge) and + (configured_push is None or current_push == configured_push)) def delete_protected_branch(self, name): if self._module.check_mode: @@ -144,6 +166,8 @@ def main(): name=dict(type='str', required=True), merge_access_levels=dict(type='str', default="maintainer", choices=["maintainer", "developer", "nobody"]), push_access_level=dict(type='str', default="maintainer", choices=["maintainer", "developer", "nobody"]), + allow_force_push=dict(type='bool'), + code_owner_approval_required=dict(type='bool'), state=dict(type='str', default="present", choices=["absent", "present"]), ) @@ -165,35 +189,35 @@ def main(): supports_check_mode=True ) + # check prerequisites and connect to gitlab server + gitlab_instance = gitlab_authentication(module) + project = module.params['project'] name = module.params['name'] merge_access_levels = module.params['merge_access_levels'] push_access_level = module.params['push_access_level'] state = module.params['state'] - if not HAS_GITLAB_PACKAGE: - module.fail_json(msg=missing_required_lib("python-gitlab"), exception=GITLAB_IMP_ERR) - gitlab_version = gitlab.__version__ if LooseVersion(gitlab_version) < LooseVersion('2.3.0'): - module.fail_json(msg="community.general.gitlab_proteched_branch requires python-gitlab Python module >= 2.3.0 (installed version: [%s])." + module.fail_json(msg="community.general.gitlab_protected_branch requires python-gitlab Python module >= 2.3.0 (installed version: [%s])." " Please upgrade python-gitlab to version 2.3.0 or above." 
% gitlab_version) - gitlab_instance = gitlab_authentication(module) this_gitlab = GitlabProtectedBranch(module=module, project=project, gitlab_instance=gitlab_instance) p_branch = this_gitlab.protected_branch_exist(name=name) - if not p_branch and state == "present": - this_gitlab.create_protected_branch(name=name, merge_access_levels=merge_access_levels, push_access_level=push_access_level) - module.exit_json(changed=True, msg="Created the proteched branch.") - elif p_branch and state == "present": - if not this_gitlab.compare_protected_branch(name, merge_access_levels, push_access_level): - this_gitlab.delete_protected_branch(name=name) - this_gitlab.create_protected_branch(name=name, merge_access_levels=merge_access_levels, push_access_level=push_access_level) - module.exit_json(changed=True, msg="Recreated the proteched branch.") + options = { + "merge_access_levels": this_gitlab.ACCESS_LEVEL[merge_access_levels], + "push_access_level": this_gitlab.ACCESS_LEVEL[push_access_level], + "allow_force_push": module.params["allow_force_push"], + "code_owner_approval_required": module.params["code_owner_approval_required"], + } + if state == "present": + changed = this_gitlab.create_or_update_protected_branch(name, options) + module.exit_json(changed=changed, msg="Created or updated the protected branch.") elif p_branch and state == "absent": this_gitlab.delete_protected_branch(name=name) - module.exit_json(changed=True, msg="Deleted the proteched branch.") + module.exit_json(changed=True, msg="Deleted the protected branch.") module.exit_json(changed=False, msg="No changes are needed.") diff --git a/plugins/modules/gitlab_runner.py b/plugins/modules/gitlab_runner.py new file mode 100644 index 0000000000..889e2471cc --- /dev/null +++ b/plugins/modules/gitlab_runner.py @@ -0,0 +1,530 @@ +#!/usr/bin/python + +# Copyright (c) 2021, Raphaël Droz (raphael.droz@gmail.com) +# Copyright (c) 2019, Guillaume Martinez (lunik@tiwabbit.fr) +# Copyright (c) 2018, Samy Coenen +# GNU 
General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + +DOCUMENTATION = r""" +module: gitlab_runner +short_description: Create, modify and delete GitLab Runners +description: + - Register, update and delete runners on GitLab Server side with the GitLab API. + - All operations are performed using the GitLab API v4. + - For details, consult the full API documentation at U(https://docs.gitlab.com/ee/api/runners.html) and + U(https://docs.gitlab.com/ee/api/users.html#create-a-runner-linked-to-a-user). + - A valid private API token is required for all operations. You can create as many tokens as you like using the GitLab web + interface at U(https://$GITLAB_URL/profile/personal_access_tokens). + - A valid registration token is required for registering a new runner. To create shared runners, you need to ask your administrator + to give you this token. It can be found at U(https://$GITLAB_URL/admin/runners/). + - This module does not handle the C(gitlab-runner) process part, but only manages the runner on GitLab Server side through + its API. Once the module has created the runner, you may use the generated token to run C(gitlab-runner register) command. +notes: + - To create a new runner at least the O(api_token), O(description) and O(api_url) options are required. + - Runners need to have unique descriptions, since this attribute is used as key for idempotency. 
+author: + - Samy Coenen (@SamyCoenen) + - Guillaume Martinez (@Lunik) +requirements: + - python-gitlab >= 1.5.0 for legacy runner registration workflow (runner registration token - + U(https://docs.gitlab.com/runner/register/#register-with-a-runner-registration-token-deprecated)) + - python-gitlab >= 4.0.0 for new runner registration workflow (runner authentication token - + U(https://docs.gitlab.com/runner/register/#register-with-a-runner-authentication-token)) +extends_documentation_fragment: + - community.general.auth_basic + - community.general.gitlab + - community.general.attributes + +attributes: + check_mode: + support: full + diff_mode: + support: none + +options: + group: + description: + - ID or full path of the group in the form group/subgroup. + - Mutually exclusive with O(owned) and O(project). + - Must be group's numeric ID if O(registration_token) is not set and O(state=present). + type: str + version_added: '6.5.0' + project: + description: + - ID or full path of the project in the form of group/name. + - Mutually exclusive with O(owned) since community.general 4.5.0. + - Mutually exclusive with O(group). + - Must be project's numeric ID if O(registration_token) is not set and O(state=present). + type: str + version_added: '3.7.0' + description: + description: + - The unique name of the runner. + required: true + type: str + aliases: + - name + state: + description: + - Make sure that the runner with the same name exists with the same configuration or delete the runner with the same + name. + required: false + default: present + choices: ["present", "absent"] + type: str + registration_token: + description: + - The registration token is used to register new runners before GitLab 16.0. + - Required if O(state=present) for GitLab < 16.0. + - If set, the runner is created using the old runner creation workflow. + - If not set, the runner is created using the new runner creation workflow, introduced in GitLab 16.0. 
+ - If not set, requires python-gitlab >= 4.0.0. + type: str + owned: + description: + - Searches only runners available to the user when searching for existing, when false admin token required. + - Mutually exclusive with O(project) since community.general 4.5.0. + - Mutually exclusive with O(group). + default: false + type: bool + version_added: 2.0.0 + active: + description: + - Define if the runners is immediately active after creation. + - Mutually exclusive with O(paused). + required: false + default: true + type: bool + paused: + description: + - Define if the runners is active or paused after creation. + - Mutually exclusive with O(active). + required: false + default: false + type: bool + version_added: 8.1.0 + locked: + description: + - Determines if the runner is locked or not. + required: false + default: false + type: bool + access_level: + description: + - Determines if a runner can pick up jobs only from protected branches. + - If O(access_level_on_creation) is not explicitly set to V(true), this option is ignored on registration and is only + applied on updates. + - If set to V(not_protected), runner can pick up jobs from both protected and unprotected branches. + - If set to V(ref_protected), runner can pick up jobs only from protected branches. + - Before community.general 8.0.0 the default was V(ref_protected). This was changed to no default in community.general + 8.0.0. If this option is not specified explicitly, GitLab uses V(not_protected) on creation, and the value set is + not changed on any updates. + required: false + choices: ["not_protected", "ref_protected"] + type: str + access_level_on_creation: + description: + - Whether the runner should be registered with an access level or not. + - If set to V(true), the value of O(access_level) is used for runner registration. + - If set to V(false), GitLab registers the runner with the default access level. + - The default of this option changed to V(true) in community.general 7.0.0. 
Before, it was V(false). + required: false + default: true + type: bool + version_added: 6.3.0 + maximum_timeout: + description: + - The maximum time that a runner has to complete a specific job. + required: false + default: 3600 + type: int + run_untagged: + description: + - Run untagged jobs or not. + required: false + default: true + type: bool + tag_list: + description: The tags that apply to the runner. + required: false + default: [] + type: list + elements: str +""" + +EXAMPLES = r""" +- name: Create an instance-level runner + community.general.gitlab_runner: + api_url: https://gitlab.example.com/ + api_token: "{{ access_token }}" + description: Docker Machine t1 + state: present + active: true + tag_list: ['docker'] + run_untagged: false + locked: false + register: runner # Register module output to run C(gitlab-runner register) command in another task + +- name: Create a group-level runner + community.general.gitlab_runner: + api_url: https://gitlab.example.com/ + api_token: "{{ access_token }}" + description: Docker Machine t1 + state: present + active: true + tag_list: ['docker'] + run_untagged: false + locked: false + group: top-level-group/subgroup + register: runner # Register module output to run C(gitlab-runner register) command in another task + +- name: Create a project-level runner + community.general.gitlab_runner: + api_url: https://gitlab.example.com/ + api_token: "{{ access_token }}" + description: Docker Machine t1 + state: present + active: true + tag_list: ['docker'] + run_untagged: false + locked: false + project: top-level-group/subgroup/project + register: runner # Register module output to run C(gitlab-runner register) command in another task + +- name: "Register instance-level runner with registration token (deprecated)" + community.general.gitlab_runner: + api_url: https://gitlab.example.com/ + api_token: "{{ access_token }}" + registration_token: 4gfdsg345 + description: Docker Machine t1 + state: present + active: true + tag_list: 
['docker'] + run_untagged: false + locked: false + register: runner # Register module output to run C(gitlab-runner register) command in another task + +- name: "Delete runner" + community.general.gitlab_runner: + api_url: https://gitlab.example.com/ + api_token: "{{ access_token }}" + description: Docker Machine t1 + state: absent + +- name: Delete an owned runner as a non-admin + community.general.gitlab_runner: + api_url: https://gitlab.example.com/ + api_token: "{{ access_token }}" + description: Docker Machine t1 + owned: true + state: absent + +- name: "Register a project-level runner with registration token (deprecated)" + community.general.gitlab_runner: + api_url: https://gitlab.example.com/ + api_token: "{{ access_token }}" + registration_token: 4gfdsg345 + description: MyProject runner + state: present + project: mygroup/mysubgroup/myproject + register: runner # Register module output to run C(gitlab-runner register) command in another task +""" + +RETURN = r""" +msg: + description: Success or failure message. + returned: always + type: str + sample: "Success" + +result: + description: JSON-parsed response from the server. + returned: always + type: dict + +error: + description: The error message returned by the GitLab API. + returned: failed + type: str + sample: "400: path is already in use" + +runner: + description: API object. 
+ returned: always + type: dict +""" + +from ansible.module_utils.api import basic_auth_argument_spec +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.common.text.converters import to_native + +from ansible_collections.community.general.plugins.module_utils.gitlab import ( + auth_argument_spec, gitlab_authentication, gitlab, list_all_kwargs +) + + +from ansible_collections.community.general.plugins.module_utils.version import LooseVersion + + +class GitLabRunner(object): + def __init__(self, module, gitlab_instance, group=None, project=None): + self._module = module + self._gitlab = gitlab_instance + self.runner_object = None + + # Whether to operate on GitLab-instance-wide or project-wide runners + # See https://gitlab.com/gitlab-org/gitlab-ce/issues/60774 + # for group runner token access + if project: + self._runners_endpoint = project.runners.list + elif group: + self._runners_endpoint = group.runners.list + elif module.params['owned']: + self._runners_endpoint = gitlab_instance.runners.list + else: + self._runners_endpoint = gitlab_instance.runners.all + + def create_or_update_runner(self, description, options): + changed = False + + arguments = { + 'locked': options['locked'], + 'run_untagged': options['run_untagged'], + 'maximum_timeout': options['maximum_timeout'], + 'tag_list': options['tag_list'], + } + + if options.get('paused') is not None: + arguments['paused'] = options['paused'] + else: + arguments['active'] = options['active'] + + if options.get('access_level') is not None: + arguments['access_level'] = options['access_level'] + # Because we have already call userExists in main() + if self.runner_object is None: + arguments['description'] = description + if options.get('registration_token') is not None: + arguments['token'] = options['registration_token'] + elif options.get('group') is not None: + arguments['runner_type'] = 'group_type' + arguments['group_id'] = options['group'] + elif options.get('project') is not 
None: + arguments['runner_type'] = 'project_type' + arguments['project_id'] = options['project'] + else: + arguments['runner_type'] = 'instance_type' + + access_level_on_creation = self._module.params['access_level_on_creation'] + if not access_level_on_creation: + arguments.pop('access_level', None) + + runner = self.create_runner(arguments) + changed = True + else: + changed, runner = self.update_runner(self.runner_object, arguments) + if changed: + if self._module.check_mode: + self._module.exit_json(changed=True, msg="Successfully updated the runner %s" % description) + + try: + runner.save() + except Exception as e: + self._module.fail_json(msg="Failed to update runner: %s " % to_native(e)) + + self.runner_object = runner + return changed + + ''' + @param arguments Attributes of the runner + ''' + def create_runner(self, arguments): + if self._module.check_mode: + class MockRunner: + def __init__(self): + self._attrs = {} + return MockRunner() + + try: + if arguments.get('token') is not None: + runner = self._gitlab.runners.create(arguments) + elif LooseVersion(gitlab.__version__) < LooseVersion('4.0.0'): + self._module.fail_json(msg="New runner creation workflow requires python-gitlab 4.0.0 or higher") + else: + runner = self._gitlab.user.runners.create(arguments) + except (gitlab.exceptions.GitlabCreateError) as e: + self._module.fail_json(msg="Failed to create runner: %s " % to_native(e)) + + return runner + + ''' + @param runner Runner object + @param arguments Attributes of the runner + ''' + def update_runner(self, runner, arguments): + changed = False + + for arg_key, arg_value in arguments.items(): + if arg_value is not None: + if isinstance(arg_value, list): + list1 = getattr(runner, arg_key) + list1.sort() + list2 = arg_value + list2.sort() + if list1 != list2: + setattr(runner, arg_key, arg_value) + changed = True + else: + if getattr(runner, arg_key) != arg_value: + setattr(runner, arg_key, arg_value) + changed = True + + return (changed, runner) + 
+ ''' + @param description Description of the runner + ''' + def find_runner(self, description): + runners = self._runners_endpoint(**list_all_kwargs) + + for runner in runners: + # python-gitlab 2.2 through at least 2.5 returns a list of dicts for list() instead of a Runner + # object, so we need to handle both + if hasattr(runner, "description"): + if runner.description == description: + return self._gitlab.runners.get(runner.id) + else: + if runner['description'] == description: + return self._gitlab.runners.get(runner['id']) + + ''' + @param description Description of the runner + ''' + def exists_runner(self, description): + # When runner exists, object will be stored in self.runner_object. + runner = self.find_runner(description) + + if runner: + self.runner_object = runner + return True + return False + + def delete_runner(self): + if self._module.check_mode: + return True + + runner = self.runner_object + + return runner.delete() + + +def main(): + argument_spec = basic_auth_argument_spec() + argument_spec.update(auth_argument_spec()) + argument_spec.update(dict( + description=dict(type='str', required=True, aliases=["name"]), + active=dict(type='bool', default=True), + paused=dict(type='bool', default=False), + owned=dict(type='bool', default=False), + tag_list=dict(type='list', elements='str', default=[]), + run_untagged=dict(type='bool', default=True), + locked=dict(type='bool', default=False), + access_level=dict(type='str', choices=["not_protected", "ref_protected"]), + access_level_on_creation=dict(type='bool', default=True), + maximum_timeout=dict(type='int', default=3600), + registration_token=dict(type='str', no_log=True), + project=dict(type='str'), + group=dict(type='str'), + state=dict(type='str', default="present", choices=["absent", "present"]), + )) + + module = AnsibleModule( + argument_spec=argument_spec, + mutually_exclusive=[ + ['api_username', 'api_token'], + ['api_username', 'api_oauth_token'], + ['api_username', 'api_job_token'], + 
['api_token', 'api_oauth_token'], + ['api_token', 'api_job_token'], + ['project', 'owned'], + ['group', 'owned'], + ['project', 'group'], + ['active', 'paused'], + ], + required_together=[ + ['api_username', 'api_password'], + ], + required_one_of=[ + ['api_username', 'api_token', 'api_oauth_token', 'api_job_token'], + ], + supports_check_mode=True, + ) + + # check prerequisites and connect to gitlab server + gitlab_instance = gitlab_authentication(module) + + state = module.params['state'] + runner_description = module.params['description'] + runner_active = module.params['active'] + runner_paused = module.params['paused'] + tag_list = module.params['tag_list'] + run_untagged = module.params['run_untagged'] + runner_locked = module.params['locked'] + access_level = module.params['access_level'] + maximum_timeout = module.params['maximum_timeout'] + registration_token = module.params['registration_token'] + project = module.params['project'] + group = module.params['group'] + + gitlab_project = None + gitlab_group = None + + if project: + try: + gitlab_project = gitlab_instance.projects.get(project) + except gitlab.exceptions.GitlabGetError as e: + module.fail_json(msg='No such a project %s' % project, exception=to_native(e)) + elif group: + try: + gitlab_group = gitlab_instance.groups.get(group) + except gitlab.exceptions.GitlabGetError as e: + module.fail_json(msg='No such a group %s' % group, exception=to_native(e)) + + gitlab_runner = GitLabRunner(module, gitlab_instance, gitlab_group, gitlab_project) + runner_exists = gitlab_runner.exists_runner(runner_description) + + if state == 'absent': + if runner_exists: + gitlab_runner.delete_runner() + module.exit_json(changed=True, msg="Successfully deleted runner %s" % runner_description) + else: + module.exit_json(changed=False, msg="Runner deleted or does not exists") + + if state == 'present': + runner_values = { + "active": runner_active, + "tag_list": tag_list, + "run_untagged": run_untagged, + "locked": 
runner_locked, + "access_level": access_level, + "maximum_timeout": maximum_timeout, + "registration_token": registration_token, + "group": group, + "project": project, + } + if LooseVersion(gitlab_runner._gitlab.version()[0]) >= LooseVersion("14.8.0"): + # the paused attribute for runners is available since 14.8 + runner_values["paused"] = runner_paused + if gitlab_runner.create_or_update_runner(runner_description, runner_values): + module.exit_json(changed=True, runner=gitlab_runner.runner_object._attrs, + msg="Successfully created or updated the runner %s" % runner_description) + else: + module.exit_json(changed=False, runner=gitlab_runner.runner_object._attrs, + msg="No need to update the runner %s" % runner_description) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/source_control/gitlab/gitlab_user.py b/plugins/modules/gitlab_user.py similarity index 87% rename from plugins/modules/source_control/gitlab/gitlab_user.py rename to plugins/modules/gitlab_user.py index 803c54bc83..58bfc126ac 100644 --- a/plugins/modules/source_control/gitlab/gitlab_user.py +++ b/plugins/modules/gitlab_user.py @@ -1,23 +1,21 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- -# Copyright: (c) 2021, Lennert Mertens (lennert@nubera.be) -# Copyright: (c) 2019, Guillaume Martinez (lunik@tiwabbit.fr) -# Copyright: (c) 2015, Werner Dijkerman (ikben@werner-dijkerman.nl) -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# Copyright (c) 2021, Lennert Mertens (lennert@nubera.be) +# Copyright (c) 2019, Guillaume Martinez (lunik@tiwabbit.fr) +# Copyright (c) 2015, Werner Dijkerman (ikben@werner-dijkerman.nl) +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = ''' ---- +DOCUMENTATION = 
r""" module: gitlab_user short_description: Creates/updates/deletes/blocks/unblocks GitLab Users description: - - When the user does not exist in GitLab, it will be created. - - When the user exists and state=absent, the user will be deleted. - - When the user exists and state=blocked, the user will be blocked. - - When changes are made to user, the user will be updated. + - When the user does not exist in GitLab, it is created. + - When the user exists and state=absent, the user is deleted. + - When the user exists and state=blocked, the user is blocked. + - When changes are made to user, the user is updated. notes: - From community.general 0.2.0 and onwards, name, email and password are optional while deleting the user. author: @@ -26,18 +24,24 @@ author: - Lennert Mertens (@LennertMertens) - Stef Graces (@stgrace) requirements: - - python >= 2.7 - python-gitlab python module - administrator rights on the GitLab server extends_documentation_fragment: - community.general.auth_basic - community.general.gitlab + - community.general.attributes + +attributes: + check_mode: + support: full + diff_mode: + support: none options: name: description: - Name of the user you want to create. - - Required only if C(state) is set to C(present). + - Required only if O(state=present). type: str username: description: @@ -58,7 +62,7 @@ options: email: description: - The email that belongs to the user. - - Required only if C(state) is set to C(present). + - Required only if O(state=present). type: str sshkey_name: description: @@ -76,18 +80,13 @@ options: version_added: 3.1.0 group: description: - - Id or Full path of parent group in the form of group/name. + - ID or Full path of parent group in the form of group/name. - Add user as a member to this group. type: str access_level: description: - - The access level to the group. One of the following can be used. 
- - guest - - reporter - - developer - - master (alias for maintainer) - - maintainer - - owner + - The access level to the group. + - The value V(master) is an alias for V(maintainer). default: guest type: str choices: ["guest", "reporter", "developer", "master", "maintainer", "owner"] @@ -101,27 +100,27 @@ options: description: - Require confirmation. type: bool - default: yes + default: true isadmin: description: - Grant admin privileges to the user. type: bool - default: no + default: false external: description: - Define external parameter for this user. type: bool - default: no + default: false identities: description: - List of identities to be added/updated for this user. - - To remove all other identities from this user, set I(overwrite_identities=true). + - To remove all other identities from this user, set O(overwrite_identities=true). type: list elements: dict suboptions: provider: description: - - The name of the external identity provider + - The name of the external identity provider. type: str extern_uid: description: @@ -131,26 +130,25 @@ options: overwrite_identities: description: - Overwrite identities with identities added in this module. - - This means that all identities that the user has and that are not listed in I(identities) are removed from the user. - - This is only done if a list is provided for I(identities). To remove all identities, provide an empty list. + - This means that all identities that the user has and that are not listed in O(identities) are removed from the user. + - This is only done if a list is provided for O(identities). To remove all identities, provide an empty list. 
type: bool default: false version_added: 3.3.0 -''' +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: "Delete GitLab User" community.general.gitlab_user: api_url: https://gitlab.example.com/ api_token: "{{ access_token }}" - validate_certs: False username: myusername state: absent - name: "Create GitLab User" community.general.gitlab_user: api_url: https://gitlab.example.com/ - validate_certs: True + validate_certs: true api_username: dj-wasabi api_password: "MySecretPassword" name: My Name @@ -166,15 +164,15 @@ EXAMPLES = ''' - name: "Create GitLab User using external identity provider" community.general.gitlab_user: api_url: https://gitlab.example.com/ - validate_certs: True + validate_certs: true api_token: "{{ access_token }}" name: My Name username: myusername password: mysecretpassword email: me@example.com identities: - - provider: Keycloak - extern_uid: f278f95c-12c7-4d51-996f-758cc2eb11bc + - provider: Keycloak + extern_uid: f278f95c-12c7-4d51-996f-758cc2eb11bc state: present group: super_group/mon_group access_level: owner @@ -183,7 +181,6 @@ EXAMPLES = ''' community.general.gitlab_user: api_url: https://gitlab.example.com/ api_token: "{{ access_token }}" - validate_certs: False username: myusername state: blocked @@ -191,50 +188,42 @@ EXAMPLES = ''' community.general.gitlab_user: api_url: https://gitlab.example.com/ api_token: "{{ access_token }}" - validate_certs: False username: myusername state: unblocked -''' +""" -RETURN = ''' +RETURN = r""" msg: - description: Success or failure message + description: Success or failure message. returned: always type: str sample: "Success" result: - description: json parsed response from the server + description: JSON-parsed response from the server. returned: always type: dict error: - description: the error message returned by the GitLab API + description: The error message returned by the GitLab API. 
returned: failed type: str sample: "400: path is already in use" user: - description: API object + description: API object. returned: always type: dict -''' +""" -import traceback - -GITLAB_IMP_ERR = None -try: - import gitlab - HAS_GITLAB_PACKAGE = True -except Exception: - GITLAB_IMP_ERR = traceback.format_exc() - HAS_GITLAB_PACKAGE = False from ansible.module_utils.api import basic_auth_argument_spec -from ansible.module_utils.basic import AnsibleModule, missing_required_lib +from ansible.module_utils.basic import AnsibleModule from ansible.module_utils.common.text.converters import to_native -from ansible_collections.community.general.plugins.module_utils.gitlab import auth_argument_spec, find_group, gitlab_authentication +from ansible_collections.community.general.plugins.module_utils.gitlab import ( + auth_argument_spec, find_group, gitlab_authentication, gitlab, list_all_kwargs +) class GitLabUser(object): @@ -243,12 +232,12 @@ class GitLabUser(object): self._gitlab = gitlab_instance self.user_object = None self.ACCESS_LEVEL = { - 'guest': gitlab.GUEST_ACCESS, - 'reporter': gitlab.REPORTER_ACCESS, - 'developer': gitlab.DEVELOPER_ACCESS, - 'master': gitlab.MAINTAINER_ACCESS, - 'maintainer': gitlab.MAINTAINER_ACCESS, - 'owner': gitlab.OWNER_ACCESS, + 'guest': gitlab.const.GUEST_ACCESS, + 'reporter': gitlab.const.REPORTER_ACCESS, + 'developer': gitlab.const.DEVELOPER_ACCESS, + 'master': gitlab.const.MAINTAINER_ACCESS, + 'maintainer': gitlab.const.MAINTAINER_ACCESS, + 'owner': gitlab.const.OWNER_ACCESS, } ''' @@ -305,7 +294,7 @@ class GitLabUser(object): # note: as we unfortunately have some uncheckable parameters # where it is not possible to determine if the update # changed something or not, we must assume here that a - # changed happend and that an user object update is needed + # changed happened and that an user object update is needed potentionally_changed = True # Assign ssh keys @@ -348,9 +337,10 @@ class GitLabUser(object): @param sshkey_name Name of 
the ssh key ''' def ssh_key_exists(self, user, sshkey_name): - keyList = map(lambda k: k.title, user.keys.list(all=True)) - - return sshkey_name in keyList + return any( + k.title == sshkey_name + for k in user.keys.list(**list_all_kwargs) + ) ''' @param user User object @@ -484,7 +474,7 @@ class GitLabUser(object): ''' @param user User object - @param identites List of identities to be added/updated + @param identities List of identities to be added/updated @param overwrite_identities Overwrite user identities with identities passed to this module ''' def add_identities(self, user, identities, overwrite_identities=False): @@ -503,7 +493,7 @@ class GitLabUser(object): ''' @param user User object - @param identites List of identities to be added/updated + @param identities List of identities to be added/updated ''' def delete_identities(self, user, identities): changed = False @@ -518,10 +508,13 @@ class GitLabUser(object): @param username Username of the user ''' def find_user(self, username): - users = self._gitlab.users.list(search=username, all=True) - for user in users: - if (user.username == username): - return user + return next( + ( + user for user in self._gitlab.users.list(search=username, **list_all_kwargs) + if user.username == username + ), + None + ) ''' @param username Username of the user @@ -616,6 +609,9 @@ def main(): ) ) + # check prerequisites and connect to gitlab server + gitlab_instance = gitlab_authentication(module) + user_name = module.params['name'] state = module.params['state'] user_username = module.params['username'].lower() @@ -633,11 +629,6 @@ def main(): user_identities = module.params['identities'] overwrite_identities = module.params['overwrite_identities'] - if not HAS_GITLAB_PACKAGE: - module.fail_json(msg=missing_required_lib("python-gitlab"), exception=GITLAB_IMP_ERR) - - gitlab_instance = gitlab_authentication(module) - gitlab_user = GitLabUser(module, gitlab_instance) user_exists = gitlab_user.exists_user(user_username) if 
user_exists: diff --git a/plugins/modules/notification/grove.py b/plugins/modules/grove.py similarity index 72% rename from plugins/modules/notification/grove.py rename to plugins/modules/grove.py index 12c910902e..fc71322688 100644 --- a/plugins/modules/notification/grove.py +++ b/plugins/modules/grove.py @@ -1,20 +1,24 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- # -# Copyright: Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# Copyright Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: grove short_description: Sends a notification to a grove.io channel description: - - The C(grove) module sends a message for a service to a Grove.io - channel. + - The C(grove) module sends a message for a service to a Grove.io channel. +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: none + diff_mode: + support: none options: channel_token: type: str @@ -24,44 +28,45 @@ options: service: type: str description: - - Name of the service (displayed as the "user" in the message) + - Name of the service (displayed as the "user" in the message). required: false default: ansible message_content: type: str description: - Message content. - - The alias I(message) is deprecated and will be removed in community.general 4.0.0. + - The alias O(ignore:message) has been removed in community.general 4.0.0. required: true url: type: str description: - - Service URL for the web client + - Service URL for the web client. required: false icon_url: type: str description: - - Icon for the service + - Icon for the service. 
required: false validate_certs: description: - - If C(no), SSL certificates will not be validated. This should only be used - on personally controlled sites using self-signed certificates. - default: 'yes' + - If V(false), SSL certificates are not validated. This should only be used on personally controlled sites using self-signed + certificates. + default: true type: bool author: "Jonas Pfenniger (@zimbatm)" -''' +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: Sends a notification to a grove.io channel community.general.grove: channel_token: 6Ph62VBBJOccmtTPZbubiPzdrhipZXtg service: my-app message: 'deployed {{ target }}' -''' +""" + +from urllib.parse import urlencode from ansible.module_utils.basic import AnsibleModule -from ansible.module_utils.six.moves.urllib.parse import urlencode from ansible.module_utils.urls import fetch_url @@ -95,8 +100,8 @@ def main(): channel_token=dict(type='str', required=True, no_log=True), message_content=dict(type='str', required=True), service=dict(type='str', default='ansible'), - url=dict(type='str', default=None), - icon_url=dict(type='str', default=None), + url=dict(type='str'), + icon_url=dict(type='str'), validate_certs=dict(default=True, type='bool'), ) ) diff --git a/plugins/modules/web_infrastructure/gunicorn.py b/plugins/modules/gunicorn.py similarity index 75% rename from plugins/modules/web_infrastructure/gunicorn.py rename to plugins/modules/gunicorn.py index 4c9e5da45b..b7033d3471 100644 --- a/plugins/modules/web_infrastructure/gunicorn.py +++ b/plugins/modules/gunicorn.py @@ -1,26 +1,29 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- -# (c) 2017, Alejandro Gomez -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# Copyright (c) 2017, Alejandro Gomez +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import absolute_import, division, print_function 
-__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: gunicorn -short_description: Run gunicorn with various settings. +short_description: Run gunicorn with various settings description: - - Starts gunicorn with the parameters specified. Common settings for gunicorn - configuration are supported. For additional configuration use a config file - See U(https://gunicorn-docs.readthedocs.io/en/latest/settings.html) for more - options. It's recommended to always use the chdir option to avoid problems - with the location of the app. + - Starts gunicorn with the parameters specified. Common settings for gunicorn configuration are supported. For additional + configuration use a config file See U(https://gunicorn-docs.readthedocs.io/en/latest/settings.html) for more options. + It's recommended to always use the chdir option to avoid problems with the location of the app. requirements: [gunicorn] author: - - "Alejandro Gomez (@agmezr)" + - "Alejandro Gomez (@agmezr)" +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: none + diff_mode: + support: none options: app: type: str @@ -32,37 +35,36 @@ options: type: path aliases: ['virtualenv'] description: - - 'Path to the virtualenv directory.' + - Path to the virtualenv directory. config: type: path description: - - 'Path to the gunicorn configuration file.' + - Path to the gunicorn configuration file. aliases: ['conf'] chdir: type: path description: - - 'Chdir to specified directory before apps loading.' + - Chdir to specified directory before apps loading. pid: type: path description: - - 'A filename to use for the PID file. If not set and not found on the configuration file a tmp - pid file will be created to check a successful run of gunicorn.' + - A filename to use for the PID file. If not set and not found on the configuration file a tmp pid file is created to + check a successful run of gunicorn. 
worker: type: str choices: ['sync', 'eventlet', 'gevent', 'tornado ', 'gthread', 'gaiohttp'] description: - - 'The type of workers to use. The default class (sync) should handle most "normal" types of workloads.' + - The type of workers to use. The default class (sync) should handle most "normal" types of workloads. user: type: str description: - - 'Switch worker processes to run as this user.' + - Switch worker processes to run as this user. notes: - - If not specified on config file, a temporary error log will be created on /tmp dir. - Please make sure you have write access in /tmp dir. Not needed but will help you to - identify any problem with configuration. -''' + - If not specified on config file, a temporary error log is created on C(/tmp) directory. Please make sure you have write + access in C(/tmp) directory. Not needed but it is helpful to identify any problem with configuration. +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: Simple gunicorn run example community.general.gunicorn: app: 'wsgi' @@ -88,15 +90,15 @@ EXAMPLES = ''' venv: '/workspace/example/venv' pid: '/workspace/example/gunicorn.pid' user: 'ansible' -''' +""" -RETURN = ''' +RETURN = r""" gunicorn: - description: process id of gunicorn - returned: changed - type: str - sample: "1234" -''' + description: Process ID of gunicorn. 
+ returned: changed + type: str + sample: "1234" +""" import os import time diff --git a/plugins/modules/net_tools/haproxy.py b/plugins/modules/haproxy.py similarity index 83% rename from plugins/modules/net_tools/haproxy.py rename to plugins/modules/haproxy.py index f736036671..5fd927ba4e 100644 --- a/plugins/modules/net_tools/haproxy.py +++ b/plugins/modules/haproxy.py @@ -1,38 +1,42 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- -# Copyright: (c) 2014, Ravi Bhure -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# Copyright (c) 2014, Ravi Bhure +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = r''' ---- +DOCUMENTATION = r""" module: haproxy short_description: Enable, disable, and set weights for HAProxy backend servers using socket commands author: -- Ravi Bhure (@ravibhure) + - Ravi Bhure (@ravibhure) description: - - Enable, disable, drain and set weights for HAProxy backend servers using socket commands. + - Enable, disable, drain and set weights for HAProxy backend servers using socket commands. notes: - - Enable, disable and drain commands are restricted and can only be issued on - sockets configured for level 'admin'. For example, you can add the line - 'stats socket /var/run/haproxy.sock level admin' to the general section of - haproxy.cfg. See U(http://haproxy.1wt.eu/download/1.5/doc/configuration.txt). - - Depends on netcat (nc) being available; you need to install the appropriate - package for your operating system before this module can be used. + - Enable, disable and drain commands are restricted and can only be issued on sockets configured for level C(admin). 
For + example, you can add the line C(stats socket /var/run/haproxy.sock level admin) to the general section of C(haproxy.cfg). + See U(http://haproxy.1wt.eu/download/1.5/doc/configuration.txt). + - Depends on netcat (C(nc)) being available; you need to install the appropriate package for your operating system before + this module can be used. +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: none + diff_mode: + support: none options: backend: description: - Name of the HAProxy backend pool. - - If this parameter is unset, it will be auto-detected. + - If this parameter is unset, it is auto-detected. type: str drain: description: - - Wait until the server has no active connections or until the timeout - determined by wait_interval and wait_retries is reached. - - Continue only after the status changes to 'MAINT'. + - Wait until the server has no active connections or until the timeout determined by O(wait_interval) and O(wait_retries) + is reached. + - Continue only after the status changes to C(MAINT). - This overrides the shutdown_sessions option. type: bool default: false @@ -43,12 +47,11 @@ options: required: true shutdown_sessions: description: - - When disabling a server, immediately terminate all the sessions attached - to the specified server. - - This can be used to terminate long-running sessions after a server is put - into maintenance mode. Overridden by the drain option. + - When disabling a server, immediately terminate all the sessions attached to the specified server. + - This can be used to terminate long-running sessions after a server is put into maintenance mode. Overridden by the + drain option. type: bool - default: no + default: false socket: description: - Path to the HAProxy socket file. @@ -57,35 +60,33 @@ options: state: description: - Desired state of the provided backend host. - - Note that C(drain) state was added in version 2.4. 
- - It is supported only by HAProxy version 1.5 or later, - - When used on versions < 1.5, it will be ignored. + - Note that V(drain) state is supported only by HAProxy version 1.5 or later. When used on versions < 1.5, it is ignored. type: str required: true - choices: [ disabled, drain, enabled ] + choices: [disabled, drain, enabled] agent: description: - - Disable/enable agent checks (depending on I(state) value). + - Disable/enable agent checks (depending on O(state) value). type: bool - default: no + default: false version_added: 1.0.0 health: description: - - Disable/enable health checks (depending on I(state) value). + - Disable/enable health checks (depending on O(state) value). type: bool - default: no + default: false version_added: "1.0.0" fail_on_not_found: description: - - Fail whenever trying to enable/disable a backend host that does not exist + - Fail whenever trying to enable/disable a backend host that does not exist. type: bool - default: no + default: false wait: description: - - Wait until the server reports a status of 'UP' when C(state=enabled), - status of 'MAINT' when C(state=disabled) or status of 'DRAIN' when C(state=drain) + - Wait until the server reports a status of C(UP) when O(state=enabled), status of C(MAINT) when O(state=disabled) or + status of C(DRAIN) when O(state=drain). type: bool - default: no + default: false wait_interval: description: - Number of seconds to wait between retries. @@ -99,14 +100,12 @@ options: weight: description: - The value passed in argument. - - If the value ends with the `%` sign, then the new weight will be - relative to the initially configured weight. - - Relative weights are only permitted between 0 and 100% and absolute - weights are permitted between 0 and 256. + - If the value ends with the V(%) sign, then the new weight is relative to the initially configured weight. + - Relative weights are only permitted between 0 and 100% and absolute weights are permitted between 0 and 256. 
type: str -''' +""" -EXAMPLES = r''' +EXAMPLES = r""" - name: Disable server in 'www' backend pool community.general.haproxy: state: disabled @@ -117,8 +116,8 @@ EXAMPLES = r''' community.general.haproxy: state: disabled host: '{{ inventory_hostname }}' - health: yes - agent: yes + health: true + agent: true - name: Disable server without backend pool name (apply to all available backend pool) community.general.haproxy: @@ -138,7 +137,7 @@ EXAMPLES = r''' host: '{{ inventory_hostname }}' socket: /var/run/haproxy.sock backend: www - wait: yes + wait: true # Place server in drain mode, providing a socket file. Then check the server's # status every minute to see if it changes to maintenance mode, continuing if it @@ -148,8 +147,8 @@ EXAMPLES = r''' host: '{{ inventory_hostname }}' socket: /var/run/haproxy.sock backend: www - wait: yes - drain: yes + wait: true + drain: true wait_interval: 60 wait_retries: 60 @@ -159,13 +158,14 @@ EXAMPLES = r''' host: '{{ inventory_hostname }}' backend: www socket: /var/run/haproxy.sock - shutdown_sessions: yes + shutdown_sessions: true -- name: Disable server without backend pool name (apply to all available backend pool) but fail when the backend host is not found +- name: Disable server without backend pool name (apply to all available backend pool) but fail when the backend host is + not found community.general.haproxy: state: disabled host: '{{ inventory_hostname }}' - fail_on_not_found: yes + fail_on_not_found: true - name: Enable server in 'www' backend pool community.general.haproxy: @@ -178,14 +178,15 @@ EXAMPLES = r''' state: enabled host: '{{ inventory_hostname }}' backend: www - wait: yes + wait: true -- name: Enable server in 'www' backend pool wait until healthy. Retry 10 times with intervals of 5 seconds to retrieve the health +- name: Enable server in 'www' backend pool wait until healthy. 
Retry 10 times with intervals of 5 seconds to retrieve the + health community.general.haproxy: state: enabled host: '{{ inventory_hostname }}' backend: www - wait: yes + wait: true wait_retries: 10 wait_interval: 5 @@ -203,7 +204,7 @@ EXAMPLES = r''' host: '{{ inventory_hostname }}' socket: /var/run/haproxy.sock backend: www -''' +""" import csv import socket @@ -296,7 +297,7 @@ class HAProxy(object): """ data = self.execute('show stat', 200, False).lstrip('# ') r = csv.DictReader(data.splitlines()) - return tuple(map(lambda d: d['pxname'], filter(lambda d: d['svname'] == 'BACKEND', r))) + return tuple(d['pxname'] for d in r if d['svname'] == 'BACKEND') def discover_version(self): """ @@ -335,7 +336,7 @@ class HAProxy(object): if state is not None: self.execute(Template(cmd).substitute(pxname=backend, svname=svname)) - if self.wait: + if self.wait and not (wait_for_status == "DRAIN" and state == "DOWN"): self.wait_until_status(backend, svname, wait_for_status) def get_state_for(self, pxname, svname): @@ -345,13 +346,11 @@ class HAProxy(object): """ data = self.execute('show stat', 200, False).lstrip('# ') r = csv.DictReader(data.splitlines()) - state = tuple( - map( - lambda d: {'status': d['status'], 'weight': d['weight'], 'scur': d['scur']}, - filter(lambda d: (pxname is None or d['pxname'] - == pxname) and d['svname'] == svname, r) - ) - ) + + def unpack_state(d): + return {'status': d['status'], 'weight': d['weight'], 'scur': d['scur']} + + state = tuple(unpack_state(d) for d in r if (pxname is None or d['pxname'] == pxname) and d['svname'] == svname) return state or None def wait_until_status(self, pxname, svname, status): diff --git a/plugins/modules/cloud/heroku/heroku_collaborator.py b/plugins/modules/heroku_collaborator.py similarity index 69% rename from plugins/modules/cloud/heroku/heroku_collaborator.py rename to plugins/modules/heroku_collaborator.py index bbc34fdb30..e8094760a6 100644 --- a/plugins/modules/cloud/heroku/heroku_collaborator.py +++ 
b/plugins/modules/heroku_collaborator.py @@ -1,58 +1,63 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- -# Copyright: (c) 2018, Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# Copyright (c) 2018, Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: heroku_collaborator -short_description: "Add or delete app collaborators on Heroku" +short_description: Add or delete app collaborators on Heroku description: - Manages collaborators for Heroku apps. - - If set to C(present) and heroku user is already collaborator, then do nothing. - - If set to C(present) and heroku user is not collaborator, then add user to app. - - If set to C(absent) and heroku user is collaborator, then delete user from app. + - If set to V(present) and heroku user is already collaborator, then do nothing. + - If set to V(present) and heroku user is not collaborator, then add user to app. + - If set to V(absent) and heroku user is collaborator, then delete user from app. author: - Marcel Arns (@marns93) requirements: - heroku3 +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: none options: api_key: type: str description: - - Heroku API key + - Heroku API key. apps: type: list elements: str description: - - List of Heroku App names + - List of Heroku App names. required: true suppress_invitation: description: - - Suppress email invitation when creating collaborator + - Suppress email invitation when creating collaborator. type: bool - default: "no" + default: false user: type: str description: - - User ID or e-mail + - User ID or e-mail. 
required: true state: type: str description: - - Create or remove the heroku collaborator + - Create or remove the heroku collaborator. choices: ["present", "absent"] default: "present" notes: - - C(HEROKU_API_KEY) and C(TF_VAR_HEROKU_API_KEY) env variable can be used instead setting C(api_key). - - If you use I(--check), you can also pass the I(-v) flag to see affected apps in C(msg), e.g. ["heroku-example-app"]. -''' + - E(HEROKU_API_KEY) and E(TF_VAR_HEROKU_API_KEY) environment variables can be used instead setting O(api_key). + - If you use C(check_mode), you can also pass the C(-v) flag to see affected apps in C(msg), for example C(["heroku-example-app"]). +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: Create a heroku collaborator community.general.heroku_collaborator: api_key: YOUR_API_KEY @@ -68,12 +73,12 @@ EXAMPLES = ''' suppress_invitation: '{{ item.suppress_invitation | default(suppress_invitation) }}' state: '{{ item.state | default("present") }}' with_items: - - { user: 'a.b@example.com' } - - { state: 'absent', user: 'b.c@example.com', suppress_invitation: false } - - { user: 'x.y@example.com', apps: ["heroku-example-app"] } -''' + - {user: 'a.b@example.com'} + - {state: 'absent', user: 'b.c@example.com', suppress_invitation: false} + - {user: 'x.y@example.com', apps: ["heroku-example-app"]} +""" -RETURN = ''' # ''' +RETURN = """ # """ from ansible.module_utils.basic import AnsibleModule from ansible_collections.community.general.plugins.module_utils.heroku import HerokuHelper diff --git a/plugins/modules/source_control/hg.py b/plugins/modules/hg.py similarity index 70% rename from plugins/modules/source_control/hg.py rename to plugins/modules/hg.py index 572b036e1f..afd3e59dd3 100644 --- a/plugins/modules/source_control/hg.py +++ b/plugins/modules/hg.py @@ -1,90 +1,91 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- -# Copyright: (c) 2013, Yeukhon Wong -# Copyright: (c) 2014, Nate Coraor -# GNU General Public License v3.0+ (see COPYING or 
https://www.gnu.org/licenses/gpl-3.0.txt) +# Copyright (c) 2013, Yeukhon Wong +# Copyright (c) 2014, Nate Coraor +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: hg short_description: Manages Mercurial (hg) repositories description: - - Manages Mercurial (hg) repositories. Supports SSH, HTTP/S and local address. + - Manages Mercurial (hg) repositories. Supports SSH, HTTP/S and local address. author: "Yeukhon Wong (@yeukhon)" +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: none + diff_mode: + support: none options: - repo: - description: - - The repository address. - required: yes - aliases: [ name ] - type: str - dest: - description: - - Absolute path of where the repository should be cloned to. - This parameter is required, unless clone and update are set to no - type: path - revision: - description: - - Equivalent C(-r) option in hg command which could be the changeset, revision number, - branch name or even tag. - aliases: [ version ] - type: str - force: - description: - - Discards uncommitted changes. Runs C(hg update -C). Prior to - 1.9, the default was `yes`. - type: bool - default: 'no' - purge: - description: - - Deletes untracked files. Runs C(hg purge). - type: bool - default: 'no' - update: - description: - - If C(no), do not retrieve new revisions from the origin repository - type: bool - default: 'yes' - clone: - description: - - If C(no), do not clone the repository if it does not exist locally. - type: bool - default: 'yes' - executable: - description: - - Path to hg executable to use. If not supplied, - the normal mechanism for resolving binary paths will be used. 
- type: str + repo: + description: + - The repository address. + required: true + aliases: [name] + type: str + dest: + description: + - Absolute path of where the repository should be cloned to. This parameter is required, unless clone and update are + set to no. + type: path + revision: + description: + - Equivalent C(-r) option in hg command which could be the changeset, revision number, branch name or even tag. + aliases: [version] + type: str + force: + description: + - Discards uncommitted changes. Runs C(hg update -C). + type: bool + default: false + purge: + description: + - Deletes untracked files. Runs C(hg purge). + type: bool + default: false + update: + description: + - If V(false), do not retrieve new revisions from the origin repository. + type: bool + default: true + clone: + description: + - If V(false), do not clone the repository if it does not exist locally. + type: bool + default: true + executable: + description: + - Path to C(hg) executable to use. If not supplied, the normal mechanism for resolving binary paths is used. + type: str notes: - - This module does not support push capability. See U(https://github.com/ansible/ansible/issues/31156). - - "If the task seems to be hanging, first verify remote host is in C(known_hosts). - SSH will prompt user to authorize the first contact with a remote host. To avoid this prompt, - one solution is to add the remote host public key in C(/etc/ssh/ssh_known_hosts) before calling - the hg module, with the following command: ssh-keyscan remote_host.com >> /etc/ssh/ssh_known_hosts." - - As per 01 Dec 2018, Bitbucket has dropped support for TLSv1 and TLSv1.1 connections. As such, - if the underlying system still uses a Python version below 2.7.9, you will have issues checking out - bitbucket repositories. See U(https://bitbucket.org/blog/deprecating-tlsv1-tlsv1-1-2018-12-01). -''' + - This module does not support push capability. See U(https://github.com/ansible/ansible/issues/31156). 
+ - 'If the task seems to be hanging, first verify remote host is in C(known_hosts). SSH prompts user to authorize the first + contact with a remote host. To avoid this prompt, one solution is to add the remote host public key in C(/etc/ssh/ssh_known_hosts) + before calling the hg module, with the following command: C(ssh-keyscan remote_host.com >> /etc/ssh/ssh_known_hosts).' + - As per 01 Dec 2018, Bitbucket has dropped support for TLSv1 and TLSv1.1 connections. As such, if the underlying system + still uses a Python version below 2.7.9, you are bound to have issues checking out bitbucket repositories. See + U(https://bitbucket.org/blog/deprecating-tlsv1-tlsv1-1-2018-12-01). +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: Ensure the current working copy is inside the stable branch and deletes untracked files if any. community.general.hg: repo: https://bitbucket.org/user/repo1 dest: /home/user/repo1 revision: stable - purge: yes + purge: true - name: Get information about the repository whether or not it has already been cloned locally. community.general.hg: repo: git://bitbucket.org/user/repo dest: /srv/checkout - clone: no - update: no -''' + clone: false + update: false +""" import os @@ -202,7 +203,7 @@ class Hg(object): if the desired changeset is already the current changeset. 
""" if self.revision is None or len(self.revision) < 7: - # Assume it's a rev number, tag, or branch + # Assume it is a rev number, tag, or branch return False (rc, out, err) = self._command(['--debug', 'id', '-i', '-R', self.dest]) if rc != 0: @@ -219,12 +220,12 @@ def main(): argument_spec=dict( repo=dict(type='str', required=True, aliases=['name']), dest=dict(type='path'), - revision=dict(type='str', default=None, aliases=['version']), + revision=dict(type='str', aliases=['version']), force=dict(type='bool', default=False), purge=dict(type='bool', default=False), update=dict(type='bool', default=True), clone=dict(type='bool', default=True), - executable=dict(type='str', default=None), + executable=dict(type='str'), ), ) repo = module.params['repo'] @@ -244,7 +245,7 @@ def main(): cleaned = False if not dest and (clone or update): - module.fail_json(msg="the destination directory must be specified unless clone=no and update=no") + module.fail_json(msg="the destination directory must be specified unless clone=false and update=false") hg = Hg(module, dest, repo, revision, hg_path) diff --git a/plugins/modules/packaging/os/homebrew.py b/plugins/modules/homebrew.py similarity index 53% rename from plugins/modules/packaging/os/homebrew.py rename to plugins/modules/homebrew.py index db1feda78e..2b0e4408a2 100644 --- a/plugins/modules/packaging/os/homebrew.py +++ b/plugins/modules/homebrew.py @@ -1,86 +1,97 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- -# (c) 2013, Andrew Dunham -# (c) 2013, Daniel Jaouen -# (c) 2015, Indrajit Raychaudhuri +# Copyright (c) 2013, Andrew Dunham +# Copyright (c) 2013, Daniel Jaouen +# Copyright (c) 2015, Indrajit Raychaudhuri # # Based on macports (Jimmy Tang ) # -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import 
absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: homebrew author: - - "Indrajit Raychaudhuri (@indrajitr)" - - "Daniel Jaouen (@danieljaouen)" - - "Andrew Dunham (@andrew-d)" + - "Indrajit Raychaudhuri (@indrajitr)" + - "Daniel Jaouen (@danieljaouen)" + - "Andrew Dunham (@andrew-d)" requirements: - - "python >= 2.6" - - homebrew must already be installed on the target system + - homebrew must already be installed on the target system short_description: Package manager for Homebrew description: - - Manages Homebrew packages + - Manages Homebrew packages. +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: none options: - name: - description: - - A list of names of packages to install/remove. - aliases: [ 'formula', 'package', 'pkg' ] - type: list - elements: str - path: - description: - - "A ':' separated list of paths to search for 'brew' executable. - Since a package (I(formula) in homebrew parlance) location is prefixed relative to the actual path of I(brew) command, - providing an alternative I(brew) path enables managing different set of packages in an alternative location in the system." - default: '/usr/local/bin:/opt/homebrew/bin' - type: path - state: - description: - - state of the package. - choices: [ 'absent', 'head', 'installed', 'latest', 'linked', 'present', 'removed', 'uninstalled', 'unlinked', 'upgraded' ] - default: present - type: str - update_homebrew: - description: - - update homebrew itself first. - type: bool - default: no - upgrade_all: - description: - - upgrade all homebrew packages. - type: bool - default: no - aliases: ['upgrade'] - install_options: - description: - - options flags to install a package. - aliases: ['options'] - type: list - elements: str - upgrade_options: - description: - - Option flags to upgrade. 
- type: list - elements: str - version_added: '0.2.0' + name: + description: + - A list of names of packages to install/remove. + aliases: ['formula', 'package', 'pkg'] + type: list + elements: str + path: + description: + - A V(:) separated list of paths to search for C(brew) executable. Since a package (I(formula) in homebrew parlance) + location is prefixed relative to the actual path of C(brew) command, providing an alternative C(brew) path enables + managing different set of packages in an alternative location in the system. + default: '/usr/local/bin:/opt/homebrew/bin:/home/linuxbrew/.linuxbrew/bin' + type: path + state: + description: + - State of the package. + choices: ['absent', 'head', 'installed', 'latest', 'linked', 'present', 'removed', 'uninstalled', 'unlinked', 'upgraded'] + default: present + type: str + update_homebrew: + description: + - Update homebrew itself first. + type: bool + default: false + upgrade_all: + description: + - Upgrade all homebrew packages. + type: bool + default: false + aliases: ['upgrade'] + install_options: + description: + - Options flags to install a package. + aliases: ['options'] + type: list + elements: str + upgrade_options: + description: + - Option flags to upgrade. + type: list + elements: str + version_added: '0.2.0' + force_formula: + description: + - Force the package(s) to be treated as a formula (equivalent to C(brew --formula)). + - To install a cask, use the M(community.general.homebrew_cask) module. + type: bool + default: false + version_added: 9.0.0 notes: - - When used with a `loop:` each package will be processed individually, - it is much more efficient to pass the list directly to the `name` option. -''' + - When used with a C(loop:) each package is processed individually, it is much more efficient to pass the list directly + to the O(name) option. 
+""" -EXAMPLES = ''' +EXAMPLES = r""" # Install formula foo with 'brew' in default path - community.general.homebrew: name: foo state: present -# Install formula foo with 'brew' in alternate path C(/my/other/location/bin) +# Install formula foo with 'brew' in alternate path (/my/other/location/bin) - community.general.homebrew: name: foo path: /my/other/location/bin @@ -90,18 +101,18 @@ EXAMPLES = ''' - community.general.homebrew: name: foo state: present - update_homebrew: yes + update_homebrew: true # Update homebrew first and upgrade formula foo to latest available with 'brew' in default path - community.general.homebrew: name: foo state: latest - update_homebrew: yes + update_homebrew: true # Update homebrew and upgrade all packages - community.general.homebrew: - update_homebrew: yes - upgrade_all: yes + update_homebrew: true + upgrade_all: true # Miscellaneous other examples - community.general.homebrew: @@ -132,37 +143,44 @@ EXAMPLES = ''' - name: Use ignore-pinned option while upgrading all community.general.homebrew: - upgrade_all: yes + upgrade_all: true upgrade_options: ignore-pinned -''' -RETURN = ''' +- name: Force installing a formula whose name is also a cask name + community.general.homebrew: + name: ambiguous_formula + state: present + force_formula: true +""" + +RETURN = r""" msg: - description: if the cache was updated or not - returned: always - type: str - sample: "Changed: 0, Unchanged: 2" + description: If the cache was updated or not. + returned: always + type: str + sample: "Changed: 0, Unchanged: 2" unchanged_pkgs: - description: - - List of package names which are unchanged after module run - returned: success - type: list - sample: ["awscli", "ag"] - version_added: '0.2.0' + description: + - List of package names which are unchanged after module run. 
+ returned: success + type: list + sample: ["awscli", "ag"] + version_added: '0.2.0' changed_pkgs: - description: - - List of package names which are changed after module run - returned: success - type: list - sample: ['git', 'git-cola'] - version_added: '0.2.0' -''' + description: + - List of package names which are changed after module run. + returned: success + type: list + sample: ["git", "git-cola"] + version_added: '0.2.0' +""" -import os.path +import json import re +from ansible_collections.community.general.plugins.module_utils.homebrew import HomebrewValidate + from ansible.module_utils.basic import AnsibleModule -from ansible.module_utils.six import iteritems, string_types # exceptions -------------------------------------------------------------- {{{ @@ -174,107 +192,20 @@ class HomebrewException(Exception): # utils ------------------------------------------------------------------- {{{ def _create_regex_group_complement(s): lines = (line.strip() for line in s.split('\n') if line.strip()) - chars = filter(None, (line.split('#')[0].strip() for line in lines)) + chars = [_f for _f in (line.split('#')[0].strip() for line in lines) if _f] group = r'[^' + r''.join(chars) + r']' return re.compile(group) + + +def _check_package_in_json(json_output, package_type): + return bool(json_output.get(package_type, []) and json_output[package_type][0].get("installed")) # /utils ------------------------------------------------------------------ }}} class Homebrew(object): '''A class to manage Homebrew packages.''' - # class regexes ------------------------------------------------ {{{ - VALID_PATH_CHARS = r''' - \w # alphanumeric characters (i.e., [a-zA-Z0-9_]) - \s # spaces - : # colons - {sep} # the OS-specific path separator - . # dots - \- # dashes - '''.format(sep=os.path.sep) - - VALID_BREW_PATH_CHARS = r''' - \w # alphanumeric characters (i.e., [a-zA-Z0-9_]) - \s # spaces - {sep} # the OS-specific path separator - . 
# dots - \- # dashes - '''.format(sep=os.path.sep) - - VALID_PACKAGE_CHARS = r''' - \w # alphanumeric characters (i.e., [a-zA-Z0-9_]) - . # dots - / # slash (for taps) - \+ # plusses - \- # dashes - : # colons (for URLs) - @ # at-sign - ''' - - INVALID_PATH_REGEX = _create_regex_group_complement(VALID_PATH_CHARS) - INVALID_BREW_PATH_REGEX = _create_regex_group_complement(VALID_BREW_PATH_CHARS) - INVALID_PACKAGE_REGEX = _create_regex_group_complement(VALID_PACKAGE_CHARS) - # /class regexes ----------------------------------------------- }}} - # class validations -------------------------------------------- {{{ - @classmethod - def valid_path(cls, path): - ''' - `path` must be one of: - - list of paths - - a string containing only: - - alphanumeric characters - - dashes - - dots - - spaces - - colons - - os.path.sep - ''' - - if isinstance(path, string_types): - return not cls.INVALID_PATH_REGEX.search(path) - - try: - iter(path) - except TypeError: - return False - else: - paths = path - return all(cls.valid_brew_path(path_) for path_ in paths) - - @classmethod - def valid_brew_path(cls, brew_path): - ''' - `brew_path` must be one of: - - None - - a string containing only: - - alphanumeric characters - - dashes - - dots - - spaces - - os.path.sep - ''' - - if brew_path is None: - return True - - return ( - isinstance(brew_path, string_types) - and not cls.INVALID_BREW_PATH_REGEX.search(brew_path) - ) - - @classmethod - def valid_package(cls, package): - '''A valid package is either None or alphanumeric.''' - - if package is None: - return True - - return ( - isinstance(package, string_types) - and not cls.INVALID_PACKAGE_REGEX.search(package) - ) - @classmethod def valid_state(cls, state): ''' @@ -292,7 +223,7 @@ class Homebrew(object): return True else: return ( - isinstance(state, string_types) + isinstance(state, str) and state.lower() in ( 'installed', 'upgraded', @@ -334,14 +265,14 @@ class Homebrew(object): @path.setter def path(self, path): - if not 
self.valid_path(path): + if not HomebrewValidate.valid_path(path): self._path = [] self.failed = True self.message = 'Invalid path: {0}.'.format(path) raise HomebrewException(self.message) else: - if isinstance(path, string_types): + if isinstance(path, str): self._path = path.split(':') else: self._path = path @@ -354,7 +285,7 @@ class Homebrew(object): @brew_path.setter def brew_path(self, brew_path): - if not self.valid_brew_path(brew_path): + if not HomebrewValidate.valid_brew_path(brew_path): self._brew_path = None self.failed = True self.message = 'Invalid brew_path: {0}.'.format(brew_path) @@ -373,26 +304,12 @@ class Homebrew(object): self._params = self.module.params return self._params - @property - def current_package(self): - return self._current_package - - @current_package.setter - def current_package(self, package): - if not self.valid_package(package): - self._current_package = None - self.failed = True - self.message = 'Invalid package: {0}.'.format(package) - raise HomebrewException(self.message) - - else: - self._current_package = package - return package # /class properties -------------------------------------------- }}} def __init__(self, module, path, packages=None, state=None, update_homebrew=False, upgrade_all=False, - install_options=None, upgrade_options=None): + install_options=None, upgrade_options=None, + force_formula=False): if not install_options: install_options = list() if not upgrade_options: @@ -402,7 +319,8 @@ class Homebrew(object): state=state, update_homebrew=update_homebrew, upgrade_all=upgrade_all, install_options=install_options, - upgrade_options=upgrade_options,) + upgrade_options=upgrade_options, + force_formula=force_formula) self._prep() @@ -410,14 +328,14 @@ class Homebrew(object): def _setup_status_vars(self): self.failed = False self.changed = False - self.changed_count = 0 - self.unchanged_count = 0 self.changed_pkgs = [] self.unchanged_pkgs = [] self.message = '' def _setup_instance_vars(self, **kwargs): - for 
key, val in iteritems(kwargs): + self.installed_packages = set() + self.outdated_packages = set() + for key, val in kwargs.items(): setattr(self, key, val) def _prep(self): @@ -443,8 +361,98 @@ class Homebrew(object): return self.brew_path - def _status(self): - return (self.failed, self.changed, self.message) + def _validate_packages_names(self): + invalid_packages = [] + for package in self.packages: + if not HomebrewValidate.valid_package(package): + invalid_packages.append(package) + + if invalid_packages: + self.failed = True + self.message = 'Invalid package{0}: {1}'.format( + "s" if len(invalid_packages) > 1 else "", + ", ".join(invalid_packages), + ) + raise HomebrewException(self.message) + + def _save_package_info(self, package_detail, package_name): + if bool(package_detail.get("installed")): + self.installed_packages.add(package_name) + if bool(package_detail.get("outdated")): + self.outdated_packages.add(package_name) + + def _extract_package_name(self, package_detail, is_cask): + # "brew info" can lookup by name, full_name, token, full_token, + # oldnames, old_tokens, or aliases. In addition, any of the + # above names can be prefixed by the tap. Any of these can be + # supplied by the user as the package name. In case of + # ambiguity, where a given name might match multiple packages, + # formulae are preferred over casks. For all other ambiguities, + # the results are an error. Note that in the homebrew/core and + # homebrew/cask taps, there are no "other" ambiguities. + if is_cask: # according to brew info + name = package_detail["token"] + full_name = package_detail["full_token"] + else: + name = package_detail["name"] + full_name = package_detail["full_name"] + + # Issue https://github.com/ansible-collections/community.general/issues/9803: + # name can include the tap as a prefix, in order to disambiguate, + # e.g. casks from identically named formulae. 
+ # + # Issue https://github.com/ansible-collections/community.general/issues/10012: + # package_detail["tap"] is None if package is no longer available. + # + # Issue https://github.com/ansible-collections/community.general/issues/10804 + # name can be an alias, oldnames or old_tokens optionally prefixed by tap + package_names = {name, full_name} + package_names.update(package_detail.get("aliases", [])) + package_names.update(package_detail.get("oldnames", [])) + package_names.update(package_detail.get("old_tokens", [])) + if package_detail['tap']: + # names so far, with tap prefix added to each + tapped_names = {package_detail["tap"] + "/" + x for x in package_names} + package_names.update(tapped_names) + + # Finally, identify which of all those package names was the one supplied by the user. + package_names = package_names & set(self.packages) + if len(package_names) != 1: + self.failed = True + self.message = "Package names for {name} are missing or ambiguous: {packages}".format( + name=name, + packages=", ".join(str(p) for p in package_names), + ) + raise HomebrewException(self.message) + + # Then make sure the user provided name resurface. 
+ return package_names.pop() + + def _get_packages_info(self): + cmd = [ + "{brew_path}".format(brew_path=self.brew_path), + "info", + "--json=v2", + ] + cmd.extend(self.packages) + if self.force_formula: + cmd.append("--formula") + + rc, out, err = self.module.run_command(cmd) + if rc != 0: + self.failed = True + self.message = err.strip() or ("Unknown failure with exit code %d" % rc) + raise HomebrewException(self.message) + + data = json.loads(out) + for package_detail in data.get("formulae", []): + package_name = self._extract_package_name(package_detail, is_cask=False) + self._save_package_info(package_detail, package_name) + + for package_detail in data.get("casks", []): + package_name = self._extract_package_name(package_detail, is_cask=True) + self._save_package_info(package_detail, package_name) + # /prep -------------------------------------------------------- }}} def run(self): @@ -453,68 +461,14 @@ class Homebrew(object): except HomebrewException: pass - if not self.failed and (self.changed_count + self.unchanged_count > 1): + changed_count = len(self.changed_pkgs) + unchanged_count = len(self.unchanged_pkgs) + if not self.failed and (changed_count + unchanged_count > 1): self.message = "Changed: %d, Unchanged: %d" % ( - self.changed_count, - self.unchanged_count, + changed_count, + unchanged_count, ) - (failed, changed, message) = self._status() - - return (failed, changed, message) - - # checks ------------------------------------------------------- {{{ - def _current_package_is_installed(self): - if not self.valid_package(self.current_package): - self.failed = True - self.message = 'Invalid package: {0}.'.format(self.current_package) - raise HomebrewException(self.message) - - cmd = [ - "{brew_path}".format(brew_path=self.brew_path), - "info", - self.current_package, - ] - rc, out, err = self.module.run_command(cmd) - for line in out.split('\n'): - if ( - re.search(r'Built from source', line) - or re.search(r'Poured from bottle', line) - ): - return 
True - - return False - - def _current_package_is_outdated(self): - if not self.valid_package(self.current_package): - return False - - rc, out, err = self.module.run_command([ - self.brew_path, - 'outdated', - self.current_package, - ]) - - return rc != 0 - - def _current_package_is_installed_from_head(self): - if not Homebrew.valid_package(self.current_package): - return False - elif not self._current_package_is_installed(): - return False - - rc, out, err = self.module.run_command([ - self.brew_path, - 'info', - self.current_package, - ]) - - try: - version_info = [line for line in out.split('\n') if line][0] - except IndexError: - return False - - return version_info.split(' ')[-1] == 'HEAD' - # /checks ------------------------------------------------------ }}} + return (self.failed, self.changed, self.message) # commands ----------------------------------------------------- {{{ def _run(self): @@ -525,6 +479,8 @@ class Homebrew(object): self._upgrade_all() if self.packages: + self._validate_packages_names() + self._get_packages_info() if self.state == 'installed': return self._install_packages() elif self.state == 'upgraded': @@ -550,7 +506,7 @@ class Homebrew(object): 'update', ]) if rc == 0: - if out and isinstance(out, string_types): + if out and isinstance(out, str): already_updated = any( re.search(r'Already up-to-date.', s.strip(), re.IGNORECASE) for s in out.split('\n') @@ -594,24 +550,22 @@ class Homebrew(object): # /_upgrade_all -------------------------- }}} # installed ------------------------------ {{{ - def _install_current_package(self): - if not self.valid_package(self.current_package): - self.failed = True - self.message = 'Invalid package: {0}.'.format(self.current_package) - raise HomebrewException(self.message) + def _install_packages(self): + packages_to_install = set(self.packages) - self.installed_packages - if self._current_package_is_installed(): - self.unchanged_count += 1 - self.unchanged_pkgs.append(self.current_package) - 
self.message = 'Package already installed: {0}'.format( - self.current_package, + if len(packages_to_install) == 0: + self.unchanged_pkgs.extend(self.packages) + self.message = 'Package{0} already installed: {1}'.format( + "s" if len(self.packages) > 1 else "", + ", ".join(self.packages), ) return True if self.module.check_mode: self.changed = True - self.message = 'Package would be installed: {0}'.format( - self.current_package + self.message = 'Package{0} would be installed: {1}'.format( + "s" if len(packages_to_install) > 1 else "", + ", ".join(packages_to_install) ) raise HomebrewException(self.message) @@ -620,79 +574,36 @@ class Homebrew(object): else: head = None + if self.force_formula: + formula = '--formula' + else: + formula = None + opts = ( [self.brew_path, 'install'] + self.install_options - + [self.current_package, head] + + list(packages_to_install) + + [head, formula] ) cmd = [opt for opt in opts if opt] rc, out, err = self.module.run_command(cmd) - if self._current_package_is_installed(): - self.changed_count += 1 - self.changed_pkgs.append(self.current_package) + if rc == 0: + self.changed_pkgs.extend(packages_to_install) + self.unchanged_pkgs.extend(self.installed_packages) self.changed = True - self.message = 'Package installed: {0}'.format(self.current_package) + self.message = 'Package{0} installed: {1}'.format( + "s" if len(packages_to_install) > 1 else "", + ", ".join(packages_to_install) + ) return True else: self.failed = True self.message = err.strip() raise HomebrewException(self.message) - - def _install_packages(self): - for package in self.packages: - self.current_package = package - self._install_current_package() - - return True # /installed ----------------------------- }}} # upgraded ------------------------------- {{{ - def _upgrade_current_package(self): - command = 'upgrade' - - if not self.valid_package(self.current_package): - self.failed = True - self.message = 'Invalid package: {0}.'.format(self.current_package) - raise 
HomebrewException(self.message) - - if not self._current_package_is_installed(): - command = 'install' - - if self._current_package_is_installed() and not self._current_package_is_outdated(): - self.message = 'Package is already upgraded: {0}'.format( - self.current_package, - ) - self.unchanged_count += 1 - self.unchanged_pkgs.append(self.current_package) - return True - - if self.module.check_mode: - self.changed = True - self.message = 'Package would be upgraded: {0}'.format( - self.current_package - ) - raise HomebrewException(self.message) - - opts = ( - [self.brew_path, command] - + self.install_options - + [self.current_package] - ) - cmd = [opt for opt in opts if opt] - rc, out, err = self.module.run_command(cmd) - - if self._current_package_is_installed() and not self._current_package_is_outdated(): - self.changed_count += 1 - self.changed_pkgs.append(self.current_package) - self.changed = True - self.message = 'Package upgraded: {0}'.format(self.current_package) - return True - else: - self.failed = True - self.message = err.strip() - raise HomebrewException(self.message) - def _upgrade_all_packages(self): opts = ( [self.brew_path, 'upgrade'] @@ -714,153 +625,188 @@ class Homebrew(object): if not self.packages: self._upgrade_all_packages() else: - for package in self.packages: - self.current_package = package - self._upgrade_current_package() - return True + # There are 3 action possible here depending on installed and outdated states: + # - not installed -> 'install' + # - installed and outdated -> 'upgrade' + # - installed and NOT outdated -> Nothing to do! 
+ packages_to_install = set(self.packages) - self.installed_packages + packages_to_upgrade = self.installed_packages & self.outdated_packages + packages_to_install_or_upgrade = packages_to_install | packages_to_upgrade + + if len(packages_to_install_or_upgrade) == 0: + self.unchanged_pkgs.extend(self.packages) + self.message = 'Package{0} already upgraded: {1}'.format( + "s" if len(self.packages) > 1 else "", + ", ".join(self.packages), + ) + return True + + if self.module.check_mode: + self.changed = True + self.message = 'Package{0} would be upgraded: {1}'.format( + "s" if len(packages_to_install_or_upgrade) > 1 else "", + ", ".join(packages_to_install_or_upgrade) + ) + raise HomebrewException(self.message) + + for command, packages in [ + ("install", packages_to_install), + ("upgrade", packages_to_upgrade) + ]: + if not packages: + continue + + opts = ( + [self.brew_path, command] + + self.install_options + + list(packages) + ) + cmd = [opt for opt in opts if opt] + rc, out, err = self.module.run_command(cmd) + + if rc != 0: + self.failed = True + self.message = err.strip() + raise HomebrewException(self.message) + + self.changed_pkgs.extend(packages_to_install_or_upgrade) + self.unchanged_pkgs.extend(set(self.packages) - packages_to_install_or_upgrade) + self.changed = True + self.message = 'Package{0} upgraded: {1}'.format( + "s" if len(packages_to_install_or_upgrade) > 1 else "", + ", ".join(packages_to_install_or_upgrade), + ) # /upgraded ------------------------------ }}} # uninstalled ---------------------------- {{{ - def _uninstall_current_package(self): - if not self.valid_package(self.current_package): - self.failed = True - self.message = 'Invalid package: {0}.'.format(self.current_package) - raise HomebrewException(self.message) + def _uninstall_packages(self): + packages_to_uninstall = self.installed_packages & set(self.packages) - if not self._current_package_is_installed(): - self.unchanged_count += 1 - 
self.unchanged_pkgs.append(self.current_package) - self.message = 'Package already uninstalled: {0}'.format( - self.current_package, + if len(packages_to_uninstall) == 0: + self.unchanged_pkgs.extend(self.packages) + self.message = 'Package{0} already uninstalled: {1}'.format( + "s" if len(self.packages) > 1 else "", + ", ".join(self.packages), ) return True if self.module.check_mode: self.changed = True - self.message = 'Package would be uninstalled: {0}'.format( - self.current_package + self.message = 'Package{0} would be uninstalled: {1}'.format( + "s" if len(packages_to_uninstall) > 1 else "", + ", ".join(packages_to_uninstall) ) raise HomebrewException(self.message) opts = ( [self.brew_path, 'uninstall', '--force'] + self.install_options - + [self.current_package] + + list(packages_to_uninstall) ) cmd = [opt for opt in opts if opt] rc, out, err = self.module.run_command(cmd) - if not self._current_package_is_installed(): - self.changed_count += 1 - self.changed_pkgs.append(self.current_package) + if rc == 0: + self.changed_pkgs.extend(packages_to_uninstall) + self.unchanged_pkgs.extend(set(self.packages) - self.installed_packages) self.changed = True - self.message = 'Package uninstalled: {0}'.format(self.current_package) + self.message = 'Package{0} uninstalled: {1}'.format( + "s" if len(packages_to_uninstall) > 1 else "", + ", ".join(packages_to_uninstall) + ) return True else: self.failed = True self.message = err.strip() raise HomebrewException(self.message) - - def _uninstall_packages(self): - for package in self.packages: - self.current_package = package - self._uninstall_current_package() - - return True # /uninstalled ----------------------------- }}} # linked --------------------------------- {{{ - def _link_current_package(self): - if not self.valid_package(self.current_package): + def _link_packages(self): + missing_packages = set(self.packages) - self.installed_packages + if missing_packages: self.failed = True - self.message = 'Invalid package: 
{0}.'.format(self.current_package) - raise HomebrewException(self.message) - - if not self._current_package_is_installed(): - self.failed = True - self.message = 'Package not installed: {0}.'.format(self.current_package) + self.message = 'Package{0} not installed: {1}.'.format( + "s" if len(missing_packages) > 1 else "", + ", ".join(missing_packages), + ) raise HomebrewException(self.message) if self.module.check_mode: self.changed = True - self.message = 'Package would be linked: {0}'.format( - self.current_package + self.message = 'Package{0} would be linked: {1}'.format( + "s" if len(self.packages) > 1 else "", + ", ".join(self.packages) ) raise HomebrewException(self.message) opts = ( [self.brew_path, 'link'] + self.install_options - + [self.current_package] + + self.packages ) cmd = [opt for opt in opts if opt] rc, out, err = self.module.run_command(cmd) if rc == 0: - self.changed_count += 1 - self.changed_pkgs.append(self.current_package) + self.changed_pkgs.extend(self.packages) self.changed = True - self.message = 'Package linked: {0}'.format(self.current_package) - + self.message = 'Package{0} linked: {1}'.format( + "s" if len(self.packages) > 1 else "", + ", ".join(self.packages) + ) return True else: self.failed = True - self.message = 'Package could not be linked: {0}.'.format(self.current_package) + self.message = 'Package{0} could not be linked: {1}.'.format( + "s" if len(self.packages) > 1 else "", + ", ".join(self.packages) + ) raise HomebrewException(self.message) - - def _link_packages(self): - for package in self.packages: - self.current_package = package - self._link_current_package() - - return True # /linked -------------------------------- }}} # unlinked ------------------------------- {{{ - def _unlink_current_package(self): - if not self.valid_package(self.current_package): + def _unlink_packages(self): + missing_packages = set(self.packages) - self.installed_packages + if missing_packages: self.failed = True - self.message = 'Invalid 
package: {0}.'.format(self.current_package) - raise HomebrewException(self.message) - - if not self._current_package_is_installed(): - self.failed = True - self.message = 'Package not installed: {0}.'.format(self.current_package) + self.message = 'Package{0} not installed: {1}.'.format( + "s" if len(missing_packages) > 1 else "", + ", ".join(missing_packages), + ) raise HomebrewException(self.message) if self.module.check_mode: self.changed = True - self.message = 'Package would be unlinked: {0}'.format( - self.current_package + self.message = 'Package{0} would be unlinked: {1}'.format( + "s" if len(self.packages) > 1 else "", + ", ".join(self.packages) ) raise HomebrewException(self.message) opts = ( [self.brew_path, 'unlink'] + self.install_options - + [self.current_package] + + self.packages ) cmd = [opt for opt in opts if opt] rc, out, err = self.module.run_command(cmd) if rc == 0: - self.changed_count += 1 - self.changed_pkgs.append(self.current_package) + self.changed_pkgs.extend(self.packages) self.changed = True - self.message = 'Package unlinked: {0}'.format(self.current_package) - + self.message = 'Package{0} unlinked: {1}'.format( + "s" if len(self.packages) > 1 else "", + ", ".join(self.packages) + ) return True else: self.failed = True - self.message = 'Package could not be unlinked: {0}.'.format(self.current_package) + self.message = 'Package{0} could not be unlinked: {1}.'.format( + "s" if len(self.packages) > 1 else "", + ", ".join(self.packages) + ) raise HomebrewException(self.message) - - def _unlink_packages(self): - for package in self.packages: - self.current_package = package - self._unlink_current_package() - - return True # /unlinked ------------------------------ }}} # /commands ---------------------------------------------------- }}} @@ -870,13 +816,11 @@ def main(): argument_spec=dict( name=dict( aliases=["pkg", "package", "formula"], - required=False, type='list', elements='str', ), path=dict( - 
default="/usr/local/bin:/opt/homebrew/bin", - required=False, + default="/usr/local/bin:/opt/homebrew/bin:/home/linuxbrew/.linuxbrew/bin", type='path', ), state=dict( @@ -898,16 +842,18 @@ def main(): type='bool', ), install_options=dict( - default=None, aliases=['options'], type='list', elements='str', ), upgrade_options=dict( - default=None, type='list', elements='str', - ) + ), + force_formula=dict( + default=False, + type='bool', + ), ), supports_check_mode=True, ) @@ -917,7 +863,7 @@ def main(): p = module.params if p['name']: - packages = p['name'] + packages = [package_name.lower() for package_name in p['name']] else: packages = None @@ -939,6 +885,7 @@ def main(): if state in ('absent', 'removed', 'uninstalled'): state = 'absent' + force_formula = p['force_formula'] update_homebrew = p['update_homebrew'] if not update_homebrew: module.run_command_environ_update.update( @@ -955,7 +902,7 @@ def main(): brew = Homebrew(module=module, path=path, packages=packages, state=state, update_homebrew=update_homebrew, upgrade_all=upgrade_all, install_options=install_options, - upgrade_options=upgrade_options) + upgrade_options=upgrade_options, force_formula=force_formula) (failed, changed, message) = brew.run() changed_pkgs = brew.changed_pkgs unchanged_pkgs = brew.unchanged_pkgs diff --git a/plugins/modules/packaging/os/homebrew_cask.py b/plugins/modules/homebrew_cask.py similarity index 84% rename from plugins/modules/packaging/os/homebrew_cask.py rename to plugins/modules/homebrew_cask.py index a43eabb7cb..ac88e1bafe 100644 --- a/plugins/modules/packaging/os/homebrew_cask.py +++ b/plugins/modules/homebrew_cask.py @@ -1,83 +1,86 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- -# Copyright: (c) 2013, Daniel Jaouen -# Copyright: (c) 2016, Indrajit Raychaudhuri +# Copyright (c) 2013, Daniel Jaouen +# Copyright (c) 2016, Indrajit Raychaudhuri # -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# GNU General Public License v3.0+ 
(see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: homebrew_cask author: -- "Indrajit Raychaudhuri (@indrajitr)" -- "Daniel Jaouen (@danieljaouen)" -- "Enric Lluelles (@enriclluelles)" -requirements: -- "python >= 2.6" + - "Indrajit Raychaudhuri (@indrajitr)" + - "Daniel Jaouen (@danieljaouen)" + - "Enric Lluelles (@enriclluelles)" short_description: Install and uninstall homebrew casks description: -- Manages Homebrew casks. + - Manages Homebrew casks. +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: none options: name: description: - - Name of cask to install or remove. - aliases: [ 'cask', 'package', 'pkg' ] + - Name of cask to install or remove. + aliases: ['cask', 'package', 'pkg'] type: list elements: str path: description: - - "':' separated list of paths to search for 'brew' executable." + - "':' separated list of paths to search for 'brew' executable." default: '/usr/local/bin:/opt/homebrew/bin' type: path state: description: - - State of the cask. - choices: [ 'absent', 'installed', 'latest', 'present', 'removed', 'uninstalled', 'upgraded' ] + - State of the cask. + choices: ['absent', 'installed', 'latest', 'present', 'removed', 'uninstalled', 'upgraded'] default: present type: str sudo_password: description: - - The sudo password to be passed to SUDO_ASKPASS. + - The sudo password to be passed to E(SUDO_ASKPASS). required: false type: str update_homebrew: description: - - Update homebrew itself first. - - Note that C(brew cask update) is a synonym for C(brew update). + - Update homebrew itself first. + - Note that C(brew cask update) is a synonym for C(brew update). 
type: bool - default: no + default: false install_options: description: - - Options flags to install a package. - aliases: [ 'options' ] + - Options flags to install a package. + aliases: ['options'] type: list elements: str accept_external_apps: description: - - Allow external apps. + - Allow external apps. type: bool - default: no + default: false upgrade_all: description: - - Upgrade all casks. - - Mutually exclusive with C(upgraded) state. + - Upgrade all casks. + - Mutually exclusive with C(upgraded) state. type: bool - default: no - aliases: [ 'upgrade' ] + default: false + aliases: ['upgrade'] greedy: description: - - Upgrade casks that auto update. - - Passes --greedy to brew cask outdated when checking - if an installed cask has a newer version available. + - Upgrade casks that auto update. + - Passes C(--greedy) to C(brew outdated --cask) when checking if an installed cask has a newer version available, or + to C(brew upgrade --cask) when upgrading all casks. type: bool - default: no -''' -EXAMPLES = ''' + default: false +""" +EXAMPLES = r""" - name: Install cask community.general.homebrew_cask: name: alfred @@ -110,7 +113,7 @@ EXAMPLES = ''' community.general.homebrew_cask: name: alfred state: present - accept_external_apps: True + accept_external_apps: true - name: Remove cask with force option community.general.homebrew_cask: @@ -122,6 +125,11 @@ EXAMPLES = ''' community.general.homebrew_cask: upgrade_all: true +- name: Upgrade all casks with greedy option + community.general.homebrew_cask: + upgrade_all: true + greedy: true + - name: Upgrade given cask with force option community.general.homebrew_cask: name: alfred @@ -132,24 +140,24 @@ EXAMPLES = ''' community.general.homebrew_cask: name: 1password state: upgraded - greedy: True + greedy: true - name: Using sudo password for installing cask community.general.homebrew_cask: name: wireshark state: present sudo_password: "{{ ansible_become_pass }}" -''' +""" import os import re import tempfile from 
ansible_collections.community.general.plugins.module_utils.version import LooseVersion +from ansible_collections.community.general.plugins.module_utils.homebrew import HomebrewValidate from ansible.module_utils.common.text.converters import to_bytes from ansible.module_utils.basic import AnsibleModule -from ansible.module_utils.six import iteritems, string_types # exceptions -------------------------------------------------------------- {{{ @@ -161,7 +169,7 @@ class HomebrewCaskException(Exception): # utils ------------------------------------------------------------------- {{{ def _create_regex_group_complement(s): lines = (line.strip() for line in s.split('\n') if line.strip()) - chars = filter(None, (line.split('#')[0].strip() for line in lines)) + chars = [_f for _f in (line.split('#')[0].strip() for line in lines) if _f] group = r'[^' + r''.join(chars) + r']' return re.compile(group) # /utils ------------------------------------------------------------------ }}} @@ -171,83 +179,19 @@ class HomebrewCask(object): '''A class to manage Homebrew casks.''' # class regexes ------------------------------------------------ {{{ - VALID_PATH_CHARS = r''' - \w # alphanumeric characters (i.e., [a-zA-Z0-9_]) - \s # spaces - : # colons - {sep} # the OS-specific path separator - . # dots - \- # dashes - '''.format(sep=os.path.sep) - - VALID_BREW_PATH_CHARS = r''' - \w # alphanumeric characters (i.e., [a-zA-Z0-9_]) - \s # spaces - {sep} # the OS-specific path separator - . # dots - \- # dashes - '''.format(sep=os.path.sep) - VALID_CASK_CHARS = r''' \w # alphanumeric characters (i.e., [a-zA-Z0-9_]) . 
# dots / # slash (for taps) \- # dashes @ # at symbol + \+ # plus symbol ''' - INVALID_PATH_REGEX = _create_regex_group_complement(VALID_PATH_CHARS) - INVALID_BREW_PATH_REGEX = _create_regex_group_complement(VALID_BREW_PATH_CHARS) INVALID_CASK_REGEX = _create_regex_group_complement(VALID_CASK_CHARS) # /class regexes ----------------------------------------------- }}} # class validations -------------------------------------------- {{{ - @classmethod - def valid_path(cls, path): - ''' - `path` must be one of: - - list of paths - - a string containing only: - - alphanumeric characters - - dashes - - dots - - spaces - - colons - - os.path.sep - ''' - - if isinstance(path, (string_types)): - return not cls.INVALID_PATH_REGEX.search(path) - - try: - iter(path) - except TypeError: - return False - else: - paths = path - return all(cls.valid_brew_path(path_) for path_ in paths) - - @classmethod - def valid_brew_path(cls, brew_path): - ''' - `brew_path` must be one of: - - None - - a string containing only: - - alphanumeric characters - - dashes - - dots - - spaces - - os.path.sep - ''' - - if brew_path is None: - return True - - return ( - isinstance(brew_path, string_types) - and not cls.INVALID_BREW_PATH_REGEX.search(brew_path) - ) - @classmethod def valid_cask(cls, cask): '''A valid cask is either None or alphanumeric + backslashes.''' @@ -256,7 +200,7 @@ class HomebrewCask(object): return True return ( - isinstance(cask, string_types) + isinstance(cask, str) and not cls.INVALID_CASK_REGEX.search(cask) ) @@ -272,7 +216,7 @@ class HomebrewCask(object): return True else: return ( - isinstance(state, string_types) + isinstance(state, str) and state.lower() in ( 'installed', 'absent', @@ -309,14 +253,14 @@ class HomebrewCask(object): @path.setter def path(self, path): - if not self.valid_path(path): + if not HomebrewValidate.valid_path(path): self._path = [] self.failed = True self.message = 'Invalid path: {0}.'.format(path) raise HomebrewCaskException(self.message) else: 
- if isinstance(path, string_types): + if isinstance(path, str): self._path = path.split(':') else: self._path = path @@ -329,7 +273,7 @@ class HomebrewCask(object): @brew_path.setter def brew_path(self, brew_path): - if not self.valid_brew_path(brew_path): + if not HomebrewValidate.valid_brew_path(brew_path): self._brew_path = None self.failed = True self.message = 'Invalid brew_path: {0}.'.format(brew_path) @@ -403,7 +347,7 @@ class HomebrewCask(object): self.message = '' def _setup_instance_vars(self, **kwargs): - for key, val in iteritems(kwargs): + for key, val in kwargs.items(): setattr(self, key, val) def _prep(self): @@ -478,10 +422,7 @@ class HomebrewCask(object): cmd = base_opts + [self.current_cask] rc, out, err = self.module.run_command(cmd) - if rc == 0: - return True - else: - return False + return rc == 0 def _get_brew_version(self): if self.brew_version: @@ -489,11 +430,13 @@ class HomebrewCask(object): cmd = [self.brew_path, '--version'] - rc, out, err = self.module.run_command(cmd, check_rc=True) + dummy, out, dummy = self.module.run_command(cmd, check_rc=True) - # get version string from first line of "brew --version" output - version = out.split('\n')[0].split(' ')[1] - self.brew_version = version + pattern = r"Homebrew (.*)(\d+\.\d+\.\d+)(-dirty)?" 
+ rematch = re.search(pattern, out) + if not rematch: + self.module.fail_json(msg="Failed to match regex to get brew version", stdout=out) + self.brew_version = rematch.groups()[1] return self.brew_version def _brew_cask_command_is_deprecated(self): @@ -544,7 +487,7 @@ class HomebrewCask(object): 'update', ]) if rc == 0: - if out and isinstance(out, string_types): + if out and isinstance(out, str): already_updated = any( re.search(r'Already up-to-date.', s.strip(), re.IGNORECASE) for s in out.split('\n') @@ -575,6 +518,9 @@ class HomebrewCask(object): else: cmd = [self.brew_path, 'cask', 'upgrade'] + if self.greedy: + cmd = cmd + ['--greedy'] + rc, out, err = '', '', '' if self.sudo_password: @@ -583,7 +529,12 @@ class HomebrewCask(object): rc, out, err = self.module.run_command(cmd) if rc == 0: - if re.search(r'==> No Casks to upgrade', out.strip(), re.IGNORECASE): + # 'brew upgrade --cask' does not output anything if no casks are upgraded + if not out.strip(): + self.message = 'Homebrew casks already upgraded.' + + # handle legacy 'brew cask upgrade' + elif re.search(r'==> No Casks to upgrade', out.strip(), re.IGNORECASE): self.message = 'Homebrew casks already upgraded.' 
else: @@ -780,13 +731,11 @@ def main(): argument_spec=dict( name=dict( aliases=["pkg", "package", "cask"], - required=False, type='list', elements='str', ), path=dict( default="/usr/local/bin:/opt/homebrew/bin", - required=False, type='path', ), state=dict( @@ -799,7 +748,6 @@ def main(): ), sudo_password=dict( type="str", - required=False, no_log=True, ), update_homebrew=dict( @@ -807,7 +755,6 @@ def main(): type='bool', ), install_options=dict( - default=None, aliases=['options'], type='list', elements='str', diff --git a/plugins/modules/homebrew_services.py b/plugins/modules/homebrew_services.py new file mode 100644 index 0000000000..5527aae133 --- /dev/null +++ b/plugins/modules/homebrew_services.py @@ -0,0 +1,251 @@ +#!/usr/bin/python + +# Copyright (c) 2013, Andrew Dunham +# Copyright (c) 2013, Daniel Jaouen +# Copyright (c) 2015, Indrajit Raychaudhuri +# Copyright (c) 2024, Kit Ham +# +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + +DOCUMENTATION = r""" +module: homebrew_services +author: + - "Kit Ham (@kitizz)" +requirements: + - homebrew must already be installed on the target system +short_description: Services manager for Homebrew +version_added: 9.3.0 +description: + - Manages daemons and services using Homebrew. +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: none +options: + name: + description: + - An installed homebrew package whose service is to be updated. + aliases: ['formula'] + type: str + required: true + path: + description: + - A V(:) separated list of paths to search for C(brew) executable. 
Since a package (I(formula) in homebrew parlance) + location is prefixed relative to the actual path of C(brew) command, providing an alternative C(brew) path enables + managing different set of packages in an alternative location in the system. + default: '/usr/local/bin:/opt/homebrew/bin:/home/linuxbrew/.linuxbrew/bin' + type: path + state: + description: + - State of the package's service. + choices: ['present', 'absent', 'restarted'] + default: present + type: str +""" + +EXAMPLES = r""" +- name: Install foo package + community.general.homebrew: + name: foo + state: present + +- name: Start the foo service (equivalent to `brew services start foo`) + community.general.homebrew_services: + name: foo + state: present + +- name: Restart the foo service (equivalent to `brew services restart foo`) + community.general.homebrew_services: + name: foo + state: restarted + +- name: Remove the foo service (equivalent to `brew services stop foo`) + community.general.homebrew_services: + name: foo + state: absent +""" + +RETURN = r""" +pid: + description: + - If the service is now running, this is the PID of the service, otherwise -1. + returned: success + type: int + sample: 1234 +running: + description: + - Whether the service is running after running this command. + returned: success + type: bool + sample: true +""" + +import json +import sys + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.homebrew import ( + HomebrewValidate, + parse_brew_path, +) + +if sys.version_info < (3, 5): + from collections import namedtuple + + # Stores validated arguments for an instance of an action. + # See DOCUMENTATION string for argument-specific information. + HomebrewServiceArgs = namedtuple( + "HomebrewServiceArgs", ["name", "state", "brew_path"] + ) + + # Stores the state of a Homebrew service. 
+ HomebrewServiceState = namedtuple("HomebrewServiceState", ["running", "pid"]) + +else: + from typing import NamedTuple, Optional + + # Stores validated arguments for an instance of an action. + # See DOCUMENTATION string for argument-specific information. + HomebrewServiceArgs = NamedTuple( + "HomebrewServiceArgs", [("name", str), ("state", str), ("brew_path", str)] + ) + + # Stores the state of a Homebrew service. + HomebrewServiceState = NamedTuple( + "HomebrewServiceState", [("running", bool), ("pid", Optional[int])] + ) + + +def _brew_service_state(args, module): + # type: (HomebrewServiceArgs, AnsibleModule) -> HomebrewServiceState + cmd = [args.brew_path, "services", "info", args.name, "--json"] + rc, stdout, stderr = module.run_command(cmd, check_rc=True) + + try: + data = json.loads(stdout)[0] + except json.JSONDecodeError: + module.fail_json(msg="Failed to parse JSON output:\n{0}".format(stdout)) + + return HomebrewServiceState(running=data["status"] == "started", pid=data["pid"]) + + +def _exit_with_state(args, module, changed=False, message=None): + # type: (HomebrewServiceArgs, AnsibleModule, bool, Optional[str]) -> None + state = _brew_service_state(args, module) + if message is None: + message = ( + "Running: {state.running}, Changed: {changed}, PID: {state.pid}".format( + state=state, changed=changed + ) + ) + module.exit_json(msg=message, pid=state.pid, running=state.running, changed=changed) + + +def validate_and_load_arguments(module): + # type: (AnsibleModule) -> HomebrewServiceArgs + """Reuse the Homebrew module's validation logic to validate these arguments.""" + package = module.params["name"] # type: ignore + if not HomebrewValidate.valid_package(package): + module.fail_json(msg="Invalid package name: {0}".format(package)) + + state = module.params["state"] # type: ignore + if state not in ["present", "absent", "restarted"]: + module.fail_json(msg="Invalid state: {0}".format(state)) + + brew_path = parse_brew_path(module) + + return 
HomebrewServiceArgs(name=package, state=state, brew_path=brew_path) + + +def start_service(args, module): + # type: (HomebrewServiceArgs, AnsibleModule) -> None + """Start the requested brew service if it is not already running.""" + state = _brew_service_state(args, module) + if state.running: + # Nothing to do, return early. + _exit_with_state(args, module, changed=False, message="Service already running") + + if module.check_mode: + _exit_with_state(args, module, changed=True, message="Service would be started") + + start_cmd = [args.brew_path, "services", "start", args.name] + rc, stdout, stderr = module.run_command(start_cmd, check_rc=True) + + _exit_with_state(args, module, changed=True) + + +def stop_service(args, module): + # type: (HomebrewServiceArgs, AnsibleModule) -> None + """Stop the requested brew service if it is running.""" + state = _brew_service_state(args, module) + if not state.running: + # Nothing to do, return early. + _exit_with_state(args, module, changed=False, message="Service already stopped") + + if module.check_mode: + _exit_with_state(args, module, changed=True, message="Service would be stopped") + + stop_cmd = [args.brew_path, "services", "stop", args.name] + rc, stdout, stderr = module.run_command(stop_cmd, check_rc=True) + + _exit_with_state(args, module, changed=True) + + +def restart_service(args, module): + # type: (HomebrewServiceArgs, AnsibleModule) -> None + """Restart the requested brew service. 
This always results in a change.""" + if module.check_mode: + _exit_with_state( + args, module, changed=True, message="Service would be restarted" + ) + + restart_cmd = [args.brew_path, "services", "restart", args.name] + rc, stdout, stderr = module.run_command(restart_cmd, check_rc=True) + + _exit_with_state(args, module, changed=True) + + +def main(): + module = AnsibleModule( + argument_spec=dict( + name=dict( + aliases=["formula"], + required=True, + type="str", + ), + state=dict( + choices=["present", "absent", "restarted"], + default="present", + ), + path=dict( + default="/usr/local/bin:/opt/homebrew/bin:/home/linuxbrew/.linuxbrew/bin", + type="path", + ), + ), + supports_check_mode=True, + ) + + module.run_command_environ_update = dict( + LANG="C", LC_ALL="C", LC_MESSAGES="C", LC_CTYPE="C" + ) + + # Pre-validate arguments. + service_args = validate_and_load_arguments(module) + + # Choose logic based on the desired state. + if service_args.state == "present": + start_service(service_args, module) + elif service_args.state == "absent": + stop_service(service_args, module) + elif service_args.state == "restarted": + restart_service(service_args, module) + + +if __name__ == "__main__": + main() diff --git a/plugins/modules/packaging/os/homebrew_tap.py b/plugins/modules/homebrew_tap.py similarity index 77% rename from plugins/modules/packaging/os/homebrew_tap.py rename to plugins/modules/homebrew_tap.py index 6b30fdb68f..813b89db44 100644 --- a/plugins/modules/packaging/os/homebrew_tap.py +++ b/plugins/modules/homebrew_tap.py @@ -1,60 +1,63 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- -# Copyright: (c) 2013, Daniel Jaouen -# Copyright: (c) 2016, Indrajit Raychaudhuri +# Copyright (c) 2013, Daniel Jaouen +# Copyright (c) 2016, Indrajit Raychaudhuri # # Based on homebrew (Andrew Dunham ) # -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or 
https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = r''' ---- +DOCUMENTATION = r""" module: homebrew_tap author: - - "Indrajit Raychaudhuri (@indrajitr)" - - "Daniel Jaouen (@danieljaouen)" -short_description: Tap a Homebrew repository. + - "Indrajit Raychaudhuri (@indrajitr)" + - "Daniel Jaouen (@danieljaouen)" +short_description: Tap a Homebrew repository description: - - Tap external Homebrew repositories. + - Tap external Homebrew repositories. +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: none options: - name: - description: - - The GitHub user/organization repository to tap. - required: true - aliases: ['tap'] - type: list - elements: str - url: - description: - - The optional git URL of the repository to tap. The URL is not - assumed to be on GitHub, and the protocol doesn't have to be HTTP. - Any location and protocol that git can handle is fine. - - I(name) option may not be a list of multiple taps (but a single - tap instead) when this option is provided. - required: false - type: str - state: - description: - - state of the repository. - choices: [ 'present', 'absent' ] - required: false - default: 'present' - type: str - path: - description: - - "A ':' separated list of paths to search for C(brew) executable." - default: '/usr/local/bin:/opt/homebrew/bin' - type: path - version_added: '2.1.0' -requirements: [ homebrew ] -''' + name: + description: + - The GitHub user/organization repository to tap. + required: true + aliases: ['tap'] + type: list + elements: str + url: + description: + - The optional git URL of the repository to tap. The URL is not assumed to be on GitHub, and the protocol does not have + to be HTTP. Any location and protocol that git can handle is fine. 
+ - O(name) option may not be a list of multiple taps (but a single tap instead) when this option is provided. + required: false + type: str + state: + description: + - State of the repository. + choices: ['present', 'absent'] + required: false + default: 'present' + type: str + path: + description: + - A V(:) separated list of paths to search for C(brew) executable. + default: '/usr/local/bin:/opt/homebrew/bin:/home/linuxbrew/.linuxbrew/bin' + type: path + version_added: '2.1.0' +requirements: [homebrew] +""" -EXAMPLES = r''' +EXAMPLES = r""" - name: Tap a Homebrew repository, state present community.general.homebrew_tap: name: homebrew/dupes @@ -73,7 +76,7 @@ EXAMPLES = r''' community.general.homebrew_tap: name: telemachus/brew url: 'https://bitbucket.org/telemachus/brew' -''' +""" import re @@ -215,11 +218,10 @@ def main(): module = AnsibleModule( argument_spec=dict( name=dict(aliases=['tap'], type='list', required=True, elements='str'), - url=dict(default=None, required=False), + url=dict(), state=dict(default='present', choices=['present', 'absent']), path=dict( - default="/usr/local/bin:/opt/homebrew/bin", - required=False, + default="/usr/local/bin:/opt/homebrew/bin:/home/linuxbrew/.linuxbrew/bin", type='path', ), ), diff --git a/plugins/modules/system/homectl.py b/plugins/modules/homectl.py similarity index 64% rename from plugins/modules/system/homectl.py rename to plugins/modules/homectl.py index ff7a619509..90e97fc484 100644 --- a/plugins/modules/system/homectl.py +++ b/plugins/modules/homectl.py @@ -1,179 +1,190 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- -# (c) 2022, James Livulpi -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# Copyright (c) 2022, James Livulpi +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import absolute_import, division, print_function -__metaclass__ = 
type +from __future__ import annotations -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: homectl author: - - "James Livulpi (@jameslivulpi)" + - "James Livulpi (@jameslivulpi)" short_description: Manage user accounts with systemd-homed version_added: 4.4.0 description: - - Manages a user's home directory managed by systemd-homed. + - Manages a user's home directory managed by systemd-homed. +notes: + - This module requires the deprecated L(crypt Python module, https://docs.python.org/3.12/library/crypt.html) library which + was removed from Python 3.13. For Python 3.13 or newer, you need to install L(legacycrypt, https://pypi.org/project/legacycrypt/). +requirements: + - legacycrypt (on Python 3.13 or newer) +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: none options: - name: - description: - - The user name to create, remove, or update. - required: true - aliases: [ 'user', 'username' ] - type: str - password: - description: - - Set the user's password to this. - - Homed requires this value to be in cleartext on user creation and updating a user. - - The module takes the password and generates a password hash in SHA-512 with 10000 rounds of salt generation using crypt. - - See U(https://systemd.io/USER_RECORD/). - - This is required for I(state=present). When an existing user is updated this is checked against the stored hash in homed. - type: str - state: - description: - - The operation to take on the user. - choices: [ 'absent', 'present' ] - default: present - type: str - storage: - description: - - Indicates the storage mechanism for the user's home directory. - - If the storage type is not specified, ``homed.conf(5)`` defines which default storage to use. - - Only used when a user is first created. - choices: [ 'classic', 'luks', 'directory', 'subvolume', 'fscrypt', 'cifs' ] - type: str - disksize: - description: - - The intended home directory disk space. 
- - Human readable value such as C(10G), C(10M), or C(10B). - type: str - resize: - description: - - When used with I(disksize) this will attempt to resize the home directory immediately. - default: false - type: bool - realname: - description: - - The user's real ('human') name. - - This can also be used to add a comment to maintain compatability with C(useradd). - aliases: [ 'comment' ] - type: str - realm: - description: - - The 'realm' a user is defined in. - type: str - email: - description: - - The email address of the user. - type: str - location: - description: - - A free-form location string describing the location of the user. - type: str - iconname: - description: - - The name of an icon picked by the user, for example for the purpose of an avatar. - - Should follow the semantics defined in the Icon Naming Specification. - - See U(https://specifications.freedesktop.org/icon-naming-spec/icon-naming-spec-latest.html) for specifics. - type: str - homedir: - description: - - Path to use as home directory for the user. - - This is the directory the user's home directory is mounted to while the user is logged in. - - This is not where the user's data is actually stored, see I(imagepath) for that. - - Only used when a user is first created. - type: path - imagepath: - description: - - Path to place the user's home directory. - - See U(https://www.freedesktop.org/software/systemd/man/homectl.html#--image-path=PATH) for more information. - - Only used when a user is first created. - type: path - uid: - description: - - Sets the UID of the user. - - If using I(gid) homed requires the value to be the same. - - Only used when a user is first created. - type: int - gid: - description: - - Sets the gid of the user. - - If using I(uid) homed requires the value to be the same. - - Only used when a user is first created. - type: int - mountopts: - description: - - String separated by comma each indicating mount options for a users home directory. 
- - Valid options are C(nosuid), C(nodev) or C(noexec). - - Homed by default uses C(nodev) and C(nosuid) while C(noexec) is off. - type: str - umask: - description: - - Sets the umask for the user's login sessions - - Value from C(0000) to C(0777). - type: int - memberof: - description: - - String separated by comma each indicating a UNIX group this user shall be a member of. - - Groups the user should be a member of should be supplied as comma separated list. - aliases: [ 'groups' ] - type: str - skeleton: - description: - - The absolute path to the skeleton directory to populate a new home directory from. - - This is only used when a home directory is first created. - - If not specified homed by default uses C(/etc/skel). - aliases: [ 'skel' ] - type: path - shell: - description: - - Shell binary to use for terminal logins of given user. - - If not specified homed by default uses C(/bin/bash). - type: str - environment: - description: - - String separated by comma each containing an environment variable and its value to - set for the user's login session, in a format compatible with ``putenv()``. - - Any environment variable listed here is automatically set by pam_systemd for all - login sessions of the user. - aliases: [ 'setenv' ] - type: str - timezone: - description: - - Preferred timezone to use for the user. - - Should be a tzdata compatible location string such as C(America/New_York). - type: str - locked: - description: - - Whether the user account should be locked or not. - type: bool - language: - description: - - The preferred language/locale for the user. - - This should be in a format compatible with the C($LANG) environment variable. - type: str - passwordhint: - description: - - Password hint for the given user. - type: str - sshkeys: - description: - - String separated by comma each listing a SSH public key that is authorized to access the account. 
- - The keys should follow the same format as the lines in a traditional C(~/.ssh/authorized_key) file. - type: str - notbefore: - description: - - A time since the UNIX epoch before which the record should be considered invalid for the purpose of logging in. - type: int - notafter: - description: - - A time since the UNIX epoch after which the record should be considered invalid for the purpose of logging in. - type: int -''' + name: + description: + - The user name to create, remove, or update. + required: true + aliases: ['user', 'username'] + type: str + password: + description: + - Set the user's password to this. + - Homed requires this value to be in cleartext on user creation and updating a user. + - The module takes the password and generates a password hash in SHA-512 with 10000 rounds of salt generation using + crypt. + - See U(https://systemd.io/USER_RECORD/). + - This is required for O(state=present). When an existing user is updated this is checked against the stored hash in + homed. + type: str + state: + description: + - The operation to take on the user. + choices: ['absent', 'present'] + default: present + type: str + storage: + description: + - Indicates the storage mechanism for the user's home directory. + - If the storage type is not specified, C(homed.conf(5\)) defines which default storage to use. + - Only used when a user is first created. + choices: ['classic', 'luks', 'directory', 'subvolume', 'fscrypt', 'cifs'] + type: str + disksize: + description: + - The intended home directory disk space. + - Human readable value such as V(10G), V(10M), or V(10B). + type: str + resize: + description: + - When used with O(disksize) this attempts to resize the home directory immediately. + default: false + type: bool + realname: + description: + - The user's real ('human') name. + - This can also be used to add a comment to maintain compatibility with C(useradd). 
+ aliases: ['comment'] + type: str + realm: + description: + - The 'realm' a user is defined in. + type: str + email: + description: + - The email address of the user. + type: str + location: + description: + - A free-form location string describing the location of the user. + type: str + iconname: + description: + - The name of an icon picked by the user, for example for the purpose of an avatar. + - Should follow the semantics defined in the Icon Naming Specification. + - See U(https://specifications.freedesktop.org/icon-naming-spec/icon-naming-spec-latest.html) for specifics. + type: str + homedir: + description: + - Path to use as home directory for the user. + - This is the directory the user's home directory is mounted to while the user is logged in. + - This is not where the user's data is actually stored, see O(imagepath) for that. + - Only used when a user is first created. + type: path + imagepath: + description: + - Path to place the user's home directory. + - See U(https://www.freedesktop.org/software/systemd/man/homectl.html#--image-path=PATH) for more information. + - Only used when a user is first created. + type: path + uid: + description: + - Sets the UID of the user. + - If using O(gid) homed requires the value to be the same. + - Only used when a user is first created. + type: int + gid: + description: + - Sets the gid of the user. + - If using O(uid) homed requires the value to be the same. + - Only used when a user is first created. + type: int + mountopts: + description: + - String separated by comma each indicating mount options for a users home directory. + - Valid options are V(nosuid), V(nodev) or V(noexec). + - Homed by default uses V(nodev) and V(nosuid) while V(noexec) is off. + type: str + umask: + description: + - Sets the umask for the user's login sessions. + - Value from V(0000) to V(0777). + type: int + memberof: + description: + - String separated by comma each indicating a UNIX group this user shall be a member of. 
+ - Groups the user should be a member of should be supplied as comma separated list. + aliases: ['groups'] + type: str + skeleton: + description: + - The absolute path to the skeleton directory to populate a new home directory from. + - This is only used when a home directory is first created. + - If not specified homed by default uses V(/etc/skel). + aliases: ['skel'] + type: path + shell: + description: + - Shell binary to use for terminal logins of given user. + - If not specified homed by default uses V(/bin/bash). + type: str + environment: + description: + - String separated by comma each containing an environment variable and its value to set for the user's login session, + in a format compatible with C(putenv(\)). + - Any environment variable listed here is automatically set by pam_systemd for all login sessions of the user. + aliases: ['setenv'] + type: str + timezone: + description: + - Preferred timezone to use for the user. + - Should be a tzdata compatible location string such as V(America/New_York). + type: str + locked: + description: + - Whether the user account should be locked or not. + type: bool + language: + description: + - The preferred language/locale for the user. + - This should be in a format compatible with the E(LANG) environment variable. + type: str + passwordhint: + description: + - Password hint for the given user. + type: str + sshkeys: + description: + - String separated by comma each listing a SSH public key that is authorized to access the account. + - The keys should follow the same format as the lines in a traditional C(~/.ssh/authorized_key) file. + type: str + notbefore: + description: + - A time since the UNIX epoch before which the record should be considered invalid for the purpose of logging in. + type: int + notafter: + description: + - A time since the UNIX epoch after which the record should be considered invalid for the purpose of logging in. 
+ type: int +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: Add the user 'james' community.general.homectl: name: johnd @@ -195,76 +206,95 @@ EXAMPLES = ''' password: myreallysecurepassword1! state: present disksize: 10G - resize: yes + resize: true - name: Remove an existing user 'janet' community.general.homectl: name: janet state: absent -''' +""" -RETURN = ''' +RETURN = r""" data: - description: A json dictionary returned from C(homectl inspect -j). - returned: success - type: dict - sample: { - "data": { - "binding": { - "e9ed2a5b0033427286b228e97c1e8343": { - "fileSystemType": "btrfs", - "fileSystemUuid": "7bd59491-2812-4642-a492-220c3f0c6c0b", - "gid": 60268, - "imagePath": "/home/james.home", - "luksCipher": "aes", - "luksCipherMode": "xts-plain64", - "luksUuid": "7f05825a-2c38-47b4-90e1-f21540a35a81", - "luksVolumeKeySize": 32, - "partitionUuid": "5a906126-d3c8-4234-b230-8f6e9b427b2f", - "storage": "luks", - "uid": 60268 - } - }, + description: Dictionary returned from C(homectl inspect -j). + returned: success + type: dict + sample: + { + "data": { + "binding": { + "e9ed2a5b0033427286b228e97c1e8343": { + "fileSystemType": "btrfs", + "fileSystemUuid": "7bd59491-2812-4642-a492-220c3f0c6c0b", + "gid": 60268, + "imagePath": "/home/james.home", + "luksCipher": "aes", + "luksCipherMode": "xts-plain64", + "luksUuid": "7f05825a-2c38-47b4-90e1-f21540a35a81", + "luksVolumeKeySize": 32, + "partitionUuid": "5a906126-d3c8-4234-b230-8f6e9b427b2f", + "storage": "luks", + "uid": 60268 + } + }, + "diskSize": 3221225472, + "disposition": "regular", + "lastChangeUSec": 1641941238208691, + "lastPasswordChangeUSec": 1641941238208691, + "privileged": { + "hashedPassword": [ + "$6$ov9AKni.trf76inT$tTtfSyHgbPTdUsG0CvSSQZXGqFGdHKQ9Pb6e0BTZhDmlgrL/vA5BxrXduBi8u/PCBiYUffGLIkGhApjKMK3bV." 
+ ] + }, + "signature": [ + { + "data": "o6zVFbymcmk4YTVaY6KPQK23YCp+VkXdGEeniZeV1pzIbFzoaZBvVLPkNKMoPAQbodY5BYfBtuy41prNL78qAg==", + "key": "-----BEGIN PUBLIC KEY-----\nMCowBQYDK2VwAyEAbs7ELeiEYBxkUQhxZ+5NGyu6J7gTtZtZ5vmIw3jowcY=\n-----END PUBLIC KEY-----\n" + } + ], + "status": { + "e9ed2a5b0033427286b228e97c1e8343": { + "diskCeiling": 21845405696, + "diskFloor": 268435456, "diskSize": 3221225472, - "disposition": "regular", - "lastChangeUSec": 1641941238208691, - "lastPasswordChangeUSec": 1641941238208691, - "privileged": { - "hashedPassword": [ - "$6$ov9AKni.trf76inT$tTtfSyHgbPTdUsG0CvSSQZXGqFGdHKQ9Pb6e0BTZhDmlgrL/vA5BxrXduBi8u/PCBiYUffGLIkGhApjKMK3bV." - ] - }, - "signature": [ - { - "data": "o6zVFbymcmk4YTVaY6KPQK23YCp+VkXdGEeniZeV1pzIbFzoaZBvVLPkNKMoPAQbodY5BYfBtuy41prNL78qAg==", - "key": "-----BEGIN PUBLIC KEY-----\nMCowBQYDK2VwAyEAbs7ELeiEYBxkUQhxZ+5NGyu6J7gTtZtZ5vmIw3jowcY=\n-----END PUBLIC KEY-----\n" - } - ], - "status": { - "e9ed2a5b0033427286b228e97c1e8343": { - "diskCeiling": 21845405696, - "diskFloor": 268435456, - "diskSize": 3221225472, - "service": "io.systemd.Home", - "signedLocally": true, - "state": "inactive" - } - }, - "userName": "james", - } + "service": "io.systemd.Home", + "signedLocally": true, + "state": "inactive" + } + }, + "userName": "james" + } } -''' +""" -import crypt import json -from ansible.module_utils.basic import AnsibleModule +import traceback +from ansible.module_utils.basic import AnsibleModule, missing_required_lib from ansible.module_utils.basic import jsonify from ansible.module_utils.common.text.formatters import human_to_bytes +try: + import crypt +except ImportError: + HAS_CRYPT = False + CRYPT_IMPORT_ERROR = traceback.format_exc() +else: + HAS_CRYPT = True + CRYPT_IMPORT_ERROR = None + +try: + import legacycrypt + if not HAS_CRYPT: + crypt = legacycrypt +except ImportError: + HAS_LEGACYCRYPT = False + LEGACYCRYPT_IMPORT_ERROR = traceback.format_exc() +else: + HAS_LEGACYCRYPT = True + LEGACYCRYPT_IMPORT_ERROR = 
None + class Homectl(object): - '''#TODO DOC STRINGS''' - def __init__(self, module): self.module = module self.state = module.params['state'] @@ -328,7 +358,7 @@ class Homectl(object): cmd = [self.module.get_bin_path('homectl', True)] cmd.append('create') cmd.append('--identity=-') # Read the user record from standard input. - return(self.module.run_command(cmd, data=record)) + return self.module.run_command(cmd, data=record) def _hash_password(self, password): method = crypt.METHOD_SHA512 @@ -385,7 +415,7 @@ class Homectl(object): user_metadata.pop('status', None) # Let last change Usec be updated by homed when command runs. user_metadata.pop('lastChangeUSec', None) - # Now only change fields that are called on leaving whats currently in the record intact. + # Now only change fields that are called on leaving what's currently in the record intact. record = user_metadata record['userName'] = self.name @@ -431,7 +461,7 @@ class Homectl(object): self.result['changed'] = True if self.disksize: - # convert humand readble to bytes + # convert human readable to bytes if self.disksize != record.get('diskSize'): record['diskSize'] = human_to_bytes(self.disksize) self.result['changed'] = True @@ -583,6 +613,12 @@ def main(): ] ) + if not HAS_CRYPT and not HAS_LEGACYCRYPT: + module.fail_json( + msg=missing_required_lib('crypt (part of standard library up to Python 3.12) or legacycrypt (PyPI)'), + exception=CRYPT_IMPORT_ERROR, + ) + homectl = Homectl(module) homectl.result['state'] = homectl.state diff --git a/plugins/modules/monitoring/honeybadger_deployment.py b/plugins/modules/honeybadger_deployment.py similarity index 71% rename from plugins/modules/monitoring/honeybadger_deployment.py rename to plugins/modules/honeybadger_deployment.py index 2e2198e1a3..a5fe8c86f7 100644 --- a/plugins/modules/monitoring/honeybadger_deployment.py +++ b/plugins/modules/honeybadger_deployment.py @@ -1,19 +1,24 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- # Copyright 2014 Benjamin Curtis 
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: honeybadger_deployment author: "Benjamin Curtis (@stympy)" short_description: Notify Honeybadger.io about app deployments description: - - Notify Honeybadger.io about app deployments (see http://docs.honeybadger.io/article/188-deployment-tracking) + - Notify Honeybadger.io about app deployments (see U(http://docs.honeybadger.io/article/188-deployment-tracking)). +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: none options: token: type: str @@ -23,20 +28,20 @@ options: environment: type: str description: - - The environment name, typically 'production', 'staging', etc. + - The environment name, typically V(production), V(staging), and so on. required: true user: type: str description: - - The username of the person doing the deployment + - The username of the person doing the deployment. repo: type: str description: - - URL of the project repository + - URL of the project repository. revision: type: str description: - - A hash, number, tag, or other identifier showing what revision was deployed + - A hash, number, tag, or other identifier showing what revision was deployed. url: type: str description: @@ -44,14 +49,13 @@ options: default: "https://api.honeybadger.io/v1/deploys" validate_certs: description: - - If C(no), SSL certificates for the target url will not be validated. This should only be used - on personally controlled sites using self-signed certificates. + - If V(false), SSL certificates for the target URL are not validated. 
This should only be used on personally controlled + sites using self-signed certificates. type: bool - default: 'yes' + default: true +""" -''' - -EXAMPLES = ''' +EXAMPLES = r""" - name: Notify Honeybadger.io about an app deployment community.general.honeybadger_deployment: token: AAAAAA @@ -59,14 +63,14 @@ EXAMPLES = ''' user: ansible revision: b6826b8 repo: 'git@github.com:user/repo.git' -''' +""" -RETURN = '''# ''' +RETURN = """#""" import traceback +from urllib.parse import urlencode from ansible.module_utils.basic import AnsibleModule -from ansible.module_utils.six.moves.urllib.parse import urlencode from ansible.module_utils.common.text.converters import to_native from ansible.module_utils.urls import fetch_url @@ -82,9 +86,9 @@ def main(): token=dict(required=True, no_log=True), environment=dict(required=True), user=dict(required=False), - repo=dict(required=False), - revision=dict(required=False), - url=dict(required=False, default='https://api.honeybadger.io/v1/deploys'), + repo=dict(), + revision=dict(), + url=dict(default='https://api.honeybadger.io/v1/deploys'), validate_certs=dict(default=True, type='bool'), ), supports_check_mode=True diff --git a/plugins/modules/remote_management/hpilo/hpilo_boot.py b/plugins/modules/hpilo_boot.py similarity index 60% rename from plugins/modules/remote_management/hpilo/hpilo_boot.py rename to plugins/modules/hpilo_boot.py index 728f8ffbb1..bf44a4dac4 100644 --- a/plugins/modules/remote_management/hpilo/hpilo_boot.py +++ b/plugins/modules/hpilo_boot.py @@ -1,22 +1,27 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- # Copyright 2012 Dag Wieers -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations 
-DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: hpilo_boot author: Dag Wieers (@dagwieers) short_description: Boot system using specific media through HP iLO interface description: -- "This module boots a system through its HP iLO interface. The boot media - can be one of: cdrom, floppy, hdd, network or usb." -- This module requires the hpilo python module. + - 'This module boots a system through its HP iLO interface. The boot media can be one of: V(cdrom), V(floppy), V(hdd), V(network), + or V(usb).' + - This module requires the hpilo python module. +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: none + diff_mode: + support: none options: host: description: @@ -35,50 +40,55 @@ options: type: str media: description: - - The boot media to boot the system from - choices: [ "cdrom", "floppy", "rbsu", "hdd", "network", "normal", "usb" ] + - The boot media to boot the system from. + choices: ["cdrom", "floppy", "rbsu", "hdd", "network", "normal", "usb"] type: str image: description: - - The URL of a cdrom, floppy or usb boot media image. - protocol://username:password@hostname:port/filename - - protocol is either 'http' or 'https' - - username:password is optional - - port is optional + - The URL of a cdrom, floppy or usb boot media image in the form V(protocol://username:password@hostname:port/filename). + - V(protocol) is either V(http) or V(https). + - V(username:password) is optional. + - V(port) is optional. type: str state: description: - The state of the boot media. - - "no_boot: Do not boot from the device" - - "boot_once: Boot from the device once and then notthereafter" - - "boot_always: Boot from the device each time the server is rebooted" - - "connect: Connect the virtual media device and set to boot_always" - - "disconnect: Disconnects the virtual media device and set to no_boot" - - "poweroff: Power off the server" + - 'V(no_boot): Do not boot from the device.' 
+ - 'V(boot_once): Boot from the device once and then notthereafter.' + - 'V(boot_always): Boot from the device each time the server is rebooted.' + - 'V(connect): Connect the virtual media device and set to boot_always.' + - 'V(disconnect): Disconnects the virtual media device and set to no_boot.' + - 'V(poweroff): Power off the server.' default: boot_once type: str - choices: [ "boot_always", "boot_once", "connect", "disconnect", "no_boot", "poweroff" ] + choices: ["boot_always", "boot_once", "connect", "disconnect", "no_boot", "poweroff"] force: description: - - Whether to force a reboot (even when the system is already booted). - - As a safeguard, without force, hpilo_boot will refuse to reboot a server that is already running. - default: no + - Whether to force a reboot (even when the system is already booted). + - As a safeguard, without force, M(community.general.hpilo_boot) refuses to reboot a server that is already running. + default: false type: bool ssl_version: description: - Change the ssl_version used. default: TLSv1 type: str - choices: [ "SSLv3", "SSLv23", "TLSv1", "TLSv1_1", "TLSv1_2" ] + choices: ["SSLv3", "SSLv23", "TLSv1", "TLSv1_1", "TLSv1_2"] + idempotent_boot_once: + description: + - This option makes O(state=boot_once) succeed instead of failing when the server is already powered on. + type: bool + default: false + version_added: 10.6.0 requirements: -- python-hpilo + - python-hpilo notes: -- To use a USB key image you need to specify floppy as boot media. -- This module ought to be run from a system that can access the HP iLO - interface directly, either by using C(local_action) or using C(delegate_to). -''' + - To use a USB key image you need to specify floppy as boot media. + - This module ought to be run from a system that can access the HP iLO interface directly, either by using C(local_action) + or using C(delegate_to). 
+""" -EXAMPLES = r''' +EXAMPLES = r""" - name: Task to boot a system using an ISO from an HP iLO interface only if the system is an HP server community.general.hpilo_boot: host: YOUR_ILO_ADDRESS @@ -96,11 +106,11 @@ EXAMPLES = r''' password: YOUR_ILO_PASSWORD state: poweroff delegate_to: localhost -''' +""" -RETURN = ''' +RETURN = r""" # Default return values -''' +""" import time import traceback @@ -132,6 +142,7 @@ def main(): image=dict(type='str'), state=dict(type='str', default='boot_once', choices=['boot_always', 'boot_once', 'connect', 'disconnect', 'no_boot', 'poweroff']), force=dict(type='bool', default=False), + idempotent_boot_once=dict(type='bool', default=False), ssl_version=dict(type='str', default='TLSv1', choices=['SSLv3', 'SSLv23', 'TLSv1', 'TLSv1_1', 'TLSv1_2']), ) ) @@ -146,6 +157,7 @@ def main(): image = module.params['image'] state = module.params['state'] force = module.params['force'] + idempotent_boot_once = module.params['idempotent_boot_once'] ssl_version = getattr(hpilo.ssl, 'PROTOCOL_' + module.params.get('ssl_version').upper().replace('V', 'v')) ilo = hpilo.Ilo(host, login=login, password=password, ssl_version=ssl_version) @@ -181,13 +193,21 @@ def main(): power_status = ilo.get_host_power_status() - if not force and power_status == 'ON': - module.fail_json(msg='HP iLO (%s) reports that the server is already powered on !' % host) - if power_status == 'ON': - ilo.warm_boot_server() -# ilo.cold_boot_server() - changed = True + if not force and not idempotent_boot_once: + # module.deprecate( + # 'The failure of the module when the server is already powered on is being deprecated.' + # ' Please set the parameter "idempotent_boot_once=true" to start using the new behavior.', + # version='11.0.0', + # collection_name='community.general' + # ) + module.fail_json(msg='HP iLO (%s) reports that the server is already powered on !' 
% host) + elif not force and idempotent_boot_once: + pass + elif force: + ilo.warm_boot_server() + # ilo.cold_boot_server() + changed = True else: ilo.press_pwr_btn() # ilo.reset_server() diff --git a/plugins/modules/remote_management/hpilo/hpilo_info.py b/plugins/modules/hpilo_info.py similarity index 74% rename from plugins/modules/remote_management/hpilo/hpilo_info.py rename to plugins/modules/hpilo_info.py index b0cc0be940..8f2739180d 100644 --- a/plugins/modules/remote_management/hpilo/hpilo_info.py +++ b/plugins/modules/hpilo_info.py @@ -1,29 +1,27 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- # Copyright 2012 Dag Wieers -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = r''' ---- +DOCUMENTATION = r""" module: hpilo_info author: Dag Wieers (@dagwieers) short_description: Gather information through an HP iLO interface description: -- This module gathers information on a specific system using its HP iLO interface. - These information includes hardware and network related information useful - for provisioning (e.g. macaddress, uuid). -- This module requires the C(hpilo) python module. -- This module was called C(hpilo_facts) before Ansible 2.9, returning C(ansible_facts). - Note that the M(community.general.hpilo_info) module no longer returns C(ansible_facts)! + - This module gathers information on a specific system using its HP iLO interface. These information includes hardware and + network related information useful for provisioning (for example macaddress, uuid). + - This module requires the C(hpilo) python module. 
+extends_documentation_fragment: + - community.general.attributes + - community.general.attributes.info_module options: host: description: - - The HP iLO hostname/address that is linked to the physical system. + - The HP iLO hostname/address that is linked to the physical system. type: str required: true login: @@ -41,15 +39,15 @@ options: - Change the ssl_version used. default: TLSv1 type: str - choices: [ "SSLv3", "SSLv23", "TLSv1", "TLSv1_1", "TLSv1_2" ] + choices: ["SSLv3", "SSLv23", "TLSv1", "TLSv1_1", "TLSv1_2"] requirements: -- hpilo + - hpilo notes: -- This module ought to be run from a system that can access the HP iLO - interface directly, either by using C(local_action) or using C(delegate_to). -''' + - This module ought to be run from a system that can access the HP iLO interface directly, either by using C(local_action) + or using C(delegate_to). +""" -EXAMPLES = r''' +EXAMPLES = r""" - name: Gather facts from a HP iLO interface only if the system is an HP server community.general.hpilo_info: host: YOUR_ILO_ADDRESS @@ -62,71 +60,71 @@ EXAMPLES = r''' - ansible.builtin.fail: msg: 'CMDB serial ({{ cmdb_serialno }}) does not match hardware serial ({{ results.hw_system_serial }}) !' when: cmdb_serialno != results.hw_system_serial -''' +""" -RETURN = r''' +RETURN = r""" # Typical output of HP iLO_info for a physical system hw_bios_date: - description: BIOS date - returned: always - type: str - sample: 05/05/2011 + description: BIOS date. + returned: always + type: str + sample: 05/05/2011 hw_bios_version: - description: BIOS version - returned: always - type: str - sample: P68 + description: BIOS version. + returned: always + type: str + sample: P68 hw_ethX: - description: Interface information (for each interface) - returned: always - type: dict - sample: - - macaddress: 00:11:22:33:44:55 - macaddress_dash: 00-11-22-33-44-55 + description: Interface information (for each interface). 
+ returned: always + type: dict + sample: + - macaddress: 00:11:22:33:44:55 + macaddress_dash: 00-11-22-33-44-55 hw_eth_ilo: - description: Interface information (for the iLO network interface) - returned: always - type: dict - sample: - - macaddress: 00:11:22:33:44:BA - - macaddress_dash: 00-11-22-33-44-BA + description: Interface information (for the iLO network interface). + returned: always + type: dict + sample: + - macaddress: 00:11:22:33:44:BA + - macaddress_dash: 00-11-22-33-44-BA hw_product_name: - description: Product name - returned: always - type: str - sample: ProLiant DL360 G7 + description: Product name. + returned: always + type: str + sample: ProLiant DL360 G7 hw_product_uuid: - description: Product UUID - returned: always - type: str - sample: ef50bac8-2845-40ff-81d9-675315501dac + description: Product UUID. + returned: always + type: str + sample: ef50bac8-2845-40ff-81d9-675315501dac hw_system_serial: - description: System serial number - returned: always - type: str - sample: ABC12345D6 + description: System serial number. + returned: always + type: str + sample: ABC12345D6 hw_uuid: - description: Hardware UUID - returned: always - type: str - sample: 123456ABC78901D2 + description: Hardware UUID. + returned: always + type: str + sample: 123456ABC78901D2 host_power_status: - description: - - Power status of host. - - Will be one of C(ON), C(OFF) and C(UNKNOWN). - returned: always - type: str - sample: ON - version_added: 3.5.0 -''' + description: + - Power status of host. + - It is one of V(ON), V(OFF) and V(UNKNOWN). 
+ returned: always + type: str + sample: "ON" + version_added: 3.5.0 +""" import re import traceback diff --git a/plugins/modules/remote_management/hpilo/hponcfg.py b/plugins/modules/hponcfg.py similarity index 55% rename from plugins/modules/remote_management/hpilo/hponcfg.py rename to plugins/modules/hponcfg.py index 98d11dd8b9..a17a905916 100644 --- a/plugins/modules/remote_management/hpilo/hponcfg.py +++ b/plugins/modules/hponcfg.py @@ -1,49 +1,54 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- -# (c) 2012, Dag Wieers -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# Copyright (c) 2012, Dag Wieers +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = r''' ---- +DOCUMENTATION = r""" module: hponcfg author: Dag Wieers (@dagwieers) -short_description: Configure HP iLO interface using hponcfg +short_description: Configure HP iLO interface using C(hponcfg) description: - - This modules configures the HP iLO interface using hponcfg. + - This modules configures the HP iLO interface using C(hponcfg). +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: none + diff_mode: + support: none options: path: description: - - The XML file as accepted by hponcfg. + - The XML file as accepted by C(hponcfg). required: true aliases: ['src'] type: path minfw: description: - - The minimum firmware level needed. + - The minimum firmware level needed. required: false type: str executable: description: - - Path to the hponcfg executable (`hponcfg` which uses $PATH). + - Path to the hponcfg executable (C(hponcfg) which uses E(PATH)). default: hponcfg type: str verbose: description: - - Run hponcfg in verbose mode (-v). 
- default: no + - Run C(hponcfg) in verbose mode (-v). + default: false type: bool requirements: - - hponcfg tool + - hponcfg tool notes: - - You need a working hponcfg on the target system. -''' + - You need a working C(hponcfg) on the target system. +""" -EXAMPLES = r''' +EXAMPLES = r""" - name: Example hponcfg configuration XML ansible.builtin.copy: content: | @@ -70,14 +75,13 @@ EXAMPLES = r''' community.general.hponcfg: src: /tmp/enable-ssh.xml executable: /opt/hp/tools/hponcfg -''' +""" -from ansible_collections.community.general.plugins.module_utils.module_helper import ( - CmdModuleHelper, ArgFormat -) +from ansible_collections.community.general.plugins.module_utils.cmd_runner import CmdRunner, cmd_runner_fmt +from ansible_collections.community.general.plugins.module_utils.module_helper import ModuleHelper -class HPOnCfg(CmdModuleHelper): +class HPOnCfg(ModuleHelper): module = dict( argument_spec=dict( src=dict(type='path', required=True, aliases=['path']), @@ -87,19 +91,22 @@ class HPOnCfg(CmdModuleHelper): ) ) command_args_formats = dict( - src=dict(fmt=["-f", "{0}"]), - verbose=dict(fmt="-v", style=ArgFormat.BOOLEAN), - minfw=dict(fmt=["-m", "{0}"]), + src=cmd_runner_fmt.as_opt_val("-f"), + verbose=cmd_runner_fmt.as_bool("-v"), + minfw=cmd_runner_fmt.as_opt_val("-m"), ) - check_rc = True - - def __init_module__(self): - self.command = self.vars.executable - # Consider every action a change (not idempotent yet!) - self.changed = True def __run__(self): - self.run_command(params=['src', 'verbose', 'minfw']) + runner = CmdRunner( + self.module, + self.vars.executable, + self.command_args_formats, + check_rc=True, + ) + runner(['src', 'verbose', 'minfw']).run() + + # Consider every action a change (not idempotent yet!) 
+ self.changed = True def main(): diff --git a/plugins/modules/web_infrastructure/htpasswd.py b/plugins/modules/htpasswd.py similarity index 53% rename from plugins/modules/web_infrastructure/htpasswd.py rename to plugins/modules/htpasswd.py index 2eebdfd5b8..d0e0941601 100644 --- a/plugins/modules/web_infrastructure/htpasswd.py +++ b/plugins/modules/htpasswd.py @@ -1,73 +1,84 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- -# (c) 2013, Nimbis Services, Inc. -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# Copyright (c) 2013, Nimbis Services, Inc. +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = ''' +DOCUMENTATION = r""" module: htpasswd -short_description: manage user files for basic authentication +short_description: Manage user files for basic authentication description: - Add and remove username/password entries in a password file using htpasswd. - This is used by web servers such as Apache and Nginx for basic authentication. +attributes: + check_mode: + support: full + diff_mode: + support: none options: path: type: path required: true - aliases: [ dest, destfile ] + aliases: [dest, destfile] description: - - Path to the file that contains the usernames and passwords + - Path to the file that contains the usernames and passwords. name: type: str required: true - aliases: [ username ] + aliases: [username] description: - - User name to add or remove + - User name to add or remove. password: type: str required: false description: - Password associated with user. - Must be specified if user does not exist yet. - crypt_scheme: + hash_scheme: type: str required: false default: "apr_md5_crypt" description: - - Encryption scheme to be used. 
As well as the four choices listed - here, you can also use any other hash supported by passlib, such as - md5_crypt and sha256_crypt, which are linux passwd hashes. If you - do so the password file will not be compatible with Apache or Nginx - - 'Some of the available choices might be: C(apr_md5_crypt), C(des_crypt), C(ldap_sha1), C(plaintext)' + - Hashing scheme to be used. As well as the four choices listed here, you can also use any other hash supported by passlib, + such as V(portable_apache22) and V(host_apache24); or V(md5_crypt) and V(sha256_crypt), which are Linux passwd hashes. + Only some schemes in addition to the four choices below are compatible with Apache or Nginx, and supported schemes + depend on C(passlib) version and its dependencies. + - See U(https://passlib.readthedocs.io/en/stable/lib/passlib.apache.html#passlib.apache.HtpasswdFile) parameter C(default_scheme). + - 'Some of the available choices might be: V(apr_md5_crypt), V(des_crypt), V(ldap_sha1), V(plaintext).' + - 'B(WARNING): The module has no mechanism to determine the O(hash_scheme) of an existing entry, therefore, it does + not detect whether the O(hash_scheme) has changed. If you want to change the scheme, you must remove the existing + entry and then create a new one using the new scheme.' + aliases: [crypt_scheme] state: type: str required: false - choices: [ present, absent ] + choices: [present, absent] default: "present" description: - - Whether the user entry should be present or not + - Whether the user entry should be present or not. create: required: false type: bool - default: "yes" + default: true description: - - Used with C(state=present). If specified, the file will be created - if it does not already exist. If set to "no", will fail if the - file does not exist + - Used with O(state=present). If V(true), the file is created if it does not exist. Conversely, if set to V(false) and + the file does not exist, it fails. 
notes: - - "This module depends on the I(passlib) Python library, which needs to be installed on all target systems." - - "On Debian, Ubuntu, or Fedora: install I(python-passlib)." - - "On RHEL or CentOS: Enable EPEL, then install I(python-passlib)." -requirements: [ passlib>=1.6 ] + - This module depends on the C(passlib) Python library, which needs to be installed on all target systems. + - 'On Debian < 11, Ubuntu <= 20.04, or Fedora: install C(python-passlib).' + - 'On Debian, Ubuntu: install C(python3-passlib).' + - 'On RHEL or CentOS: Enable EPEL, then install C(python-passlib).' +requirements: [passlib>=1.6] author: "Ansible Core Team" -extends_documentation_fragment: files -''' +extends_documentation_fragment: + - files + - community.general.attributes +""" -EXAMPLES = """ +EXAMPLES = r""" - name: Add a user to a password file and ensure permissions are set community.general.htpasswd: path: /etc/nginx/passwdfile @@ -75,7 +86,7 @@ EXAMPLES = """ password: '9s36?;fyNp' owner: root group: www-data - mode: 0640 + mode: '0640' - name: Remove a user from a password file community.general.htpasswd: @@ -88,28 +99,22 @@ EXAMPLES = """ path: /etc/mail/passwords name: alex password: oedu2eGh - crypt_scheme: md5_crypt + hash_scheme: md5_crypt """ import os import tempfile -import traceback -from ansible.module_utils.basic import AnsibleModule, missing_required_lib + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils import deps from ansible.module_utils.common.text.converters import to_native -from ansible_collections.community.general.plugins.module_utils.version import LooseVersion -PASSLIB_IMP_ERR = None -try: +with deps.declare("passlib"): from passlib.apache import HtpasswdFile, htpasswd_context from passlib.context import CryptContext - import passlib -except ImportError: - PASSLIB_IMP_ERR = traceback.format_exc() - passlib_installed = False -else: - passlib_installed = True + apache_hashes = 
["apr_md5_crypt", "des_crypt", "ldap_sha1", "plaintext"] @@ -120,50 +125,34 @@ def create_missing_directories(dest): os.makedirs(destpath) -def present(dest, username, password, crypt_scheme, create, check_mode): +def present(dest, username, password, hash_scheme, create, check_mode): """ Ensures user is present Returns (msg, changed) """ - if crypt_scheme in apache_hashes: + if hash_scheme in apache_hashes: context = htpasswd_context else: - context = CryptContext(schemes=[crypt_scheme] + apache_hashes) + context = CryptContext(schemes=[hash_scheme] + apache_hashes) if not os.path.exists(dest): if not create: raise ValueError('Destination %s does not exist' % dest) if check_mode: return ("Create %s" % dest, True) create_missing_directories(dest) - if LooseVersion(passlib.__version__) >= LooseVersion('1.6'): - ht = HtpasswdFile(dest, new=True, default_scheme=crypt_scheme, context=context) - else: - ht = HtpasswdFile(dest, autoload=False, default=crypt_scheme, context=context) - if getattr(ht, 'set_password', None): - ht.set_password(username, password) - else: - ht.update(username, password) + ht = HtpasswdFile(dest, new=True, default_scheme=hash_scheme, context=context) + ht.set_password(username, password) ht.save() return ("Created %s and added %s" % (dest, username), True) else: - if LooseVersion(passlib.__version__) >= LooseVersion('1.6'): - ht = HtpasswdFile(dest, new=False, default_scheme=crypt_scheme, context=context) - else: - ht = HtpasswdFile(dest, default=crypt_scheme, context=context) + ht = HtpasswdFile(dest, new=False, default_scheme=hash_scheme, context=context) - found = None - if getattr(ht, 'check_password', None): - found = ht.check_password(username, password) - else: - found = ht.verify(username, password) + found = ht.check_password(username, password) if found: return ("%s already present" % username, False) else: if not check_mode: - if getattr(ht, 'set_password', None): - ht.set_password(username, password) - else: - ht.update(username, 
password) + ht.set_password(username, password) ht.save() return ("Add/update %s" % username, True) @@ -172,10 +161,7 @@ def absent(dest, username, check_mode): """ Ensures user is absent Returns (msg, changed) """ - if LooseVersion(passlib.__version__) >= LooseVersion('1.6'): - ht = HtpasswdFile(dest, new=False) - else: - ht = HtpasswdFile(dest) + ht = HtpasswdFile(dest, new=False) if username not in ht.users(): return ("%s not present" % username, False) @@ -203,9 +189,9 @@ def main(): arg_spec = dict( path=dict(type='path', required=True, aliases=["dest", "destfile"]), name=dict(type='str', required=True, aliases=["username"]), - password=dict(type='str', required=False, default=None, no_log=True), - crypt_scheme=dict(type='str', required=False, default="apr_md5_crypt"), - state=dict(type='str', required=False, default="present", choices=["present", "absent"]), + password=dict(type='str', no_log=True), + hash_scheme=dict(type='str', default="apr_md5_crypt", aliases=["crypt_scheme"]), + state=dict(type='str', default="present", choices=["present", "absent"]), create=dict(type='bool', default=True), ) @@ -216,25 +202,18 @@ def main(): path = module.params['path'] username = module.params['name'] password = module.params['password'] - crypt_scheme = module.params['crypt_scheme'] + hash_scheme = module.params['hash_scheme'] state = module.params['state'] create = module.params['create'] check_mode = module.check_mode - if not passlib_installed: - module.fail_json(msg=missing_required_lib("passlib"), exception=PASSLIB_IMP_ERR) + deps.validate(module) + # TODO double check if this hack below is still needed. # Check file for blank lines in effort to avoid "need more than 1 value to unpack" error. 
try: - f = open(path, "r") - except IOError: - # No preexisting file to remove blank lines from - f = None - else: - try: + with open(path, "r") as f: lines = f.readlines() - finally: - f.close() # If the file gets edited, it returns true, so only edit the file if it has blank lines strip = False @@ -248,24 +227,26 @@ def main(): if check_mode: temp = tempfile.NamedTemporaryFile() path = temp.name - f = open(path, "w") - try: - [f.write(line) for line in lines if line.strip()] - finally: - f.close() + with open(path, "w") as f: + f.writelines(line for line in lines if line.strip()) + + except IOError: + # No preexisting file to remove blank lines from + pass try: if state == 'present': - (msg, changed) = present(path, username, password, crypt_scheme, create, check_mode) + (msg, changed) = present(path, username, password, hash_scheme, create, check_mode) elif state == 'absent': if not os.path.exists(path): - module.exit_json(msg="%s not present" % username, - warnings="%s does not exist" % path, changed=False) + module.warn("%s does not exist" % path) + module.exit_json(msg="%s not present" % username, changed=False) (msg, changed) = absent(path, username, check_mode) else: module.fail_json(msg="Invalid state: %s" % state) + return # needed to make pylint happy - check_file_attrs(module, changed, msg) + (msg, changed) = check_file_attrs(module, changed, msg) module.exit_json(msg=msg, changed=changed) except Exception as e: module.fail_json(msg=to_native(e)) diff --git a/plugins/modules/cloud/huawei/hwc_ecs_instance.py b/plugins/modules/hwc_ecs_instance.py similarity index 72% rename from plugins/modules/cloud/huawei/hwc_ecs_instance.py rename to plugins/modules/hwc_ecs_instance.py index 3d4ba84b64..610cd8b872 100644 --- a/plugins/modules/cloud/huawei/hwc_ecs_instance.py +++ b/plugins/modules/hwc_ecs_instance.py @@ -1,234 +1,216 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- # # Copyright (C) 2019 Huawei -# GNU General Public License v3.0+ (see COPYING or -# 
https://www.gnu.org/licenses/gpl-3.0.txt) +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations ############################################################################### # Documentation ############################################################################### -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: hwc_ecs_instance description: - - instance management. + - Instance management. short_description: Creates a resource of Ecs/Instance in Huawei Cloud version_added: '0.2.0' author: Huawei Inc. (@huaweicloud) requirements: - - keystoneauth1 >= 3.6.0 + - keystoneauth1 >= 3.6.0 +attributes: + check_mode: + support: full + diff_mode: + support: none options: - state: - description: - - Whether the given object should exist in Huawei Cloud. - type: str - choices: ['present', 'absent'] - default: 'present' - timeouts: - description: - - The timeouts for each operations. - type: dict - suboptions: - create: - description: - - The timeouts for create operation. - type: str - default: '30m' - update: - description: - - The timeouts for update operation. - type: str - default: '30m' - delete: - description: - - The timeouts for delete operation. - type: str - default: '30m' - availability_zone: - description: - - Specifies the name of the AZ where the ECS is located. - type: str - required: true - flavor_name: - description: - - Specifies the name of the system flavor. - type: str - required: true - image_id: - description: - - Specifies the ID of the system image. - type: str - required: true - name: - description: - - Specifies the ECS name. Value requirements consists of 1 to 64 - characters, including letters, digits, underscores C(_), hyphens - (-), periods (.). 
- type: str - required: true - nics: - description: - - Specifies the NIC information of the ECS. Constraints the - network of the NIC must belong to the VPC specified by vpc_id. A - maximum of 12 NICs can be attached to an ECS. - type: list - elements: dict - required: true - suboptions: - ip_address: - description: - - Specifies the IP address of the NIC. The value is an IPv4 - address. Its value must be an unused IP - address in the network segment of the subnet. - type: str - required: true - subnet_id: - description: - - Specifies the ID of subnet. - type: str - required: true - root_volume: - description: - - Specifies the configuration of the ECS's system disks. - type: dict - required: true - suboptions: - volume_type: - description: - - Specifies the ECS system disk type. - - SATA is common I/O disk type. - - SAS is high I/O disk type. - - SSD is ultra-high I/O disk type. - - co-p1 is high I/O (performance-optimized I) disk type. - - uh-l1 is ultra-high I/O (latency-optimized) disk type. - - NOTE is For HANA, HL1, and HL2 ECSs, use co-p1 and uh-l1 - disks. For other ECSs, do not use co-p1 or uh-l1 disks. - type: str - required: true - size: - description: - - Specifies the system disk size, in GB. The value range is - 1 to 1024. The system disk size must be - greater than or equal to the minimum system disk size - supported by the image (min_disk attribute of the image). - If this parameter is not specified or is set to 0, the - default system disk size is the minimum value of the - system disk in the image (min_disk attribute of the - image). - type: int - required: false - snapshot_id: - description: - - Specifies the snapshot ID or ID of the original data disk - contained in the full-ECS image. - type: str - required: false - vpc_id: - description: - - Specifies the ID of the VPC to which the ECS belongs. 
- type: str - required: true - admin_pass: - description: - - Specifies the initial login password of the administrator account - for logging in to an ECS using password authentication. The Linux - administrator is root, and the Windows administrator is - Administrator. Password complexity requirements, consists of 8 to - 26 characters. The password must contain at least three of the - following character types 'uppercase letters, lowercase letters, - digits, and special characters (!@$%^-_=+[{}]:,./?)'. The password - cannot contain the username or the username in reverse. The - Windows ECS password cannot contain the username, the username in - reverse, or more than two consecutive characters in the username. - type: str - required: false - data_volumes: - description: - - Specifies the data disks of ECS instance. - type: list - elements: dict - required: false - suboptions: - volume_id: - description: - - Specifies the disk ID. - type: str - required: true - device: - description: - - Specifies the disk device name. - type: str - required: false + state: description: + - Whether the given object should exist in Huawei Cloud. + type: str + choices: ['present', 'absent'] + default: 'present' + timeouts: + description: + - The timeouts for each operations. + type: dict + default: {} + suboptions: + create: description: - - Specifies the description of an ECS, which is a null string by - default. Can contain a maximum of 85 characters. Cannot contain - special characters, such as < and >. + - The timeouts for create operation. + type: str + default: '30m' + update: + description: + - The timeouts for update operation. + type: str + default: '30m' + delete: + description: + - The timeouts for delete operation. + type: str + default: '30m' + availability_zone: + description: + - Specifies the name of the AZ where the ECS is located. + type: str + required: true + flavor_name: + description: + - Specifies the name of the system flavor. 
+ type: str + required: true + image_id: + description: + - Specifies the ID of the system image. + type: str + required: true + name: + description: + - Specifies the ECS name. Value requirements consists of 1 to 64 characters, including letters, digits, underscores + (V(_)), hyphens (V(-)), periods (V(.)). + type: str + required: true + nics: + description: + - Specifies the NIC information of the ECS. Constraints the network of the NIC must belong to the VPC specified by vpc_id. + A maximum of 12 NICs can be attached to an ECS. + type: list + elements: dict + required: true + suboptions: + ip_address: + description: + - Specifies the IP address of the NIC. The value is an IPv4 address. Its value must be an unused IP address in the + network segment of the subnet. + type: str + required: true + subnet_id: + description: + - Specifies the ID of subnet. + type: str + required: true + root_volume: + description: + - Specifies the configuration of the ECS's system disks. + type: dict + required: true + suboptions: + volume_type: + description: + - Specifies the ECS system disk type. + - SATA is common I/O disk type. + - SAS is high I/O disk type. + - SSD is ultra-high I/O disk type. + - Co-p1 is high I/O (performance-optimized I) disk type. + - Uh-l1 is ultra-high I/O (latency-optimized) disk type. + - NOTE is For HANA, HL1, and HL2 ECSs, use co-p1 and uh-l1 disks. For other ECSs, do not use co-p1 or uh-l1 disks. + type: str + required: true + size: + description: + - Specifies the system disk size, in GB. The value range is 1 to 1024. The system disk size must be greater than + or equal to the minimum system disk size supported by the image (min_disk attribute of the image). If this parameter + is not specified or is set to 0, the default system disk size is the minimum value of the system disk in the image + (min_disk attribute of the image). 
+ type: int + required: false + snapshot_id: + description: + - Specifies the snapshot ID or ID of the original data disk contained in the full-ECS image. type: str required: false - eip_id: + vpc_id: + description: + - Specifies the ID of the VPC to which the ECS belongs. + type: str + required: true + admin_pass: + description: + - Specifies the initial login password of the administrator account for logging in to an ECS using password authentication. + The Linux administrator is root, and the Windows administrator is Administrator. Password complexity requirements, + consists of 8 to 26 characters. The password must contain at least three of the following character types 'uppercase + letters, lowercase letters, digits, and special characters (V(!@$%^-_=+[{}]:,./?))'. The password cannot contain the + username or the username in reverse. The Windows ECS password cannot contain the username, the username in reverse, + or more than two consecutive characters in the username. + type: str + required: false + data_volumes: + description: + - Specifies the data disks of ECS instance. + type: list + elements: dict + required: false + suboptions: + volume_id: description: - - Specifies the ID of the elastic IP address assigned to the ECS. - Only elastic IP addresses in the DOWN state can be - assigned. - type: str - required: false - enable_auto_recovery: - description: - - Specifies whether automatic recovery is enabled on the ECS. - type: bool - required: false - enterprise_project_id: - description: - - Specifies the ID of the enterprise project to which the ECS - belongs. - type: str - required: false - security_groups: - description: - - Specifies the security groups of the ECS. If this - parameter is left blank, the default security group is bound to - the ECS by default. - type: list - elements: str - required: false - server_metadata: - description: - - Specifies the metadata of ECS to be created. 
- type: dict - required: false - server_tags: - description: - - Specifies the tags of an ECS. When you create ECSs, one ECS - supports up to 10 tags. - type: dict - required: false - ssh_key_name: - description: - - Specifies the name of the SSH key used for logging in to the ECS. - type: str - required: false - user_data: - description: - - Specifies the user data to be injected during the ECS creation - process. Text, text files, and gzip files can be injected. - The content to be injected must be encoded with - base64. The maximum size of the content to be injected (before - encoding) is 32 KB. For Linux ECSs, this parameter does not take - effect when adminPass is used. + - Specifies the disk ID. + type: str + required: true + device: + description: + - Specifies the disk device name. type: str required: false + description: + description: + - Specifies the description of an ECS, which is a null string by default. Can contain a maximum of 85 characters. Cannot + contain special characters, such as V(<) and V(>). + type: str + required: false + eip_id: + description: + - Specifies the ID of the elastic IP address assigned to the ECS. Only elastic IP addresses in the DOWN state can be + assigned. + type: str + required: false + enable_auto_recovery: + description: + - Specifies whether automatic recovery is enabled on the ECS. + type: bool + required: false + enterprise_project_id: + description: + - Specifies the ID of the enterprise project to which the ECS belongs. + type: str + required: false + security_groups: + description: + - Specifies the security groups of the ECS. If this parameter is left blank, the default security group is bound to + the ECS by default. + type: list + elements: str + required: false + server_metadata: + description: + - Specifies the metadata of ECS to be created. + type: dict + required: false + server_tags: + description: + - Specifies the tags of an ECS. When you create ECSs, one ECS supports up to 10 tags. 
+ type: dict + required: false + ssh_key_name: + description: + - Specifies the name of the SSH key used for logging in to the ECS. + type: str + required: false + user_data: + description: + - Specifies the user data to be injected during the ECS creation process. Text, text files, and gzip files can be injected. + The content to be injected must be encoded with base64. The maximum size of the content to be injected (before encoding) + is 32 KB. For Linux ECSs, this parameter does not take effect when adminPass is used. + type: str + required: false extends_documentation_fragment: -- community.general.hwc + - community.general.hwc + - community.general.attributes +""" -''' - -EXAMPLES = ''' +EXAMPLES = r""" # create an ecs instance - name: Create a vpc hwc_network_vpc: @@ -278,238 +260,216 @@ EXAMPLES = ''' vpc_id: "{{ vpc.id }}" root_volume: volume_type: "SAS" -''' +""" -RETURN = ''' - availability_zone: - description: - - Specifies the name of the AZ where the ECS is located. - type: str - returned: success - flavor_name: - description: - - Specifies the name of the system flavor. - type: str - returned: success - image_id: - description: - - Specifies the ID of the system image. - type: str - returned: success - name: - description: - - Specifies the ECS name. Value requirements "Consists of 1 to 64 - characters, including letters, digits, underscores C(_), hyphens - (-), periods (.)". - type: str - returned: success - nics: - description: - - Specifies the NIC information of the ECS. The - network of the NIC must belong to the VPC specified by vpc_id. A - maximum of 12 NICs can be attached to an ECS. - type: list - returned: success - contains: - ip_address: - description: - - Specifies the IP address of the NIC. The value is an IPv4 - address. Its value must be an unused IP - address in the network segment of the subnet. - type: str - returned: success - subnet_id: - description: - - Specifies the ID of subnet. 
- type: str - returned: success - port_id: - description: - - Specifies the port ID corresponding to the IP address. - type: str - returned: success - root_volume: - description: - - Specifies the configuration of the ECS's system disks. - type: dict - returned: success - contains: - volume_type: - description: - - Specifies the ECS system disk type. - - SATA is common I/O disk type. - - SAS is high I/O disk type. - - SSD is ultra-high I/O disk type. - - co-p1 is high I/O (performance-optimized I) disk type. - - uh-l1 is ultra-high I/O (latency-optimized) disk type. - - NOTE is For HANA, HL1, and HL2 ECSs, use co-p1 and uh-l1 - disks. For other ECSs, do not use co-p1 or uh-l1 disks. - type: str - returned: success - size: - description: - - Specifies the system disk size, in GB. The value range is - 1 to 1024. The system disk size must be - greater than or equal to the minimum system disk size - supported by the image (min_disk attribute of the image). - If this parameter is not specified or is set to 0, the - default system disk size is the minimum value of the - system disk in the image (min_disk attribute of the - image). - type: int - returned: success - snapshot_id: - description: - - Specifies the snapshot ID or ID of the original data disk - contained in the full-ECS image. - type: str - returned: success - device: - description: - - Specifies the disk device name. - type: str - returned: success - volume_id: - description: - - Specifies the disk ID. - type: str - returned: success - vpc_id: - description: - - Specifies the ID of the VPC to which the ECS belongs. - type: str - returned: success - admin_pass: - description: - - Specifies the initial login password of the administrator account - for logging in to an ECS using password authentication. The Linux - administrator is root, and the Windows administrator is - Administrator. Password complexity requirements consists of 8 to - 26 characters. 
The password must contain at least three of the - following character types "uppercase letters, lowercase letters, - digits, and special characters (!@$%^-_=+[{}]:,./?)". The password - cannot contain the username or the username in reverse. The - Windows ECS password cannot contain the username, the username in - reverse, or more than two consecutive characters in the username. - type: str - returned: success - data_volumes: - description: - - Specifies the data disks of ECS instance. - type: list - returned: success - contains: - volume_id: - description: - - Specifies the disk ID. - type: str - returned: success - device: - description: - - Specifies the disk device name. - type: str - returned: success - description: - description: - - Specifies the description of an ECS, which is a null string by - default. Can contain a maximum of 85 characters. Cannot contain - special characters, such as < and >. - type: str - returned: success - eip_id: - description: - - Specifies the ID of the elastic IP address assigned to the ECS. - Only elastic IP addresses in the DOWN state can be assigned. - type: str - returned: success - enable_auto_recovery: - description: - - Specifies whether automatic recovery is enabled on the ECS. - type: bool - returned: success - enterprise_project_id: - description: - - Specifies the ID of the enterprise project to which the ECS - belongs. - type: str - returned: success - security_groups: - description: - - Specifies the security groups of the ECS. If this parameter is left - blank, the default security group is bound to the ECS by default. - type: list - returned: success - server_metadata: - description: - - Specifies the metadata of ECS to be created. - type: dict - returned: success - server_tags: - description: - - Specifies the tags of an ECS. When you create ECSs, one ECS - supports up to 10 tags. - type: dict - returned: success - ssh_key_name: - description: - - Specifies the name of the SSH key used for logging in to the ECS. 
- type: str - returned: success - user_data: - description: - - Specifies the user data to be injected during the ECS creation - process. Text, text files, and gzip files can be injected. - The content to be injected must be encoded with base64. The maximum - size of the content to be injected (before encoding) is 32 KB. For - Linux ECSs, this parameter does not take effect when adminPass is - used. - type: str - returned: success - config_drive: - description: - - Specifies the configuration driver. - type: str - returned: success - created: - description: - - Specifies the time when an ECS was created. - type: str - returned: success - disk_config_type: - description: - - Specifies the disk configuration type. MANUAL is The image - space is not expanded. AUTO is the image space of the system disk - will be expanded to be as same as the flavor. - type: str - returned: success - host_name: - description: - - Specifies the host name of the ECS. - type: str - returned: success - image_name: - description: - - Specifies the image name of the ECS. - type: str - returned: success - power_state: - description: - - Specifies the power status of the ECS. - type: int - returned: success - server_alias: - description: - - Specifies the ECS alias. - type: str - returned: success - status: - description: - - Specifies the ECS status. Options are ACTIVE, REBOOT, HARD_REBOOT, - REBUILD, MIGRATING, BUILD, SHUTOFF, RESIZE, VERIFY_RESIZE, ERROR, - and DELETED. - type: str - returned: success -''' +RETURN = r""" +availability_zone: + description: + - Specifies the name of the AZ where the ECS is located. + type: str + returned: success +flavor_name: + description: + - Specifies the name of the system flavor. + type: str + returned: success +image_id: + description: + - Specifies the ID of the system image. + type: str + returned: success +name: + description: + - Specifies the ECS name. 
Value requirements "Consists of 1 to 64 characters, including letters, digits, underscores (V(_)), + hyphens (V(-)), periods (V(.)).". + type: str + returned: success +nics: + description: + - Specifies the NIC information of the ECS. The network of the NIC must belong to the VPC specified by vpc_id. A maximum + of 12 NICs can be attached to an ECS. + type: list + returned: success + contains: + ip_address: + description: + - Specifies the IP address of the NIC. The value is an IPv4 address. Its value must be an unused IP address in the + network segment of the subnet. + type: str + returned: success + subnet_id: + description: + - Specifies the ID of subnet. + type: str + returned: success + port_id: + description: + - Specifies the port ID corresponding to the IP address. + type: str + returned: success +root_volume: + description: + - Specifies the configuration of the ECS's system disks. + type: dict + returned: success + contains: + volume_type: + description: + - Specifies the ECS system disk type. + - SATA is common I/O disk type. + - SAS is high I/O disk type. + - SSD is ultra-high I/O disk type. + - Co-p1 is high I/O (performance-optimized I) disk type. + - Uh-l1 is ultra-high I/O (latency-optimized) disk type. + - NOTE is For HANA, HL1, and HL2 ECSs, use co-p1 and uh-l1 disks. For other ECSs, do not use co-p1 or uh-l1 disks. + type: str + returned: success + size: + description: + - Specifies the system disk size, in GB. The value range is 1 to 1024. The system disk size must be greater than or + equal to the minimum system disk size supported by the image (min_disk attribute of the image). If this parameter + is not specified or is set to 0, the default system disk size is the minimum value of the system disk in the image + (min_disk attribute of the image). + type: int + returned: success + snapshot_id: + description: + - Specifies the snapshot ID or ID of the original data disk contained in the full-ECS image. 
+ type: str + returned: success + device: + description: + - Specifies the disk device name. + type: str + returned: success + volume_id: + description: + - Specifies the disk ID. + type: str + returned: success +vpc_id: + description: + - Specifies the ID of the VPC to which the ECS belongs. + type: str + returned: success +admin_pass: + description: + - Specifies the initial login password of the administrator account for logging in to an ECS using password authentication. + The Linux administrator is root, and the Windows administrator is Administrator. Password complexity requirements consists + of 8 to 26 characters. The password must contain at least three of the following character types "uppercase letters, + lowercase letters, digits, and special characters (!@$%^-_=+[{}]:,./?)". The password cannot contain the username or + the username in reverse. The Windows ECS password cannot contain the username, the username in reverse, or more than + two consecutive characters in the username. + type: str + returned: success +data_volumes: + description: + - Specifies the data disks of ECS instance. + type: list + returned: success + contains: + volume_id: + description: + - Specifies the disk ID. + type: str + returned: success + device: + description: + - Specifies the disk device name. + type: str + returned: success +description: + description: + - Specifies the description of an ECS, which is a null string by default. Can contain a maximum of 85 characters. Cannot + contain special characters, such as < and >. + type: str + returned: success +eip_id: + description: + - Specifies the ID of the elastic IP address assigned to the ECS. Only elastic IP addresses in the DOWN state can be assigned. + type: str + returned: success +enable_auto_recovery: + description: + - Specifies whether automatic recovery is enabled on the ECS. 
+ type: bool + returned: success +enterprise_project_id: + description: + - Specifies the ID of the enterprise project to which the ECS belongs. + type: str + returned: success +security_groups: + description: + - Specifies the security groups of the ECS. If this parameter is left blank, the default security group is bound to the + ECS by default. + type: list + returned: success +server_metadata: + description: + - Specifies the metadata of ECS to be created. + type: dict + returned: success +server_tags: + description: + - Specifies the tags of an ECS. When you create ECSs, one ECS supports up to 10 tags. + type: dict + returned: success +ssh_key_name: + description: + - Specifies the name of the SSH key used for logging in to the ECS. + type: str + returned: success +user_data: + description: + - Specifies the user data to be injected during the ECS creation process. Text, text files, and gzip files can be injected. + The content to be injected must be encoded with base64. The maximum size of the content to be injected (before encoding) + is 32 KB. For Linux ECSs, this parameter does not take effect when adminPass is used. + type: str + returned: success +config_drive: + description: + - Specifies the configuration driver. + type: str + returned: success +created: + description: + - Specifies the time when an ECS was created. + type: str + returned: success +disk_config_type: + description: + - Specifies the disk configuration type. MANUAL is The image space is not expanded. AUTO is the image space of the system + disk is expanded to be as same as the flavor. + type: str + returned: success +host_name: + description: + - Specifies the host name of the ECS. + type: str + returned: success +image_name: + description: + - Specifies the image name of the ECS. + type: str + returned: success +power_state: + description: + - Specifies the power status of the ECS. + type: int + returned: success +server_alias: + description: + - Specifies the ECS alias. 
+ type: str + returned: success +status: + description: + - Specifies the ECS status. Options are ACTIVE, REBOOT, HARD_REBOOT, REBUILD, MIGRATING, BUILD, SHUTOFF, RESIZE, VERIFY_RESIZE, + ERROR, and DELETED. + type: str + returned: success +""" from ansible_collections.community.general.plugins.module_utils.hwc_utils import ( Config, HwcClientException, HwcModule, are_different_dicts, build_path, @@ -1156,8 +1116,7 @@ def send_delete_volume_request(module, params, client, info): path_parameters = { "volume_id": ["volume_id"], } - data = dict((key, navigate_value(info, path)) - for key, path in path_parameters.items()) + data = {key: navigate_value(info, path) for key, path in path_parameters.items()} url = build_path(module, "cloudservers/{id}/detachvolume/{volume_id}", data) diff --git a/plugins/modules/cloud/huawei/hwc_evs_disk.py b/plugins/modules/hwc_evs_disk.py similarity index 67% rename from plugins/modules/cloud/huawei/hwc_evs_disk.py rename to plugins/modules/hwc_evs_disk.py index 4aec1b94db..0963736ec2 100644 --- a/plugins/modules/cloud/huawei/hwc_evs_disk.py +++ b/plugins/modules/hwc_evs_disk.py @@ -1,159 +1,144 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- # # Copyright (C) 2019 Huawei -# GNU General Public License v3.0+ (see COPYING or -# https://www.gnu.org/licenses/gpl-3.0.txt) +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations ############################################################################### # Documentation ############################################################################### -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: hwc_evs_disk description: - - block storage management. + - Block storage management. 
short_description: Creates a resource of Evs/Disk in Huawei Cloud version_added: '0.2.0' author: Huawei Inc. (@huaweicloud) requirements: - - keystoneauth1 >= 3.6.0 + - keystoneauth1 >= 3.6.0 +attributes: + check_mode: + support: full + diff_mode: + support: none options: - state: - description: - - Whether the given object should exist in Huaweicloud Cloud. - type: str - choices: ['present', 'absent'] - default: 'present' - timeouts: - description: - - The timeouts for each operations. - type: dict - suboptions: - create: - description: - - The timeouts for create operation. - type: str - default: '30m' - update: - description: - - The timeouts for update operation. - type: str - default: '30m' - delete: - description: - - The timeouts for delete operation. - type: str - default: '30m' - availability_zone: - description: - - Specifies the AZ where you want to create the disk. - type: str - required: true - name: - description: - - Specifies the disk name. The value can contain a maximum of 255 - bytes. - type: str - required: true - volume_type: - description: - - Specifies the disk type. Currently, the value can be SSD, SAS, or - SATA. - - SSD specifies the ultra-high I/O disk type. - - SAS specifies the high I/O disk type. - - SATA specifies the common I/O disk type. - - If the specified disk type is not available in the AZ, the - disk will fail to create. If the EVS disk is created from a - snapshot, the volume_type field must be the same as that of the - snapshot's source disk. - type: str - required: true - backup_id: - description: - - Specifies the ID of the backup that can be used to create a disk. - This parameter is mandatory when you use a backup to create the - disk. - type: str - required: false + state: description: + - Whether the given object should exist in Huawei Cloud. + type: str + choices: ['present', 'absent'] + default: 'present' + timeouts: + description: + - The timeouts for each operations. 
+ type: dict + default: {} + suboptions: + create: description: - - Specifies the disk description. The value can contain a maximum - of 255 bytes. + - The timeouts for create operation. type: str - required: false - enable_full_clone: + default: '30m' + update: description: - - If the disk is created from a snapshot and linked cloning needs - to be used, set this parameter to True. - type: bool - required: false - enable_scsi: - description: - - If this parameter is set to True, the disk device type will be - SCSI, which allows ECS OSs to directly access underlying storage - media. SCSI reservation command is supported. If this parameter - is set to False, the disk device type will be VBD, which supports - only simple SCSI read/write commands. - - If parameter enable_share is set to True and this parameter - is not specified, shared SCSI disks are created. SCSI EVS disks - cannot be created from backups, which means that this parameter - cannot be True if backup_id has been specified. - type: bool - required: false - enable_share: - description: - - Specifies whether the disk is shareable. The default value is - False. - type: bool - required: false - encryption_id: - description: - - Specifies the encryption ID. The length of it fixes at 36 bytes. + - The timeouts for update operation. type: str - required: false - enterprise_project_id: + default: '30m' + delete: description: - - Specifies the enterprise project ID. This ID is associated with - the disk during the disk creation. If it is not specified, the - disk is bound to the default enterprise project. + - The timeouts for delete operation. type: str - required: false - image_id: - description: - - Specifies the image ID. If this parameter is specified, the disk - is created from an image. BMS system disks cannot be - created from BMS images. - type: str - required: false - size: - description: - - Specifies the disk size, in GB. 
Its values are as follows, System - disk 1 GB to 1024 GB, Data disk 10 GB to 32768 GB. This - parameter is mandatory when you create an empty disk or use an - image or a snapshot to create a disk. If you use an image or a - snapshot to create a disk, the disk size must be greater than or - equal to the image or snapshot size. This parameter is optional - when you use a backup to create a disk. If this parameter is not - specified, the disk size is equal to the backup size. - type: int - required: false - snapshot_id: - description: - - Specifies the snapshot ID. If this parameter is specified, the - disk is created from a snapshot. - type: str - required: false + default: '30m' + availability_zone: + description: + - Specifies the AZ where you want to create the disk. + type: str + required: true + name: + description: + - Specifies the disk name. The value can contain a maximum of 255 bytes. + type: str + required: true + volume_type: + description: + - Specifies the disk type. Currently, the value can be SSD, SAS, or SATA. + - SSD specifies the ultra-high I/O disk type. + - SAS specifies the high I/O disk type. + - SATA specifies the common I/O disk type. + - If the specified disk type is not available in the AZ, the disk creation fails. If the EVS disk is created from a + snapshot, the volume_type field must be the same as that of the snapshot's source disk. + type: str + required: true + backup_id: + description: + - Specifies the ID of the backup that can be used to create a disk. This parameter is mandatory when you use a backup + to create the disk. + type: str + required: false + description: + description: + - Specifies the disk description. The value can contain a maximum of 255 bytes. + type: str + required: false + enable_full_clone: + description: + - If the disk is created from a snapshot and linked cloning needs to be used, set this parameter to True. 
+ type: bool + required: false + enable_scsi: + description: + - If this parameter is set to V(true), the disk device type is SCSI, which allows ECS OSs to directly access underlying + storage media. SCSI reservation command is supported. If this parameter is set to V(false), the disk device type is + VBD, which supports only simple SCSI read/write commands. + - If parameter enable_share is set to True and this parameter is not specified, shared SCSI disks are created. SCSI + EVS disks cannot be created from backups, which means that this parameter cannot be True if backup_id has been specified. + type: bool + required: false + enable_share: + description: + - Specifies whether the disk is shareable. The default value is False. + type: bool + required: false + encryption_id: + description: + - Specifies the encryption ID. The length of it fixes at 36 bytes. + type: str + required: false + enterprise_project_id: + description: + - Specifies the enterprise project ID. This ID is associated with the disk during the disk creation. If it is not specified, + the disk is bound to the default enterprise project. + type: str + required: false + image_id: + description: + - Specifies the image ID. If this parameter is specified, the disk is created from an image. BMS system disks cannot + be created from BMS images. + type: str + required: false + size: + description: + - Specifies the disk size, in GB. Its values are as follows, System disk 1 GB to 1024 GB, Data disk 10 GB to 32768 GB. + This parameter is mandatory when you create an empty disk or use an image or a snapshot to create a disk. If you use + an image or a snapshot to create a disk, the disk size must be greater than or equal to the image or snapshot size. + This parameter is optional when you use a backup to create a disk. If this parameter is not specified, the disk size + is equal to the backup size. + type: int + required: false + snapshot_id: + description: + - Specifies the snapshot ID. 
If this parameter is specified, the disk is created from a snapshot. + type: str + required: false extends_documentation_fragment: -- community.general.hwc + - community.general.hwc + - community.general.attributes +""" -''' - -EXAMPLES = ''' +EXAMPLES = r""" # test create disk - name: Create a disk community.general.hwc_evs_disk: @@ -161,176 +146,153 @@ EXAMPLES = ''' name: "ansible_evs_disk_test" volume_type: "SATA" size: 10 -''' +""" -RETURN = ''' - availability_zone: - description: - - Specifies the AZ where you want to create the disk. - type: str - returned: success - name: - description: - - Specifies the disk name. The value can contain a maximum of 255 - bytes. - type: str - returned: success - volume_type: - description: - - Specifies the disk type. Currently, the value can be SSD, SAS, or - SATA. - - SSD specifies the ultra-high I/O disk type. - - SAS specifies the high I/O disk type. - - SATA specifies the common I/O disk type. - - If the specified disk type is not available in the AZ, the - disk will fail to create. If the EVS disk is created from a - snapshot, the volume_type field must be the same as that of the - snapshot's source disk. - type: str - returned: success - backup_id: - description: - - Specifies the ID of the backup that can be used to create a disk. - This parameter is mandatory when you use a backup to create the - disk. - type: str - returned: success - description: - description: - - Specifies the disk description. The value can contain a maximum - of 255 bytes. - type: str - returned: success - enable_full_clone: - description: - - If the disk is created from a snapshot and linked cloning needs - to be used, set this parameter to True. - type: bool - returned: success - enable_scsi: - description: - - If this parameter is set to True, the disk device type will be - SCSI, which allows ECS OSs to directly access underlying storage - media. SCSI reservation command is supported. 
If this parameter - is set to False, the disk device type will be VBD, which supports - only simple SCSI read/write commands. - - If parameter enable_share is set to True and this parameter - is not specified, shared SCSI disks are created. SCSI EVS disks - cannot be created from backups, which means that this parameter - cannot be True if backup_id has been specified. - type: bool - returned: success - enable_share: - description: - - Specifies whether the disk is shareable. The default value is - False. - type: bool - returned: success - encryption_id: - description: - - Specifies the encryption ID. The length of it fixes at 36 bytes. - type: str - returned: success - enterprise_project_id: - description: - - Specifies the enterprise project ID. This ID is associated with - the disk during the disk creation. If it is not specified, the - disk is bound to the default enterprise project. - type: str - returned: success - image_id: - description: - - Specifies the image ID. If this parameter is specified, the disk - is created from an image. BMS system disks cannot be - created from BMS images. - type: str - returned: success - size: - description: - - Specifies the disk size, in GB. Its values are as follows, System - disk 1 GB to 1024 GB, Data disk 10 GB to 32768 GB. This - parameter is mandatory when you create an empty disk or use an - image or a snapshot to create a disk. If you use an image or a - snapshot to create a disk, the disk size must be greater than or - equal to the image or snapshot size. This parameter is optional - when you use a backup to create a disk. If this parameter is not - specified, the disk size is equal to the backup size. - type: int - returned: success - snapshot_id: - description: - - Specifies the snapshot ID. If this parameter is specified, the - disk is created from a snapshot. - type: str - returned: success - attachments: - description: - - Specifies the disk attachment information. 
- type: complex - returned: success - contains: - attached_at: - description: - - Specifies the time when the disk was attached. Time - format is 'UTC YYYY-MM-DDTHH:MM:SS'. - type: str - returned: success - attachment_id: - description: - - Specifies the ID of the attachment information. - type: str - returned: success - device: - description: - - Specifies the device name. - type: str - returned: success - server_id: - description: - - Specifies the ID of the server to which the disk is - attached. - type: str - returned: success - backup_policy_id: - description: - - Specifies the backup policy ID. - type: str - returned: success - created_at: - description: - - Specifies the time when the disk was created. Time format is 'UTC - YYYY-MM-DDTHH:MM:SS'. - type: str - returned: success - is_bootable: - description: - - Specifies whether the disk is bootable. - type: bool - returned: success - is_readonly: - description: - - Specifies whether the disk is read-only or read/write. True - indicates that the disk is read-only. False indicates that the - disk is read/write. - type: bool - returned: success - source_volume_id: - description: - - Specifies the source disk ID. This parameter has a value if the - disk is created from a source disk. - type: str - returned: success - status: - description: - - Specifies the disk status. - type: str - returned: success - tags: - description: - - Specifies the disk tags. - type: dict - returned: success -''' +RETURN = r""" +availability_zone: + description: + - Specifies the AZ where you want to create the disk. + type: str + returned: success +name: + description: + - Specifies the disk name. The value can contain a maximum of 255 bytes. + type: str + returned: success +volume_type: + description: + - Specifies the disk type. Currently, the value can be SSD, SAS, or SATA. + - SSD specifies the ultra-high I/O disk type. + - SAS specifies the high I/O disk type. + - SATA specifies the common I/O disk type. 
+ - If the specified disk type is not available in the AZ, the disk creation fails. If the EVS disk is created from a snapshot, + the volume_type field must be the same as that of the snapshot's source disk. + type: str + returned: success +backup_id: + description: + - Specifies the ID of the backup that can be used to create a disk. This parameter is mandatory when you use a backup + to create the disk. + type: str + returned: success +description: + description: + - Specifies the disk description. The value can contain a maximum of 255 bytes. + type: str + returned: success +enable_full_clone: + description: + - If the disk is created from a snapshot and linked cloning needs to be used, set this parameter to True. + type: bool + returned: success +enable_scsi: + description: + - If this parameter is set to V(true), the disk device type is SCSI, which allows ECS OSs to directly access underlying + storage media. SCSI reservation command is supported. If this parameter is set to V(false), the disk device type is + VBD, which supports only simple SCSI read/write commands. + - If parameter enable_share is set to True and this parameter is not specified, shared SCSI disks are created. SCSI EVS + disks cannot be created from backups, which means that this parameter cannot be True if backup_id has been specified. + type: bool + returned: success +enable_share: + description: + - Specifies whether the disk is shareable. The default value is False. + type: bool + returned: success +encryption_id: + description: + - Specifies the encryption ID. The length of it fixes at 36 bytes. + type: str + returned: success +enterprise_project_id: + description: + - Specifies the enterprise project ID. This ID is associated with the disk during the disk creation. If it is not specified, + the disk is bound to the default enterprise project. + type: str + returned: success +image_id: + description: + - Specifies the image ID. 
If this parameter is specified, the disk is created from an image. BMS system disks cannot be + created from BMS images. + type: str + returned: success +size: + description: + - Specifies the disk size, in GB. Its values are as follows, System disk 1 GB to 1024 GB, Data disk 10 GB to 32768 GB. + This parameter is mandatory when you create an empty disk or use an image or a snapshot to create a disk. If you use + an image or a snapshot to create a disk, the disk size must be greater than or equal to the image or snapshot size. + This parameter is optional when you use a backup to create a disk. If this parameter is not specified, the disk size + is equal to the backup size. + type: int + returned: success +snapshot_id: + description: + - Specifies the snapshot ID. If this parameter is specified, the disk is created from a snapshot. + type: str + returned: success +attachments: + description: + - Specifies the disk attachment information. + type: complex + returned: success + contains: + attached_at: + description: + - Specifies the time when the disk was attached. Time format is 'UTC YYYY-MM-DDTHH:MM:SS'. + type: str + returned: success + attachment_id: + description: + - Specifies the ID of the attachment information. + type: str + returned: success + device: + description: + - Specifies the device name. + type: str + returned: success + server_id: + description: + - Specifies the ID of the server to which the disk is attached. + type: str + returned: success +backup_policy_id: + description: + - Specifies the backup policy ID. + type: str + returned: success +created_at: + description: + - Specifies the time when the disk was created. Time format is 'UTC YYYY-MM-DDTHH:MM:SS'. + type: str + returned: success +is_bootable: + description: + - Specifies whether the disk is bootable. + type: bool + returned: success +is_readonly: + description: + - Specifies whether the disk is read-only or read/write. True indicates that the disk is read-only. 
False indicates that + the disk is read/write. + type: bool + returned: success +source_volume_id: + description: + - Specifies the source disk ID. This parameter has a value if the disk is created from a source disk. + type: str + returned: success +status: + description: + - Specifies the disk status. + type: str + returned: success +tags: + description: + - Specifies the disk tags. + type: dict + returned: success +""" from ansible_collections.community.general.plugins.module_utils.hwc_utils import ( Config, HwcClientException, HwcModule, are_different_dicts, build_path, @@ -764,8 +726,7 @@ def async_wait(config, result, client, timeout): path_parameters = { "job_id": ["job_id"], } - data = dict((key, navigate_value(result, path)) - for key, path in path_parameters.items()) + data = {key: navigate_value(result, path) for key, path in path_parameters.items()} url = build_path(module, "jobs/{job_id}", data) diff --git a/plugins/modules/cloud/huawei/hwc_network_vpc.py b/plugins/modules/hwc_network_vpc.py similarity index 76% rename from plugins/modules/cloud/huawei/hwc_network_vpc.py rename to plugins/modules/hwc_network_vpc.py index f53369adcd..b974831c87 100644 --- a/plugins/modules/cloud/huawei/hwc_network_vpc.py +++ b/plugins/modules/hwc_network_vpc.py @@ -1,127 +1,129 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- # # Copyright (C) 2018 Huawei -# GNU General Public License v3.0+ (see COPYING or -# https://www.gnu.org/licenses/gpl-3.0.txt) +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations ############################################################################### # Documentation ############################################################################### -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: hwc_network_vpc 
description: - - Represents an vpc resource. + - Represents an vpc resource. short_description: Creates a Huawei Cloud VPC author: Huawei Inc. (@huaweicloud) requirements: - - requests >= 2.18.4 - - keystoneauth1 >= 3.6.0 + - requests >= 2.18.4 + - keystoneauth1 >= 3.6.0 +attributes: + check_mode: + support: full + diff_mode: + support: none options: - state: + state: + description: + - Whether the given object should exist in VPC. + type: str + choices: ['present', 'absent'] + default: 'present' + timeouts: + description: + - The timeouts for each operations. + type: dict + default: {} + suboptions: + create: description: - - Whether the given object should exist in vpc. + - The timeout for create operation. type: str - choices: ['present', 'absent'] - default: 'present' - timeouts: + default: '15m' + update: description: - - The timeouts for each operations. - type: dict - suboptions: - create: - description: - - The timeout for create operation. - type: str - default: '15m' - update: - description: - - The timeout for update operation. - type: str - default: '15m' - delete: - description: - - The timeout for delete operation. - type: str - default: '15m' - name: - description: - - The name of vpc. + - The timeout for update operation. type: str - required: true - cidr: + default: '15m' + delete: description: - - The range of available subnets in the vpc. + - The timeout for delete operation. type: str - required: true + default: '15m' + name: + description: + - The name of vpc. + type: str + required: true + cidr: + description: + - The range of available subnets in the VPC. 
+ type: str + required: true extends_documentation_fragment: -- community.general.hwc + - community.general.hwc + - community.general.attributes +""" -''' - -EXAMPLES = ''' +EXAMPLES = r""" - name: Create a vpc community.general.hwc_network_vpc: - identity_endpoint: "{{ identity_endpoint }}" - user: "{{ user }}" - password: "{{ password }}" - domain: "{{ domain }}" - project: "{{ project }}" - region: "{{ region }}" - name: "vpc_1" - cidr: "192.168.100.0/24" - state: present -''' + identity_endpoint: "{{ identity_endpoint }}" + user: "{{ user }}" + password: "{{ password }}" + domain: "{{ domain }}" + project: "{{ project }}" + region: "{{ region }}" + name: "vpc_1" + cidr: "192.168.100.0/24" + state: present +""" -RETURN = ''' - id: - description: - - the id of vpc. - type: str - returned: success - name: - description: - - the name of vpc. - type: str - returned: success - cidr: - description: - - the range of available subnets in the vpc. - type: str - returned: success - status: - description: - - the status of vpc. - type: str - returned: success - routes: - description: - - the route information. - type: complex - returned: success - contains: - destination: - description: - - the destination network segment of a route. - type: str - returned: success - next_hop: - description: - - the next hop of a route. If the route type is peering, - it will provide VPC peering connection ID. - type: str - returned: success - enable_shared_snat: - description: - - show whether the shared snat is enabled. - type: bool - returned: success -''' +RETURN = r""" +id: + description: + - The ID of VPC. + type: str + returned: success +name: + description: + - The name of VPC. + type: str + returned: success +cidr: + description: + - The range of available subnets in the VPC. + type: str + returned: success +status: + description: + - The status of VPC. + type: str + returned: success +routes: + description: + - The route information. 
+ type: complex + returned: success + contains: + destination: + description: + - The destination network segment of a route. + type: str + returned: success + next_hop: + description: + - The next hop of a route. If the route type is peering, it provides VPC peering connection ID. + type: str + returned: success +enable_shared_snat: + description: + - Show whether the shared SNAT is enabled. + type: bool + returned: success +""" ############################################################################### # Imports @@ -374,13 +376,13 @@ def response_to_hash(module, response): This is for doing comparisons with Ansible's current parameters. """ return { - u'id': response.get(u'id'), - u'name': response.get(u'name'), - u'cidr': response.get(u'cidr'), - u'status': response.get(u'status'), - u'routes': VpcRoutesArray( - response.get(u'routes', []), module).from_response(), - u'enable_shared_snat': response.get(u'enable_shared_snat') + 'id': response.get('id'), + 'name': response.get('name'), + 'cidr': response.get('cidr'), + 'status': response.get('status'), + 'routes': VpcRoutesArray( + response.get('routes', []), module).from_response(), + 'enable_shared_snat': response.get('enable_shared_snat') } @@ -478,14 +480,14 @@ class VpcRoutesArray(object): def _request_for_item(self, item): return { - u'destination': item.get('destination'), - u'nexthop': item.get('next_hop') + 'destination': item.get('destination'), + 'nexthop': item.get('next_hop') } def _response_from_item(self, item): return { - u'destination': item.get(u'destination'), - u'next_hop': item.get(u'nexthop') + 'destination': item.get('destination'), + 'next_hop': item.get('nexthop') } diff --git a/plugins/modules/cloud/huawei/hwc_smn_topic.py b/plugins/modules/hwc_smn_topic.py similarity index 70% rename from plugins/modules/cloud/huawei/hwc_smn_topic.py rename to plugins/modules/hwc_smn_topic.py index f7fb4faea4..6fb9a3814d 100644 --- a/plugins/modules/cloud/huawei/hwc_smn_topic.py +++ 
b/plugins/modules/hwc_smn_topic.py @@ -1,106 +1,101 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- # # Copyright (C) 2019 Huawei -# GNU General Public License v3.0+ (see COPYING or -# https://www.gnu.org/licenses/gpl-3.0.txt) +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations ############################################################################### # Documentation ############################################################################### -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: hwc_smn_topic description: - - Represents a SMN notification topic resource. -short_description: Creates a resource of SMNTopic in Huaweicloud Cloud + - Represents a SMN notification topic resource. +short_description: Creates a resource of SMNTopic in Huawei Cloud author: Huawei Inc. (@huaweicloud) requirements: - - requests >= 2.18.4 - - keystoneauth1 >= 3.6.0 + - requests >= 2.18.4 + - keystoneauth1 >= 3.6.0 +attributes: + check_mode: + support: full + diff_mode: + support: none options: - state: - description: - - Whether the given object should exist in Huaweicloud Cloud. - type: str - choices: ['present', 'absent'] - default: 'present' - display_name: - description: - - Topic display name, which is presented as the name of the email - sender in an email message. The topic display name contains a - maximum of 192 bytes. - type: str - required: false - name: - description: - - Name of the topic to be created. The topic name is a string of 1 - to 256 characters. It must contain upper- or lower-case letters, - digits, hyphens (-), and underscores C(_), and must start with a - letter or digit. - type: str - required: true + state: + description: + - Whether the given object should exist in Huawei Cloud. 
+ type: str + choices: ['present', 'absent'] + default: 'present' + display_name: + description: + - Topic display name, which is presented as the name of the email sender in an email message. The topic display name + contains a maximum of 192 bytes. + type: str + required: false + name: + description: + - Name of the topic to be created. The topic name is a string of 1 to 256 characters. It must contain upper- or lower-case + letters, digits, hyphens (V(-)), and underscores (V(_)), and must start with a letter or digit. + type: str + required: true extends_documentation_fragment: -- community.general.hwc + - community.general.hwc + - community.general.attributes +""" -''' - -EXAMPLES = ''' +EXAMPLES = r""" - name: Create a smn topic community.general.hwc_smn_topic: - identity_endpoint: "{{ identity_endpoint }}" - user_name: "{{ user_name }}" - password: "{{ password }}" - domain_name: "{{ domain_name }}" - project_name: "{{ project_name }}" - region: "{{ region }}" - name: "ansible_smn_topic_test" - state: present -''' + identity_endpoint: "{{ identity_endpoint }}" + user_name: "{{ user_name }}" + password: "{{ password }}" + domain_name: "{{ domain_name }}" + project_name: "{{ project_name }}" + region: "{{ region }}" + name: "ansible_smn_topic_test" + state: present +""" -RETURN = ''' +RETURN = r""" create_time: - description: - - Time when the topic was created. - returned: success - type: str + description: + - Time when the topic was created. + returned: success + type: str display_name: - description: - - Topic display name, which is presented as the name of the email - sender in an email message. The topic display name contains a - maximum of 192 bytes. - returned: success - type: str + description: + - Topic display name, which is presented as the name of the email sender in an email message. The topic display name contains + a maximum of 192 bytes. + returned: success + type: str name: - description: - - Name of the topic to be created. 
The topic name is a string of 1 - to 256 characters. It must contain upper- or lower-case letters, - digits, hyphens (-), and underscores C(_), and must start with a - letter or digit. - returned: success - type: str + description: + - Name of the topic to be created. The topic name is a string of 1 to 256 characters. It must contain upper- or lower-case + letters, digits, hyphens (V(-)), and underscores (V(_)), and must start with a letter or digit. + returned: success + type: str push_policy: - description: - - Message pushing policy. 0 indicates that the message sending - fails and the message is cached in the queue. 1 indicates that - the failed message is discarded. - returned: success - type: int + description: + - Message pushing policy. V(0) indicates that the message sending fails and the message is cached in the queue. V(1) indicates + that the failed message is discarded. + returned: success + type: int topic_urn: - description: - - Resource identifier of a topic, which is unique. - returned: success - type: str + description: + - Resource identifier of a topic, which is unique. + returned: success + type: str update_time: - description: - - Time when the topic was updated. - returned: success - type: str -''' + description: + - Time when the topic was updated. + returned: success + type: str +""" ############################################################################### # Imports @@ -317,13 +312,12 @@ def response_to_hash(module, response): This is for doing comparisons with Ansible's current parameters. 
""" return { - u'create_time': response.get(u'create_time'), - u'display_name': response.get(u'display_name'), - u'name': response.get(u'name'), - u'push_policy': _push_policy_convert_from_response( - response.get('push_policy')), - u'topic_urn': response.get(u'topic_urn'), - u'update_time': response.get(u'update_time') + 'create_time': response.get('create_time'), + 'display_name': response.get('display_name'), + 'name': response.get('name'), + 'push_policy': _push_policy_convert_from_response(response.get('push_policy')), + 'topic_urn': response.get('topic_urn'), + 'update_time': response.get('update_time') } diff --git a/plugins/modules/cloud/huawei/hwc_vpc_eip.py b/plugins/modules/hwc_vpc_eip.py similarity index 69% rename from plugins/modules/cloud/huawei/hwc_vpc_eip.py rename to plugins/modules/hwc_vpc_eip.py index b53395f87a..9a23b7b3f9 100644 --- a/plugins/modules/cloud/huawei/hwc_vpc_eip.py +++ b/plugins/modules/hwc_vpc_eip.py @@ -1,130 +1,119 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- # # Copyright (C) 2019 Huawei -# GNU General Public License v3.0+ (see COPYING or -# https://www.gnu.org/licenses/gpl-3.0.txt) +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations ############################################################################### # Documentation ############################################################################### -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: hwc_vpc_eip description: - - elastic ip management. -short_description: Creates a resource of Vpc/EIP in Huawei Cloud + - Elastic IP management. +short_description: Creates a resource of VPC/EIP in Huawei Cloud version_added: '0.2.0' author: Huawei Inc. 
(@huaweicloud) requirements: - - keystoneauth1 >= 3.6.0 + - keystoneauth1 >= 3.6.0 +attributes: + check_mode: + support: full + diff_mode: + support: none options: - state: + state: + description: + - Whether the given object should exist in Huawei Cloud. + type: str + choices: ['present', 'absent'] + default: 'present' + timeouts: + description: + - The timeouts for each operations. + type: dict + default: {} + suboptions: + create: description: - - Whether the given object should exist in Huawei Cloud. + - The timeouts for create operation. type: str - choices: ['present', 'absent'] - default: 'present' - timeouts: + default: '5m' + update: description: - - The timeouts for each operations. - type: dict - suboptions: - create: - description: - - The timeouts for create operation. - type: str - default: '5m' - update: - description: - - The timeouts for update operation. - type: str - default: '5m' - type: + - The timeouts for update operation. + type: str + default: '5m' + type: + description: + - Specifies the EIP type. + type: str + required: true + dedicated_bandwidth: + description: + - Specifies the dedicated bandwidth object. + type: dict + required: false + suboptions: + charge_mode: description: - - Specifies the EIP type. + - Specifies whether the bandwidth is billed by traffic or by bandwidth size. The value can be bandwidth or traffic. + If this parameter is left blank or is null character string, default value bandwidth is used. For IPv6 addresses, + the default parameter value is bandwidth outside China and is traffic in China. type: str required: true - dedicated_bandwidth: + name: description: - - Specifies the dedicated bandwidth object. - type: dict - required: false - suboptions: - charge_mode: - description: - - Specifies whether the bandwidth is billed by traffic or - by bandwidth size. The value can be bandwidth or traffic. - If this parameter is left blank or is null character - string, default value bandwidth is used. 
For IPv6 - addresses, the default parameter value is bandwidth - outside China and is traffic in China. - type: str - required: true - name: - description: - - Specifies the bandwidth name. The value is a string of 1 - to 64 characters that can contain letters, digits, - underscores C(_), hyphens (-), and periods (.). - type: str - required: true - size: - description: - - Specifies the bandwidth size. The value ranges from 1 - Mbit/s to 2000 Mbit/s by default. (The specific range may - vary depending on the configuration in each region. You - can see the bandwidth range of each region on the - management console.) The minimum unit for bandwidth - adjustment varies depending on the bandwidth range. The - details are as follows. - - The minimum unit is 1 Mbit/s if the allowed bandwidth - size ranges from 0 to 300 Mbit/s (with 300 Mbit/s - included). - - The minimum unit is 50 Mbit/s if the allowed bandwidth - size ranges 300 Mbit/s to 1000 Mbit/s (with 1000 Mbit/s - included). - - The minimum unit is 500 Mbit/s if the allowed bandwidth - size is greater than 1000 Mbit/s. - type: int - required: true - enterprise_project_id: - description: - - Specifies the enterprise project ID. + - Specifies the bandwidth name. The value is a string of 1 to 64 characters that can contain letters, digits, underscores + (V(_)), hyphens (V(-)), and periods (V(.)). type: str - required: false - ip_version: + required: true + size: description: - - The value can be 4 (IPv4 address) or 6 (IPv6 address). If this - parameter is left blank, an IPv4 address will be assigned. + - Specifies the bandwidth size. The value ranges from 1 Mbit/s to 2000 Mbit/s by default. (The specific range may + vary depending on the configuration in each region. You can see the bandwidth range of each region on the management + console.) The minimum unit for bandwidth adjustment varies depending on the bandwidth range. The details are as + follows. 
+ - The minimum unit is 1 Mbit/s if the allowed bandwidth size ranges from 0 to 300 Mbit/s (with 300 Mbit/s included). + - The minimum unit is 50 Mbit/s if the allowed bandwidth size ranges 300 Mbit/s to 1000 Mbit/s (with 1000 Mbit/s + included). + - The minimum unit is 500 Mbit/s if the allowed bandwidth size is greater than 1000 Mbit/s. type: int - required: false - ipv4_address: - description: - - Specifies the obtained IPv4 EIP. The system automatically assigns - an EIP if you do not specify it. - type: str - required: false - port_id: - description: - - Specifies the port ID. This parameter is returned only when a - private IP address is bound with the EIP. - type: str - required: false - shared_bandwidth_id: - description: - - Specifies the ID of shared bandwidth. - type: str - required: false + required: true + enterprise_project_id: + description: + - Specifies the enterprise project ID. + type: str + required: false + ip_version: + description: + - The value can be 4 (IPv4 address) or 6 (IPv6 address). If this parameter is left blank, an IPv4 address is assigned. + type: int + required: false + ipv4_address: + description: + - Specifies the obtained IPv4 EIP. The system automatically assigns an EIP if you do not specify it. + type: str + required: false + port_id: + description: + - Specifies the port ID. This parameter is returned only when a private IP address is bound with the EIP. + type: str + required: false + shared_bandwidth_id: + description: + - Specifies the ID of shared bandwidth. 
+ type: str + required: false extends_documentation_fragment: -- community.general.hwc + - community.general.hwc + - community.general.attributes +""" -''' - -EXAMPLES = ''' +EXAMPLES = r""" # create an eip and bind it to a port - name: Create vpc hwc_network_vpc: @@ -135,7 +124,7 @@ EXAMPLES = ''' hwc_vpc_subnet: gateway_ip: "192.168.100.32" name: "ansible_network_subnet_test" - dhcp_enable: True + dhcp_enable: true vpc_id: "{{ vpc.id }}" cidr: "192.168.100.0/26" register: subnet @@ -152,107 +141,91 @@ EXAMPLES = ''' name: "ansible_test_dedicated_bandwidth" size: 1 port_id: "{{ port.id }}" -''' +""" -RETURN = ''' - type: - description: - - Specifies the EIP type. - type: str - returned: success - dedicated_bandwidth: - description: - - Specifies the dedicated bandwidth object. - type: dict - returned: success - contains: - charge_mode: - description: - - Specifies whether the bandwidth is billed by traffic or - by bandwidth size. The value can be bandwidth or traffic. - If this parameter is left blank or is null character - string, default value bandwidth is used. For IPv6 - addresses, the default parameter value is bandwidth - outside China and is traffic in China. - type: str - returned: success - name: - description: - - Specifies the bandwidth name. The value is a string of 1 - to 64 characters that can contain letters, digits, - underscores C(_), hyphens (-), and periods (.). - type: str - returned: success - size: - description: - - Specifies the bandwidth size. The value ranges from 1 - Mbit/s to 2000 Mbit/s by default. (The specific range may - vary depending on the configuration in each region. You - can see the bandwidth range of each region on the - management console.) The minimum unit for bandwidth - adjustment varies depending on the bandwidth range. The - details are as follows:. - - The minimum unit is 1 Mbit/s if the allowed bandwidth - size ranges from 0 to 300 Mbit/s (with 300 Mbit/s - included). 
- - The minimum unit is 50 Mbit/s if the allowed bandwidth - size ranges 300 Mbit/s to 1000 Mbit/s (with 1000 Mbit/s - included). - - The minimum unit is 500 Mbit/s if the allowed bandwidth - size is greater than 1000 Mbit/s. - type: int - returned: success - id: - description: - - Specifies the ID of dedicated bandwidth. - type: str - returned: success - enterprise_project_id: - description: - - Specifies the enterprise project ID. - type: str - returned: success - ip_version: - description: - - The value can be 4 (IPv4 address) or 6 (IPv6 address). If this - parameter is left blank, an IPv4 address will be assigned. - type: int - returned: success - ipv4_address: - description: - - Specifies the obtained IPv4 EIP. The system automatically assigns - an EIP if you do not specify it. - type: str - returned: success - port_id: - description: - - Specifies the port ID. This parameter is returned only when a - private IP address is bound with the EIP. - type: str - returned: success - shared_bandwidth_id: - description: - - Specifies the ID of shared bandwidth. - type: str - returned: success - create_time: - description: - - Specifies the time (UTC time) when the EIP was assigned. - type: str - returned: success - ipv6_address: - description: - - Specifies the obtained IPv6 EIP. - type: str - returned: success - private_ip_address: - description: - - Specifies the private IP address bound with the EIP. This - parameter is returned only when a private IP address is bound - with the EIP. - type: str - returned: success -''' +RETURN = r""" +type: + description: + - Specifies the EIP type. + type: str + returned: success +dedicated_bandwidth: + description: + - Specifies the dedicated bandwidth object. + type: dict + returned: success + contains: + charge_mode: + description: + - Specifies whether the bandwidth is billed by traffic or by bandwidth size. The value can be bandwidth or traffic. 
+ If this parameter is left blank or is null character string, default value bandwidth is used. For IPv6 addresses, + the default parameter value is bandwidth outside China and is traffic in China. + type: str + returned: success + name: + description: + - Specifies the bandwidth name. The value is a string of 1 to 64 characters that can contain letters, digits, underscores + (V(_)), hyphens (V(-)), and periods (V(.)). + type: str + returned: success + size: + description: + - Specifies the bandwidth size. The value ranges from 1 Mbit/s to 2000 Mbit/s by default. (The specific range may + vary depending on the configuration in each region. You can see the bandwidth range of each region on the management + console.) The minimum unit for bandwidth adjustment varies depending on the bandwidth range. The details are as + follows:. + - The minimum unit is 1 Mbit/s if the allowed bandwidth size ranges from 0 to 300 Mbit/s (with 300 Mbit/s included). + - The minimum unit is 50 Mbit/s if the allowed bandwidth size ranges 300 Mbit/s to 1000 Mbit/s (with 1000 Mbit/s included). + - The minimum unit is 500 Mbit/s if the allowed bandwidth size is greater than 1000 Mbit/s. + type: int + returned: success + id: + description: + - Specifies the ID of dedicated bandwidth. + type: str + returned: success +enterprise_project_id: + description: + - Specifies the enterprise project ID. + type: str + returned: success +ip_version: + description: + - The value can be 4 (IPv4 address) or 6 (IPv6 address). If this parameter is left blank, an IPv4 address is assigned. + type: int + returned: success +ipv4_address: + description: + - Specifies the obtained IPv4 EIP. The system automatically assigns an EIP if you do not specify it. + type: str + returned: success +port_id: + description: + - Specifies the port ID. This parameter is returned only when a private IP address is bound with the EIP. 
+ type: str + returned: success +shared_bandwidth_id: + description: + - Specifies the ID of shared bandwidth. + type: str + returned: success +create_time: + description: + - Specifies the time (UTC time) when the EIP was assigned. + type: str + returned: success +ipv6_address: + description: + - Specifies the obtained IPv6 EIP. + type: str + returned: success +private_ip_address: + description: + - Specifies the private IP address bound with the EIP. This parameter is returned only when a private IP address is bound + with the EIP. + type: str + returned: success +""" from ansible_collections.community.general.plugins.module_utils.hwc_utils import ( Config, HwcClientException, HwcClientException404, HwcModule, @@ -540,8 +513,7 @@ def async_wait_create(config, result, client, timeout): path_parameters = { "publicip_id": ["publicip", "id"], } - data = dict((key, navigate_value(result, path)) - for key, path in path_parameters.items()) + data = {key: navigate_value(result, path) for key, path in path_parameters.items()} url = build_path(module, "publicips/{publicip_id}", data) diff --git a/plugins/modules/cloud/huawei/hwc_vpc_peering_connect.py b/plugins/modules/hwc_vpc_peering_connect.py similarity index 84% rename from plugins/modules/cloud/huawei/hwc_vpc_peering_connect.py rename to plugins/modules/hwc_vpc_peering_connect.py index a4d5921b77..e5d410c327 100644 --- a/plugins/modules/cloud/huawei/hwc_vpc_peering_connect.py +++ b/plugins/modules/hwc_vpc_peering_connect.py @@ -1,84 +1,85 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- # # Copyright (C) 2019 Huawei -# GNU General Public License v3.0+ (see COPYING or +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or SPDX-License-Identifier: GPL-3.0-or-later # https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations 
############################################################################### # Documentation ############################################################################### -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: hwc_vpc_peering_connect description: - - vpc peering management. -short_description: Creates a resource of Vpc/PeeringConnect in Huawei Cloud + - VPC peering management. +short_description: Creates a resource of VPC/PeeringConnect in Huawei Cloud version_added: '0.2.0' author: Huawei Inc. (@huaweicloud) requirements: - - keystoneauth1 >= 3.6.0 + - keystoneauth1 >= 3.6.0 +attributes: + check_mode: + support: full + diff_mode: + support: none options: - state: - description: - - Whether the given object should exist in Huawei Cloud. - type: str - choices: ['present', 'absent'] - default: 'present' - timeouts: - description: - - The timeouts for each operations. - type: dict - suboptions: - create: - description: - - The timeouts for create operation. - type: str - default: '15m' - local_vpc_id: - description: - - Specifies the ID of local VPC. - type: str - required: true - name: - description: - - Specifies the name of the VPC peering connection. The value can - contain 1 to 64 characters. - type: str - required: true - peering_vpc: - description: - - Specifies information about the peering VPC. - type: dict - required: true - suboptions: - vpc_id: - description: - - Specifies the ID of peering VPC. - type: str - required: true - project_id: - description: - - Specifies the ID of the project which the peering vpc - belongs to. - type: str - required: false + state: description: + - Whether the given object should exist in Huawei Cloud. + type: str + choices: ['present', 'absent'] + default: 'present' + timeouts: + description: + - The timeouts for each operations. + type: dict + default: {} + suboptions: + create: description: - - The description of vpc peering connection. + - The timeouts for create operation. 
+ type: str + default: '15m' + local_vpc_id: + description: + - Specifies the ID of local VPC. + type: str + required: true + name: + description: + - Specifies the name of the VPC peering connection. The value can contain 1 to 64 characters. + type: str + required: true + peering_vpc: + description: + - Specifies information about the peering VPC. + type: dict + required: true + suboptions: + vpc_id: + description: + - Specifies the ID of peering VPC. + type: str + required: true + project_id: + description: + - Specifies the ID of the project which the peering vpc belongs to. type: str required: false + description: + description: + - The description of vpc peering connection. + type: str + required: false extends_documentation_fragment: -- community.general.hwc + - community.general.hwc + - community.general.attributes +""" -''' - -EXAMPLES = ''' +EXAMPLES = r""" # create a peering connect - name: Create a local vpc hwc_network_vpc: @@ -96,43 +97,41 @@ EXAMPLES = ''' name: "ansible_network_peering_test" peering_vpc: vpc_id: "{{ vpc2.id }}" -''' +""" -RETURN = ''' - local_vpc_id: - description: - - Specifies the ID of local VPC. - type: str - returned: success - name: - description: - - Specifies the name of the VPC peering connection. The value can - contain 1 to 64 characters. - type: str - returned: success - peering_vpc: - description: - - Specifies information about the peering VPC. - type: dict - returned: success - contains: - vpc_id: - description: - - Specifies the ID of peering VPC. - type: str - returned: success - project_id: - description: - - Specifies the ID of the project which the peering vpc - belongs to. - type: str - returned: success - description: - description: - - The description of vpc peering connection. - type: str - returned: success -''' +RETURN = r""" +local_vpc_id: + description: + - Specifies the ID of local VPC. + type: str + returned: success +name: + description: + - Specifies the name of the VPC peering connection. 
The value can contain 1 to 64 characters. + type: str + returned: success +peering_vpc: + description: + - Specifies information about the peering VPC. + type: dict + returned: success + contains: + vpc_id: + description: + - Specifies the ID of peering VPC. + type: str + returned: success + project_id: + description: + - Specifies the ID of the project which the peering vpc belongs to. + type: str + returned: success +description: + description: + - The description of vpc peering connection. + type: str + returned: success +""" from ansible_collections.community.general.plugins.module_utils.hwc_utils import ( Config, HwcClientException, HwcClientException404, HwcModule, @@ -400,8 +399,7 @@ def async_wait_create(config, result, client, timeout): path_parameters = { "peering_id": ["peering", "id"], } - data = dict((key, navigate_value(result, path)) - for key, path in path_parameters.items()) + data = {key: navigate_value(result, path) for key, path in path_parameters.items()} url = build_path(module, "v2.0/vpc/peerings/{peering_id}", data) diff --git a/plugins/modules/cloud/huawei/hwc_vpc_port.py b/plugins/modules/hwc_vpc_port.py similarity index 84% rename from plugins/modules/cloud/huawei/hwc_vpc_port.py rename to plugins/modules/hwc_vpc_port.py index cf0718f59b..54bea0f249 100644 --- a/plugins/modules/cloud/huawei/hwc_vpc_port.py +++ b/plugins/modules/hwc_vpc_port.py @@ -1,114 +1,114 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- # # Copyright (C) 2019 Huawei -# GNU General Public License v3.0+ (see COPYING or -# https://www.gnu.org/licenses/gpl-3.0.txt) +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations ############################################################################### # Documentation 
############################################################################### -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: hwc_vpc_port description: - - vpc port management. -short_description: Creates a resource of Vpc/Port in Huawei Cloud + - VPC port management. +short_description: Creates a resource of VPC/Port in Huawei Cloud version_added: '0.2.0' author: Huawei Inc. (@huaweicloud) requirements: - - keystoneauth1 >= 3.6.0 + - keystoneauth1 >= 3.6.0 +attributes: + check_mode: + support: full + diff_mode: + support: none options: - state: + state: + description: + - Whether the given object should exist in Huawei Cloud. + type: str + choices: ['present', 'absent'] + default: 'present' + timeouts: + description: + - The timeouts for each operations. + type: dict + default: {} + suboptions: + create: description: - - Whether the given object should exist in Huawei Cloud. + - The timeouts for create operation. type: str - choices: ['present', 'absent'] - default: 'present' - timeouts: + default: '15m' + subnet_id: + description: + - Specifies the ID of the subnet to which the port belongs. + type: str + required: true + admin_state_up: + description: + - Specifies the administrative state of the port. + type: bool + required: false + allowed_address_pairs: + description: + - Specifies a set of zero or more allowed address pairs. + required: false + type: list + elements: dict + suboptions: + ip_address: description: - - The timeouts for each operations. - type: dict - suboptions: - create: - description: - - The timeouts for create operation. - type: str - default: '15m' - subnet_id: - description: - - Specifies the ID of the subnet to which the port belongs. - type: str - required: true - admin_state_up: - description: - - Specifies the administrative state of the port. - type: bool - required: false - allowed_address_pairs: - description: - - Specifies a set of zero or more allowed address pairs. 
- required: false - type: list - elements: dict - suboptions: - ip_address: - description: - - Specifies the IP address. It cannot set it to 0.0.0.0. - Configure an independent security group for the port if a - large CIDR block (subnet mask less than 24) is configured - for parameter allowed_address_pairs. - type: str - required: false - mac_address: - description: - - Specifies the MAC address. - type: str - required: false - extra_dhcp_opts: - description: - - Specifies the extended option of DHCP. - type: list - elements: dict - required: false - suboptions: - name: - description: - - Specifies the option name. - type: str - required: false - value: - description: - - Specifies the option value. - type: str - required: false - ip_address: - description: - - Specifies the port IP address. + - Specifies the IP address. It cannot set it to 0.0.0.0. Configure an independent security group for the port if + a large CIDR block (subnet mask less than 24) is configured for parameter allowed_address_pairs. type: str required: false - name: + mac_address: description: - - Specifies the port name. The value can contain no more than 255 - characters. + - Specifies the MAC address. type: str required: false - security_groups: + extra_dhcp_opts: + description: + - Specifies the extended option of DHCP. + type: list + elements: dict + required: false + suboptions: + name: description: - - Specifies the ID of the security group. - type: list - elements: str + - Specifies the option name. + type: str required: false + value: + description: + - Specifies the option value. + type: str + required: false + ip_address: + description: + - Specifies the port IP address. + type: str + required: false + name: + description: + - Specifies the port name. The value can contain no more than 255 characters. + type: str + required: false + security_groups: + description: + - Specifies the ID of the security group. 
+ type: list + elements: str + required: false extends_documentation_fragment: -- community.general.hwc + - community.general.hwc + - community.general.attributes +""" -''' - -EXAMPLES = ''' +EXAMPLES = r""" # create a port - name: Create vpc hwc_network_vpc: @@ -119,7 +119,7 @@ EXAMPLES = ''' hwc_vpc_subnet: gateway_ip: "192.168.100.32" name: "ansible_network_subnet_test" - dhcp_enable: True + dhcp_enable: true vpc_id: "{{ vpc.id }}" cidr: "192.168.100.0/26" register: subnet @@ -127,76 +127,73 @@ EXAMPLES = ''' community.general.hwc_vpc_port: subnet_id: "{{ subnet.id }}" ip_address: "192.168.100.33" -''' +""" -RETURN = ''' - subnet_id: - description: - - Specifies the ID of the subnet to which the port belongs. - type: str - returned: success - admin_state_up: - description: - - Specifies the administrative state of the port. - type: bool - returned: success - allowed_address_pairs: - description: - - Specifies a set of zero or more allowed address pairs. - type: list - returned: success - contains: - ip_address: - description: - - Specifies the IP address. It cannot set it to 0.0.0.0. - Configure an independent security group for the port if a - large CIDR block (subnet mask less than 24) is configured - for parameter allowed_address_pairs. - type: str - returned: success - mac_address: - description: - - Specifies the MAC address. - type: str - returned: success - extra_dhcp_opts: - description: - - Specifies the extended option of DHCP. - type: list - returned: success - contains: - name: - description: - - Specifies the option name. - type: str - returned: success - value: - description: - - Specifies the option value. - type: str - returned: success +RETURN = r""" +subnet_id: + description: + - Specifies the ID of the subnet to which the port belongs. + type: str + returned: success +admin_state_up: + description: + - Specifies the administrative state of the port. 
+ type: bool + returned: success +allowed_address_pairs: + description: + - Specifies a set of zero or more allowed address pairs. + type: list + returned: success + contains: ip_address: - description: - - Specifies the port IP address. - type: str - returned: success - name: - description: - - Specifies the port name. The value can contain no more than 255 - characters. - type: str - returned: success - security_groups: - description: - - Specifies the ID of the security group. - type: list - returned: success + description: + - Specifies the IP address. It cannot set it to 0.0.0.0. Configure an independent security group for the port if a + large CIDR block (subnet mask less than 24) is configured for parameter allowed_address_pairs. + type: str + returned: success mac_address: - description: - - Specifies the port MAC address. - type: str - returned: success -''' + description: + - Specifies the MAC address. + type: str + returned: success +extra_dhcp_opts: + description: + - Specifies the extended option of DHCP. + type: list + returned: success + contains: + name: + description: + - Specifies the option name. + type: str + returned: success + value: + description: + - Specifies the option value. + type: str + returned: success +ip_address: + description: + - Specifies the port IP address. + type: str + returned: success +name: + description: + - Specifies the port name. The value can contain no more than 255 characters. + type: str + returned: success +security_groups: + description: + - Specifies the ID of the security group. + type: list + returned: success +mac_address: + description: + - Specifies the port MAC address. 
+ type: str + returned: success +""" from ansible_collections.community.general.plugins.module_utils.hwc_utils import ( Config, HwcClientException, HwcClientException404, HwcModule, @@ -553,8 +550,7 @@ def async_wait_create(config, result, client, timeout): path_parameters = { "port_id": ["port", "id"], } - data = dict((key, navigate_value(result, path)) - for key, path in path_parameters.items()) + data = {key: navigate_value(result, path) for key, path in path_parameters.items()} url = build_path(module, "ports/{port_id}", data) diff --git a/plugins/modules/cloud/huawei/hwc_vpc_private_ip.py b/plugins/modules/hwc_vpc_private_ip.py similarity index 79% rename from plugins/modules/cloud/huawei/hwc_vpc_private_ip.py rename to plugins/modules/hwc_vpc_private_ip.py index 901755f362..664b4c84e4 100644 --- a/plugins/modules/cloud/huawei/hwc_vpc_private_ip.py +++ b/plugins/modules/hwc_vpc_private_ip.py @@ -1,59 +1,60 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- # # Copyright (C) 2019 Huawei -# GNU General Public License v3.0+ (see COPYING or -# https://www.gnu.org/licenses/gpl-3.0.txt) +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations ############################################################################### # Documentation ############################################################################### -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: hwc_vpc_private_ip description: - - vpc private ip management. -short_description: Creates a resource of Vpc/PrivateIP in Huawei Cloud + - VPC private IP management. +short_description: Creates a resource of VPC/PrivateIP in Huawei Cloud notes: - - If I(id) option is provided, it takes precedence over I(subnet_id), I(ip_address) for private ip selection. 
- - I(subnet_id), I(ip_address) are used for private ip selection. If more than one private ip with this options exists, execution is aborted. - - No parameter support updating. If one of option is changed, the module will create a new resource. + - If O(id) option is provided, it takes precedence over O(subnet_id), O(ip_address) for private IP selection. + - O(subnet_id), O(ip_address) are used for private IP selection. If more than one private IP with this options exists, execution + is aborted. + - No parameter support updating. If one of option is changed, the module creates a new resource. version_added: '0.2.0' author: Huawei Inc. (@huaweicloud) requirements: - - keystoneauth1 >= 3.6.0 + - keystoneauth1 >= 3.6.0 +attributes: + check_mode: + support: full + diff_mode: + support: none options: - state: - description: - - Whether the given object should exist in Huawei Cloud. - type: str - choices: ['present', 'absent'] - default: 'present' - subnet_id: - description: - - Specifies the ID of the subnet from which IP addresses are - assigned. Cannot be changed after creating the private ip. - type: str - required: true - ip_address: - description: - - Specifies the target IP address. The value can be an available IP - address in the subnet. If it is not specified, the system - automatically assigns an IP address. Cannot be changed after - creating the private ip. - type: str - required: false + state: + description: + - Whether the given object should exist in Huawei Cloud. + type: str + choices: ['present', 'absent'] + default: 'present' + subnet_id: + description: + - Specifies the ID of the subnet from which IP addresses are assigned. Cannot be changed after creating the private + IP. + type: str + required: true + ip_address: + description: + - Specifies the target IP address. The value can be an available IP address in the subnet. If it is not specified, the + system automatically assigns an IP address. Cannot be changed after creating the private IP. 
+ type: str + required: false extends_documentation_fragment: -- community.general.hwc + - community.general.hwc + - community.general.attributes +""" -''' - -EXAMPLES = ''' -# create a private ip +EXAMPLES = r""" +# create a private IP - name: Create vpc hwc_network_vpc: cidr: "192.168.100.0/24" @@ -63,31 +64,29 @@ EXAMPLES = ''' hwc_vpc_subnet: gateway_ip: "192.168.100.32" name: "ansible_network_subnet_test" - dhcp_enable: True + dhcp_enable: true vpc_id: "{{ vpc.id }}" cidr: "192.168.100.0/26" register: subnet -- name: Create a private ip +- name: Create a private IP community.general.hwc_vpc_private_ip: subnet_id: "{{ subnet.id }}" ip_address: "192.168.100.33" -''' +""" -RETURN = ''' - subnet_id: - description: - - Specifies the ID of the subnet from which IP addresses are - assigned. - type: str - returned: success - ip_address: - description: - - Specifies the target IP address. The value can be an available IP - address in the subnet. If it is not specified, the system - automatically assigns an IP address. - type: str - returned: success -''' +RETURN = r""" +subnet_id: + description: + - Specifies the ID of the subnet from which IP addresses are assigned. + type: str + returned: success +ip_address: + description: + - Specifies the target IP address. The value can be an available IP address in the subnet. If it is not specified, the + system automatically assigns an IP address. 
+ type: str + returned: success +""" from ansible_collections.community.general.plugins.module_utils.hwc_utils import ( Config, HwcClientException, HwcModule, are_different_dicts, build_path, diff --git a/plugins/modules/cloud/huawei/hwc_vpc_route.py b/plugins/modules/hwc_vpc_route.py similarity index 81% rename from plugins/modules/cloud/huawei/hwc_vpc_route.py rename to plugins/modules/hwc_vpc_route.py index 31829dc601..dfb1aea61b 100644 --- a/plugins/modules/cloud/huawei/hwc_vpc_route.py +++ b/plugins/modules/hwc_vpc_route.py @@ -1,65 +1,68 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- # # Copyright (C) 2019 Huawei -# GNU General Public License v3.0+ (see COPYING or -# https://www.gnu.org/licenses/gpl-3.0.txt) +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations ############################################################################### # Documentation ############################################################################### -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: hwc_vpc_route description: - - vpc route management. -short_description: Creates a resource of Vpc/Route in Huawei Cloud + - VPC route management. +short_description: Creates a resource of VPC/Route in Huawei Cloud notes: - - If I(id) option is provided, it takes precedence over I(destination), I(vpc_id), I(type) and I(next_hop) for route selection. - - I(destination), I(vpc_id), I(type) and I(next_hop) are used for route selection. If more than one route with this options exists, execution is aborted. - - No parameter support updating. If one of option is changed, the module will create a new resource. + - If O(id) option is provided, it takes precedence over O(destination), O(vpc_id), O(type), and O(next_hop) for route selection. 
+ - O(destination), O(vpc_id), O(type) and O(next_hop) are used for route selection. If more than one route with this options + exists, execution is aborted. + - No parameter support updating. If one of option is changed, the module creates a new resource. version_added: '0.2.0' author: Huawei Inc. (@huaweicloud) requirements: - - keystoneauth1 >= 3.6.0 + - keystoneauth1 >= 3.6.0 +attributes: + check_mode: + support: full + diff_mode: + support: none options: - state: - description: - - Whether the given object should exist in Huawei Cloud. - type: str - choices: ['present', 'absent'] - default: 'present' - destination: - description: - - Specifies the destination IP address or CIDR block. - type: str - required: true - next_hop: - description: - - Specifies the next hop. The value is VPC peering connection ID. - type: str - required: true - vpc_id: - description: - - Specifies the VPC ID to which route is added. - type: str - required: true - type: - description: - - Specifies the type of route. - type: str - required: false - default: 'peering' + state: + description: + - Whether the given object should exist in Huawei Cloud. + type: str + choices: ['present', 'absent'] + default: 'present' + destination: + description: + - Specifies the destination IP address or CIDR block. + type: str + required: true + next_hop: + description: + - Specifies the next hop. The value is VPC peering connection ID. + type: str + required: true + vpc_id: + description: + - Specifies the VPC ID to which route is added. + type: str + required: true + type: + description: + - Specifies the type of route. 
+ type: str + required: false + default: 'peering' extends_documentation_fragment: -- community.general.hwc + - community.general.hwc + - community.general.attributes +""" -''' - -EXAMPLES = ''' +EXAMPLES = r""" # create a peering connect - name: Create a local vpc hwc_network_vpc: @@ -85,35 +88,35 @@ EXAMPLES = ''' vpc_id: "{{ vpc1.id }}" destination: "192.168.0.0/16" next_hop: "{{ connect.id }}" -''' +""" -RETURN = ''' - id: - description: - - UUID of the route. - type: str - returned: success - destination: - description: - - Specifies the destination IP address or CIDR block. - type: str - returned: success - next_hop: - description: - - Specifies the next hop. The value is VPC peering connection ID. - type: str - returned: success - vpc_id: - description: - - Specifies the VPC ID to which route is added. - type: str - returned: success - type: - description: - - Specifies the type of route. - type: str - returned: success -''' +RETURN = r""" +id: + description: + - UUID of the route. + type: str + returned: success +destination: + description: + - Specifies the destination IP address or CIDR block. + type: str + returned: success +next_hop: + description: + - Specifies the next hop. The value is VPC peering connection ID. + type: str + returned: success +vpc_id: + description: + - Specifies the VPC ID to which route is added. + type: str + returned: success +type: + description: + - Specifies the type of route. 
+ type: str + returned: success +""" from ansible_collections.community.general.plugins.module_utils.hwc_utils import ( Config, HwcClientException, HwcModule, are_different_dicts, build_path, diff --git a/plugins/modules/cloud/huawei/hwc_vpc_security_group.py b/plugins/modules/hwc_vpc_security_group.py similarity index 71% rename from plugins/modules/cloud/huawei/hwc_vpc_security_group.py rename to plugins/modules/hwc_vpc_security_group.py index 5a1dfe706b..d73318666c 100644 --- a/plugins/modules/cloud/huawei/hwc_vpc_security_group.py +++ b/plugins/modules/hwc_vpc_security_group.py @@ -1,167 +1,150 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- # # Copyright (C) 2019 Huawei -# GNU General Public License v3.0+ (see COPYING or -# https://www.gnu.org/licenses/gpl-3.0.txt) +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations ############################################################################### # Documentation ############################################################################### -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: hwc_vpc_security_group description: - - vpc security group management. -short_description: Creates a resource of Vpc/SecurityGroup in Huawei Cloud + - VPC security group management. +short_description: Creates a resource of VPC/SecurityGroup in Huawei Cloud notes: - - If I(id) option is provided, it takes precedence over I(name), - I(enterprise_project_id) and I(vpc_id) for security group selection. - - I(name), I(enterprise_project_id) and I(vpc_id) are used for security - group selection. If more than one security group with this options exists, - execution is aborted. - - No parameter support updating. If one of option is changed, the module - will create a new resource. 
+ - If O(id) option is provided, it takes precedence over O(name), O(enterprise_project_id), and O(vpc_id) for security group + selection. + - O(name), O(enterprise_project_id) and O(vpc_id) are used for security group selection. If more than one security group + with this options exists, execution is aborted. + - No parameter support updating. If one of option is changed, the module creates a new resource. version_added: '0.2.0' author: Huawei Inc. (@huaweicloud) requirements: - - keystoneauth1 >= 3.6.0 + - keystoneauth1 >= 3.6.0 +attributes: + check_mode: + support: full + diff_mode: + support: none options: - state: - description: - - Whether the given object should exist in Huawei Cloud. - type: str - choices: ['present', 'absent'] - default: 'present' - name: - description: - - Specifies the security group name. The value is a string of 1 to - 64 characters that can contain letters, digits, underscores C(_), - hyphens (-), and periods (.). - type: str - required: true - enterprise_project_id: - description: - - Specifies the enterprise project ID. When creating a security - group, associate the enterprise project ID with the security - group.s - type: str - required: false - vpc_id: - description: - - Specifies the resource ID of the VPC to which the security group - belongs. - type: str - required: false + state: + description: + - Whether the given object should exist in Huawei Cloud. + type: str + choices: ['present', 'absent'] + default: 'present' + name: + description: + - Specifies the security group name. The value is a string of 1 to 64 characters that can contain letters, digits, underscores + (V(_)), hyphens (V(-)), and periods (V(.)). + type: str + required: true + enterprise_project_id: + description: + - Specifies the enterprise project ID. When creating a security group, associate the enterprise project ID with the + security group.s. 
+ type: str + required: false + vpc_id: + description: + - Specifies the resource ID of the VPC to which the security group belongs. + type: str + required: false extends_documentation_fragment: -- community.general.hwc + - community.general.hwc + - community.general.attributes +""" -''' - -EXAMPLES = ''' +EXAMPLES = r""" # create a security group - name: Create a security group community.general.hwc_vpc_security_group: name: "ansible_network_security_group_test" -''' +""" -RETURN = ''' - name: - description: - - Specifies the security group name. The value is a string of 1 to - 64 characters that can contain letters, digits, underscores C(_), - hyphens (-), and periods (.). - type: str - returned: success - enterprise_project_id: - description: - - Specifies the enterprise project ID. When creating a security - group, associate the enterprise project ID with the security - group. - type: str - returned: success - vpc_id: - description: - - Specifies the resource ID of the VPC to which the security group - belongs. - type: str - returned: success - rules: - description: - - Specifies the security group rule, which ensures that resources - in the security group can communicate with one another. - type: complex - returned: success - contains: - description: - description: - - Provides supplementary information about the security - group rule. - type: str - returned: success - direction: - description: - - Specifies the direction of access control. The value can - be egress or ingress. - type: str - returned: success - ethertype: - description: - - Specifies the IP protocol version. The value can be IPv4 - or IPv6. - type: str - returned: success - id: - description: - - Specifies the security group rule ID. - type: str - returned: success - port_range_max: - description: - - Specifies the end port number. The value ranges from 1 to - 65535. If the protocol is not icmp, the value cannot be - smaller than the port_range_min value. An empty value - indicates all ports. 
- type: int - returned: success - port_range_min: - description: - - Specifies the start port number. The value ranges from 1 - to 65535. The value cannot be greater than the - port_range_max value. An empty value indicates all ports. - type: int - returned: success - protocol: - description: - - Specifies the protocol type. The value can be icmp, tcp, - udp, or others. If the parameter is left blank, the - security group supports all protocols. - type: str - returned: success - remote_address_group_id: - description: - - Specifies the ID of remote IP address group. - type: str - returned: success - remote_group_id: - description: - - Specifies the ID of the peer security group. - type: str - returned: success - remote_ip_prefix: - description: - - Specifies the remote IP address. If the access control - direction is set to egress, the parameter specifies the - source IP address. If the access control direction is set - to ingress, the parameter specifies the destination IP - address. - type: str - returned: success -''' +RETURN = r""" +name: + description: + - Specifies the security group name. The value is a string of 1 to 64 characters that can contain letters, digits, underscores + (V(_)), hyphens (V(-)), and periods (V(.)). + type: str + returned: success +enterprise_project_id: + description: + - Specifies the enterprise project ID. When creating a security group, associate the enterprise project ID with the security + group. + type: str + returned: success +vpc_id: + description: + - Specifies the resource ID of the VPC to which the security group belongs. + type: str + returned: success +rules: + description: + - Specifies the security group rule, which ensures that resources in the security group can communicate with one another. + type: complex + returned: success + contains: + description: + description: + - Provides supplementary information about the security group rule. 
+ type: str + returned: success + direction: + description: + - Specifies the direction of access control. The value can be egress or ingress. + type: str + returned: success + ethertype: + description: + - Specifies the IP protocol version. The value can be IPv4 or IPv6. + type: str + returned: success + id: + description: + - Specifies the security group rule ID. + type: str + returned: success + port_range_max: + description: + - Specifies the end port number. The value ranges from 1 to 65535. If the protocol is not icmp, the value cannot be + smaller than the port_range_min value. An empty value indicates all ports. + type: int + returned: success + port_range_min: + description: + - Specifies the start port number. The value ranges from 1 to 65535. The value cannot be greater than the port_range_max + value. An empty value indicates all ports. + type: int + returned: success + protocol: + description: + - Specifies the protocol type. The value can be icmp, tcp, udp, or others. If the parameter is left blank, the security + group supports all protocols. + type: str + returned: success + remote_address_group_id: + description: + - Specifies the ID of remote IP address group. + type: str + returned: success + remote_group_id: + description: + - Specifies the ID of the peer security group. + type: str + returned: success + remote_ip_prefix: + description: + - Specifies the remote IP address. If the access control direction is set to egress, the parameter specifies the source + IP address. If the access control direction is set to ingress, the parameter specifies the destination IP address. 
+ type: str + returned: success +""" from ansible_collections.community.general.plugins.module_utils.hwc_utils import ( Config, HwcClientException, HwcModule, are_different_dicts, build_path, diff --git a/plugins/modules/cloud/huawei/hwc_vpc_security_group_rule.py b/plugins/modules/hwc_vpc_security_group_rule.py similarity index 66% rename from plugins/modules/cloud/huawei/hwc_vpc_security_group_rule.py rename to plugins/modules/hwc_vpc_security_group_rule.py index f92c82764e..153950fb2d 100644 --- a/plugins/modules/cloud/huawei/hwc_vpc_security_group_rule.py +++ b/plugins/modules/hwc_vpc_security_group_rule.py @@ -1,110 +1,99 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- # # Copyright (C) 2019 Huawei -# GNU General Public License v3.0+ (see COPYING or -# https://www.gnu.org/licenses/gpl-3.0.txt) +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations ############################################################################### # Documentation ############################################################################### -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: hwc_vpc_security_group_rule description: - - vpc security group management. -short_description: Creates a resource of Vpc/SecurityGroupRule in Huawei Cloud + - VPC security group management. +short_description: Creates a resource of VPC/SecurityGroupRule in Huawei Cloud notes: - - If I(id) option is provided, it takes precedence over - I(enterprise_project_id) for security group rule selection. - - I(security_group_id) is used for security group rule selection. If more - than one security group rule with this options exists, execution is - aborted. - - No parameter support updating. If one of option is changed, the module - will create a new resource. 
+ - If O(id) option is provided, it takes precedence over O(security_group_id) for security group rule selection. + - O(security_group_id) is used for security group rule selection. If more than one security group rule with this options + exists, execution is aborted. + - No parameter support updating. If one of option is changed, the module creates a new resource. version_added: '0.2.0' author: Huawei Inc. (@huaweicloud) requirements: - - keystoneauth1 >= 3.6.0 + - keystoneauth1 >= 3.6.0 +attributes: + check_mode: + support: full + diff_mode: + support: none options: - state: - description: - - Whether the given object should exist in Huawei Cloud. - type: str - choices: ['present', 'absent'] - default: 'present' - direction: - description: - - Specifies the direction of access control. The value can be - egress or ingress. - type: str - required: true - security_group_id: - description: - - Specifies the security group rule ID, which uniquely identifies - the security group rule. - type: str - required: true + state: description: - description: - - Provides supplementary information about the security group rule. - The value is a string of no more than 255 characters that can - contain letters and digits. - type: str - required: false - ethertype: - description: - - Specifies the IP protocol version. The value can be IPv4 or IPv6. - If you do not set this parameter, IPv4 is used by default. - type: str - required: false - port_range_max: - description: - - Specifies the end port number. The value ranges from 1 to 65535. - If the protocol is not icmp, the value cannot be smaller than the - port_range_min value. An empty value indicates all ports. - type: int - required: false - port_range_min: - description: - - Specifies the start port number. The value ranges from 1 to - 65535. The value cannot be greater than the port_range_max value. - An empty value indicates all ports. - type: int - required: false - protocol: - description: - - Specifies the protocol type. 
The value can be icmp, tcp, or udp. - If the parameter is left blank, the security group supports all - protocols. - type: str - required: false - remote_group_id: - description: - - Specifies the ID of the peer security group. The value is - exclusive with parameter remote_ip_prefix. - type: str - required: false - remote_ip_prefix: - description: - - Specifies the remote IP address. If the access control direction - is set to egress, the parameter specifies the source IP address. - If the access control direction is set to ingress, the parameter - specifies the destination IP address. The value can be in the - CIDR format or IP addresses. The parameter is exclusive with - parameter remote_group_id. - type: str - required: false + - Whether the given object should exist in Huawei Cloud. + type: str + choices: ['present', 'absent'] + default: 'present' + direction: + description: + - Specifies the direction of access control. The value can be egress or ingress. + type: str + required: true + security_group_id: + description: + - Specifies the security group rule ID, which uniquely identifies the security group rule. + type: str + required: true + description: + description: + - Provides supplementary information about the security group rule. The value is a string of no more than 255 characters + that can contain letters and digits. + type: str + required: false + ethertype: + description: + - Specifies the IP protocol version. The value can be IPv4 or IPv6. If you do not set this parameter, IPv4 is used by + default. + type: str + required: false + port_range_max: + description: + - Specifies the end port number. The value ranges from 1 to 65535. If the protocol is not icmp, the value cannot be + smaller than the port_range_min value. An empty value indicates all ports. + type: int + required: false + port_range_min: + description: + - Specifies the start port number. The value ranges from 1 to 65535. The value cannot be greater than the port_range_max + value. 
An empty value indicates all ports. + type: int + required: false + protocol: + description: + - Specifies the protocol type. The value can be icmp, tcp, or udp. If the parameter is left blank, the security group + supports all protocols. + type: str + required: false + remote_group_id: + description: + - Specifies the ID of the peer security group. The value is exclusive with parameter remote_ip_prefix. + type: str + required: false + remote_ip_prefix: + description: + - Specifies the remote IP address. If the access control direction is set to egress, the parameter specifies the source + IP address. If the access control direction is set to ingress, the parameter specifies the destination IP address. + The value can be in the CIDR format or IP addresses. The parameter is exclusive with parameter remote_group_id. + type: str + required: false extends_documentation_fragment: -- community.general.hwc + - community.general.hwc + - community.general.attributes +""" -''' - -EXAMPLES = ''' +EXAMPLES = r""" # create a security group rule - name: Create a security group hwc_vpc_security_group: @@ -119,72 +108,62 @@ EXAMPLES = ''' security_group_id: "{{ sg.id }}" port_range_min: 22 remote_ip_prefix: "0.0.0.0/0" -''' +""" -RETURN = ''' - direction: - description: - - Specifies the direction of access control. The value can be - egress or ingress. - type: str - returned: success - security_group_id: - description: - - Specifies the security group rule ID, which uniquely identifies - the security group rule. - type: str - returned: success - description: - description: - - Provides supplementary information about the security group rule. - The value is a string of no more than 255 characters that can - contain letters and digits. - type: str - returned: success - ethertype: - description: - - Specifies the IP protocol version. The value can be IPv4 or IPv6. - If you do not set this parameter, IPv4 is used by default. 
- type: str - returned: success - port_range_max: - description: - - Specifies the end port number. The value ranges from 1 to 65535. - If the protocol is not icmp, the value cannot be smaller than the - port_range_min value. An empty value indicates all ports. - type: int - returned: success - port_range_min: - description: - - Specifies the start port number. The value ranges from 1 to - 65535. The value cannot be greater than the port_range_max value. - An empty value indicates all ports. - type: int - returned: success - protocol: - description: - - Specifies the protocol type. The value can be icmp, tcp, or udp. - If the parameter is left blank, the security group supports all - protocols. - type: str - returned: success - remote_group_id: - description: - - Specifies the ID of the peer security group. The value is - exclusive with parameter remote_ip_prefix. - type: str - returned: success - remote_ip_prefix: - description: - - Specifies the remote IP address. If the access control direction - is set to egress, the parameter specifies the source IP address. - If the access control direction is set to ingress, the parameter - specifies the destination IP address. The value can be in the - CIDR format or IP addresses. The parameter is exclusive with - parameter remote_group_id. - type: str - returned: success -''' +RETURN = r""" +direction: + description: + - Specifies the direction of access control. The value can be egress or ingress. + type: str + returned: success +security_group_id: + description: + - Specifies the security group rule ID, which uniquely identifies the security group rule. + type: str + returned: success +description: + description: + - Provides supplementary information about the security group rule. The value is a string of no more than 255 characters + that can contain letters and digits. + type: str + returned: success +ethertype: + description: + - Specifies the IP protocol version. The value can be IPv4 or IPv6. 
If you do not set this parameter, IPv4 is used by + default. + type: str + returned: success +port_range_max: + description: + - Specifies the end port number. The value ranges from 1 to 65535. If the protocol is not icmp, the value cannot be smaller + than the port_range_min value. An empty value indicates all ports. + type: int + returned: success +port_range_min: + description: + - Specifies the start port number. The value ranges from 1 to 65535. The value cannot be greater than the port_range_max + value. An empty value indicates all ports. + type: int + returned: success +protocol: + description: + - Specifies the protocol type. The value can be icmp, tcp, or udp. If the parameter is left blank, the security group + supports all protocols. + type: str + returned: success +remote_group_id: + description: + - Specifies the ID of the peer security group. The value is exclusive with parameter remote_ip_prefix. + type: str + returned: success +remote_ip_prefix: + description: + - Specifies the remote IP address. If the access control direction is set to egress, the parameter specifies the source + IP address. If the access control direction is set to ingress, the parameter specifies the destination IP address. The + value can be in the CIDR format or IP addresses. The parameter is exclusive with parameter remote_group_id. 
+ type: str + returned: success +""" from ansible_collections.community.general.plugins.module_utils.hwc_utils import ( Config, HwcClientException, HwcModule, are_different_dicts, build_path, diff --git a/plugins/modules/cloud/huawei/hwc_vpc_subnet.py b/plugins/modules/hwc_vpc_subnet.py similarity index 77% rename from plugins/modules/cloud/huawei/hwc_vpc_subnet.py rename to plugins/modules/hwc_vpc_subnet.py index ccf180502c..316ed39c1f 100644 --- a/plugins/modules/cloud/huawei/hwc_vpc_subnet.py +++ b/plugins/modules/hwc_vpc_subnet.py @@ -1,103 +1,99 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- # # Copyright (C) 2019 Huawei -# GNU General Public License v3.0+ (see COPYING or -# https://www.gnu.org/licenses/gpl-3.0.txt) +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations ############################################################################### # Documentation ############################################################################### -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: hwc_vpc_subnet description: - - subnet management. -short_description: Creates a resource of Vpc/Subnet in Huawei Cloud + - Subnet management. +short_description: Creates a resource of VPC/Subnet in Huawei Cloud version_added: '0.2.0' author: Huawei Inc. (@huaweicloud) requirements: - - keystoneauth1 >= 3.6.0 + - keystoneauth1 >= 3.6.0 +attributes: + check_mode: + support: full + diff_mode: + support: none options: - state: + state: + description: + - Whether the given object should exist in Huawei Cloud. + type: str + choices: ['present', 'absent'] + default: 'present' + timeouts: + description: + - The timeouts for each operations. 
+ type: dict + default: {} + suboptions: + create: description: - - Whether the given object should exist in Huawei Cloud. + - The timeouts for create operation. type: str - choices: ['present', 'absent'] - default: 'present' - timeouts: + default: '15m' + update: description: - - The timeouts for each operations. - type: dict - suboptions: - create: - description: - - The timeouts for create operation. - type: str - default: '15m' - update: - description: - - The timeouts for update operation. - type: str - default: '15m' - cidr: - description: - - Specifies the subnet CIDR block. The value must be within the VPC - CIDR block and be in CIDR format. The subnet mask cannot be - greater than 28. Cannot be changed after creating the subnet. + - The timeouts for update operation. type: str - required: true - gateway_ip: - description: - - Specifies the gateway of the subnet. The value must be an IP - address in the subnet. Cannot be changed after creating the subnet. - type: str - required: true - name: - description: - - Specifies the subnet name. The value is a string of 1 to 64 - characters that can contain letters, digits, underscores C(_), - hyphens (-), and periods (.). - type: str - required: true - vpc_id: - description: - - Specifies the ID of the VPC to which the subnet belongs. Cannot - be changed after creating the subnet. - type: str - required: true - availability_zone: - description: - - Specifies the AZ to which the subnet belongs. Cannot be changed - after creating the subnet. - type: str - required: false - dhcp_enable: - description: - - Specifies whether DHCP is enabled for the subnet. The value can - be true (enabled) or false(disabled), and default value is true. - If this parameter is set to false, newly created ECSs cannot - obtain IP addresses, and usernames and passwords cannot be - injected using Cloud-init. - type: bool - required: false - dns_address: - description: - - Specifies the DNS server addresses for subnet. 
The address - in the head will be used first. - type: list - elements: str - required: false + default: '15m' + cidr: + description: + - Specifies the subnet CIDR block. The value must be within the VPC CIDR block and be in CIDR format. The subnet mask + cannot be greater than 28. Cannot be changed after creating the subnet. + type: str + required: true + gateway_ip: + description: + - Specifies the gateway of the subnet. The value must be an IP address in the subnet. Cannot be changed after creating + the subnet. + type: str + required: true + name: + description: + - Specifies the subnet name. The value is a string of 1 to 64 characters that can contain letters, digits, underscores + (V(_)), hyphens (V(-)), and periods (V(.)). + type: str + required: true + vpc_id: + description: + - Specifies the ID of the VPC to which the subnet belongs. Cannot be changed after creating the subnet. + type: str + required: true + availability_zone: + description: + - Specifies the AZ to which the subnet belongs. Cannot be changed after creating the subnet. + type: str + required: false + dhcp_enable: + description: + - Specifies whether DHCP is enabled for the subnet. The value can be true (enabled) or false(disabled), and default + value is true. If this parameter is set to false, newly created ECSs cannot obtain IP addresses, and usernames and + passwords cannot be injected using Cloud-init. + type: bool + required: false + dns_address: + description: + - Specifies the DNS server addresses for subnet. The address in the head is used first. 
+ type: list + elements: str + required: false extends_documentation_fragment: -- community.general.hwc + - community.general.hwc + - community.general.attributes +""" -''' - -EXAMPLES = ''' +EXAMPLES = r""" # create subnet - name: Create vpc hwc_network_vpc: @@ -110,56 +106,50 @@ EXAMPLES = ''' cidr: "192.168.100.0/26" gateway_ip: "192.168.100.32" name: "ansible_network_subnet_test" - dhcp_enable: True -''' + dhcp_enable: true +""" -RETURN = ''' - cidr: - description: - - Specifies the subnet CIDR block. The value must be within the VPC - CIDR block and be in CIDR format. The subnet mask cannot be - greater than 28. - type: str - returned: success - gateway_ip: - description: - - Specifies the gateway of the subnet. The value must be an IP - address in the subnet. - type: str - returned: success - name: - description: - - Specifies the subnet name. The value is a string of 1 to 64 - characters that can contain letters, digits, underscores C(_), - hyphens (-), and periods (.). - type: str - returned: success - vpc_id: - description: - - Specifies the ID of the VPC to which the subnet belongs. - type: str - returned: success - availability_zone: - description: - - Specifies the AZ to which the subnet belongs. - type: str - returned: success - dhcp_enable: - description: - - Specifies whether DHCP is enabled for the subnet. The value can - be true (enabled) or false(disabled), and default value is true. - If this parameter is set to false, newly created ECSs cannot - obtain IP addresses, and usernames and passwords cannot be - injected using Cloud-init. - type: bool - returned: success - dns_address: - description: - - Specifies the DNS server addresses for subnet. The address - in the head will be used first. - type: list - returned: success -''' +RETURN = r""" +cidr: + description: + - Specifies the subnet CIDR block. The value must be within the VPC CIDR block and be in CIDR format. The subnet mask + cannot be greater than 28. 
+ type: str + returned: success +gateway_ip: + description: + - Specifies the gateway of the subnet. The value must be an IP address in the subnet. + type: str + returned: success +name: + description: + - Specifies the subnet name. The value is a string of 1 to 64 characters that can contain letters, digits, underscores + (V(_)), hyphens (V(-)), and periods (V(.)). + type: str + returned: success +vpc_id: + description: + - Specifies the ID of the VPC to which the subnet belongs. + type: str + returned: success +availability_zone: + description: + - Specifies the AZ to which the subnet belongs. + type: str + returned: success +dhcp_enable: + description: + - Specifies whether DHCP is enabled for the subnet. The value can be true (enabled) or false(disabled), and default value + is true. If this parameter is set to false, newly created ECSs cannot obtain IP addresses, and usernames and passwords + cannot be injected using Cloud-init. + type: bool + returned: success +dns_address: + description: + - Specifies the DNS server addresses for subnet. The address in the head is used first. 
+ type: list + returned: success +""" from ansible_collections.community.general.plugins.module_utils.hwc_utils import ( Config, HwcClientException, HwcClientException404, HwcModule, @@ -433,8 +423,7 @@ def async_wait_create(config, result, client, timeout): path_parameters = { "subnet_id": ["subnet", "id"], } - data = dict((key, navigate_value(result, path)) - for key, path in path_parameters.items()) + data = {key: navigate_value(result, path) for key, path in path_parameters.items()} url = build_path(module, "subnets/{subnet_id}", data) @@ -531,8 +520,7 @@ def async_wait_update(config, result, client, timeout): path_parameters = { "subnet_id": ["subnet", "id"], } - data = dict((key, navigate_value(result, path)) - for key, path in path_parameters.items()) + data = {key: navigate_value(result, path) for key, path in path_parameters.items()} url = build_path(module, "subnets/{subnet_id}", data) diff --git a/plugins/modules/storage/ibm/ibm_sa_domain.py b/plugins/modules/ibm_sa_domain.py similarity index 51% rename from plugins/modules/storage/ibm/ibm_sa_domain.py rename to plugins/modules/ibm_sa_domain.py index 9c5e6c50c8..f377bce761 100644 --- a/plugins/modules/storage/ibm/ibm_sa_domain.py +++ b/plugins/modules/ibm_sa_domain.py @@ -1,95 +1,97 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- -# Copyright: (c) 2018, IBM CORPORATION +# Copyright (c) 2018, IBM CORPORATION # Author(s): Tzur Eliyahu # -# GNU General Public License v3.0+ (see COPYING or -# https://www.gnu.org/licenses/gpl-3.0.txt) +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: ibm_sa_domain short_description: Manages domains on IBM Spectrum Accelerate Family storage systems description: - - "This module can be used to 
add domains to or removes them from IBM Spectrum Accelerate Family storage systems." + - This module can be used to add domains to or removes them from IBM Spectrum Accelerate Family storage systems. +attributes: + check_mode: + support: none + diff_mode: + support: none options: - domain: - description: - - Name of the domain to be managed. - required: true - type: str - state: - description: - - The desired state of the domain. - default: "present" - choices: [ "present", "absent" ] - type: str - ldap_id: - description: - - ldap id to add to the domain. - required: false - type: str - size: - description: - - Size of the domain. - required: false - type: str - hard_capacity: - description: - - Hard capacity of the domain. - required: false - type: str - soft_capacity: - description: - - Soft capacity of the domain. - required: false - type: str - max_cgs: - description: - - Number of max cgs. - required: false - type: str - max_dms: - description: - - Number of max dms. - required: false - type: str - max_mirrors: - description: - - Number of max_mirrors. - required: false - type: str - max_pools: - description: - - Number of max_pools. - required: false - type: str - max_volumes: - description: - - Number of max_volumes. - required: false - type: str - perf_class: - description: - - Add the domain to a performance class. - required: false - type: str + domain: + description: + - Name of the domain to be managed. + required: true + type: str + state: + description: + - The desired state of the domain. + default: "present" + choices: ["present", "absent"] + type: str + ldap_id: + description: + - LDAP ID to add to the domain. + required: false + type: str + size: + description: + - Size of the domain. + required: false + type: str + hard_capacity: + description: + - Hard capacity of the domain. + required: false + type: str + soft_capacity: + description: + - Soft capacity of the domain. 
+ required: false + type: str + max_cgs: + description: + - Number of max cgs. + required: false + type: str + max_dms: + description: + - Number of max dms. + required: false + type: str + max_mirrors: + description: + - Number of max_mirrors. + required: false + type: str + max_pools: + description: + - Number of max_pools. + required: false + type: str + max_volumes: + description: + - Number of max_volumes. + required: false + type: str + perf_class: + description: + - Add the domain to a performance class. + required: false + type: str extends_documentation_fragment: -- community.general.ibm_storage - + - community.general.ibm_storage + - community.general.attributes author: - - Tzur Eliyahu (@tzure) -''' + - Tzur Eliyahu (@tzure) +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: Define new domain. community.general.ibm_sa_domain: domain: domain_name @@ -106,14 +108,14 @@ EXAMPLES = ''' username: admin password: secret endpoints: hostdev-system -''' -RETURN = ''' +""" +RETURN = r""" msg: - description: module return status. - returned: as needed - type: str - sample: "domain 'domain_name' created successfully." -''' + description: Module return status. + returned: as needed + type: str + sample: "domain 'domain_name' created successfully." 
+""" from ansible.module_utils.basic import AnsibleModule from ansible_collections.community.general.plugins.module_utils.ibm_sa_utils import execute_pyxcli_command, \ diff --git a/plugins/modules/storage/ibm/ibm_sa_host.py b/plugins/modules/ibm_sa_host.py similarity index 54% rename from plugins/modules/storage/ibm/ibm_sa_host.py rename to plugins/modules/ibm_sa_host.py index 27a7287f8a..17615390f0 100644 --- a/plugins/modules/storage/ibm/ibm_sa_host.py +++ b/plugins/modules/ibm_sa_host.py @@ -1,68 +1,68 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- # Copyright (C) 2018 IBM CORPORATION # Author(s): Tzur Eliyahu # -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: ibm_sa_host -short_description: Adds hosts to or removes them from IBM Spectrum Accelerate Family storage systems. +short_description: Adds hosts to or removes them from IBM Spectrum Accelerate Family storage systems description: - - "This module adds hosts to or removes them from IBM Spectrum Accelerate Family storage systems." + - This module adds hosts to or removes them from IBM Spectrum Accelerate Family storage systems. +attributes: + check_mode: + support: none + diff_mode: + support: none options: - host: - description: - - Host name. - required: true - type: str - state: - description: - - Host state. - default: "present" - choices: [ "present", "absent" ] - type: str - cluster: - description: - - The name of the cluster to include the host. - required: false - type: str - domain: - description: - - The domains the cluster will be attached to. - To include more than one domain, - separate domain names with commas. 
- To include all existing domains, use an asterisk ("*"). - required: false - type: str - iscsi_chap_name: - description: - - The host's CHAP name identifier - required: false - type: str - iscsi_chap_secret: - description: - - The password of the initiator used to - authenticate to the system when CHAP is enable - required: false - type: str + host: + description: + - Host name. + required: true + type: str + state: + description: + - Host state. + default: "present" + choices: ["present", "absent"] + type: str + cluster: + description: + - The name of the cluster to include the host. + required: false + type: str + domain: + description: + - The domains the cluster is attached to. To include more than one domain, separate domain names with commas. To include + all existing domains, use an asterisk (V(*)). + required: false + type: str + iscsi_chap_name: + description: + - The host's CHAP name identifier. + required: false + type: str + iscsi_chap_secret: + description: + - The password of the initiator used to authenticate to the system when CHAP is enable. + required: false + type: str extends_documentation_fragment: -- community.general.ibm_storage - + - community.general.ibm_storage + - community.general.attributes author: - - Tzur Eliyahu (@tzure) -''' + - Tzur Eliyahu (@tzure) +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: Define new host. 
community.general.ibm_sa_host: host: host_name @@ -78,9 +78,9 @@ EXAMPLES = ''' username: admin password: secret endpoints: hostdev-system -''' -RETURN = ''' -''' +""" +RETURN = r""" +""" from ansible.module_utils.basic import AnsibleModule from ansible_collections.community.general.plugins.module_utils.ibm_sa_utils import execute_pyxcli_command, \ diff --git a/plugins/modules/storage/ibm/ibm_sa_host_ports.py b/plugins/modules/ibm_sa_host_ports.py similarity index 69% rename from plugins/modules/storage/ibm/ibm_sa_host_ports.py rename to plugins/modules/ibm_sa_host_ports.py index 32daa9f3c7..4c5b2b2d04 100644 --- a/plugins/modules/storage/ibm/ibm_sa_host_ports.py +++ b/plugins/modules/ibm_sa_host_ports.py @@ -1,59 +1,62 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- # Copyright (C) 2018 IBM CORPORATION # Author(s): Tzur Eliyahu # -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: ibm_sa_host_ports -short_description: Add host ports on IBM Spectrum Accelerate Family storage systems. +short_description: Add host ports on IBM Spectrum Accelerate Family storage systems description: - - "This module adds ports to or removes them from the hosts - on IBM Spectrum Accelerate Family storage systems." + - This module adds ports to or removes them from the hosts on IBM Spectrum Accelerate Family storage systems. +attributes: + check_mode: + support: none + diff_mode: + support: none options: - host: - description: - - Host name. - required: true - type: str - state: - description: - - Host ports state. 
- default: "present" - choices: [ "present", "absent" ] - type: str - iscsi_name: - description: - - iSCSI initiator name. - required: false - type: str - fcaddress: - description: - - Fiber channel address. - required: false - type: str - num_of_visible_targets: - description: - - Number of visible targets. - required: false - type: str + host: + description: + - Host name. + required: true + type: str + state: + description: + - Host ports state. + default: "present" + choices: ["present", "absent"] + type: str + iscsi_name: + description: + - The iSCSI initiator name. + required: false + type: str + fcaddress: + description: + - Fiber channel address. + required: false + type: str + num_of_visible_targets: + description: + - Number of visible targets. + required: false + type: str extends_documentation_fragment: -- community.general.ibm_storage + - community.general.ibm_storage + - community.general.attributes author: - - Tzur Eliyahu (@tzure) -''' + - Tzur Eliyahu (@tzure) +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: Add ports for host. 
community.general.ibm_sa_host_ports: host: test_host @@ -71,10 +74,9 @@ EXAMPLES = ''' password: secret endpoints: hostdev-system state: absent - -''' -RETURN = ''' -''' +""" +RETURN = r""" +""" from ansible.module_utils.basic import AnsibleModule from ansible_collections.community.general.plugins.module_utils.ibm_sa_utils import (execute_pyxcli_command, connect_ssl, diff --git a/plugins/modules/storage/ibm/ibm_sa_pool.py b/plugins/modules/ibm_sa_pool.py similarity index 60% rename from plugins/modules/storage/ibm/ibm_sa_pool.py rename to plugins/modules/ibm_sa_pool.py index 67c963ace1..bb7102fa71 100644 --- a/plugins/modules/storage/ibm/ibm_sa_pool.py +++ b/plugins/modules/ibm_sa_pool.py @@ -1,64 +1,67 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- # Copyright (C) 2018 IBM CORPORATION # Author(s): Tzur Eliyahu # -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: ibm_sa_pool -short_description: Handles pools on IBM Spectrum Accelerate Family storage systems. +short_description: Handles pools on IBM Spectrum Accelerate Family storage systems description: - - "This module creates or deletes pools to be used on IBM Spectrum Accelerate Family storage systems" + - This module creates or deletes pools to be used on IBM Spectrum Accelerate Family storage systems. +attributes: + check_mode: + support: none + diff_mode: + support: none options: - pool: - description: - - Pool name. - required: true - type: str - state: - description: - - Pool state. 
- default: "present" - choices: [ "present", "absent" ] - type: str - size: - description: - - Pool size in GB - required: false - type: str - snapshot_size: - description: - - Pool snapshot size in GB - required: false - type: str - domain: - description: - - Adds the pool to the specified domain. - required: false - type: str - perf_class: - description: - - Assigns a perf_class to the pool. - required: false - type: str + pool: + description: + - Pool name. + required: true + type: str + state: + description: + - Pool state. + default: "present" + choices: ["present", "absent"] + type: str + size: + description: + - Pool size in GB. + required: false + type: str + snapshot_size: + description: + - Pool snapshot size in GB. + required: false + type: str + domain: + description: + - Adds the pool to the specified domain. + required: false + type: str + perf_class: + description: + - Assigns a perf_class to the pool. + required: false + type: str extends_documentation_fragment: -- community.general.ibm_storage - + - community.general.ibm_storage + - community.general.attributes author: - - Tzur Eliyahu (@tzure) -''' + - Tzur Eliyahu (@tzure) +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: Create new pool. 
community.general.ibm_sa_pool: name: pool_name @@ -75,9 +78,9 @@ EXAMPLES = ''' username: admin password: secret endpoints: hostdev-system -''' -RETURN = ''' -''' +""" +RETURN = r""" +""" from ansible.module_utils.basic import AnsibleModule from ansible_collections.community.general.plugins.module_utils.ibm_sa_utils import execute_pyxcli_command, \ diff --git a/plugins/modules/storage/ibm/ibm_sa_vol.py b/plugins/modules/ibm_sa_vol.py similarity index 66% rename from plugins/modules/storage/ibm/ibm_sa_vol.py rename to plugins/modules/ibm_sa_vol.py index 7820d26828..48450084e2 100644 --- a/plugins/modules/storage/ibm/ibm_sa_vol.py +++ b/plugins/modules/ibm_sa_vol.py @@ -1,54 +1,57 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- # Copyright (C) 2018 IBM CORPORATION # Author(s): Tzur Eliyahu # -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: ibm_sa_vol -short_description: Handle volumes on IBM Spectrum Accelerate Family storage systems. +short_description: Handle volumes on IBM Spectrum Accelerate Family storage systems description: - - "This module creates or deletes volumes to be used on IBM Spectrum Accelerate Family storage systems." + - This module creates or deletes volumes to be used on IBM Spectrum Accelerate Family storage systems. +attributes: + check_mode: + support: none + diff_mode: + support: none options: - vol: - description: - - Volume name. - required: true - type: str - pool: - description: - - Volume pool. - required: false - type: str - state: - description: - - Volume state. 
- default: "present" - choices: [ "present", "absent" ] - type: str - size: - description: - - Volume size. - required: false - type: str + vol: + description: + - Volume name. + required: true + type: str + pool: + description: + - Volume pool. + required: false + type: str + state: + description: + - Volume state. + default: "present" + choices: ["present", "absent"] + type: str + size: + description: + - Volume size. + required: false + type: str extends_documentation_fragment: -- community.general.ibm_storage - + - community.general.ibm_storage + - community.general.attributes author: - - Tzur Eliyahu (@tzure) -''' + - Tzur Eliyahu (@tzure) +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: Create a new volume. community.general.ibm_sa_vol: vol: volume_name @@ -66,9 +69,9 @@ EXAMPLES = ''' username: admin password: secret endpoints: hostdev-system -''' -RETURN = ''' -''' +""" +RETURN = r""" +""" from ansible.module_utils.basic import AnsibleModule from ansible_collections.community.general.plugins.module_utils.ibm_sa_utils import execute_pyxcli_command, \ diff --git a/plugins/modules/storage/ibm/ibm_sa_vol_map.py b/plugins/modules/ibm_sa_vol_map.py similarity index 63% rename from plugins/modules/storage/ibm/ibm_sa_vol_map.py rename to plugins/modules/ibm_sa_vol_map.py index b449ba8de4..03c87ca37b 100644 --- a/plugins/modules/storage/ibm/ibm_sa_vol_map.py +++ b/plugins/modules/ibm_sa_vol_map.py @@ -1,68 +1,68 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- # Copyright (C) 2018 IBM CORPORATION # Author(s): Tzur Eliyahu # -# GNU General Public License v3.0+ (see COPYING or -# https://www.gnu.org/licenses/gpl-3.0.txt) +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: ibm_sa_vol_map 
-short_description: Handles volume mapping on IBM Spectrum Accelerate Family storage systems. +short_description: Handles volume mapping on IBM Spectrum Accelerate Family storage systems description: - - "This module maps volumes to or unmaps them from the hosts on - IBM Spectrum Accelerate Family storage systems." + - This module maps volumes to or unmaps them from the hosts on IBM Spectrum Accelerate Family storage systems. +attributes: + check_mode: + support: none + diff_mode: + support: none options: - vol: - description: - - Volume name. - required: true - type: str - state: - default: "present" - choices: [ "present", "absent" ] - description: - - When the state is present the volume is mapped. - When the state is absent, the volume is meant to be unmapped. - type: str + vol: + description: + - Volume name. + required: true + type: str + state: + default: "present" + choices: ["present", "absent"] + description: + - When the state is present the volume is mapped. When the state is absent, the volume is meant to be unmapped. + type: str - cluster: - description: - - Maps the volume to a cluster. - required: false - type: str - host: - description: - - Maps the volume to a host. - required: false - type: str - lun: - description: - - The LUN identifier. - required: false - type: str - override: - description: - - Overrides the existing volume mapping. - required: false - type: str + cluster: + description: + - Maps the volume to a cluster. + required: false + type: str + host: + description: + - Maps the volume to a host. + required: false + type: str + lun: + description: + - The LUN identifier. + required: false + type: str + override: + description: + - Overrides the existing volume mapping. 
+ required: false + type: str extends_documentation_fragment: -- community.general.ibm_storage - + - community.general.ibm_storage + - community.general.attributes author: - - Tzur Eliyahu (@tzure) -''' + - Tzur Eliyahu (@tzure) +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: Map volume to host. community.general.ibm_sa_vol_map: vol: volume_name @@ -90,9 +90,9 @@ EXAMPLES = ''' password: secret endpoints: hostdev-system state: absent -''' -RETURN = ''' -''' +""" +RETURN = r""" +""" from ansible.module_utils.basic import AnsibleModule from ansible_collections.community.general.plugins.module_utils.ibm_sa_utils import (execute_pyxcli_command, diff --git a/plugins/modules/monitoring/icinga2_feature.py b/plugins/modules/icinga2_feature.py similarity index 79% rename from plugins/modules/monitoring/icinga2_feature.py rename to plugins/modules/icinga2_feature.py index b59c0e11e4..6899fe2e23 100644 --- a/plugins/modules/monitoring/icinga2_feature.py +++ b/plugins/modules/icinga2_feature.py @@ -1,43 +1,48 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- # Copyright (c) 2016, Loic Blot # Copyright (c) 2018, Ansible Project # Sponsored by Infopro Digital. http://www.infopro-digital.com/ # Sponsored by E.T.A.I. http://www.etai.fr/ # -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: icinga2_feature short_description: Manage Icinga2 feature description: - - This module can be used to enable or disable an Icinga2 feature. + - This module can be used to enable or disable an Icinga2 feature. 
author: "Loic Blot (@nerzhul)" +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: none options: - name: - type: str - description: + name: + type: str + description: - This is the feature name to enable or disable. - required: True - state: - type: str - description: - - If set to C(present) and feature is disabled, then feature is enabled. - - If set to C(present) and feature is already enabled, then nothing is changed. - - If set to C(absent) and feature is enabled, then feature is disabled. - - If set to C(absent) and feature is already disabled, then nothing is changed. - choices: [ "present", "absent" ] - default: present -''' + required: true + state: + type: str + description: + - If set to V(present) and feature is disabled, then feature is enabled. + - If set to V(present) and feature is already enabled, then nothing is changed. + - If set to V(absent) and feature is enabled, then feature is disabled. + - If set to V(absent) and feature is already disabled, then nothing is changed. + choices: ["present", "absent"] + default: present +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: Enable ido-pgsql feature community.general.icinga2_feature: name: ido-pgsql @@ -47,11 +52,11 @@ EXAMPLES = ''' community.general.icinga2_feature: name: api state: absent -''' +""" -RETURN = ''' +RETURN = r""" # -''' +""" import re from ansible.module_utils.basic import AnsibleModule diff --git a/plugins/modules/monitoring/icinga2_host.py b/plugins/modules/icinga2_host.py similarity index 75% rename from plugins/modules/monitoring/icinga2_host.py rename to plugins/modules/icinga2_host.py index b4c4cdbcfb..39a7b48a6d 100644 --- a/plugins/modules/monitoring/icinga2_host.py +++ b/plugins/modules/icinga2_host.py @@ -1,74 +1,74 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- # This module is proudly sponsored by CGI (www.cgi.com) and # KPN (www.kpn.com). 
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# Copyright (c) Ansible project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: icinga2_host short_description: Manage a host in Icinga2 description: - - "Add or remove a host to Icinga2 through the API." - - "See U(https://www.icinga.com/docs/icinga2/latest/doc/12-icinga2-api/)" + - Add or remove a host to Icinga2 through the API. + - See U(https://www.icinga.com/docs/icinga2/latest/doc/12-icinga2-api/). author: "Jurgen Brand (@t794104)" +attributes: + check_mode: + support: full + diff_mode: + support: none options: url: type: str description: - - HTTP, HTTPS, or FTP URL in the form (http|https|ftp)://[user[:pass]]@host.domain[:port]/path + - HTTP, HTTPS, or FTP URL in the form V((http|https|ftp\)://[user[:pass]]@host.domain[:port]/path). use_proxy: description: - - If C(no), it will not use a proxy, even if one is defined in - an environment variable on the target hosts. + - If V(false), it does not use a proxy, even if one is defined in an environment variable on the target hosts. type: bool - default: 'yes' + default: true validate_certs: description: - - If C(no), SSL certificates will not be validated. This should only be used - on personally controlled sites using self-signed certificates. + - If V(false), SSL certificates are not validated. This should only be used on personally controlled sites using self-signed + certificates. type: bool - default: 'yes' + default: true url_username: type: str description: - The username for use in HTTP basic authentication. - - This parameter can be used without C(url_password) for sites that allow empty passwords. 
+ - This parameter can be used without O(url_password) for sites that allow empty passwords. url_password: type: str description: - - The password for use in HTTP basic authentication. - - If the C(url_username) parameter is not specified, the C(url_password) parameter will not be used. + - The password for use in HTTP basic authentication. + - If the O(url_username) parameter is not specified, the O(url_password) parameter is not used. force_basic_auth: description: - - httplib2, the library used by the uri module only sends authentication information when a webservice - responds to an initial request with a 401 status. Since some basic auth services do not properly - send a 401, logins will fail. This option forces the sending of the Basic authentication header - upon initial request. + - C(httplib2), the library used by Ansible's HTTP request code only sends authentication information when a webservice + responds to an initial request with a 401 status. Since some basic auth services do not properly send a 401, logins + may fail. This option forces the sending of the Basic authentication header upon initial request. type: bool - default: 'no' + default: false client_cert: type: path description: - - PEM formatted certificate chain file to be used for SSL client - authentication. This file can also include the key as well, and if - the key is included, C(client_key) is not required. + - PEM formatted certificate chain file to be used for SSL client authentication. This file can also include the key + as well, and if the key is included, O(client_key) is not required. client_key: type: path description: - - PEM formatted file that contains your private key to be used for SSL - client authentication. If C(client_cert) contains both the certificate - and key, this option is not required. + - PEM formatted file that contains your private key to be used for SSL client authentication. 
If O(client_cert) contains + both the certificate and key, this option is not required. state: type: str description: - Apply feature state. - choices: [ "present", "absent" ] + choices: ["present", "absent"] default: present name: type: str @@ -94,21 +94,22 @@ options: type: str description: - The name used to display the host. - - If not specified, it defaults to the value of the I(name) parameter. + - If not specified, it defaults to the value of the O(name) parameter. ip: type: str description: - The IP address of the host. - required: true + - This is no longer required since community.general 8.0.0. variables: type: dict description: - Dictionary of variables. extends_documentation_fragment: - - url -''' + - ansible.builtin.url + - community.general.attributes +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: Add host to icinga community.general.icinga2_host: url: "https://icinga2.example.com" @@ -120,21 +121,20 @@ EXAMPLES = ''' variables: foo: "bar" delegate_to: 127.0.0.1 -''' +""" -RETURN = ''' +RETURN = r""" name: - description: The name used to create, modify or delete the host - type: str - returned: always + description: The name used to create, modify or delete the host. + type: str + returned: always data: - description: The data structure used for create, modify or delete of the host - type: dict - returned: always -''' + description: The data structure used for create, modify or delete of the host. 
+ type: dict + returned: always +""" import json -import os from ansible.module_utils.basic import AnsibleModule from ansible.module_utils.urls import fetch_url, url_argument_spec @@ -233,11 +233,11 @@ def main(): state=dict(default="present", choices=["absent", "present"]), name=dict(required=True, aliases=['host']), zone=dict(), - template=dict(default=None), + template=dict(), check_command=dict(default="hostalive"), - display_name=dict(default=None), - ip=dict(required=True), - variables=dict(type='dict', default=None), + display_name=dict(), + ip=dict(), + variables=dict(type='dict'), ) # Define the main module @@ -249,9 +249,9 @@ def main(): state = module.params["state"] name = module.params["name"] zone = module.params["zone"] - template = [name] + template = [] if module.params["template"]: - template.append(module.params["template"]) + template = [module.params["template"]] check_command = module.params["check_command"] ip = module.params["ip"] display_name = module.params["display_name"] @@ -266,20 +266,16 @@ def main(): module.fail_json(msg="unable to connect to Icinga. Exception message: %s" % (e)) data = { + 'templates': template, 'attrs': { 'address': ip, 'display_name': display_name, 'check_command': check_command, 'zone': zone, - 'vars': { - 'made_by': "ansible", - }, - 'templates': template, + 'vars.made_by': "ansible" } } - - if variables: - data['attrs']['vars'].update(variables) + data['attrs'].update({'vars.' 
+ key: value for key, value in variables.items()}) changed = False if icinga.exists(name): @@ -301,7 +297,7 @@ def main(): module.exit_json(changed=False, name=name, data=data) # Template attribute is not allowed in modification - del data['attrs']['templates'] + del data['templates'] ret = icinga.modify(name, data) diff --git a/plugins/modules/identity/ipa/ipa_pwpolicy.py b/plugins/modules/identity/ipa/ipa_pwpolicy.py deleted file mode 100644 index 0f9b141b4c..0000000000 --- a/plugins/modules/identity/ipa/ipa_pwpolicy.py +++ /dev/null @@ -1,255 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# Copyright: (c) 2020, Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - -DOCUMENTATION = r''' ---- -module: ipa_pwpolicy -author: Adralioh (@adralioh) -short_description: Manage FreeIPA password policies -description: -- Add, modify, or delete a password policy using the IPA API. -version_added: 2.0.0 -options: - group: - description: - - Name of the group that the policy applies to. - - If omitted, the global policy is used. - aliases: ["name"] - type: str - state: - description: State to ensure. - default: "present" - choices: ["absent", "present"] - type: str - maxpwdlife: - description: Maximum password lifetime (in days). - type: str - minpwdlife: - description: Minimum password lifetime (in hours). - type: str - historylength: - description: - - Number of previous passwords that are remembered. - - Users cannot reuse remembered passwords. - type: str - minclasses: - description: Minimum number of character classes. - type: str - minlength: - description: Minimum password length. - type: str - priority: - description: - - Priority of the policy. - - High number means lower priority. - - Required when C(cn) is not the global policy. 
- type: str - maxfailcount: - description: Maximum number of consecutive failures before lockout. - type: str - failinterval: - description: Period (in seconds) after which the number of failed login attempts is reset. - type: str - lockouttime: - description: Period (in seconds) for which users are locked out. - type: str -extends_documentation_fragment: -- community.general.ipa.documentation -notes: -- Supports C(check_mode). -''' - -EXAMPLES = r''' -- name: Modify the global password policy - community.general.ipa_pwpolicy: - maxpwdlife: '90' - minpwdlife: '1' - historylength: '8' - minclasses: '3' - minlength: '16' - maxfailcount: '6' - failinterval: '60' - lockouttime: '600' - ipa_host: ipa.example.com - ipa_user: admin - ipa_pass: topsecret - -- name: Ensure the password policy for the group admins is present - community.general.ipa_pwpolicy: - group: admins - state: present - maxpwdlife: '60' - minpwdlife: '24' - historylength: '16' - minclasses: '4' - priority: '10' - maxfailcount: '4' - failinterval: '600' - lockouttime: '1200' - ipa_host: ipa.example.com - ipa_user: admin - ipa_pass: topsecret - -- name: Ensure that the group sysops does not have a unique password policy - community.general.ipa_pwpolicy: - group: sysops - state: absent - ipa_host: ipa.example.com - ipa_user: admin - ipa_pass: topsecret -''' - -RETURN = r''' -pwpolicy: - description: Password policy as returned by IPA API. 
- returned: always - type: dict - sample: - cn: ['admins'] - cospriority: ['10'] - dn: 'cn=admins,cn=EXAMPLE.COM,cn=kerberos,dc=example,dc=com' - krbmaxpwdlife: ['60'] - krbminpwdlife: ['24'] - krbpwdfailurecountinterval: ['600'] - krbpwdhistorylength: ['16'] - krbpwdlockoutduration: ['1200'] - krbpwdmaxfailure: ['4'] - krbpwdmindiffchars: ['4'] - objectclass: ['top', 'nscontainer', 'krbpwdpolicy'] -''' - -import traceback - -from ansible.module_utils.basic import AnsibleModule -from ansible_collections.community.general.plugins.module_utils.ipa import IPAClient, ipa_argument_spec -from ansible.module_utils.common.text.converters import to_native - - -class PwPolicyIPAClient(IPAClient): - '''The global policy will be selected when `name` is `None`''' - def __init__(self, module, host, port, protocol): - super(PwPolicyIPAClient, self).__init__(module, host, port, protocol) - - def pwpolicy_find(self, name): - if name is None: - # Manually set the cn to the global policy because pwpolicy_find will return a random - # different policy if cn is `None` - name = 'global_policy' - return self._post_json(method='pwpolicy_find', name=None, item={'all': True, 'cn': name}) - - def pwpolicy_add(self, name, item): - return self._post_json(method='pwpolicy_add', name=name, item=item) - - def pwpolicy_mod(self, name, item): - return self._post_json(method='pwpolicy_mod', name=name, item=item) - - def pwpolicy_del(self, name): - return self._post_json(method='pwpolicy_del', name=name) - - -def get_pwpolicy_dict(maxpwdlife=None, minpwdlife=None, historylength=None, minclasses=None, - minlength=None, priority=None, maxfailcount=None, failinterval=None, - lockouttime=None): - pwpolicy = {} - if maxpwdlife is not None: - pwpolicy['krbmaxpwdlife'] = maxpwdlife - if minpwdlife is not None: - pwpolicy['krbminpwdlife'] = minpwdlife - if historylength is not None: - pwpolicy['krbpwdhistorylength'] = historylength - if minclasses is not None: - pwpolicy['krbpwdmindiffchars'] = minclasses - 
if minlength is not None: - pwpolicy['krbpwdminlength'] = minlength - if priority is not None: - pwpolicy['cospriority'] = priority - if maxfailcount is not None: - pwpolicy['krbpwdmaxfailure'] = maxfailcount - if failinterval is not None: - pwpolicy['krbpwdfailurecountinterval'] = failinterval - if lockouttime is not None: - pwpolicy['krbpwdlockoutduration'] = lockouttime - - return pwpolicy - - -def get_pwpolicy_diff(client, ipa_pwpolicy, module_pwpolicy): - return client.get_diff(ipa_data=ipa_pwpolicy, module_data=module_pwpolicy) - - -def ensure(module, client): - state = module.params['state'] - name = module.params['group'] - - module_pwpolicy = get_pwpolicy_dict(maxpwdlife=module.params.get('maxpwdlife'), - minpwdlife=module.params.get('minpwdlife'), - historylength=module.params.get('historylength'), - minclasses=module.params.get('minclasses'), - minlength=module.params.get('minlength'), - priority=module.params.get('priority'), - maxfailcount=module.params.get('maxfailcount'), - failinterval=module.params.get('failinterval'), - lockouttime=module.params.get('lockouttime')) - - ipa_pwpolicy = client.pwpolicy_find(name=name) - - changed = False - if state == 'present': - if not ipa_pwpolicy: - changed = True - if not module.check_mode: - ipa_pwpolicy = client.pwpolicy_add(name=name, item=module_pwpolicy) - else: - diff = get_pwpolicy_diff(client, ipa_pwpolicy, module_pwpolicy) - if len(diff) > 0: - changed = True - if not module.check_mode: - ipa_pwpolicy = client.pwpolicy_mod(name=name, item=module_pwpolicy) - else: - if ipa_pwpolicy: - changed = True - if not module.check_mode: - client.pwpolicy_del(name=name) - - return changed, ipa_pwpolicy - - -def main(): - argument_spec = ipa_argument_spec() - argument_spec.update(group=dict(type='str', aliases=['name']), - state=dict(type='str', default='present', choices=['present', 'absent']), - maxpwdlife=dict(type='str'), - minpwdlife=dict(type='str'), - historylength=dict(type='str'), - 
minclasses=dict(type='str'), - minlength=dict(type='str'), - priority=dict(type='str'), - maxfailcount=dict(type='str'), - failinterval=dict(type='str'), - lockouttime=dict(type='str')) - - module = AnsibleModule(argument_spec=argument_spec, - supports_check_mode=True) - - client = PwPolicyIPAClient(module=module, - host=module.params['ipa_host'], - port=module.params['ipa_port'], - protocol=module.params['ipa_prot']) - - try: - client.login(username=module.params['ipa_user'], - password=module.params['ipa_pass']) - changed, pwpolicy = ensure(module, client) - except Exception as e: - module.fail_json(msg=to_native(e), exception=traceback.format_exc()) - - module.exit_json(changed=changed, pwpolicy=pwpolicy) - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/identity/keycloak/keycloak_client.py b/plugins/modules/identity/keycloak/keycloak_client.py deleted file mode 100644 index 88268b3068..0000000000 --- a/plugins/modules/identity/keycloak/keycloak_client.py +++ /dev/null @@ -1,975 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Copyright (c) 2017, Eike Frost -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - -DOCUMENTATION = ''' ---- -module: keycloak_client - -short_description: Allows administration of Keycloak clients via Keycloak API - - -description: - - This module allows the administration of Keycloak clients via the Keycloak REST API. It - requires access to the REST API via OpenID Connect; the user connecting and the client being - used must have the requisite access rights. In a default Keycloak installation, admin-cli - and an admin user would work, as would a separate client definition with the scope tailored - to your needs and a user having the expected roles. 
- - - The names of module options are snake_cased versions of the camelCase ones found in the - Keycloak API and its documentation at U(https://www.keycloak.org/docs-api/8.0/rest-api/index.html). - Aliases are provided so camelCased versions can be used as well. - - - The Keycloak API does not always sanity check inputs e.g. you can set - SAML-specific settings on an OpenID Connect client for instance and vice versa. Be careful. - If you do not specify a setting, usually a sensible default is chosen. - -options: - state: - description: - - State of the client - - On C(present), the client will be created (or updated if it exists already). - - On C(absent), the client will be removed if it exists - choices: ['present', 'absent'] - default: 'present' - type: str - - realm: - description: - - The realm to create the client in. - type: str - default: master - - client_id: - description: - - Client id of client to be worked on. This is usually an alphanumeric name chosen by - you. Either this or I(id) is required. If you specify both, I(id) takes precedence. - This is 'clientId' in the Keycloak REST API. - aliases: - - clientId - type: str - - id: - description: - - Id of client to be worked on. This is usually an UUID. Either this or I(client_id) - is required. If you specify both, this takes precedence. - type: str - - name: - description: - - Name of the client (this is not the same as I(client_id)). - type: str - - description: - description: - - Description of the client in Keycloak. - type: str - - root_url: - description: - - Root URL appended to relative URLs for this client. - This is 'rootUrl' in the Keycloak REST API. - aliases: - - rootUrl - type: str - - admin_url: - description: - - URL to the admin interface of the client. - This is 'adminUrl' in the Keycloak REST API. 
- aliases: - - adminUrl - type: str - - base_url: - description: - - Default URL to use when the auth server needs to redirect or link back to the client - This is 'baseUrl' in the Keycloak REST API. - aliases: - - baseUrl - type: str - - enabled: - description: - - Is this client enabled or not? - type: bool - - client_authenticator_type: - description: - - How do clients authenticate with the auth server? Either C(client-secret) or - C(client-jwt) can be chosen. When using C(client-secret), the module parameter - I(secret) can set it, while for C(client-jwt), you can use the keys C(use.jwks.url), - C(jwks.url), and C(jwt.credential.certificate) in the I(attributes) module parameter - to configure its behavior. - This is 'clientAuthenticatorType' in the Keycloak REST API. - choices: ['client-secret', 'client-jwt'] - aliases: - - clientAuthenticatorType - type: str - - secret: - description: - - When using I(client_authenticator_type) C(client-secret) (the default), you can - specify a secret here (otherwise one will be generated if it does not exit). If - changing this secret, the module will not register a change currently (but the - changed secret will be saved). - type: str - - registration_access_token: - description: - - The registration access token provides access for clients to the client registration - service. - This is 'registrationAccessToken' in the Keycloak REST API. - aliases: - - registrationAccessToken - type: str - - default_roles: - description: - - list of default roles for this client. If the client roles referenced do not exist - yet, they will be created. - This is 'defaultRoles' in the Keycloak REST API. - aliases: - - defaultRoles - type: list - elements: str - - redirect_uris: - description: - - Acceptable redirect URIs for this client. - This is 'redirectUris' in the Keycloak REST API. - aliases: - - redirectUris - type: list - elements: str - - web_origins: - description: - - List of allowed CORS origins. 
- This is 'webOrigins' in the Keycloak REST API. - aliases: - - webOrigins - type: list - elements: str - - not_before: - description: - - Revoke any tokens issued before this date for this client (this is a UNIX timestamp). - This is 'notBefore' in the Keycloak REST API. - type: int - aliases: - - notBefore - - bearer_only: - description: - - The access type of this client is bearer-only. - This is 'bearerOnly' in the Keycloak REST API. - aliases: - - bearerOnly - type: bool - - consent_required: - description: - - If enabled, users have to consent to client access. - This is 'consentRequired' in the Keycloak REST API. - aliases: - - consentRequired - type: bool - - standard_flow_enabled: - description: - - Enable standard flow for this client or not (OpenID connect). - This is 'standardFlowEnabled' in the Keycloak REST API. - aliases: - - standardFlowEnabled - type: bool - - implicit_flow_enabled: - description: - - Enable implicit flow for this client or not (OpenID connect). - This is 'implicitFlowEnabled' in the Keycloak REST API. - aliases: - - implicitFlowEnabled - type: bool - - direct_access_grants_enabled: - description: - - Are direct access grants enabled for this client or not (OpenID connect). - This is 'directAccessGrantsEnabled' in the Keycloak REST API. - aliases: - - directAccessGrantsEnabled - type: bool - - service_accounts_enabled: - description: - - Are service accounts enabled for this client or not (OpenID connect). - This is 'serviceAccountsEnabled' in the Keycloak REST API. - aliases: - - serviceAccountsEnabled - type: bool - - authorization_services_enabled: - description: - - Are authorization services enabled for this client or not (OpenID connect). - This is 'authorizationServicesEnabled' in the Keycloak REST API. - aliases: - - authorizationServicesEnabled - type: bool - - public_client: - description: - - Is the access type for this client public or not. - This is 'publicClient' in the Keycloak REST API. 
- aliases: - - publicClient - type: bool - - frontchannel_logout: - description: - - Is frontchannel logout enabled for this client or not. - This is 'frontchannelLogout' in the Keycloak REST API. - aliases: - - frontchannelLogout - type: bool - - protocol: - description: - - Type of client (either C(openid-connect) or C(saml). - type: str - choices: ['openid-connect', 'saml'] - - full_scope_allowed: - description: - - Is the "Full Scope Allowed" feature set for this client or not. - This is 'fullScopeAllowed' in the Keycloak REST API. - aliases: - - fullScopeAllowed - type: bool - - node_re_registration_timeout: - description: - - Cluster node re-registration timeout for this client. - This is 'nodeReRegistrationTimeout' in the Keycloak REST API. - type: int - aliases: - - nodeReRegistrationTimeout - - registered_nodes: - description: - - dict of registered cluster nodes (with C(nodename) as the key and last registration - time as the value). - This is 'registeredNodes' in the Keycloak REST API. - type: dict - aliases: - - registeredNodes - - client_template: - description: - - Client template to use for this client. If it does not exist this field will silently - be dropped. - This is 'clientTemplate' in the Keycloak REST API. - type: str - aliases: - - clientTemplate - - use_template_config: - description: - - Whether or not to use configuration from the I(client_template). - This is 'useTemplateConfig' in the Keycloak REST API. - aliases: - - useTemplateConfig - type: bool - - use_template_scope: - description: - - Whether or not to use scope configuration from the I(client_template). - This is 'useTemplateScope' in the Keycloak REST API. - aliases: - - useTemplateScope - type: bool - - use_template_mappers: - description: - - Whether or not to use mapper configuration from the I(client_template). - This is 'useTemplateMappers' in the Keycloak REST API. 
- aliases: - - useTemplateMappers - type: bool - - always_display_in_console: - description: - - Whether or not to display this client in account console, even if the - user does not have an active session. - aliases: - - alwaysDisplayInConsole - type: bool - version_added: 4.7.0 - - surrogate_auth_required: - description: - - Whether or not surrogate auth is required. - This is 'surrogateAuthRequired' in the Keycloak REST API. - aliases: - - surrogateAuthRequired - type: bool - - authorization_settings: - description: - - a data structure defining the authorization settings for this client. For reference, - please see the Keycloak API docs at U(https://www.keycloak.org/docs-api/8.0/rest-api/index.html#_resourceserverrepresentation). - This is 'authorizationSettings' in the Keycloak REST API. - type: dict - aliases: - - authorizationSettings - - authentication_flow_binding_overrides: - description: - - Override realm authentication flow bindings. - type: dict - aliases: - - authenticationFlowBindingOverrides - version_added: 3.4.0 - - default_client_scopes: - description: - - List of default client scopes. - aliases: - - defaultClientScopes - type: list - elements: str - version_added: 4.7.0 - - optional_client_scopes: - description: - - List of optional client scopes. - aliases: - - optionalClientScopes - type: list - elements: str - version_added: 4.7.0 - - protocol_mappers: - description: - - a list of dicts defining protocol mappers for this client. - This is 'protocolMappers' in the Keycloak REST API. - aliases: - - protocolMappers - type: list - elements: dict - suboptions: - consentRequired: - description: - - Specifies whether a user needs to provide consent to a client for this mapper to be active. - type: bool - - consentText: - description: - - The human-readable name of the consent the user is presented to accept. - type: str - - id: - description: - - Usually a UUID specifying the internal ID of this protocol mapper instance. 
- type: str - - name: - description: - - The name of this protocol mapper. - type: str - - protocol: - description: - - This is either C(openid-connect) or C(saml), this specifies for which protocol this protocol mapper. - is active. - choices: ['openid-connect', 'saml'] - type: str - - protocolMapper: - description: - - The Keycloak-internal name of the type of this protocol-mapper. While an exhaustive list is - impossible to provide since this may be extended through SPIs by the user of Keycloak, - by default Keycloak as of 3.4 ships with at least - - C(docker-v2-allow-all-mapper) - - C(oidc-address-mapper) - - C(oidc-full-name-mapper) - - C(oidc-group-membership-mapper) - - C(oidc-hardcoded-claim-mapper) - - C(oidc-hardcoded-role-mapper) - - C(oidc-role-name-mapper) - - C(oidc-script-based-protocol-mapper) - - C(oidc-sha256-pairwise-sub-mapper) - - C(oidc-usermodel-attribute-mapper) - - C(oidc-usermodel-client-role-mapper) - - C(oidc-usermodel-property-mapper) - - C(oidc-usermodel-realm-role-mapper) - - C(oidc-usersessionmodel-note-mapper) - - C(saml-group-membership-mapper) - - C(saml-hardcode-attribute-mapper) - - C(saml-hardcode-role-mapper) - - C(saml-role-list-mapper) - - C(saml-role-name-mapper) - - C(saml-user-attribute-mapper) - - C(saml-user-property-mapper) - - C(saml-user-session-note-mapper) - - An exhaustive list of available mappers on your installation can be obtained on - the admin console by going to Server Info -> Providers and looking under - 'protocol-mapper'. - type: str - - config: - description: - - Dict specifying the configuration options for the protocol mapper; the - contents differ depending on the value of I(protocolMapper) and are not documented - other than by the source of the mappers and its parent class(es). An example is given - below. It is easiest to obtain valid config values by dumping an already-existing - protocol mapper configuration through check-mode in the I(existing) field. 
- type: dict - - attributes: - description: - - A dict of further attributes for this client. This can contain various configuration - settings; an example is given in the examples section. While an exhaustive list of - permissible options is not available; possible options as of Keycloak 3.4 are listed below. The Keycloak - API does not validate whether a given option is appropriate for the protocol used; if specified - anyway, Keycloak will simply not use it. - type: dict - suboptions: - saml.authnstatement: - description: - - For SAML clients, boolean specifying whether or not a statement containing method and timestamp - should be included in the login response. - - saml.client.signature: - description: - - For SAML clients, boolean specifying whether a client signature is required and validated. - - saml.encrypt: - description: - - Boolean specifying whether SAML assertions should be encrypted with the client's public key. - - saml.force.post.binding: - description: - - For SAML clients, boolean specifying whether always to use POST binding for responses. - - saml.onetimeuse.condition: - description: - - For SAML clients, boolean specifying whether a OneTimeUse condition should be included in login responses. - - saml.server.signature: - description: - - Boolean specifying whether SAML documents should be signed by the realm. - - saml.server.signature.keyinfo.ext: - description: - - For SAML clients, boolean specifying whether REDIRECT signing key lookup should be optimized through inclusion - of the signing key id in the SAML Extensions element. - - saml.signature.algorithm: - description: - - Signature algorithm used to sign SAML documents. One of C(RSA_SHA256), C(RSA_SHA1), C(RSA_SHA512), or C(DSA_SHA1). - - saml.signing.certificate: - description: - - SAML signing key certificate, base64-encoded. - - saml.signing.private.key: - description: - - SAML signing key private key, base64-encoded. 
- - saml_assertion_consumer_url_post: - description: - - SAML POST Binding URL for the client's assertion consumer service (login responses). - - saml_assertion_consumer_url_redirect: - description: - - SAML Redirect Binding URL for the client's assertion consumer service (login responses). - - - saml_force_name_id_format: - description: - - For SAML clients, Boolean specifying whether to ignore requested NameID subject format and using the configured one instead. - - saml_name_id_format: - description: - - For SAML clients, the NameID format to use (one of C(username), C(email), C(transient), or C(persistent)) - - saml_signature_canonicalization_method: - description: - - SAML signature canonicalization method. This is one of four values, namely - C(http://www.w3.org/2001/10/xml-exc-c14n#) for EXCLUSIVE, - C(http://www.w3.org/2001/10/xml-exc-c14n#WithComments) for EXCLUSIVE_WITH_COMMENTS, - C(http://www.w3.org/TR/2001/REC-xml-c14n-20010315) for INCLUSIVE, and - C(http://www.w3.org/TR/2001/REC-xml-c14n-20010315#WithComments) for INCLUSIVE_WITH_COMMENTS. - - saml_single_logout_service_url_post: - description: - - SAML POST binding url for the client's single logout service. - - saml_single_logout_service_url_redirect: - description: - - SAML redirect binding url for the client's single logout service. - - user.info.response.signature.alg: - description: - - For OpenID-Connect clients, JWA algorithm for signed UserInfo-endpoint responses. One of C(RS256) or C(unsigned). - - request.object.signature.alg: - description: - - For OpenID-Connect clients, JWA algorithm which the client needs to use when sending - OIDC request object. One of C(any), C(none), C(RS256). - - use.jwks.url: - description: - - For OpenID-Connect clients, boolean specifying whether to use a JWKS URL to obtain client - public keys. - - jwks.url: - description: - - For OpenID-Connect clients, URL where client keys in JWK are stored. 
- - jwt.credential.certificate: - description: - - For OpenID-Connect clients, client certificate for validating JWT issued by - client and signed by its key, base64-encoded. - -extends_documentation_fragment: -- community.general.keycloak - -author: - - Eike Frost (@eikef) -''' - -EXAMPLES = ''' -- name: Create or update Keycloak client (minimal example), authentication with credentials - community.general.keycloak_client: - auth_keycloak_url: https://auth.example.com/auth - auth_realm: master - auth_username: USERNAME - auth_password: PASSWORD - client_id: test - state: present - delegate_to: localhost - - -- name: Create or update Keycloak client (minimal example), authentication with token - community.general.keycloak_client: - auth_client_id: admin-cli - auth_keycloak_url: https://auth.example.com/auth - auth_realm: master - token: TOKEN - client_id: test - state: present - delegate_to: localhost - - -- name: Delete a Keycloak client - community.general.keycloak_client: - auth_client_id: admin-cli - auth_keycloak_url: https://auth.example.com/auth - auth_realm: master - auth_username: USERNAME - auth_password: PASSWORD - client_id: test - state: absent - delegate_to: localhost - - -- name: Create or update a Keycloak client (with all the bells and whistles) - community.general.keycloak_client: - auth_client_id: admin-cli - auth_keycloak_url: https://auth.example.com/auth - auth_realm: master - auth_username: USERNAME - auth_password: PASSWORD - state: present - realm: master - client_id: test - id: d8b127a3-31f6-44c8-a7e4-4ab9a3e78d95 - name: this_is_a_test - description: Description of this wonderful client - root_url: https://www.example.com/ - admin_url: https://www.example.com/admin_url - base_url: basepath - enabled: True - client_authenticator_type: client-secret - secret: REALLYWELLKEPTSECRET - redirect_uris: - - https://www.example.com/* - - http://localhost:8888/ - web_origins: - - https://www.example.com/* - not_before: 1507825725 - bearer_only: 
False - consent_required: False - standard_flow_enabled: True - implicit_flow_enabled: False - direct_access_grants_enabled: False - service_accounts_enabled: False - authorization_services_enabled: False - public_client: False - frontchannel_logout: False - protocol: openid-connect - full_scope_allowed: false - node_re_registration_timeout: -1 - client_template: test - use_template_config: False - use_template_scope: false - use_template_mappers: no - always_display_in_console: true - registered_nodes: - node01.example.com: 1507828202 - registration_access_token: eyJWT_TOKEN - surrogate_auth_required: false - default_roles: - - test01 - - test02 - authentication_flow_binding_overrides: - browser: 4c90336b-bf1d-4b87-916d-3677ba4e5fbb - protocol_mappers: - - config: - access.token.claim: True - claim.name: "family_name" - id.token.claim: True - jsonType.label: String - user.attribute: lastName - userinfo.token.claim: True - consentRequired: True - consentText: "${familyName}" - name: family name - protocol: openid-connect - protocolMapper: oidc-usermodel-property-mapper - - config: - attribute.name: Role - attribute.nameformat: Basic - single: false - consentRequired: false - name: role list - protocol: saml - protocolMapper: saml-role-list-mapper - attributes: - saml.authnstatement: True - saml.client.signature: True - saml.force.post.binding: True - saml.server.signature: True - saml.signature.algorithm: RSA_SHA256 - saml.signing.certificate: CERTIFICATEHERE - saml.signing.private.key: PRIVATEKEYHERE - saml_force_name_id_format: False - saml_name_id_format: username - saml_signature_canonicalization_method: "http://www.w3.org/2001/10/xml-exc-c14n#" - user.info.response.signature.alg: RS256 - request.object.signature.alg: RS256 - use.jwks.url: true - jwks.url: JWKS_URL_FOR_CLIENT_AUTH_JWT - jwt.credential.certificate: JWT_CREDENTIAL_CERTIFICATE_FOR_CLIENT_AUTH - delegate_to: localhost -''' - -RETURN = ''' -msg: - description: Message as to what action was taken. 
- returned: always - type: str - sample: "Client testclient has been updated" - -proposed: - description: Representation of proposed client. - returned: always - type: dict - sample: { - clientId: "test" - } - -existing: - description: Representation of existing client (sample is truncated). - returned: always - type: dict - sample: { - "adminUrl": "http://www.example.com/admin_url", - "attributes": { - "request.object.signature.alg": "RS256", - } - } - -end_state: - description: Representation of client after module execution (sample is truncated). - returned: on success - type: dict - sample: { - "adminUrl": "http://www.example.com/admin_url", - "attributes": { - "request.object.signature.alg": "RS256", - } - } -''' - -from ansible_collections.community.general.plugins.module_utils.identity.keycloak.keycloak import KeycloakAPI, camel, \ - keycloak_argument_spec, get_token, KeycloakError -from ansible.module_utils.basic import AnsibleModule - - -def normalise_cr(clientrep, remove_ids=False): - """ Re-sorts any properties where the order so that diff's is minimised, and adds default values where appropriate so that the - the change detection is more effective. - - :param clientrep: the clientrep dict to be sanitized - :param remove_ids: If set to true, then the unique ID's of objects is removed to make the diff and checks for changed - not alert when the ID's of objects are not usually known, (e.g. 
for protocol_mappers) - :return: normalised clientrep dict - """ - # Avoid the dict passed in to be modified - clientrep = clientrep.copy() - - if 'attributes' in clientrep: - clientrep['attributes'] = list(sorted(clientrep['attributes'])) - - if 'redirectUris' in clientrep: - clientrep['redirectUris'] = list(sorted(clientrep['redirectUris'])) - - if 'protocolMappers' in clientrep: - clientrep['protocolMappers'] = sorted(clientrep['protocolMappers'], key=lambda x: (x.get('name'), x.get('protocol'), x.get('protocolMapper'))) - for mapper in clientrep['protocolMappers']: - if remove_ids: - mapper.pop('id', None) - - # Set to a default value. - mapper['consentRequired'] = mapper.get('consentRequired', False) - - return clientrep - - -def sanitize_cr(clientrep): - """ Removes probably sensitive details from a client representation. - - :param clientrep: the clientrep dict to be sanitized - :return: sanitized clientrep dict - """ - result = clientrep.copy() - if 'secret' in result: - result['secret'] = 'no_log' - if 'attributes' in result: - if 'saml.signing.private.key' in result['attributes']: - result['attributes']['saml.signing.private.key'] = 'no_log' - return normalise_cr(result) - - -def main(): - """ - Module execution - - :return: - """ - argument_spec = keycloak_argument_spec() - - protmapper_spec = dict( - consentRequired=dict(type='bool'), - consentText=dict(type='str'), - id=dict(type='str'), - name=dict(type='str'), - protocol=dict(type='str', choices=['openid-connect', 'saml']), - protocolMapper=dict(type='str'), - config=dict(type='dict'), - ) - - meta_args = dict( - state=dict(default='present', choices=['present', 'absent']), - realm=dict(type='str', default='master'), - - id=dict(type='str'), - client_id=dict(type='str', aliases=['clientId']), - name=dict(type='str'), - description=dict(type='str'), - root_url=dict(type='str', aliases=['rootUrl']), - admin_url=dict(type='str', aliases=['adminUrl']), - base_url=dict(type='str', aliases=['baseUrl']), - 
surrogate_auth_required=dict(type='bool', aliases=['surrogateAuthRequired']), - enabled=dict(type='bool'), - client_authenticator_type=dict(type='str', choices=['client-secret', 'client-jwt'], aliases=['clientAuthenticatorType']), - secret=dict(type='str', no_log=True), - registration_access_token=dict(type='str', aliases=['registrationAccessToken'], no_log=True), - default_roles=dict(type='list', elements='str', aliases=['defaultRoles']), - redirect_uris=dict(type='list', elements='str', aliases=['redirectUris']), - web_origins=dict(type='list', elements='str', aliases=['webOrigins']), - not_before=dict(type='int', aliases=['notBefore']), - bearer_only=dict(type='bool', aliases=['bearerOnly']), - consent_required=dict(type='bool', aliases=['consentRequired']), - standard_flow_enabled=dict(type='bool', aliases=['standardFlowEnabled']), - implicit_flow_enabled=dict(type='bool', aliases=['implicitFlowEnabled']), - direct_access_grants_enabled=dict(type='bool', aliases=['directAccessGrantsEnabled']), - service_accounts_enabled=dict(type='bool', aliases=['serviceAccountsEnabled']), - authorization_services_enabled=dict(type='bool', aliases=['authorizationServicesEnabled']), - public_client=dict(type='bool', aliases=['publicClient']), - frontchannel_logout=dict(type='bool', aliases=['frontchannelLogout']), - protocol=dict(type='str', choices=['openid-connect', 'saml']), - attributes=dict(type='dict'), - full_scope_allowed=dict(type='bool', aliases=['fullScopeAllowed']), - node_re_registration_timeout=dict(type='int', aliases=['nodeReRegistrationTimeout']), - registered_nodes=dict(type='dict', aliases=['registeredNodes']), - client_template=dict(type='str', aliases=['clientTemplate']), - use_template_config=dict(type='bool', aliases=['useTemplateConfig']), - use_template_scope=dict(type='bool', aliases=['useTemplateScope']), - use_template_mappers=dict(type='bool', aliases=['useTemplateMappers']), - always_display_in_console=dict(type='bool', 
aliases=['alwaysDisplayInConsole']), - authentication_flow_binding_overrides=dict(type='dict', aliases=['authenticationFlowBindingOverrides']), - protocol_mappers=dict(type='list', elements='dict', options=protmapper_spec, aliases=['protocolMappers']), - authorization_settings=dict(type='dict', aliases=['authorizationSettings']), - default_client_scopes=dict(type='list', elements='str', aliases=['defaultClientScopes']), - optional_client_scopes=dict(type='list', elements='str', aliases=['optionalClientScopes']), - ) - - argument_spec.update(meta_args) - - module = AnsibleModule(argument_spec=argument_spec, - supports_check_mode=True, - required_one_of=([['client_id', 'id'], - ['token', 'auth_realm', 'auth_username', 'auth_password']]), - required_together=([['auth_realm', 'auth_username', 'auth_password']])) - - result = dict(changed=False, msg='', diff={}, proposed={}, existing={}, end_state={}) - - # Obtain access token, initialize API - try: - connection_header = get_token(module.params) - except KeycloakError as e: - module.fail_json(msg=str(e)) - - kc = KeycloakAPI(module, connection_header) - - realm = module.params.get('realm') - cid = module.params.get('id') - state = module.params.get('state') - - # Filter and map the parameters names that apply to the client - client_params = [x for x in module.params - if x not in list(keycloak_argument_spec().keys()) + ['state', 'realm'] and - module.params.get(x) is not None] - - # See if it already exists in Keycloak - if cid is None: - before_client = kc.get_client_by_clientid(module.params.get('client_id'), realm=realm) - if before_client is not None: - cid = before_client['id'] - else: - before_client = kc.get_client_by_id(cid, realm=realm) - - if before_client is None: - before_client = {} - - # Build a proposed changeset from parameters given to this module - changeset = {} - - for client_param in client_params: - new_param_value = module.params.get(client_param) - - # some lists in the Keycloak API are sorted, 
some are not. - if isinstance(new_param_value, list): - if client_param in ['attributes']: - try: - new_param_value = sorted(new_param_value) - except TypeError: - pass - # Unfortunately, the ansible argument spec checker introduces variables with null values when - # they are not specified - if client_param == 'protocol_mappers': - new_param_value = [dict((k, v) for k, v in x.items() if x[k] is not None) for x in new_param_value] - - changeset[camel(client_param)] = new_param_value - - # Prepare the desired values using the existing values (non-existence results in a dict that is save to use as a basis) - desired_client = before_client.copy() - desired_client.update(changeset) - - result['proposed'] = sanitize_cr(changeset) - result['existing'] = sanitize_cr(before_client) - - # Cater for when it doesn't exist (an empty dict) - if not before_client: - if state == 'absent': - # Do nothing and exit - if module._diff: - result['diff'] = dict(before='', after='') - result['changed'] = False - result['end_state'] = {} - result['msg'] = 'Client does not exist; doing nothing.' - module.exit_json(**result) - - # Process a creation - result['changed'] = True - - if 'clientId' not in desired_client: - module.fail_json(msg='client_id needs to be specified when creating a new client') - - if module._diff: - result['diff'] = dict(before='', after=sanitize_cr(desired_client)) - - if module.check_mode: - module.exit_json(**result) - - # create it - kc.create_client(desired_client, realm=realm) - after_client = kc.get_client_by_clientid(desired_client['clientId'], realm=realm) - - result['end_state'] = sanitize_cr(after_client) - - result['msg'] = 'Client %s has been created.' 
% desired_client['clientId'] - module.exit_json(**result) - - else: - if state == 'present': - # Process an update - result['changed'] = True - - if module.check_mode: - # We can only compare the current client with the proposed updates we have - before_norm = normalise_cr(before_client, remove_ids=True) - desired_norm = normalise_cr(desired_client, remove_ids=True) - if module._diff: - result['diff'] = dict(before=sanitize_cr(before_norm), - after=sanitize_cr(desired_norm)) - result['changed'] = (before_norm != desired_norm) - - module.exit_json(**result) - - # do the update - kc.update_client(cid, desired_client, realm=realm) - - after_client = kc.get_client_by_id(cid, realm=realm) - if before_client == after_client: - result['changed'] = False - if module._diff: - result['diff'] = dict(before=sanitize_cr(before_client), - after=sanitize_cr(after_client)) - - result['end_state'] = sanitize_cr(after_client) - - result['msg'] = 'Client %s has been updated.' % desired_client['clientId'] - module.exit_json(**result) - - else: - # Process a deletion (because state was not 'present') - result['changed'] = True - - if module._diff: - result['diff'] = dict(before=sanitize_cr(before_client), after='') - - if module.check_mode: - module.exit_json(**result) - - # delete it - kc.delete_client(cid, realm=realm) - result['proposed'] = {} - - result['end_state'] = {} - - result['msg'] = 'Client %s has been deleted.' 
% before_client['clientId'] - - module.exit_json(**result) - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/identity/keycloak/keycloak_client_rolemapping.py b/plugins/modules/identity/keycloak/keycloak_client_rolemapping.py deleted file mode 100644 index 2d89753143..0000000000 --- a/plugins/modules/identity/keycloak/keycloak_client_rolemapping.py +++ /dev/null @@ -1,350 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - -DOCUMENTATION = ''' ---- -module: keycloak_client_rolemapping - -short_description: Allows administration of Keycloak client_rolemapping with the Keycloak API - -version_added: 3.5.0 - -description: - - This module allows you to add, remove or modify Keycloak client_rolemapping with the Keycloak REST API. - It requires access to the REST API via OpenID Connect; the user connecting and the client being - used must have the requisite access rights. In a default Keycloak installation, admin-cli - and an admin user would work, as would a separate client definition with the scope tailored - to your needs and a user having the expected roles. - - - The names of module options are snake_cased versions of the camelCase ones found in the - Keycloak API and its documentation at U(https://www.keycloak.org/docs-api/8.0/rest-api/index.html). - - - Attributes are multi-valued in the Keycloak API. All attributes are lists of individual values and will - be returned that way by this module. You may pass single values for attributes when calling the module, - and this will be translated into a list suitable for the API. - - - When updating a client_rolemapping, where possible provide the role ID to the module. This removes a lookup - to the API to translate the name into the role ID. - - -options: - state: - description: - - State of the client_rolemapping. 
- - On C(present), the client_rolemapping will be created if it does not yet exist, or updated with the parameters you provide. - - On C(absent), the client_rolemapping will be removed if it exists. - default: 'present' - type: str - choices: - - present - - absent - - realm: - type: str - description: - - They Keycloak realm under which this role_representation resides. - default: 'master' - - group_name: - type: str - description: - - Name of the group to be mapped. - - This parameter is required (can be replaced by gid for less API call). - - gid: - type: str - description: - - Id of the group to be mapped. - - This parameter is not required for updating or deleting the rolemapping but - providing it will reduce the number of API calls required. - - client_id: - type: str - description: - - Name of the client to be mapped (different than I(cid)). - - This parameter is required (can be replaced by cid for less API call). - - cid: - type: str - description: - - Id of the client to be mapped. - - This parameter is not required for updating or deleting the rolemapping but - providing it will reduce the number of API calls required. - - roles: - description: - - Roles to be mapped to the group. - type: list - elements: dict - suboptions: - name: - type: str - description: - - Name of the role_representation. - - This parameter is required only when creating or updating the role_representation. - id: - type: str - description: - - The unique identifier for this role_representation. - - This parameter is not required for updating or deleting a role_representation but - providing it will reduce the number of API calls required. 
- -extends_documentation_fragment: -- community.general.keycloak - - -author: - - Gaëtan Daubresse (@Gaetan2907) -''' - -EXAMPLES = ''' -- name: Map a client role to a group, authentication with credentials - community.general.keycloak_client_rolemapping: - realm: MyCustomRealm - auth_client_id: admin-cli - auth_keycloak_url: https://auth.example.com/auth - auth_realm: master - auth_username: USERNAME - auth_password: PASSWORD - state: present - client_id: client1 - group_name: group1 - roles: - - name: role_name1 - id: role_id1 - - name: role_name2 - id: role_id2 - delegate_to: localhost - -- name: Map a client role to a group, authentication with token - community.general.keycloak_client_rolemapping: - realm: MyCustomRealm - auth_client_id: admin-cli - auth_keycloak_url: https://auth.example.com/auth - token: TOKEN - state: present - client_id: client1 - group_name: group1 - roles: - - name: role_name1 - id: role_id1 - - name: role_name2 - id: role_id2 - delegate_to: localhost - -- name: Unmap client role from a group - community.general.keycloak_client_rolemapping: - realm: MyCustomRealm - auth_client_id: admin-cli - auth_keycloak_url: https://auth.example.com/auth - auth_realm: master - auth_username: USERNAME - auth_password: PASSWORD - state: absent - client_id: client1 - group_name: group1 - roles: - - name: role_name1 - id: role_id1 - - name: role_name2 - id: role_id2 - delegate_to: localhost - -''' - -RETURN = ''' -msg: - description: Message as to what action was taken. - returned: always - type: str - sample: "Role role1 assigned to group group1." - -proposed: - description: Representation of proposed client role mapping. - returned: always - type: dict - sample: { - clientId: "test" - } - -existing: - description: - - Representation of existing client role mapping. - - The sample is truncated. 
- returned: always - type: dict - sample: { - "adminUrl": "http://www.example.com/admin_url", - "attributes": { - "request.object.signature.alg": "RS256", - } - } - -end_state: - description: - - Representation of client role mapping after module execution. - - The sample is truncated. - returned: on success - type: dict - sample: { - "adminUrl": "http://www.example.com/admin_url", - "attributes": { - "request.object.signature.alg": "RS256", - } - } -''' - -from ansible_collections.community.general.plugins.module_utils.identity.keycloak.keycloak import KeycloakAPI, camel, \ - keycloak_argument_spec, get_token, KeycloakError, is_struct_included -from ansible.module_utils.basic import AnsibleModule - - -def main(): - """ - Module execution - - :return: - """ - argument_spec = keycloak_argument_spec() - - roles_spec = dict( - name=dict(type='str'), - id=dict(type='str'), - ) - - meta_args = dict( - state=dict(default='present', choices=['present', 'absent']), - realm=dict(default='master'), - gid=dict(type='str'), - group_name=dict(type='str'), - cid=dict(type='str'), - client_id=dict(type='str'), - roles=dict(type='list', elements='dict', options=roles_spec), - ) - - argument_spec.update(meta_args) - - module = AnsibleModule(argument_spec=argument_spec, - supports_check_mode=True, - required_one_of=([['token', 'auth_realm', 'auth_username', 'auth_password']]), - required_together=([['auth_realm', 'auth_username', 'auth_password']])) - - result = dict(changed=False, msg='', diff={}, proposed={}, existing={}, end_state={}) - - # Obtain access token, initialize API - try: - connection_header = get_token(module.params) - except KeycloakError as e: - module.fail_json(msg=str(e)) - - kc = KeycloakAPI(module, connection_header) - - realm = module.params.get('realm') - state = module.params.get('state') - cid = module.params.get('cid') - client_id = module.params.get('client_id') - gid = module.params.get('gid') - group_name = module.params.get('group_name') - roles = 
module.params.get('roles') - - # Check the parameters - if cid is None and client_id is None: - module.fail_json(msg='Either the `client_id` or `cid` has to be specified.') - if gid is None and group_name is None: - module.fail_json(msg='Either the `group_name` or `gid` has to be specified.') - - # Get the potential missing parameters - if gid is None: - group_rep = kc.get_group_by_name(group_name, realm=realm) - if group_rep is not None: - gid = group_rep['id'] - else: - module.fail_json(msg='Could not fetch group %s:' % group_name) - if cid is None: - cid = kc.get_client_id(client_id, realm=realm) - if cid is None: - module.fail_json(msg='Could not fetch client %s:' % client_id) - if roles is None: - module.exit_json(msg="Nothing to do (no roles specified).") - else: - for role_index, role in enumerate(roles, start=0): - if role['name'] is None and role['id'] is None: - module.fail_json(msg='Either the `name` or `id` has to be specified on each role.') - # Fetch missing role_id - if role['id'] is None: - role_id = kc.get_client_role_by_name(gid, cid, role['name'], realm=realm) - if role_id is not None: - role['id'] = role_id - else: - module.fail_json(msg='Could not fetch role %s:' % (role['name'])) - # Fetch missing role_name - else: - role['name'] = kc.get_client_rolemapping_by_id(gid, cid, role['id'], realm=realm)['name'] - if role['name'] is None: - module.fail_json(msg='Could not fetch role %s' % (role['id'])) - - # Get effective client-level role mappings - available_roles_before = kc.get_client_available_rolemappings(gid, cid, realm=realm) - assigned_roles_before = kc.get_client_composite_rolemappings(gid, cid, realm=realm) - - result['existing'] = assigned_roles_before - result['proposed'] = roles - - update_roles = [] - for role_index, role in enumerate(roles, start=0): - # Fetch roles to assign if state present - if state == 'present': - for available_role in available_roles_before: - if role['name'] == available_role['name']: - update_roles.append({ - 
'id': role['id'], - 'name': role['name'], - }) - # Fetch roles to remove if state absent - else: - for assigned_role in assigned_roles_before: - if role['name'] == assigned_role['name']: - update_roles.append({ - 'id': role['id'], - 'name': role['name'], - }) - - if len(update_roles): - if state == 'present': - # Assign roles - result['changed'] = True - if module._diff: - result['diff'] = dict(before=assigned_roles_before, after=update_roles) - if module.check_mode: - module.exit_json(**result) - kc.add_group_rolemapping(gid, cid, update_roles, realm=realm) - result['msg'] = 'Roles %s assigned to group %s.' % (update_roles, group_name) - assigned_roles_after = kc.get_client_composite_rolemappings(gid, cid, realm=realm) - result['end_state'] = assigned_roles_after - module.exit_json(**result) - else: - # Remove mapping of role - result['changed'] = True - if module._diff: - result['diff'] = dict(before=assigned_roles_before, after=update_roles) - if module.check_mode: - module.exit_json(**result) - kc.delete_group_rolemapping(gid, cid, update_roles, realm=realm) - result['msg'] = 'Roles %s removed from group %s.' % (update_roles, group_name) - assigned_roles_after = kc.get_client_composite_rolemappings(gid, cid, realm=realm) - result['end_state'] = assigned_roles_after - module.exit_json(**result) - # Do nothing - else: - result['changed'] = False - result['msg'] = 'Nothing to do, roles %s are correctly mapped with group %s.' 
% (roles, group_name) - module.exit_json(**result) - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/identity/keycloak/keycloak_identity_provider.py b/plugins/modules/identity/keycloak/keycloak_identity_provider.py deleted file mode 100644 index a4adddd951..0000000000 --- a/plugins/modules/identity/keycloak/keycloak_identity_provider.py +++ /dev/null @@ -1,646 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - -DOCUMENTATION = ''' ---- -module: keycloak_identity_provider - -short_description: Allows administration of Keycloak identity providers via Keycloak API - -version_added: 3.6.0 - -description: - - This module allows you to add, remove or modify Keycloak identity providers via the Keycloak REST API. - It requires access to the REST API via OpenID Connect; the user connecting and the client being - used must have the requisite access rights. In a default Keycloak installation, admin-cli - and an admin user would work, as would a separate client definition with the scope tailored - to your needs and a user having the expected roles. - - - The names of module options are snake_cased versions of the camelCase ones found in the - Keycloak API and its documentation at U(https://www.keycloak.org/docs-api/15.0/rest-api/index.html). - - -options: - state: - description: - - State of the identity provider. - - On C(present), the identity provider will be created if it does not yet exist, or updated with the parameters you provide. - - On C(absent), the identity provider will be removed if it exists. - default: 'present' - type: str - choices: - - present - - absent - - realm: - description: - - The Keycloak realm under which this identity provider resides. 
- default: 'master' - type: str - - alias: - description: - - The alias uniquely identifies an identity provider and it is also used to build the redirect URI. - required: true - type: str - - display_name: - description: - - Friendly name for identity provider. - aliases: - - displayName - type: str - - enabled: - description: - - Enable/disable this identity provider. - type: bool - - store_token: - description: - - Enable/disable whether tokens must be stored after authenticating users. - aliases: - - storeToken - type: bool - - add_read_token_role_on_create: - description: - - Enable/disable whether new users can read any stored tokens. This assigns the C(broker.read-token) role. - aliases: - - addReadTokenRoleOnCreate - type: bool - - trust_email: - description: - - If enabled, email provided by this provider is not verified even if verification is enabled for the realm. - aliases: - - trustEmail - type: bool - - link_only: - description: - - If true, users cannot log in through this provider. They can only link to this provider. - This is useful if you don't want to allow login from the provider, but want to integrate with a provider. - aliases: - - linkOnly - type: bool - - first_broker_login_flow_alias: - description: - - Alias of authentication flow, which is triggered after first login with this identity provider. - aliases: - - firstBrokerLoginFlowAlias - type: str - - post_broker_login_flow_alias: - description: - - Alias of authentication flow, which is triggered after each login with this identity provider. - aliases: - - postBrokerLoginFlowAlias - type: str - - authenticate_by_default: - description: - - Specifies if this identity provider should be used by default for authentication even before displaying login screen. - aliases: - - authenticateByDefault - type: bool - - provider_id: - description: - - Protocol used by this provider (supported values are C(oidc) or C(saml)). 
- aliases: - - providerId - type: str - - config: - description: - - Dict specifying the configuration options for the provider; the contents differ depending on the value of I(providerId). - Examples are given below for C(oidc) and C(saml). It is easiest to obtain valid config values by dumping an already-existing - identity provider configuration through check-mode in the I(existing) field. - type: dict - suboptions: - hide_on_login_page: - description: - - If hidden, login with this provider is possible only if requested explicitly, for example using the C(kc_idp_hint) parameter. - aliases: - - hideOnLoginPage - type: bool - - gui_order: - description: - - Number defining order of the provider in GUI (for example, on Login page). - aliases: - - guiOrder - type: int - - sync_mode: - description: - - Default sync mode for all mappers. The sync mode determines when user data will be synced using the mappers. - aliases: - - syncMode - type: str - - issuer: - description: - - The issuer identifier for the issuer of the response. If not provided, no validation will be performed. - type: str - - authorizationUrl: - description: - - The Authorization URL. - type: str - - tokenUrl: - description: - - The Token URL. - type: str - - logoutUrl: - description: - - End session endpoint to use to logout user from external IDP. - type: str - - userInfoUrl: - description: - - The User Info URL. - type: str - - clientAuthMethod: - description: - - The client authentication method. - type: str - - clientId: - description: - - The client or client identifier registered within the identity provider. - type: str - - clientSecret: - description: - - The client or client secret registered within the identity provider. - type: str - - defaultScope: - description: - - The scopes to be sent when asking for authorization. - type: str - - validateSignature: - description: - - Enable/disable signature validation of external IDP signatures. 
- type: bool - - useJwksUrl: - description: - - If the switch is on, identity provider public keys will be downloaded from given JWKS URL. - type: bool - - jwksUrl: - description: - - URL where identity provider keys in JWK format are stored. See JWK specification for more details. - type: str - - entityId: - description: - - The Entity ID that will be used to uniquely identify this SAML Service Provider. - type: str - - singleSignOnServiceUrl: - description: - - The URL that must be used to send authentication requests (SAML AuthnRequest). - type: str - - singleLogoutServiceUrl: - description: - - The URL that must be used to send logout requests. - type: str - - backchannelSupported: - description: - - Does the external IDP support backchannel logout? - type: str - - nameIDPolicyFormat: - description: - - Specifies the URI reference corresponding to a name identifier format. - type: str - - principalType: - description: - - Way to identify and track external users from the assertion. - type: str - - mappers: - description: - - A list of dicts defining mappers associated with this Identity Provider. - type: list - elements: dict - suboptions: - id: - description: - - Unique ID of this mapper. - type: str - - name: - description: - - Name of the mapper. - type: str - - identityProviderAlias: - description: - - Alias of the identity provider for this mapper. - type: str - - identityProviderMapper: - description: - - Type of mapper. - type: str - - config: - description: - - Dict specifying the configuration options for the mapper; the contents differ depending on the value of I(identityProviderMapper). 
- type: dict - -extends_documentation_fragment: -- community.general.keycloak - -author: - - Laurent Paumier (@laurpaum) -''' - -EXAMPLES = ''' -- name: Create OIDC identity provider, authentication with credentials - community.general.keycloak_identity_provider: - state: present - auth_keycloak_url: https://auth.example.com/auth - auth_realm: master - auth_username: admin - auth_password: admin - realm: myrealm - alias: oidc-idp - display_name: OpenID Connect IdP - enabled: true - provider_id: oidc - config: - issuer: https://idp.example.com - authorizationUrl: https://idp.example.com/auth - tokenUrl: https://idp.example.com/token - userInfoUrl: https://idp.example.com/userinfo - clientAuthMethod: client_secret_post - clientId: my-client - clientSecret: secret - syncMode: FORCE - mappers: - - name: first_name - identityProviderMapper: oidc-user-attribute-idp-mapper - config: - claim: first_name - user.attribute: first_name - syncMode: INHERIT - - name: last_name - identityProviderMapper: oidc-user-attribute-idp-mapper - config: - claim: last_name - user.attribute: last_name - syncMode: INHERIT - -- name: Create SAML identity provider, authentication with credentials - community.general.keycloak_identity_provider: - state: present - auth_keycloak_url: https://auth.example.com/auth - auth_realm: master - auth_username: admin - auth_password: admin - realm: myrealm - alias: saml-idp - display_name: SAML IdP - enabled: true - provider_id: saml - config: - entityId: https://auth.example.com/auth/realms/myrealm - singleSignOnServiceUrl: https://idp.example.com/login - wantAuthnRequestsSigned: true - wantAssertionsSigned: true - mappers: - - name: roles - identityProviderMapper: saml-user-attribute-idp-mapper - config: - user.attribute: roles - attribute.friendly.name: User Roles - attribute.name: roles - syncMode: INHERIT -''' - -RETURN = ''' -msg: - description: Message as to what action was taken. 
- returned: always - type: str - sample: "Identity provider my-idp has been created" - -proposed: - description: Representation of proposed identity provider. - returned: always - type: dict - sample: { - "config": { - "authorizationUrl": "https://idp.example.com/auth", - "clientAuthMethod": "client_secret_post", - "clientId": "my-client", - "clientSecret": "secret", - "issuer": "https://idp.example.com", - "tokenUrl": "https://idp.example.com/token", - "userInfoUrl": "https://idp.example.com/userinfo" - }, - "displayName": "OpenID Connect IdP", - "providerId": "oidc" - } - -existing: - description: Representation of existing identity provider. - returned: always - type: dict - sample: { - "addReadTokenRoleOnCreate": false, - "alias": "my-idp", - "authenticateByDefault": false, - "config": { - "authorizationUrl": "https://old.example.com/auth", - "clientAuthMethod": "client_secret_post", - "clientId": "my-client", - "clientSecret": "**********", - "issuer": "https://old.example.com", - "syncMode": "FORCE", - "tokenUrl": "https://old.example.com/token", - "userInfoUrl": "https://old.example.com/userinfo" - }, - "displayName": "OpenID Connect IdP", - "enabled": true, - "firstBrokerLoginFlowAlias": "first broker login", - "internalId": "4d28d7e3-1b80-45bb-8a30-5822bf55aa1c", - "linkOnly": false, - "providerId": "oidc", - "storeToken": false, - "trustEmail": false, - } - -end_state: - description: Representation of identity provider after module execution. 
- returned: on success - type: dict - sample: { - "addReadTokenRoleOnCreate": false, - "alias": "my-idp", - "authenticateByDefault": false, - "config": { - "authorizationUrl": "https://idp.example.com/auth", - "clientAuthMethod": "client_secret_post", - "clientId": "my-client", - "clientSecret": "**********", - "issuer": "https://idp.example.com", - "tokenUrl": "https://idp.example.com/token", - "userInfoUrl": "https://idp.example.com/userinfo" - }, - "displayName": "OpenID Connect IdP", - "enabled": true, - "firstBrokerLoginFlowAlias": "first broker login", - "internalId": "4d28d7e3-1b80-45bb-8a30-5822bf55aa1c", - "linkOnly": false, - "providerId": "oidc", - "storeToken": false, - "trustEmail": false, - } -''' - -from ansible_collections.community.general.plugins.module_utils.identity.keycloak.keycloak import KeycloakAPI, camel, \ - keycloak_argument_spec, get_token, KeycloakError -from ansible.module_utils.basic import AnsibleModule -from copy import deepcopy - - -def sanitize(idp): - idpcopy = deepcopy(idp) - if 'config' in idpcopy: - if 'clientSecret' in idpcopy['config']: - idpcopy['clientSecret'] = '**********' - return idpcopy - - -def get_identity_provider_with_mappers(kc, alias, realm): - idp = kc.get_identity_provider(alias, realm) - if idp is not None: - idp['mappers'] = sorted(kc.get_identity_provider_mappers(alias, realm), key=lambda x: x.get('name')) - if idp is None: - idp = {} - return idp - - -def main(): - """ - Module execution - - :return: - """ - argument_spec = keycloak_argument_spec() - - mapper_spec = dict( - id=dict(type='str'), - name=dict(type='str'), - identityProviderAlias=dict(type='str'), - identityProviderMapper=dict(type='str'), - config=dict(type='dict'), - ) - - meta_args = dict( - state=dict(type='str', default='present', choices=['present', 'absent']), - realm=dict(type='str', default='master'), - alias=dict(type='str', required=True), - add_read_token_role_on_create=dict(type='bool', aliases=['addReadTokenRoleOnCreate']), - 
authenticate_by_default=dict(type='bool', aliases=['authenticateByDefault']), - config=dict(type='dict'), - display_name=dict(type='str', aliases=['displayName']), - enabled=dict(type='bool'), - first_broker_login_flow_alias=dict(type='str', aliases=['firstBrokerLoginFlowAlias']), - link_only=dict(type='bool', aliases=['linkOnly']), - post_broker_login_flow_alias=dict(type='str', aliases=['postBrokerLoginFlowAlias']), - provider_id=dict(type='str', aliases=['providerId']), - store_token=dict(type='bool', aliases=['storeToken']), - trust_email=dict(type='bool', aliases=['trustEmail']), - mappers=dict(type='list', elements='dict', options=mapper_spec), - ) - - argument_spec.update(meta_args) - - module = AnsibleModule(argument_spec=argument_spec, - supports_check_mode=True, - required_one_of=([['token', 'auth_realm', 'auth_username', 'auth_password']]), - required_together=([['auth_realm', 'auth_username', 'auth_password']])) - - result = dict(changed=False, msg='', diff={}, proposed={}, existing={}, end_state={}) - - # Obtain access token, initialize API - try: - connection_header = get_token(module.params) - except KeycloakError as e: - module.fail_json(msg=str(e)) - - kc = KeycloakAPI(module, connection_header) - - realm = module.params.get('realm') - alias = module.params.get('alias') - state = module.params.get('state') - - # Filter and map the parameters names that apply to the identity provider. 
- idp_params = [x for x in module.params - if x not in list(keycloak_argument_spec().keys()) + ['state', 'realm', 'mappers'] and - module.params.get(x) is not None] - - # See if it already exists in Keycloak - before_idp = get_identity_provider_with_mappers(kc, alias, realm) - - # Build a proposed changeset from parameters given to this module - changeset = {} - - for param in idp_params: - new_param_value = module.params.get(param) - old_value = before_idp[camel(param)] if camel(param) in before_idp else None - if new_param_value != old_value: - changeset[camel(param)] = new_param_value - - # special handling of mappers list to allow change detection - if module.params.get('mappers') is not None: - for change in module.params['mappers']: - change = dict((k, v) for k, v in change.items() if change[k] is not None) - if change.get('id') is None and change.get('name') is None: - module.fail_json(msg='Either `name` or `id` has to be specified on each mapper.') - if before_idp == dict(): - old_mapper = dict() - elif change.get('id') is not None: - old_mapper = kc.get_identity_provider_mapper(change['id'], alias, realm) - if old_mapper is None: - old_mapper = dict() - else: - found = [x for x in kc.get_identity_provider_mappers(alias, realm) if x['name'] == change['name']] - if len(found) == 1: - old_mapper = found[0] - else: - old_mapper = dict() - new_mapper = old_mapper.copy() - new_mapper.update(change) - if new_mapper != old_mapper: - if changeset.get('mappers') is None: - changeset['mappers'] = list() - changeset['mappers'].append(new_mapper) - - # Prepare the desired values using the existing values (non-existence results in a dict that is save to use as a basis) - desired_idp = before_idp.copy() - desired_idp.update(changeset) - - result['proposed'] = sanitize(changeset) - result['existing'] = sanitize(before_idp) - - # Cater for when it doesn't exist (an empty dict) - if not before_idp: - if state == 'absent': - # Do nothing and exit - if module._diff: - 
result['diff'] = dict(before='', after='') - result['changed'] = False - result['end_state'] = {} - result['msg'] = 'Identity provider does not exist; doing nothing.' - module.exit_json(**result) - - # Process a creation - result['changed'] = True - - if module._diff: - result['diff'] = dict(before='', after=sanitize(desired_idp)) - - if module.check_mode: - module.exit_json(**result) - - # create it - desired_idp = desired_idp.copy() - mappers = desired_idp.pop('mappers', []) - kc.create_identity_provider(desired_idp, realm) - for mapper in mappers: - if mapper.get('identityProviderAlias') is None: - mapper['identityProviderAlias'] = alias - kc.create_identity_provider_mapper(mapper, alias, realm) - after_idp = get_identity_provider_with_mappers(kc, alias, realm) - - result['end_state'] = sanitize(after_idp) - - result['msg'] = 'Identity provider {alias} has been created'.format(alias=alias) - module.exit_json(**result) - - else: - if state == 'present': - # Process an update - - # no changes - if desired_idp == before_idp: - result['changed'] = False - result['end_state'] = sanitize(desired_idp) - result['msg'] = "No changes required to identity provider {alias}.".format(alias=alias) - module.exit_json(**result) - - # doing an update - result['changed'] = True - - if module._diff: - result['diff'] = dict(before=sanitize(before_idp), after=sanitize(desired_idp)) - - if module.check_mode: - module.exit_json(**result) - - # do the update - desired_idp = desired_idp.copy() - updated_mappers = desired_idp.pop('mappers', []) - kc.update_identity_provider(desired_idp, realm) - for mapper in updated_mappers: - if mapper.get('id') is not None: - kc.update_identity_provider_mapper(mapper, alias, realm) - else: - if mapper.get('identityProviderAlias') is None: - mapper['identityProviderAlias'] = alias - kc.create_identity_provider_mapper(mapper, alias, realm) - for mapper in [x for x in before_idp['mappers'] - if [y for y in updated_mappers if y["name"] == x['name']] == 
[]]: - kc.delete_identity_provider_mapper(mapper['id'], alias, realm) - - after_idp = get_identity_provider_with_mappers(kc, alias, realm) - - result['end_state'] = sanitize(after_idp) - - result['msg'] = "Identity provider {alias} has been updated".format(alias=alias) - module.exit_json(**result) - - elif state == 'absent': - # Process a deletion - result['changed'] = True - - if module._diff: - result['diff'] = dict(before=sanitize(before_idp), after='') - - if module.check_mode: - module.exit_json(**result) - - # delete it - kc.delete_identity_provider(alias, realm) - - result['end_state'] = {} - - result['msg'] = "Identity provider {alias} has been deleted".format(alias=alias) - - module.exit_json(**result) - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/identity/keycloak/keycloak_realm.py b/plugins/modules/identity/keycloak/keycloak_realm.py deleted file mode 100644 index fd9f17ebf8..0000000000 --- a/plugins/modules/identity/keycloak/keycloak_realm.py +++ /dev/null @@ -1,819 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Copyright (c) 2017, Eike Frost -# Copyright (c) 2021, Christophe Gilles -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - -DOCUMENTATION = ''' ---- -module: keycloak_realm - -short_description: Allows administration of Keycloak realm via Keycloak API - -version_added: 3.0.0 - - -description: - - This module allows the administration of Keycloak realm via the Keycloak REST API. It - requires access to the REST API via OpenID Connect; the user connecting and the realm being - used must have the requisite access rights. In a default Keycloak installation, admin-cli - and an admin user would work, as would a separate realm definition with the scope tailored - to your needs and a user having the expected roles. 
- - - The names of module options are snake_cased versions of the camelCase ones found in the - Keycloak API and its documentation at U(https://www.keycloak.org/docs-api/8.0/rest-api/index.html). - Aliases are provided so camelCased versions can be used as well. - - - The Keycloak API does not always sanity check inputs e.g. you can set - SAML-specific settings on an OpenID Connect client for instance and vice versa. Be careful. - If you do not specify a setting, usually a sensible default is chosen. - -options: - state: - description: - - State of the realm. - - On C(present), the realm will be created (or updated if it exists already). - - On C(absent), the realm will be removed if it exists. - choices: ['present', 'absent'] - default: 'present' - type: str - - id: - description: - - The realm to create. - type: str - realm: - description: - - The realm name. - type: str - access_code_lifespan: - description: - - The realm access code lifespan. - aliases: - - accessCodeLifespan - type: int - access_code_lifespan_login: - description: - - The realm access code lifespan login. - aliases: - - accessCodeLifespanLogin - type: int - access_code_lifespan_user_action: - description: - - The realm access code lifespan user action. - aliases: - - accessCodeLifespanUserAction - type: int - access_token_lifespan: - description: - - The realm access token lifespan. - aliases: - - accessTokenLifespan - type: int - access_token_lifespan_for_implicit_flow: - description: - - The realm access token lifespan for implicit flow. - aliases: - - accessTokenLifespanForImplicitFlow - type: int - account_theme: - description: - - The realm account theme. - aliases: - - accountTheme - type: str - action_token_generated_by_admin_lifespan: - description: - - The realm action token generated by admin lifespan. - aliases: - - actionTokenGeneratedByAdminLifespan - type: int - action_token_generated_by_user_lifespan: - description: - - The realm action token generated by user lifespan. 
- aliases: - - actionTokenGeneratedByUserLifespan - type: int - admin_events_details_enabled: - description: - - The realm admin events details enabled. - aliases: - - adminEventsDetailsEnabled - type: bool - admin_events_enabled: - description: - - The realm admin events enabled. - aliases: - - adminEventsEnabled - type: bool - admin_theme: - description: - - The realm admin theme. - aliases: - - adminTheme - type: str - attributes: - description: - - The realm attributes. - type: dict - browser_flow: - description: - - The realm browser flow. - aliases: - - browserFlow - type: str - browser_security_headers: - description: - - The realm browser security headers. - aliases: - - browserSecurityHeaders - type: dict - brute_force_protected: - description: - - The realm brute force protected. - aliases: - - bruteForceProtected - type: bool - client_authentication_flow: - description: - - The realm client authentication flow. - aliases: - - clientAuthenticationFlow - type: str - client_scope_mappings: - description: - - The realm client scope mappings. - aliases: - - clientScopeMappings - type: dict - default_default_client_scopes: - description: - - The realm default default client scopes. - aliases: - - defaultDefaultClientScopes - type: list - elements: str - default_groups: - description: - - The realm default groups. - aliases: - - defaultGroups - type: list - elements: str - default_locale: - description: - - The realm default locale. - aliases: - - defaultLocale - type: str - default_optional_client_scopes: - description: - - The realm default optional client scopes. - aliases: - - defaultOptionalClientScopes - type: list - elements: str - default_roles: - description: - - The realm default roles. - aliases: - - defaultRoles - type: list - elements: str - default_signature_algorithm: - description: - - The realm default signature algorithm. - aliases: - - defaultSignatureAlgorithm - type: str - direct_grant_flow: - description: - - The realm direct grant flow. 
- aliases: - - directGrantFlow - type: str - display_name: - description: - - The realm display name. - aliases: - - displayName - type: str - display_name_html: - description: - - The realm display name HTML. - aliases: - - displayNameHtml - type: str - docker_authentication_flow: - description: - - The realm docker authentication flow. - aliases: - - dockerAuthenticationFlow - type: str - duplicate_emails_allowed: - description: - - The realm duplicate emails allowed option. - aliases: - - duplicateEmailsAllowed - type: bool - edit_username_allowed: - description: - - The realm edit username allowed option. - aliases: - - editUsernameAllowed - type: bool - email_theme: - description: - - The realm email theme. - aliases: - - emailTheme - type: str - enabled: - description: - - The realm enabled option. - type: bool - enabled_event_types: - description: - - The realm enabled event types. - aliases: - - enabledEventTypes - type: list - elements: str - events_enabled: - description: - - Enables or disables login events for this realm. - aliases: - - eventsEnabled - type: bool - version_added: 3.6.0 - events_expiration: - description: - - The realm events expiration. - aliases: - - eventsExpiration - type: int - events_listeners: - description: - - The realm events listeners. - aliases: - - eventsListeners - type: list - elements: str - failure_factor: - description: - - The realm failure factor. - aliases: - - failureFactor - type: int - internationalization_enabled: - description: - - The realm internationalization enabled option. - aliases: - - internationalizationEnabled - type: bool - login_theme: - description: - - The realm login theme. - aliases: - - loginTheme - type: str - login_with_email_allowed: - description: - - The realm login with email allowed option. - aliases: - - loginWithEmailAllowed - type: bool - max_delta_time_seconds: - description: - - The realm max delta time in seconds. 
- aliases: - - maxDeltaTimeSeconds - type: int - max_failure_wait_seconds: - description: - - The realm max failure wait in seconds. - aliases: - - maxFailureWaitSeconds - type: int - minimum_quick_login_wait_seconds: - description: - - The realm minimum quick login wait in seconds. - aliases: - - minimumQuickLoginWaitSeconds - type: int - not_before: - description: - - The realm not before. - aliases: - - notBefore - type: int - offline_session_idle_timeout: - description: - - The realm offline session idle timeout. - aliases: - - offlineSessionIdleTimeout - type: int - offline_session_max_lifespan: - description: - - The realm offline session max lifespan. - aliases: - - offlineSessionMaxLifespan - type: int - offline_session_max_lifespan_enabled: - description: - - The realm offline session max lifespan enabled option. - aliases: - - offlineSessionMaxLifespanEnabled - type: bool - otp_policy_algorithm: - description: - - The realm otp policy algorithm. - aliases: - - otpPolicyAlgorithm - type: str - otp_policy_digits: - description: - - The realm otp policy digits. - aliases: - - otpPolicyDigits - type: int - otp_policy_initial_counter: - description: - - The realm otp policy initial counter. - aliases: - - otpPolicyInitialCounter - type: int - otp_policy_look_ahead_window: - description: - - The realm otp policy look ahead window. - aliases: - - otpPolicyLookAheadWindow - type: int - otp_policy_period: - description: - - The realm otp policy period. - aliases: - - otpPolicyPeriod - type: int - otp_policy_type: - description: - - The realm otp policy type. - aliases: - - otpPolicyType - type: str - otp_supported_applications: - description: - - The realm otp supported applications. - aliases: - - otpSupportedApplications - type: list - elements: str - password_policy: - description: - - The realm password policy. - aliases: - - passwordPolicy - type: str - permanent_lockout: - description: - - The realm permanent lockout. 
- aliases: - - permanentLockout - type: bool - quick_login_check_milli_seconds: - description: - - The realm quick login check in milliseconds. - aliases: - - quickLoginCheckMilliSeconds - type: int - refresh_token_max_reuse: - description: - - The realm refresh token max reuse. - aliases: - - refreshTokenMaxReuse - type: int - registration_allowed: - description: - - The realm registration allowed option. - aliases: - - registrationAllowed - type: bool - registration_email_as_username: - description: - - The realm registration email as username option. - aliases: - - registrationEmailAsUsername - type: bool - registration_flow: - description: - - The realm registration flow. - aliases: - - registrationFlow - type: str - remember_me: - description: - - The realm remember me option. - aliases: - - rememberMe - type: bool - reset_credentials_flow: - description: - - The realm reset credentials flow. - aliases: - - resetCredentialsFlow - type: str - reset_password_allowed: - description: - - The realm reset password allowed option. - aliases: - - resetPasswordAllowed - type: bool - revoke_refresh_token: - description: - - The realm revoke refresh token option. - aliases: - - revokeRefreshToken - type: bool - smtp_server: - description: - - The realm smtp server. - aliases: - - smtpServer - type: dict - ssl_required: - description: - - The realm ssl required option. - choices: ['all', 'external', 'none'] - aliases: - - sslRequired - type: str - sso_session_idle_timeout: - description: - - The realm sso session idle timeout. - aliases: - - ssoSessionIdleTimeout - type: int - sso_session_idle_timeout_remember_me: - description: - - The realm sso session idle timeout remember me. - aliases: - - ssoSessionIdleTimeoutRememberMe - type: int - sso_session_max_lifespan: - description: - - The realm sso session max lifespan. - aliases: - - ssoSessionMaxLifespan - type: int - sso_session_max_lifespan_remember_me: - description: - - The realm sso session max lifespan remember me. 
- aliases: - - ssoSessionMaxLifespanRememberMe - type: int - supported_locales: - description: - - The realm supported locales. - aliases: - - supportedLocales - type: list - elements: str - user_managed_access_allowed: - description: - - The realm user managed access allowed option. - aliases: - - userManagedAccessAllowed - type: bool - verify_email: - description: - - The realm verify email option. - aliases: - - verifyEmail - type: bool - wait_increment_seconds: - description: - - The realm wait increment in seconds. - aliases: - - waitIncrementSeconds - type: int - -extends_documentation_fragment: -- community.general.keycloak - - -author: - - Christophe Gilles (@kris2kris) -''' - -EXAMPLES = ''' -- name: Create or update Keycloak realm (minimal example) - community.general.keycloak_realm: - auth_client_id: admin-cli - auth_keycloak_url: https://auth.example.com/auth - auth_realm: master - auth_username: USERNAME - auth_password: PASSWORD - id: realm - state: present - -- name: Delete a Keycloak realm - community.general.keycloak_realm: - auth_client_id: admin-cli - auth_keycloak_url: https://auth.example.com/auth - auth_realm: master - auth_username: USERNAME - auth_password: PASSWORD - id: test - state: absent - -''' - -RETURN = ''' -msg: - description: Message as to what action was taken. - returned: always - type: str - sample: "Realm testrealm has been updated" - -proposed: - description: Representation of proposed realm. - returned: always - type: dict - sample: { - id: "test" - } - -existing: - description: Representation of existing realm (sample is truncated). - returned: always - type: dict - sample: { - "adminUrl": "http://www.example.com/admin_url", - "attributes": { - "request.object.signature.alg": "RS256", - } - } - -end_state: - description: Representation of realm after module execution (sample is truncated). 
- returned: on success - type: dict - sample: { - "adminUrl": "http://www.example.com/admin_url", - "attributes": { - "request.object.signature.alg": "RS256", - } - } -''' - -from ansible_collections.community.general.plugins.module_utils.identity.keycloak.keycloak import KeycloakAPI, camel, \ - keycloak_argument_spec, get_token, KeycloakError -from ansible.module_utils.basic import AnsibleModule - - -def sanitize_cr(realmrep): - """ Removes probably sensitive details from a realm representation. - - :param realmrep: the realmrep dict to be sanitized - :return: sanitized realmrep dict - """ - result = realmrep.copy() - if 'secret' in result: - result['secret'] = '********' - if 'attributes' in result: - if 'saml.signing.private.key' in result['attributes']: - result['attributes'] = result['attributes'].copy() - result['attributes']['saml.signing.private.key'] = '********' - return result - - -def main(): - """ - Module execution - - :return: - """ - argument_spec = keycloak_argument_spec() - - meta_args = dict( - state=dict(default='present', choices=['present', 'absent']), - - id=dict(type='str'), - realm=dict(type='str'), - access_code_lifespan=dict(type='int', aliases=['accessCodeLifespan']), - access_code_lifespan_login=dict(type='int', aliases=['accessCodeLifespanLogin']), - access_code_lifespan_user_action=dict(type='int', aliases=['accessCodeLifespanUserAction']), - access_token_lifespan=dict(type='int', aliases=['accessTokenLifespan'], no_log=False), - access_token_lifespan_for_implicit_flow=dict(type='int', aliases=['accessTokenLifespanForImplicitFlow'], no_log=False), - account_theme=dict(type='str', aliases=['accountTheme']), - action_token_generated_by_admin_lifespan=dict(type='int', aliases=['actionTokenGeneratedByAdminLifespan'], no_log=False), - action_token_generated_by_user_lifespan=dict(type='int', aliases=['actionTokenGeneratedByUserLifespan'], no_log=False), - admin_events_details_enabled=dict(type='bool', aliases=['adminEventsDetailsEnabled']), 
- admin_events_enabled=dict(type='bool', aliases=['adminEventsEnabled']), - admin_theme=dict(type='str', aliases=['adminTheme']), - attributes=dict(type='dict'), - browser_flow=dict(type='str', aliases=['browserFlow']), - browser_security_headers=dict(type='dict', aliases=['browserSecurityHeaders']), - brute_force_protected=dict(type='bool', aliases=['bruteForceProtected']), - client_authentication_flow=dict(type='str', aliases=['clientAuthenticationFlow']), - client_scope_mappings=dict(type='dict', aliases=['clientScopeMappings']), - default_default_client_scopes=dict(type='list', elements='str', aliases=['defaultDefaultClientScopes']), - default_groups=dict(type='list', elements='str', aliases=['defaultGroups']), - default_locale=dict(type='str', aliases=['defaultLocale']), - default_optional_client_scopes=dict(type='list', elements='str', aliases=['defaultOptionalClientScopes']), - default_roles=dict(type='list', elements='str', aliases=['defaultRoles']), - default_signature_algorithm=dict(type='str', aliases=['defaultSignatureAlgorithm']), - direct_grant_flow=dict(type='str', aliases=['directGrantFlow']), - display_name=dict(type='str', aliases=['displayName']), - display_name_html=dict(type='str', aliases=['displayNameHtml']), - docker_authentication_flow=dict(type='str', aliases=['dockerAuthenticationFlow']), - duplicate_emails_allowed=dict(type='bool', aliases=['duplicateEmailsAllowed']), - edit_username_allowed=dict(type='bool', aliases=['editUsernameAllowed']), - email_theme=dict(type='str', aliases=['emailTheme']), - enabled=dict(type='bool'), - enabled_event_types=dict(type='list', elements='str', aliases=['enabledEventTypes']), - events_enabled=dict(type='bool', aliases=['eventsEnabled']), - events_expiration=dict(type='int', aliases=['eventsExpiration']), - events_listeners=dict(type='list', elements='str', aliases=['eventsListeners']), - failure_factor=dict(type='int', aliases=['failureFactor']), - internationalization_enabled=dict(type='bool', 
aliases=['internationalizationEnabled']), - login_theme=dict(type='str', aliases=['loginTheme']), - login_with_email_allowed=dict(type='bool', aliases=['loginWithEmailAllowed']), - max_delta_time_seconds=dict(type='int', aliases=['maxDeltaTimeSeconds']), - max_failure_wait_seconds=dict(type='int', aliases=['maxFailureWaitSeconds']), - minimum_quick_login_wait_seconds=dict(type='int', aliases=['minimumQuickLoginWaitSeconds']), - not_before=dict(type='int', aliases=['notBefore']), - offline_session_idle_timeout=dict(type='int', aliases=['offlineSessionIdleTimeout']), - offline_session_max_lifespan=dict(type='int', aliases=['offlineSessionMaxLifespan']), - offline_session_max_lifespan_enabled=dict(type='bool', aliases=['offlineSessionMaxLifespanEnabled']), - otp_policy_algorithm=dict(type='str', aliases=['otpPolicyAlgorithm']), - otp_policy_digits=dict(type='int', aliases=['otpPolicyDigits']), - otp_policy_initial_counter=dict(type='int', aliases=['otpPolicyInitialCounter']), - otp_policy_look_ahead_window=dict(type='int', aliases=['otpPolicyLookAheadWindow']), - otp_policy_period=dict(type='int', aliases=['otpPolicyPeriod']), - otp_policy_type=dict(type='str', aliases=['otpPolicyType']), - otp_supported_applications=dict(type='list', elements='str', aliases=['otpSupportedApplications']), - password_policy=dict(type='str', aliases=['passwordPolicy'], no_log=False), - permanent_lockout=dict(type='bool', aliases=['permanentLockout']), - quick_login_check_milli_seconds=dict(type='int', aliases=['quickLoginCheckMilliSeconds']), - refresh_token_max_reuse=dict(type='int', aliases=['refreshTokenMaxReuse'], no_log=False), - registration_allowed=dict(type='bool', aliases=['registrationAllowed']), - registration_email_as_username=dict(type='bool', aliases=['registrationEmailAsUsername']), - registration_flow=dict(type='str', aliases=['registrationFlow']), - remember_me=dict(type='bool', aliases=['rememberMe']), - reset_credentials_flow=dict(type='str', 
aliases=['resetCredentialsFlow']), - reset_password_allowed=dict(type='bool', aliases=['resetPasswordAllowed'], no_log=False), - revoke_refresh_token=dict(type='bool', aliases=['revokeRefreshToken']), - smtp_server=dict(type='dict', aliases=['smtpServer']), - ssl_required=dict(choices=["external", "all", "none"], aliases=['sslRequired']), - sso_session_idle_timeout=dict(type='int', aliases=['ssoSessionIdleTimeout']), - sso_session_idle_timeout_remember_me=dict(type='int', aliases=['ssoSessionIdleTimeoutRememberMe']), - sso_session_max_lifespan=dict(type='int', aliases=['ssoSessionMaxLifespan']), - sso_session_max_lifespan_remember_me=dict(type='int', aliases=['ssoSessionMaxLifespanRememberMe']), - supported_locales=dict(type='list', elements='str', aliases=['supportedLocales']), - user_managed_access_allowed=dict(type='bool', aliases=['userManagedAccessAllowed']), - verify_email=dict(type='bool', aliases=['verifyEmail']), - wait_increment_seconds=dict(type='int', aliases=['waitIncrementSeconds']), - ) - - argument_spec.update(meta_args) - - module = AnsibleModule(argument_spec=argument_spec, - supports_check_mode=True, - required_one_of=([['id', 'realm', 'enabled'], - ['token', 'auth_realm', 'auth_username', 'auth_password']]), - required_together=([['auth_realm', 'auth_username', 'auth_password']])) - - result = dict(changed=False, msg='', diff={}, proposed={}, existing={}, end_state={}) - - # Obtain access token, initialize API - try: - connection_header = get_token(module.params) - except KeycloakError as e: - module.fail_json(msg=str(e)) - - kc = KeycloakAPI(module, connection_header) - - realm = module.params.get('realm') - state = module.params.get('state') - - # convert module parameters to realm representation parameters (if they belong in there) - params_to_ignore = list(keycloak_argument_spec().keys()) + ['state'] - - # Filter and map the parameters names that apply to the role - realm_params = [x for x in module.params - if x not in params_to_ignore and 
- module.params.get(x) is not None] - - # See whether the realm already exists in Keycloak - before_realm = kc.get_realm_by_id(realm=realm) - - if before_realm is None: - before_realm = {} - - # Build a proposed changeset from parameters given to this module - changeset = {} - - for realm_param in realm_params: - new_param_value = module.params.get(realm_param) - changeset[camel(realm_param)] = new_param_value - - # Prepare the desired values using the existing values (non-existence results in a dict that is save to use as a basis) - desired_realm = before_realm.copy() - desired_realm.update(changeset) - - result['proposed'] = sanitize_cr(changeset) - before_realm_sanitized = sanitize_cr(before_realm) - result['existing'] = before_realm_sanitized - - # Cater for when it doesn't exist (an empty dict) - if not before_realm: - if state == 'absent': - # Do nothing and exit - if module._diff: - result['diff'] = dict(before='', after='') - result['changed'] = False - result['end_state'] = {} - result['msg'] = 'Realm does not exist, doing nothing.' - module.exit_json(**result) - - # Process a creation - result['changed'] = True - - if 'id' not in desired_realm: - module.fail_json(msg='id needs to be specified when creating a new realm') - - if module._diff: - result['diff'] = dict(before='', after=sanitize_cr(desired_realm)) - - if module.check_mode: - module.exit_json(**result) - - # create it - kc.create_realm(desired_realm) - after_realm = kc.get_realm_by_id(desired_realm['id']) - - result['end_state'] = sanitize_cr(after_realm) - - result['msg'] = 'Realm %s has been created.' 
% desired_realm['id'] - module.exit_json(**result) - - else: - if state == 'present': - # Process an update - - # doing an update - result['changed'] = True - if module.check_mode: - # We can only compare the current realm with the proposed updates we have - if module._diff: - result['diff'] = dict(before=before_realm_sanitized, - after=sanitize_cr(desired_realm)) - result['changed'] = (before_realm != desired_realm) - - module.exit_json(**result) - - # do the update - kc.update_realm(desired_realm, realm=realm) - - after_realm = kc.get_realm_by_id(realm=realm) - - if before_realm == after_realm: - result['changed'] = False - - result['end_state'] = sanitize_cr(after_realm) - - if module._diff: - result['diff'] = dict(before=before_realm_sanitized, - after=sanitize_cr(after_realm)) - - result['msg'] = 'Realm %s has been updated.' % desired_realm['id'] - module.exit_json(**result) - - else: - # Process a deletion (because state was not 'present') - result['changed'] = True - - if module._diff: - result['diff'] = dict(before=before_realm_sanitized, after='') - - if module.check_mode: - module.exit_json(**result) - - # delete it - kc.delete_realm(realm=realm) - - result['proposed'] = {} - result['end_state'] = {} - - result['msg'] = 'Realm %s has been deleted.' 
% before_realm['id'] - - module.exit_json(**result) - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/identity/keycloak/keycloak_realm_info.py b/plugins/modules/identity/keycloak/keycloak_realm_info.py deleted file mode 100644 index a84c9dc767..0000000000 --- a/plugins/modules/identity/keycloak/keycloak_realm_info.py +++ /dev/null @@ -1,132 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - -DOCUMENTATION = ''' ---- -module: keycloak_realm_info - -short_description: Allows obtaining Keycloak realm public information via Keycloak API - -version_added: 4.3.0 - -description: - - This module allows you to get Keycloak realm public information via the Keycloak REST API. - - - The names of module options are snake_cased versions of the camelCase ones found in the - Keycloak API and its documentation at U(https://www.keycloak.org/docs-api/8.0/rest-api/index.html). - - - Attributes are multi-valued in the Keycloak API. All attributes are lists of individual values and will - be returned that way by this module. You may pass single values for attributes when calling the module, - and this will be translated into a list suitable for the API. - -options: - auth_keycloak_url: - description: - - URL to the Keycloak instance. - type: str - required: true - aliases: - - url - validate_certs: - description: - - Verify TLS certificates (do not disable this in production). - type: bool - default: yes - - realm: - type: str - description: - - They Keycloak realm ID. 
- default: 'master' - -author: - - Fynn Chen (@fynncfchen) -''' - -EXAMPLES = ''' -- name: Get a Keycloak public key - community.general.keycloak_realm_info: - realm: MyCustomRealm - auth_keycloak_url: https://auth.example.com/auth - delegate_to: localhost -''' - -RETURN = ''' -msg: - description: Message as to what action was taken. - returned: always - type: str - -realm_info: - description: - - Representation of the realm public infomation. - returned: always - type: dict - contains: - realm: - description: Realm ID. - type: str - returned: always - sample: MyRealm - public_key: - description: Public key of the realm. - type: str - returned: always - sample: MIIBIjANBgkqhkiG9w0BAQEFAAO... - token-service: - description: Token endpoint URL. - type: str - returned: always - sample: https://auth.example.com/auth/realms/MyRealm/protocol/openid-connect - account-service: - description: Account console URL. - type: str - returned: always - sample: https://auth.example.com/auth/realms/MyRealm/account - tokens-not-before: - description: The token not before. 
- type: int - returned: always - sample: 0 -''' - -from ansible_collections.community.general.plugins.module_utils.identity.keycloak.keycloak import KeycloakAPI -from ansible.module_utils.basic import AnsibleModule - - -def main(): - """ - Module execution - - :return: - """ - argument_spec = dict( - auth_keycloak_url=dict(type='str', aliases=['url'], required=True, no_log=False), - validate_certs=dict(type='bool', default=True), - - realm=dict(default='master'), - ) - - module = AnsibleModule(argument_spec=argument_spec, - supports_check_mode=True) - - result = dict(changed=False, msg='', realm_info='') - - kc = KeycloakAPI(module, {}) - - realm = module.params.get('realm') - - realm_info = kc.get_realm_info_by_id(realm=realm) - - result['realm_info'] = realm_info - result['msg'] = 'Get realm public info successful for ID {realm}'.format(realm=realm) - module.exit_json(**result) - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/identity/keycloak/keycloak_user_federation.py b/plugins/modules/identity/keycloak/keycloak_user_federation.py deleted file mode 100644 index 4d623a4874..0000000000 --- a/plugins/modules/identity/keycloak/keycloak_user_federation.py +++ /dev/null @@ -1,1009 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - -DOCUMENTATION = ''' ---- -module: keycloak_user_federation - -short_description: Allows administration of Keycloak user federations via Keycloak API - -version_added: 3.7.0 - -description: - - This module allows you to add, remove or modify Keycloak user federations via the Keycloak REST API. - It requires access to the REST API via OpenID Connect; the user connecting and the client being - used must have the requisite access rights. 
In a default Keycloak installation, admin-cli - and an admin user would work, as would a separate client definition with the scope tailored - to your needs and a user having the expected roles. - - - The names of module options are snake_cased versions of the camelCase ones found in the - Keycloak API and its documentation at U(https://www.keycloak.org/docs-api/15.0/rest-api/index.html). - - -options: - state: - description: - - State of the user federation. - - On C(present), the user federation will be created if it does not yet exist, or updated with - the parameters you provide. - - On C(absent), the user federation will be removed if it exists. - default: 'present' - type: str - choices: - - present - - absent - - realm: - description: - - The Keycloak realm under which this user federation resides. - default: 'master' - type: str - - id: - description: - - The unique ID for this user federation. If left empty, the user federation will be searched - by its I(name). - type: str - - name: - description: - - Display name of provider when linked in admin console. - type: str - - provider_id: - description: - - Provider for this user federation. - aliases: - - providerId - type: str - choices: - - ldap - - kerberos - - sssd - - provider_type: - description: - - Component type for user federation (only supported value is C(org.keycloak.storage.UserStorageProvider)). - aliases: - - providerType - default: org.keycloak.storage.UserStorageProvider - type: str - - parent_id: - description: - - Unique ID for the parent of this user federation. Realm ID will be automatically used if left blank. - aliases: - - parentId - type: str - - config: - description: - - Dict specifying the configuration options for the provider; the contents differ depending on - the value of I(provider_id). Examples are given below for C(ldap), C(kerberos) and C(sssd). 
- It is easiest to obtain valid config values by dumping an already-existing user federation - configuration through check-mode in the I(existing) field. - - The value C(sssd) has been supported since community.general 4.2.0. - type: dict - suboptions: - enabled: - description: - - Enable/disable this user federation. - default: true - type: bool - - priority: - description: - - Priority of provider when doing a user lookup. Lowest first. - default: 0 - type: int - - importEnabled: - description: - - If C(true), LDAP users will be imported into Keycloak DB and synced by the configured - sync policies. - default: true - type: bool - - editMode: - description: - - C(READ_ONLY) is a read-only LDAP store. C(WRITABLE) means data will be synced back to LDAP - on demand. C(UNSYNCED) means user data will be imported, but not synced back to LDAP. - type: str - choices: - - READ_ONLY - - WRITABLE - - UNSYNCED - - syncRegistrations: - description: - - Should newly created users be created within LDAP store? Priority effects which - provider is chosen to sync the new user. - default: false - type: bool - - vendor: - description: - - LDAP vendor (provider). - type: str - - usernameLDAPAttribute: - description: - - Name of LDAP attribute, which is mapped as Keycloak username. For many LDAP server - vendors it can be C(uid). For Active directory it can be C(sAMAccountName) or C(cn). - The attribute should be filled for all LDAP user records you want to import from - LDAP to Keycloak. - type: str - - rdnLDAPAttribute: - description: - - Name of LDAP attribute, which is used as RDN (top attribute) of typical user DN. - Usually it's the same as Username LDAP attribute, however it is not required. For - example for Active directory, it is common to use C(cn) as RDN attribute when - username attribute might be C(sAMAccountName). - type: str - - uuidLDAPAttribute: - description: - - Name of LDAP attribute, which is used as unique object identifier (UUID) for objects - in LDAP. 
For many LDAP server vendors, it is C(entryUUID); however some are different. - For example for Active directory it should be C(objectGUID). If your LDAP server does - not support the notion of UUID, you can use any other attribute that is supposed to - be unique among LDAP users in tree. - type: str - - userObjectClasses: - description: - - All values of LDAP objectClass attribute for users in LDAP divided by comma. - For example C(inetOrgPerson, organizationalPerson). Newly created Keycloak users - will be written to LDAP with all those object classes and existing LDAP user records - are found just if they contain all those object classes. - type: str - - connectionUrl: - description: - - Connection URL to your LDAP server. - type: str - - usersDn: - description: - - Full DN of LDAP tree where your users are. This DN is the parent of LDAP users. - type: str - - customUserSearchFilter: - description: - - Additional LDAP Filter for filtering searched users. Leave this empty if you don't - need additional filter. - type: str - - searchScope: - description: - - For one level, the search applies only for users in the DNs specified by User DNs. - For subtree, the search applies to the whole subtree. See LDAP documentation for - more details. - default: '1' - type: str - choices: - - '1' - - '2' - - authType: - description: - - Type of the Authentication method used during LDAP Bind operation. It is used in - most of the requests sent to the LDAP server. - default: 'none' - type: str - choices: - - none - - simple - - bindDn: - description: - - DN of LDAP user which will be used by Keycloak to access LDAP server. - type: str - - bindCredential: - description: - - Password of LDAP admin. - type: str - - startTls: - description: - - Encrypts the connection to LDAP using STARTTLS, which will disable connection pooling. - default: false - type: bool - - usePasswordModifyExtendedOp: - description: - - Use the LDAPv3 Password Modify Extended Operation (RFC-3062). 
The password modify - extended operation usually requires that LDAP user already has password in the LDAP - server. So when this is used with 'Sync Registrations', it can be good to add also - 'Hardcoded LDAP attribute mapper' with randomly generated initial password. - default: false - type: bool - - validatePasswordPolicy: - description: - - Determines if Keycloak should validate the password with the realm password policy - before updating it. - default: false - type: bool - - trustEmail: - description: - - If enabled, email provided by this provider is not verified even if verification is - enabled for the realm. - default: false - type: bool - - useTruststoreSpi: - description: - - Specifies whether LDAP connection will use the truststore SPI with the truststore - configured in standalone.xml/domain.xml. C(Always) means that it will always use it. - C(Never) means that it will not use it. C(Only for ldaps) means that it will use if - your connection URL use ldaps. Note even if standalone.xml/domain.xml is not - configured, the default Java cacerts or certificate specified by - C(javax.net.ssl.trustStore) property will be used. - default: ldapsOnly - type: str - choices: - - always - - ldapsOnly - - never - - connectionTimeout: - description: - - LDAP Connection Timeout in milliseconds. - type: int - - readTimeout: - description: - - LDAP Read Timeout in milliseconds. This timeout applies for LDAP read operations. - type: int - - pagination: - description: - - Does the LDAP server support pagination. - default: true - type: bool - - connectionPooling: - description: - - Determines if Keycloak should use connection pooling for accessing LDAP server. - default: true - type: bool - - connectionPoolingAuthentication: - description: - - A list of space-separated authentication types of connections that may be pooled. 
- type: str - choices: - - none - - simple - - DIGEST-MD5 - - connectionPoolingDebug: - description: - - A string that indicates the level of debug output to produce. Example valid values are - C(fine) (trace connection creation and removal) and C(all) (all debugging information). - type: str - - connectionPoolingInitSize: - description: - - The number of connections per connection identity to create when initially creating a - connection for the identity. - type: int - - connectionPoolingMaxSize: - description: - - The maximum number of connections per connection identity that can be maintained - concurrently. - type: int - - connectionPoolingPrefSize: - description: - - The preferred number of connections per connection identity that should be maintained - concurrently. - type: int - - connectionPoolingProtocol: - description: - - A list of space-separated protocol types of connections that may be pooled. - Valid types are C(plain) and C(ssl). - type: str - - connectionPoolingTimeout: - description: - - The number of milliseconds that an idle connection may remain in the pool without - being closed and removed from the pool. - type: int - - allowKerberosAuthentication: - description: - - Enable/disable HTTP authentication of users with SPNEGO/Kerberos tokens. The data - about authenticated users will be provisioned from this LDAP server. - default: false - type: bool - - kerberosRealm: - description: - - Name of kerberos realm. - type: str - - serverPrincipal: - description: - - Full name of server principal for HTTP service including server and domain name. For - example C(HTTP/host.foo.org@FOO.ORG). Use C(*) to accept any service principal in the - KeyTab file. - type: str - - keyTab: - description: - - Location of Kerberos KeyTab file containing the credentials of server principal. For - example C(/etc/krb5.keytab). - type: str - - debug: - description: - - Enable/disable debug logging to standard output for Krb5LoginModule. 
- type: bool - - useKerberosForPasswordAuthentication: - description: - - Use Kerberos login module for authenticate username/password against Kerberos server - instead of authenticating against LDAP server with Directory Service API. - default: false - type: bool - - allowPasswordAuthentication: - description: - - Enable/disable possibility of username/password authentication against Kerberos database. - type: bool - - batchSizeForSync: - description: - - Count of LDAP users to be imported from LDAP to Keycloak within a single transaction. - default: 1000 - type: int - - fullSyncPeriod: - description: - - Period for full synchronization in seconds. - default: -1 - type: int - - changedSyncPeriod: - description: - - Period for synchronization of changed or newly created LDAP users in seconds. - default: -1 - type: int - - updateProfileFirstLogin: - description: - - Update profile on first login. - type: bool - - cachePolicy: - description: - - Cache Policy for this storage provider. - type: str - default: 'DEFAULT' - choices: - - DEFAULT - - EVICT_DAILY - - EVICT_WEEKLY - - MAX_LIFESPAN - - NO_CACHE - - evictionDay: - description: - - Day of the week the entry will become invalid on. - type: str - - evictionHour: - description: - - Hour of day the entry will become invalid on. - type: str - - evictionMinute: - description: - - Minute of day the entry will become invalid on. - type: str - - maxLifespan: - description: - - Max lifespan of cache entry in milliseconds. - type: int - - mappers: - description: - - A list of dicts defining mappers associated with this Identity Provider. - type: list - elements: dict - suboptions: - id: - description: - - Unique ID of this mapper. - type: str - - name: - description: - - Name of the mapper. If no ID is given, the mapper will be searched by name. - type: str - - parentId: - description: - - Unique ID for the parent of this mapper. ID of the user federation will automatically - be used if left blank. 
- type: str - - providerId: - description: - - The mapper type for this mapper (for instance C(user-attribute-ldap-mapper)). - type: str - - providerType: - description: - - Component type for this mapper (only supported value is C(org.keycloak.storage.ldap.mappers.LDAPStorageMapper)). - type: str - - config: - description: - - Dict specifying the configuration options for the mapper; the contents differ - depending on the value of I(identityProviderMapper). - type: dict - -extends_documentation_fragment: -- community.general.keycloak - -author: - - Laurent Paumier (@laurpaum) -''' - -EXAMPLES = ''' - - name: Create LDAP user federation - community.general.keycloak_user_federation: - auth_keycloak_url: https://keycloak.example.com/auth - auth_realm: master - auth_username: admin - auth_password: password - realm: my-realm - name: my-ldap - state: present - provider_id: ldap - provider_type: org.keycloak.storage.UserStorageProvider - config: - priority: 0 - enabled: true - cachePolicy: DEFAULT - batchSizeForSync: 1000 - editMode: READ_ONLY - importEnabled: true - syncRegistrations: false - vendor: other - usernameLDAPAttribute: uid - rdnLDAPAttribute: uid - uuidLDAPAttribute: entryUUID - userObjectClasses: inetOrgPerson, organizationalPerson - connectionUrl: ldaps://ldap.example.com:636 - usersDn: ou=Users,dc=example,dc=com - authType: simple - bindDn: cn=directory reader - bindCredential: password - searchScope: 1 - validatePasswordPolicy: false - trustEmail: false - useTruststoreSpi: ldapsOnly - connectionPooling: true - pagination: true - allowKerberosAuthentication: false - debug: false - useKerberosForPasswordAuthentication: false - mappers: - - name: "full name" - providerId: "full-name-ldap-mapper" - providerType: "org.keycloak.storage.ldap.mappers.LDAPStorageMapper" - config: - ldap.full.name.attribute: cn - read.only: true - write.only: false - - - name: Create Kerberos user federation - community.general.keycloak_user_federation: - auth_keycloak_url: 
https://keycloak.example.com/auth - auth_realm: master - auth_username: admin - auth_password: password - realm: my-realm - name: my-kerberos - state: present - provider_id: kerberos - provider_type: org.keycloak.storage.UserStorageProvider - config: - priority: 0 - enabled: true - cachePolicy: DEFAULT - kerberosRealm: EXAMPLE.COM - serverPrincipal: HTTP/host.example.com@EXAMPLE.COM - keyTab: keytab - allowPasswordAuthentication: false - updateProfileFirstLogin: false - - - name: Create sssd user federation - community.general.keycloak_user_federation: - auth_keycloak_url: https://keycloak.example.com/auth - auth_realm: master - auth_username: admin - auth_password: password - realm: my-realm - name: my-sssd - state: present - provider_id: sssd - provider_type: org.keycloak.storage.UserStorageProvider - config: - priority: 0 - enabled: true - cachePolicy: DEFAULT - - - name: Delete user federation - community.general.keycloak_user_federation: - auth_keycloak_url: https://keycloak.example.com/auth - auth_realm: master - auth_username: admin - auth_password: password - realm: my-realm - name: my-federation - state: absent - -''' - -RETURN = ''' -msg: - description: Message as to what action was taken. - returned: always - type: str - sample: "No changes required to user federation 164bb483-c613-482e-80fe-7f1431308799." - -proposed: - description: Representation of proposed user federation. 
- returned: always - type: dict - sample: { - "config": { - "allowKerberosAuthentication": "false", - "authType": "simple", - "batchSizeForSync": "1000", - "bindCredential": "**********", - "bindDn": "cn=directory reader", - "cachePolicy": "DEFAULT", - "connectionPooling": "true", - "connectionUrl": "ldaps://ldap.example.com:636", - "debug": "false", - "editMode": "READ_ONLY", - "enabled": "true", - "importEnabled": "true", - "pagination": "true", - "priority": "0", - "rdnLDAPAttribute": "uid", - "searchScope": "1", - "syncRegistrations": "false", - "trustEmail": "false", - "useKerberosForPasswordAuthentication": "false", - "useTruststoreSpi": "ldapsOnly", - "userObjectClasses": "inetOrgPerson, organizationalPerson", - "usernameLDAPAttribute": "uid", - "usersDn": "ou=Users,dc=example,dc=com", - "uuidLDAPAttribute": "entryUUID", - "validatePasswordPolicy": "false", - "vendor": "other" - }, - "name": "ldap", - "providerId": "ldap", - "providerType": "org.keycloak.storage.UserStorageProvider" - } - -existing: - description: Representation of existing user federation. 
- returned: always - type: dict - sample: { - "config": { - "allowKerberosAuthentication": "false", - "authType": "simple", - "batchSizeForSync": "1000", - "bindCredential": "**********", - "bindDn": "cn=directory reader", - "cachePolicy": "DEFAULT", - "changedSyncPeriod": "-1", - "connectionPooling": "true", - "connectionUrl": "ldaps://ldap.example.com:636", - "debug": "false", - "editMode": "READ_ONLY", - "enabled": "true", - "fullSyncPeriod": "-1", - "importEnabled": "true", - "pagination": "true", - "priority": "0", - "rdnLDAPAttribute": "uid", - "searchScope": "1", - "syncRegistrations": "false", - "trustEmail": "false", - "useKerberosForPasswordAuthentication": "false", - "useTruststoreSpi": "ldapsOnly", - "userObjectClasses": "inetOrgPerson, organizationalPerson", - "usernameLDAPAttribute": "uid", - "usersDn": "ou=Users,dc=example,dc=com", - "uuidLDAPAttribute": "entryUUID", - "validatePasswordPolicy": "false", - "vendor": "other" - }, - "id": "01122837-9047-4ae4-8ca0-6e2e891a765f", - "mappers": [ - { - "config": { - "always.read.value.from.ldap": "false", - "is.mandatory.in.ldap": "false", - "ldap.attribute": "mail", - "read.only": "true", - "user.model.attribute": "email" - }, - "id": "17d60ce2-2d44-4c2c-8b1f-1fba601b9a9f", - "name": "email", - "parentId": "01122837-9047-4ae4-8ca0-6e2e891a765f", - "providerId": "user-attribute-ldap-mapper", - "providerType": "org.keycloak.storage.ldap.mappers.LDAPStorageMapper" - } - ], - "name": "myfed", - "parentId": "myrealm", - "providerId": "ldap", - "providerType": "org.keycloak.storage.UserStorageProvider" - } - -end_state: - description: Representation of user federation after module execution. 
- returned: on success - type: dict - sample: { - "config": { - "allowPasswordAuthentication": "false", - "cachePolicy": "DEFAULT", - "enabled": "true", - "kerberosRealm": "EXAMPLE.COM", - "keyTab": "/etc/krb5.keytab", - "priority": "0", - "serverPrincipal": "HTTP/host.example.com@EXAMPLE.COM", - "updateProfileFirstLogin": "false" - }, - "id": "cf52ae4f-4471-4435-a0cf-bb620cadc122", - "mappers": [], - "name": "kerberos", - "parentId": "myrealm", - "providerId": "kerberos", - "providerType": "org.keycloak.storage.UserStorageProvider" - } -''' - -from ansible_collections.community.general.plugins.module_utils.identity.keycloak.keycloak import KeycloakAPI, camel, \ - keycloak_argument_spec, get_token, KeycloakError -from ansible.module_utils.basic import AnsibleModule -from ansible.module_utils.six.moves.urllib.parse import urlencode -from copy import deepcopy - - -def sanitize(comp): - compcopy = deepcopy(comp) - if 'config' in compcopy: - compcopy['config'] = dict((k, v[0]) for k, v in compcopy['config'].items()) - if 'bindCredential' in compcopy['config']: - compcopy['config']['bindCredential'] = '**********' - if 'mappers' in compcopy: - for mapper in compcopy['mappers']: - if 'config' in mapper: - mapper['config'] = dict((k, v[0]) for k, v in mapper['config'].items()) - return compcopy - - -def main(): - """ - Module execution - - :return: - """ - argument_spec = keycloak_argument_spec() - - config_spec = dict( - allowKerberosAuthentication=dict(type='bool', default=False), - allowPasswordAuthentication=dict(type='bool'), - authType=dict(type='str', choices=['none', 'simple'], default='none'), - batchSizeForSync=dict(type='int', default=1000), - bindCredential=dict(type='str', no_log=True), - bindDn=dict(type='str'), - cachePolicy=dict(type='str', choices=['DEFAULT', 'EVICT_DAILY', 'EVICT_WEEKLY', 'MAX_LIFESPAN', 'NO_CACHE'], default='DEFAULT'), - changedSyncPeriod=dict(type='int', default=-1), - connectionPooling=dict(type='bool', default=True), - 
connectionPoolingAuthentication=dict(type='str', choices=['none', 'simple', 'DIGEST-MD5']), - connectionPoolingDebug=dict(type='str'), - connectionPoolingInitSize=dict(type='int'), - connectionPoolingMaxSize=dict(type='int'), - connectionPoolingPrefSize=dict(type='int'), - connectionPoolingProtocol=dict(type='str'), - connectionPoolingTimeout=dict(type='int'), - connectionTimeout=dict(type='int'), - connectionUrl=dict(type='str'), - customUserSearchFilter=dict(type='str'), - debug=dict(type='bool'), - editMode=dict(type='str', choices=['READ_ONLY', 'WRITABLE', 'UNSYNCED']), - enabled=dict(type='bool', default=True), - evictionDay=dict(type='str'), - evictionHour=dict(type='str'), - evictionMinute=dict(type='str'), - fullSyncPeriod=dict(type='int', default=-1), - importEnabled=dict(type='bool', default=True), - kerberosRealm=dict(type='str'), - keyTab=dict(type='str', no_log=False), - maxLifespan=dict(type='int'), - pagination=dict(type='bool', default=True), - priority=dict(type='int', default=0), - rdnLDAPAttribute=dict(type='str'), - readTimeout=dict(type='int'), - searchScope=dict(type='str', choices=['1', '2'], default='1'), - serverPrincipal=dict(type='str'), - startTls=dict(type='bool', default=False), - syncRegistrations=dict(type='bool', default=False), - trustEmail=dict(type='bool', default=False), - updateProfileFirstLogin=dict(type='bool'), - useKerberosForPasswordAuthentication=dict(type='bool', default=False), - usePasswordModifyExtendedOp=dict(type='bool', default=False, no_log=False), - useTruststoreSpi=dict(type='str', choices=['always', 'ldapsOnly', 'never'], default='ldapsOnly'), - userObjectClasses=dict(type='str'), - usernameLDAPAttribute=dict(type='str'), - usersDn=dict(type='str'), - uuidLDAPAttribute=dict(type='str'), - validatePasswordPolicy=dict(type='bool', default=False), - vendor=dict(type='str'), - ) - - mapper_spec = dict( - id=dict(type='str'), - name=dict(type='str'), - parentId=dict(type='str'), - providerId=dict(type='str'), - 
providerType=dict(type='str'), - config=dict(type='dict'), - ) - - meta_args = dict( - config=dict(type='dict', options=config_spec), - state=dict(type='str', default='present', choices=['present', 'absent']), - realm=dict(type='str', default='master'), - id=dict(type='str'), - name=dict(type='str'), - provider_id=dict(type='str', aliases=['providerId'], choices=['ldap', 'kerberos', 'sssd']), - provider_type=dict(type='str', aliases=['providerType'], default='org.keycloak.storage.UserStorageProvider'), - parent_id=dict(type='str', aliases=['parentId']), - mappers=dict(type='list', elements='dict', options=mapper_spec), - ) - - argument_spec.update(meta_args) - - module = AnsibleModule(argument_spec=argument_spec, - supports_check_mode=True, - required_one_of=([['id', 'name'], - ['token', 'auth_realm', 'auth_username', 'auth_password']]), - required_together=([['auth_realm', 'auth_username', 'auth_password']])) - - result = dict(changed=False, msg='', diff={}, proposed={}, existing={}, end_state={}) - - # Obtain access token, initialize API - try: - connection_header = get_token(module.params) - except KeycloakError as e: - module.fail_json(msg=str(e)) - - kc = KeycloakAPI(module, connection_header) - - realm = module.params.get('realm') - state = module.params.get('state') - config = module.params.get('config') - mappers = module.params.get('mappers') - cid = module.params.get('id') - name = module.params.get('name') - - # Keycloak API expects config parameters to be arrays containing a single string element - if config is not None: - module.params['config'] = dict((k, [str(v).lower() if not isinstance(v, str) else v]) - for k, v in config.items() if config[k] is not None) - - if mappers is not None: - for mapper in mappers: - if mapper.get('config') is not None: - mapper['config'] = dict((k, [str(v).lower() if not isinstance(v, str) else v]) - for k, v in mapper['config'].items() if mapper['config'][k] is not None) - - # Filter and map the parameters names that 
apply - comp_params = [x for x in module.params - if x not in list(keycloak_argument_spec().keys()) + ['state', 'realm', 'mappers'] and - module.params.get(x) is not None] - - # See if it already exists in Keycloak - if cid is None: - found = kc.get_components(urlencode(dict(type='org.keycloak.storage.UserStorageProvider', parent=realm, name=name)), realm) - if len(found) > 1: - module.fail_json(msg='No ID given and found multiple user federations with name `{name}`. Cannot continue.'.format(name=name)) - before_comp = next(iter(found), None) - if before_comp is not None: - cid = before_comp['id'] - else: - before_comp = kc.get_component(cid, realm) - - if before_comp is None: - before_comp = {} - - # if user federation exists, get associated mappers - if cid is not None and before_comp: - before_comp['mappers'] = sorted(kc.get_components(urlencode(dict(parent=cid)), realm), key=lambda x: x.get('name')) - - # Build a proposed changeset from parameters given to this module - changeset = {} - - for param in comp_params: - new_param_value = module.params.get(param) - old_value = before_comp[camel(param)] if camel(param) in before_comp else None - if param == 'mappers': - new_param_value = [dict((k, v) for k, v in x.items() if x[k] is not None) for x in new_param_value] - if new_param_value != old_value: - changeset[camel(param)] = new_param_value - - # special handling of mappers list to allow change detection - if module.params.get('mappers') is not None: - if module.params['provider_id'] in ['kerberos', 'sssd']: - module.fail_json(msg='Cannot configure mappers for {type} provider.'.format(type=module.params['provider_id'])) - for change in module.params['mappers']: - change = dict((k, v) for k, v in change.items() if change[k] is not None) - if change.get('id') is None and change.get('name') is None: - module.fail_json(msg='Either `name` or `id` has to be specified on each mapper.') - if cid is None: - old_mapper = {} - elif change.get('id') is not None: - 
old_mapper = kc.get_component(change['id'], realm) - if old_mapper is None: - old_mapper = {} - else: - found = kc.get_components(urlencode(dict(parent=cid, name=change['name'])), realm) - if len(found) > 1: - module.fail_json(msg='Found multiple mappers with name `{name}`. Cannot continue.'.format(name=change['name'])) - if len(found) == 1: - old_mapper = found[0] - else: - old_mapper = {} - new_mapper = old_mapper.copy() - new_mapper.update(change) - if new_mapper != old_mapper: - if changeset.get('mappers') is None: - changeset['mappers'] = list() - changeset['mappers'].append(new_mapper) - - # Prepare the desired values using the existing values (non-existence results in a dict that is save to use as a basis) - desired_comp = before_comp.copy() - desired_comp.update(changeset) - - result['proposed'] = sanitize(changeset) - result['existing'] = sanitize(before_comp) - - # Cater for when it doesn't exist (an empty dict) - if not before_comp: - if state == 'absent': - # Do nothing and exit - if module._diff: - result['diff'] = dict(before='', after='') - result['changed'] = False - result['end_state'] = {} - result['msg'] = 'User federation does not exist; doing nothing.' - module.exit_json(**result) - - # Process a creation - result['changed'] = True - - if module._diff: - result['diff'] = dict(before='', after=sanitize(desired_comp)) - - if module.check_mode: - module.exit_json(**result) - - # create it - desired_comp = desired_comp.copy() - updated_mappers = desired_comp.pop('mappers', []) - after_comp = kc.create_component(desired_comp, realm) - - for mapper in updated_mappers: - found = kc.get_components(urlencode(dict(parent=cid, name=mapper['name'])), realm) - if len(found) > 1: - module.fail_json(msg='Found multiple mappers with name `{name}`. 
Cannot continue.'.format(name=mapper['name'])) - if len(found) == 1: - old_mapper = found[0] - else: - old_mapper = {} - - new_mapper = old_mapper.copy() - new_mapper.update(mapper) - - if new_mapper.get('id') is not None: - kc.update_component(new_mapper, realm) - else: - if new_mapper.get('parentId') is None: - new_mapper['parentId'] = after_comp['id'] - mapper = kc.create_component(new_mapper, realm) - - after_comp['mappers'] = updated_mappers - result['end_state'] = sanitize(after_comp) - - result['msg'] = "User federation {id} has been created".format(id=after_comp['id']) - module.exit_json(**result) - - else: - if state == 'present': - # Process an update - - # no changes - if desired_comp == before_comp: - result['changed'] = False - result['end_state'] = sanitize(desired_comp) - result['msg'] = "No changes required to user federation {id}.".format(id=cid) - module.exit_json(**result) - - # doing an update - result['changed'] = True - - if module._diff: - result['diff'] = dict(before=sanitize(before_comp), after=sanitize(desired_comp)) - - if module.check_mode: - module.exit_json(**result) - - # do the update - desired_comp = desired_comp.copy() - updated_mappers = desired_comp.pop('mappers', []) - kc.update_component(desired_comp, realm) - after_comp = kc.get_component(cid, realm) - - for mapper in updated_mappers: - if mapper.get('id') is not None: - kc.update_component(mapper, realm) - else: - if mapper.get('parentId') is None: - mapper['parentId'] = desired_comp['id'] - mapper = kc.create_component(mapper, realm) - - after_comp['mappers'] = updated_mappers - result['end_state'] = sanitize(after_comp) - - result['msg'] = "User federation {id} has been updated".format(id=cid) - module.exit_json(**result) - - elif state == 'absent': - # Process a deletion - result['changed'] = True - - if module._diff: - result['diff'] = dict(before=sanitize(before_comp), after='') - - if module.check_mode: - module.exit_json(**result) - - # delete it - 
kc.delete_component(cid, realm) - - result['end_state'] = {} - - result['msg'] = "User federation {id} has been deleted".format(id=cid) - - module.exit_json(**result) - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/remote_management/redfish/idrac_redfish_command.py b/plugins/modules/idrac_redfish_command.py similarity index 64% rename from plugins/modules/remote_management/redfish/idrac_redfish_command.py rename to plugins/modules/idrac_redfish_command.py index 82a0f4b09a..b60126764a 100644 --- a/plugins/modules/remote_management/redfish/idrac_redfish_command.py +++ b/plugins/modules/idrac_redfish_command.py @@ -1,20 +1,25 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- # Copyright (c) 2018 Dell EMC Inc. -# GNU General Public License v3.0+ (see LICENSE or https://www.gnu.org/licenses/gpl-3.0.txt) +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: idrac_redfish_command short_description: Manages Out-Of-Band controllers using iDRAC OEM Redfish APIs description: - - Builds Redfish URIs locally and sends them to remote OOB controllers to - perform an action. + - Builds Redfish URIs locally and sends them to remote OOB controllers to perform an action. - For use with Dell iDRAC operations that require Redfish OEM extensions. +extends_documentation_fragment: + - community.general.attributes + - community.general.redfish +attributes: + check_mode: + support: none + diff_mode: + support: none options: category: required: true @@ -56,32 +61,44 @@ options: - ID of the System, Manager or Chassis to modify. 
type: str version_added: '0.2.0' + validate_certs: + version_added: 10.6.0 + ca_path: + version_added: 10.6.0 + ciphers: + version_added: 10.6.0 author: "Jose Delarosa (@jose-delarosa)" -''' +""" -EXAMPLES = ''' - - name: Create BIOS configuration job (schedule BIOS setting update) - community.general.idrac_redfish_command: - category: Systems - command: CreateBiosConfigJob - resource_id: System.Embedded.1 - baseuri: "{{ baseuri }}" - username: "{{ username }}" - password: "{{ password }}" -''' +EXAMPLES = r""" +- name: Create BIOS configuration job (schedule BIOS setting update) + community.general.idrac_redfish_command: + category: Systems + command: CreateBiosConfigJob + resource_id: System.Embedded.1 + baseuri: "{{ baseuri }}" + username: "{{ username }}" + password: "{{ password }}" +""" -RETURN = ''' +RETURN = r""" msg: - description: Message with action result or error description - returned: always - type: str - sample: "Action was successful" -''' + description: Message with action result or error description. + returned: always + type: str + sample: "Action was successful" +return_values: + description: Dictionary containing command-specific response data from the action. 
+ returned: on success + type: dict + version_added: 6.6.0 + sample: {"job_id": "/redfish/v1/Managers/iDRAC.Embedded.1/Jobs/JID_471269252011"} +""" import re from ansible.module_utils.basic import AnsibleModule -from ansible_collections.community.general.plugins.module_utils.redfish_utils import RedfishUtils +from ansible_collections.community.general.plugins.module_utils.redfish_utils import RedfishUtils, REDFISH_COMMON_ARGUMENT_SPEC from ansible.module_utils.common.text.converters import to_native @@ -120,10 +137,9 @@ class IdracRedfishUtils(RedfishUtils): return response response_output = response['resp'].__dict__ - job_id = response_output["headers"]["Location"] - job_id = re.search("JID_.+", job_id).group() - # Currently not passing job_id back to user but patch is coming - return {'ret': True, 'msg': "Config job %s created" % job_id} + job_id_full = response_output["headers"]["Location"] + job_id = re.search("JID_.+", job_id_full).group() + return {'ret': True, 'msg': "Config job %s created" % job_id, 'job_id': job_id_full} CATEGORY_COMMANDS_ALL = { @@ -135,17 +151,20 @@ CATEGORY_COMMANDS_ALL = { def main(): result = {} + return_values = {} + argument_spec = dict( + category=dict(required=True), + command=dict(required=True, type='list', elements='str'), + baseuri=dict(required=True), + username=dict(), + password=dict(no_log=True), + auth_token=dict(no_log=True), + timeout=dict(type='int', default=10), + resource_id=dict() + ) + argument_spec.update(REDFISH_COMMON_ARGUMENT_SPEC) module = AnsibleModule( - argument_spec=dict( - category=dict(required=True), - command=dict(required=True, type='list', elements='str'), - baseuri=dict(required=True), - username=dict(), - password=dict(no_log=True), - auth_token=dict(no_log=True), - timeout=dict(type='int', default=10), - resource_id=dict() - ), + argument_spec, required_together=[ ('username', 'password'), ], @@ -191,7 +210,20 @@ def main(): if category == "Systems": # execute only if we find a System resource + # 
NOTE: Currently overriding the usage of 'data_modification' due to + # how 'resource_id' is processed. In the case of CreateBiosConfigJob, + # we interact with BOTH systems and managers, so you currently cannot + # specify a single 'resource_id' to make both '_find_systems_resource' + # and '_find_managers_resource' return success. Since + # CreateBiosConfigJob doesn't use the matched 'resource_id' for a + # system regardless of what's specified, disabling the 'resource_id' + # inspection for the next call allows a specific manager to be + # specified with 'resource_id'. If we ever need to expand the input + # to inspect a specific system and manager in parallel, this will need + # updates. + rf_utils.data_modification = False result = rf_utils._find_systems_resource() + rf_utils.data_modification = True if result['ret'] is False: module.fail_json(msg=to_native(result['msg'])) @@ -202,11 +234,13 @@ def main(): if result['ret'] is False: module.fail_json(msg=to_native(result['msg'])) result = rf_utils.create_bios_config_job() + if 'job_id' in result: + return_values['job_id'] = result['job_id'] # Return data back or fail with proper message if result['ret'] is True: del result['ret'] - module.exit_json(changed=True, msg='Action was successful') + module.exit_json(changed=True, msg='Action was successful', return_values=return_values) else: module.fail_json(msg=to_native(result['msg'])) diff --git a/plugins/modules/remote_management/redfish/idrac_redfish_config.py b/plugins/modules/idrac_redfish_config.py similarity index 67% rename from plugins/modules/remote_management/redfish/idrac_redfish_config.py rename to plugins/modules/idrac_redfish_config.py index 683538e4b7..e7d6250624 100644 --- a/plugins/modules/remote_management/redfish/idrac_redfish_config.py +++ b/plugins/modules/idrac_redfish_config.py @@ -1,20 +1,25 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- # Copyright (c) 2019 Dell EMC Inc. 
-# GNU General Public License v3.0+ (see LICENSE or https://www.gnu.org/licenses/gpl-3.0.txt) +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: idrac_redfish_config short_description: Manages servers through iDRAC using Dell Redfish APIs description: - - For use with Dell iDRAC operations that require Redfish OEM extensions - - Builds Redfish URIs locally and sends them to remote iDRAC controllers to - set or update a configuration attribute. + - For use with Dell iDRAC operations that require Redfish OEM extensions. + - Builds Redfish URIs locally and sends them to remote iDRAC controllers to set or update a configuration attribute. +extends_documentation_fragment: + - community.general.attributes + - community.general.redfish +attributes: + check_mode: + support: none + diff_mode: + support: none options: category: required: true @@ -25,9 +30,8 @@ options: required: true description: - List of commands to execute on iDRAC. - - I(SetManagerAttributes), I(SetLifecycleControllerAttributes) and - I(SetSystemAttributes) are mutually exclusive commands when C(category) - is I(Manager). + - V(SetManagerAttributes), V(SetLifecycleControllerAttributes) and V(SetSystemAttributes) are mutually exclusive commands + when O(category) is V(Manager). type: list elements: str baseuri: @@ -66,90 +70,96 @@ options: - ID of the System, Manager or Chassis to modify. 
type: str version_added: '0.2.0' + validate_certs: + version_added: 10.6.0 + ca_path: + version_added: 10.6.0 + ciphers: + version_added: 10.6.0 author: "Jose Delarosa (@jose-delarosa)" -''' +""" -EXAMPLES = ''' - - name: Enable NTP and set NTP server and Time zone attributes in iDRAC - community.general.idrac_redfish_config: - category: Manager - command: SetManagerAttributes - resource_id: iDRAC.Embedded.1 - manager_attributes: - NTPConfigGroup.1.NTPEnable: "Enabled" - NTPConfigGroup.1.NTP1: "{{ ntpserver1 }}" - Time.1.Timezone: "{{ timezone }}" - baseuri: "{{ baseuri }}" - username: "{{ username}}" - password: "{{ password }}" +EXAMPLES = r""" +- name: Enable NTP and set NTP server and Time zone attributes in iDRAC + community.general.idrac_redfish_config: + category: Manager + command: SetManagerAttributes + resource_id: iDRAC.Embedded.1 + manager_attributes: + NTPConfigGroup.1.NTPEnable: "Enabled" + NTPConfigGroup.1.NTP1: "{{ ntpserver1 }}" + Time.1.Timezone: "{{ timezone }}" + baseuri: "{{ baseuri }}" + username: "{{ username}}" + password: "{{ password }}" - - name: Enable Syslog and set Syslog servers in iDRAC - community.general.idrac_redfish_config: - category: Manager - command: SetManagerAttributes - resource_id: iDRAC.Embedded.1 - manager_attributes: - SysLog.1.SysLogEnable: "Enabled" - SysLog.1.Server1: "{{ syslog_server1 }}" - SysLog.1.Server2: "{{ syslog_server2 }}" - baseuri: "{{ baseuri }}" - username: "{{ username}}" - password: "{{ password }}" +- name: Enable Syslog and set Syslog servers in iDRAC + community.general.idrac_redfish_config: + category: Manager + command: SetManagerAttributes + resource_id: iDRAC.Embedded.1 + manager_attributes: + SysLog.1.SysLogEnable: "Enabled" + SysLog.1.Server1: "{{ syslog_server1 }}" + SysLog.1.Server2: "{{ syslog_server2 }}" + baseuri: "{{ baseuri }}" + username: "{{ username}}" + password: "{{ password }}" - - name: Configure SNMP community string, port, protocol and trap format - 
community.general.idrac_redfish_config: - category: Manager - command: SetManagerAttributes - resource_id: iDRAC.Embedded.1 - manager_attributes: - SNMP.1.AgentEnable: "Enabled" - SNMP.1.AgentCommunity: "public_community_string" - SNMP.1.TrapFormat: "SNMPv1" - SNMP.1.SNMPProtocol: "All" - SNMP.1.DiscoveryPort: 161 - SNMP.1.AlertPort: 162 - baseuri: "{{ baseuri }}" - username: "{{ username}}" - password: "{{ password }}" +- name: Configure SNMP community string, port, protocol and trap format + community.general.idrac_redfish_config: + category: Manager + command: SetManagerAttributes + resource_id: iDRAC.Embedded.1 + manager_attributes: + SNMP.1.AgentEnable: "Enabled" + SNMP.1.AgentCommunity: "public_community_string" + SNMP.1.TrapFormat: "SNMPv1" + SNMP.1.SNMPProtocol: "All" + SNMP.1.DiscoveryPort: 161 + SNMP.1.AlertPort: 162 + baseuri: "{{ baseuri }}" + username: "{{ username}}" + password: "{{ password }}" - - name: Enable CSIOR - community.general.idrac_redfish_config: - category: Manager - command: SetLifecycleControllerAttributes - resource_id: iDRAC.Embedded.1 - manager_attributes: - LCAttributes.1.CollectSystemInventoryOnRestart: "Enabled" - baseuri: "{{ baseuri }}" - username: "{{ username}}" - password: "{{ password }}" +- name: Enable CSIOR + community.general.idrac_redfish_config: + category: Manager + command: SetLifecycleControllerAttributes + resource_id: iDRAC.Embedded.1 + manager_attributes: + LCAttributes.1.CollectSystemInventoryOnRestart: "Enabled" + baseuri: "{{ baseuri }}" + username: "{{ username}}" + password: "{{ password }}" - - name: Set Power Supply Redundancy Policy to A/B Grid Redundant - community.general.idrac_redfish_config: - category: Manager - command: SetSystemAttributes - resource_id: iDRAC.Embedded.1 - manager_attributes: - ServerPwr.1.PSRedPolicy: "A/B Grid Redundant" - baseuri: "{{ baseuri }}" - username: "{{ username}}" - password: "{{ password }}" -''' +- name: Set Power Supply Redundancy Policy to A/B Grid Redundant + 
community.general.idrac_redfish_config: + category: Manager + command: SetSystemAttributes + resource_id: iDRAC.Embedded.1 + manager_attributes: + ServerPwr.1.PSRedPolicy: "A/B Grid Redundant" + baseuri: "{{ baseuri }}" + username: "{{ username}}" + password: "{{ password }}" +""" -RETURN = ''' +RETURN = r""" msg: - description: Message with action result or error description - returned: always - type: str - sample: "Action was successful" -''' + description: Message with action result or error description. + returned: always + type: str + sample: "Action was successful" +""" from ansible.module_utils.basic import AnsibleModule from ansible.module_utils.common.validation import ( check_mutually_exclusive, check_required_arguments ) -from ansible_collections.community.general.plugins.module_utils.redfish_utils import RedfishUtils +from ansible_collections.community.general.plugins.module_utils.redfish_utils import RedfishUtils, REDFISH_COMMON_ARGUMENT_SPEC from ansible.module_utils.common.text.converters import to_native @@ -195,14 +205,14 @@ class IdracRedfishUtils(RedfishUtils): for attr_name, attr_value in attributes.items(): # Check if attribute exists - if attr_name not in data[u'Attributes']: + if attr_name not in data['Attributes']: # Skip and proceed to next attribute if this isn't valid attrs_bad.update({attr_name: attr_value}) continue # Find out if value is already set to what we want. 
If yes, exclude # those attributes - if data[u'Attributes'][attr_name] == attr_value: + if data['Attributes'][attr_name] == attr_value: attrs_skipped.update({attr_name: attr_value}) else: attrs_to_patch.update({attr_name: attr_value}) @@ -241,18 +251,20 @@ CATEGORY_COMMANDS_MUTUALLY_EXCLUSIVE = { def main(): result = {} + argument_spec = dict( + category=dict(required=True), + command=dict(required=True, type='list', elements='str'), + baseuri=dict(required=True), + username=dict(), + password=dict(no_log=True), + auth_token=dict(no_log=True), + manager_attributes=dict(type='dict', default={}), + timeout=dict(type='int', default=10), + resource_id=dict() + ) + argument_spec.update(REDFISH_COMMON_ARGUMENT_SPEC) module = AnsibleModule( - argument_spec=dict( - category=dict(required=True), - command=dict(required=True, type='list', elements='str'), - baseuri=dict(required=True), - username=dict(), - password=dict(no_log=True), - auth_token=dict(no_log=True), - manager_attributes=dict(type='dict', default={}), - timeout=dict(type='int', default=10), - resource_id=dict() - ), + argument_spec, required_together=[ ('username', 'password'), ], diff --git a/plugins/modules/remote_management/redfish/idrac_redfish_info.py b/plugins/modules/idrac_redfish_info.py similarity index 57% rename from plugins/modules/remote_management/redfish/idrac_redfish_info.py rename to plugins/modules/idrac_redfish_info.py index 7bfd81a78e..309cefc15f 100644 --- a/plugins/modules/remote_management/redfish/idrac_redfish_info.py +++ b/plugins/modules/idrac_redfish_info.py @@ -1,22 +1,25 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- # Copyright (c) 2019 Dell EMC Inc. 
-# GNU General Public License v3.0+ (see LICENSE or https://www.gnu.org/licenses/gpl-3.0.txt) +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: idrac_redfish_info short_description: Gather PowerEdge server information through iDRAC using Redfish APIs description: - - Builds Redfish URIs locally and sends them to remote iDRAC controllers to - get information back. + - Builds Redfish URIs locally and sends them to remote iDRAC controllers to get information back. - For use with Dell EMC iDRAC operations that require Redfish OEM extensions. - - This module was called C(idrac_redfish_facts) before Ansible 2.9, returning C(ansible_facts). - Note that the M(community.general.idrac_redfish_info) module no longer returns C(ansible_facts)! +extends_documentation_fragment: + - community.general.attributes + - community.general.attributes.info_module + - community.general.redfish +attributes: + check_mode: + version_added: 3.3.0 + # This was backported to 2.5.4 and 1.3.11 as well, since this was a bugfix options: category: required: true @@ -27,8 +30,7 @@ options: required: true description: - List of commands to execute on iDRAC. - - C(GetManagerAttributes) returns the list of dicts containing iDRAC, - LifecycleController and System attributes. + - V(GetManagerAttributes) returns the list of dicts containing iDRAC, LifecycleController and System attributes. type: list elements: str baseuri: @@ -54,72 +56,80 @@ options: - Timeout in seconds for HTTP requests to iDRAC. 
default: 10 type: int + validate_certs: + version_added: 10.6.0 + ca_path: + version_added: 10.6.0 + ciphers: + version_added: 10.6.0 author: "Jose Delarosa (@jose-delarosa)" -''' +""" -EXAMPLES = ''' - - name: Get Manager attributes with a default of 20 seconds - community.general.idrac_redfish_info: - category: Manager - command: GetManagerAttributes - baseuri: "{{ baseuri }}" - username: "{{ username }}" - password: "{{ password }}" - timeout: 20 - register: result +EXAMPLES = r""" +- name: Get Manager attributes with a default of 20 seconds + community.general.idrac_redfish_info: + category: Manager + command: GetManagerAttributes + baseuri: "{{ baseuri }}" + username: "{{ username }}" + password: "{{ password }}" + timeout: 20 + register: result - # Examples to display the value of all or a single iDRAC attribute - - name: Store iDRAC attributes as a fact variable - ansible.builtin.set_fact: - idrac_attributes: "{{ result.redfish_facts.entries | selectattr('Id', 'defined') | selectattr('Id', 'equalto', 'iDRACAttributes') | list | first }}" +# Examples to display the value of all or a single iDRAC attribute +- name: Store iDRAC attributes as a fact variable + ansible.builtin.set_fact: + idrac_attributes: "{{ result.redfish_facts.entries | selectattr('Id', 'defined') | selectattr('Id', 'equalto', 'iDRACAttributes') + | list | first }}" - - name: Display all iDRAC attributes - ansible.builtin.debug: - var: idrac_attributes +- name: Display all iDRAC attributes + ansible.builtin.debug: + var: idrac_attributes - - name: Display the value of 'Syslog.1.SysLogEnable' iDRAC attribute - ansible.builtin.debug: - var: idrac_attributes['Syslog.1.SysLogEnable'] +- name: Display the value of 'Syslog.1.SysLogEnable' iDRAC attribute + ansible.builtin.debug: + var: idrac_attributes['Syslog.1.SysLogEnable'] - # Examples to display the value of all or a single LifecycleController attribute - - name: Store LifecycleController attributes as a fact variable - 
ansible.builtin.set_fact: - lc_attributes: "{{ result.redfish_facts.entries | selectattr('Id', 'defined') | selectattr('Id', 'equalto', 'LCAttributes') | list | first }}" +# Examples to display the value of all or a single LifecycleController attribute +- name: Store LifecycleController attributes as a fact variable + ansible.builtin.set_fact: + lc_attributes: "{{ result.redfish_facts.entries | selectattr('Id', 'defined') | selectattr('Id', 'equalto', 'LCAttributes') + | list | first }}" - - name: Display LifecycleController attributes - ansible.builtin.debug: - var: lc_attributes +- name: Display LifecycleController attributes + ansible.builtin.debug: + var: lc_attributes - - name: Display the value of 'CollectSystemInventoryOnRestart' attribute - ansible.builtin.debug: - var: lc_attributes['LCAttributes.1.CollectSystemInventoryOnRestart'] +- name: Display the value of 'CollectSystemInventoryOnRestart' attribute + ansible.builtin.debug: + var: lc_attributes['LCAttributes.1.CollectSystemInventoryOnRestart'] - # Examples to display the value of all or a single System attribute - - name: Store System attributes as a fact variable - ansible.builtin.set_fact: - system_attributes: "{{ result.redfish_facts.entries | selectattr('Id', 'defined') | selectattr('Id', 'equalto', 'SystemAttributes') | list | first }}" +# Examples to display the value of all or a single System attribute +- name: Store System attributes as a fact variable + ansible.builtin.set_fact: + system_attributes: "{{ result.redfish_facts.entries | selectattr('Id', 'defined') | selectattr('Id', 'equalto', 'SystemAttributes') + | list | first }}" - - name: Display System attributes - ansible.builtin.debug: - var: system_attributes +- name: Display System attributes + ansible.builtin.debug: + var: system_attributes - - name: Display the value of 'PSRedPolicy' - ansible.builtin.debug: - var: system_attributes['ServerPwr.1.PSRedPolicy'] +- name: Display the value of 'PSRedPolicy' + ansible.builtin.debug: + var: 
system_attributes['ServerPwr.1.PSRedPolicy'] +""" -''' - -RETURN = ''' +RETURN = r""" msg: - description: different results depending on task - returned: always - type: dict - sample: List of Manager attributes -''' + description: Different results depending on task. + returned: always + type: dict + sample: List of Manager attributes +""" from ansible.module_utils.basic import AnsibleModule -from ansible_collections.community.general.plugins.module_utils.redfish_utils import RedfishUtils +from ansible_collections.community.general.plugins.module_utils.redfish_utils import RedfishUtils, REDFISH_COMMON_ARGUMENT_SPEC from ansible.module_utils.common.text.converters import to_native @@ -139,8 +149,8 @@ class IdracRedfishUtils(RedfishUtils): # Manager attributes are supported as part of iDRAC OEM extension # Attributes are supported only on iDRAC9 try: - for members in data[u'Links'][u'Oem'][u'Dell'][u'DellAttributes']: - attributes_uri = members[u'@odata.id'] + for members in data['Links']['Oem']['Dell']['DellAttributes']: + attributes_uri = members['@odata.id'] response = self.get_request(self.root_uri + attributes_uri) if response['ret'] is False: @@ -172,16 +182,18 @@ CATEGORY_COMMANDS_ALL = { def main(): result = {} + argument_spec = dict( + category=dict(required=True), + command=dict(required=True, type='list', elements='str'), + baseuri=dict(required=True), + username=dict(), + password=dict(no_log=True), + auth_token=dict(no_log=True), + timeout=dict(type='int', default=10) + ) + argument_spec.update(REDFISH_COMMON_ARGUMENT_SPEC) module = AnsibleModule( - argument_spec=dict( - category=dict(required=True), - command=dict(required=True, type='list', elements='str'), - baseuri=dict(required=True), - username=dict(), - password=dict(no_log=True), - auth_token=dict(no_log=True), - timeout=dict(type='int', default=10) - ), + argument_spec, required_together=[ ('username', 'password'), ], diff --git a/plugins/modules/ilo_redfish_command.py 
b/plugins/modules/ilo_redfish_command.py new file mode 100644 index 0000000000..7f20a45631 --- /dev/null +++ b/plugins/modules/ilo_redfish_command.py @@ -0,0 +1,181 @@ +#!/usr/bin/python +# Copyright (c) 2021-2022 Hewlett Packard Enterprise, Inc. All rights reserved. +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later +from __future__ import annotations + +DOCUMENTATION = r""" +module: ilo_redfish_command +short_description: Manages Out-Of-Band controllers using Redfish APIs +version_added: 6.6.0 +description: + - Builds Redfish URIs locally and sends them to remote OOB controllers to perform an action. +attributes: + check_mode: + support: none + diff_mode: + support: none +extends_documentation_fragment: + - community.general.attributes + - community.general.redfish +options: + category: + required: true + description: + - Category to execute on OOB controller. + type: str + choices: ['Systems'] + command: + required: true + description: + - List of commands to execute on OOB controller. + type: list + elements: str + baseuri: + required: true + description: + - Base URI of OOB controller. + type: str + username: + required: false + description: + - Username for authenticating to iLO. + type: str + password: + required: false + description: + - Password for authenticating to iLO. + type: str + auth_token: + required: false + description: + - Security token for authenticating to iLO. + type: str + timeout: + required: false + description: + - Timeout in seconds for HTTP requests to iLO. 
+ default: 60 + type: int + validate_certs: + version_added: 10.6.0 + ca_path: + version_added: 10.6.0 + ciphers: + version_added: 10.6.0 +author: + - Varni H P (@varini-hp) +""" + +EXAMPLES = r""" +- name: Wait for iLO Reboot Completion + community.general.ilo_redfish_command: + category: Systems + command: WaitforiLORebootCompletion + baseuri: "{{ baseuri }}" + username: "{{ username }}" + password: "{{ password }}" +""" + +RETURN = r""" +ilo_redfish_command: + description: Returns the status of the operation performed on the iLO. + type: dict + contains: + WaitforiLORebootCompletion: + description: Returns the output msg and whether the function executed successfully. + type: dict + contains: + ret: + description: Return V(true)/V(false) based on whether the operation was performed successfully. + type: bool + msg: + description: Status of the operation performed on the iLO. + type: str + returned: always +""" + +# More will be added as module features are expanded +CATEGORY_COMMANDS_ALL = { + "Systems": ["WaitforiLORebootCompletion"] +} + +from ansible_collections.community.general.plugins.module_utils.ilo_redfish_utils import iLORedfishUtils +from ansible_collections.community.general.plugins.module_utils.redfish_utils import REDFISH_COMMON_ARGUMENT_SPEC +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.common.text.converters import to_native + + +def main(): + result = {} + argument_spec = dict( + category=dict(required=True, choices=list(CATEGORY_COMMANDS_ALL.keys())), + command=dict(required=True, type='list', elements='str'), + baseuri=dict(required=True), + timeout=dict(type="int", default=60), + username=dict(), + password=dict(no_log=True), + auth_token=dict(no_log=True) + ) + argument_spec.update(REDFISH_COMMON_ARGUMENT_SPEC) + module = AnsibleModule( + argument_spec, + required_together=[ + ('username', 'password'), + ], + required_one_of=[ + ('username', 'auth_token'), + ], + mutually_exclusive=[ + ('username', 
'auth_token'), + ], + supports_check_mode=False + ) + + category = module.params['category'] + command_list = module.params['command'] + + # admin credentials used for authentication + creds = {'user': module.params['username'], + 'pswd': module.params['password'], + 'token': module.params['auth_token']} + + timeout = module.params['timeout'] + + # Build root URI + root_uri = "https://" + module.params['baseuri'] + rf_utils = iLORedfishUtils(creds, root_uri, timeout, module) + + # Check that Category is valid + if category not in CATEGORY_COMMANDS_ALL: + module.fail_json(msg=to_native( + "Invalid Category '%s'. Valid Categories = %s" % (category, list(CATEGORY_COMMANDS_ALL.keys())))) + + # Check that all commands are valid + for cmd in command_list: + # Fail if even one command given is invalid + if cmd not in CATEGORY_COMMANDS_ALL[category]: + module.fail_json( + msg=to_native("Invalid Command '%s'. Valid Commands = %s" % (cmd, CATEGORY_COMMANDS_ALL[category]))) + + if category == "Systems": + # execute only if we find a System resource + + result = rf_utils._find_systems_resource() + if result['ret'] is False: + module.fail_json(msg=to_native(result['msg'])) + + for command in command_list: + if command == "WaitforiLORebootCompletion": + result[command] = rf_utils.wait_for_ilo_reboot_completion() + + # Return data back or fail with proper message + if not result[command]['ret']: + module.fail_json(msg=result) + + changed = result[command].get('changed', False) + module.exit_json(ilo_redfish_command=result, changed=changed) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/remote_management/redfish/ilo_redfish_config.py b/plugins/modules/ilo_redfish_config.py similarity index 62% rename from plugins/modules/remote_management/redfish/ilo_redfish_config.py rename to plugins/modules/ilo_redfish_config.py index 837b2103b8..5cd441827f 100644 --- a/plugins/modules/remote_management/redfish/ilo_redfish_config.py +++ 
b/plugins/modules/ilo_redfish_config.py @@ -1,19 +1,24 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- # Copyright (c) 2021-2022 Hewlett Packard Enterprise, Inc. All rights reserved. -# GNU General Public License v3.0+ (see LICENSE or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import absolute_import, division, print_function -__metaclass__ = type +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later +from __future__ import annotations -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: ilo_redfish_config short_description: Sets or updates configuration attributes on HPE iLO with Redfish OEM extensions version_added: 4.2.0 description: - - Builds Redfish URIs locally and sends them to iLO to - set or update a configuration attribute. + - Builds Redfish URIs locally and sends them to iLO to set or update a configuration attribute. - For use with HPE iLO operations that require Redfish OEM extensions. +extends_documentation_fragment: + - community.general.attributes + - community.general.redfish +attributes: + check_mode: + support: none + diff_mode: + support: none options: category: required: true @@ -59,74 +64,82 @@ options: description: - Value of the attribute to be configured. 
type: str + validate_certs: + version_added: 10.6.0 + ca_path: + version_added: 10.6.0 + ciphers: + version_added: 10.6.0 author: - - "Bhavya B (@bhavya06)" -''' + - "Bhavya B (@bhavya06)" +""" -EXAMPLES = ''' - - name: Disable WINS Registration - community.general.ilo_redfish_config: - category: Manager - command: SetWINSReg - baseuri: 15.X.X.X - username: Admin - password: Testpass123 - attribute_name: WINSRegistration +EXAMPLES = r""" +- name: Disable WINS Registration + community.general.ilo_redfish_config: + category: Manager + command: SetWINSReg + baseuri: 15.X.X.X + username: Admin + password: Testpass123 + attribute_name: WINSRegistration - - name: Set Time Zone - community.general.ilo_redfish_config: - category: Manager - command: SetTimeZone - baseuri: 15.X.X.X - username: Admin - password: Testpass123 - attribute_name: TimeZone - attribute_value: Chennai +- name: Set Time Zone + community.general.ilo_redfish_config: + category: Manager + command: SetTimeZone + baseuri: 15.X.X.X + username: Admin + password: Testpass123 + attribute_name: TimeZone + attribute_value: Chennai - - name: Set NTP Servers - community.general.ilo_redfish_config: - category: Manager - command: SetNTPServers - baseuri: 15.X.X.X - username: Admin - password: Testpass123 - attribute_name: StaticNTPServers - attribute_value: X.X.X.X +- name: Set NTP Servers + community.general.ilo_redfish_config: + category: Manager + command: SetNTPServers + baseuri: 15.X.X.X + username: Admin + password: Testpass123 + attribute_name: StaticNTPServers + attribute_value: X.X.X.X +""" -''' - -RETURN = ''' +RETURN = r""" msg: - description: Message with action result or error description - returned: always - type: str - sample: "Action was successful" -''' + description: Message with action result or error description. 
+ returned: always + type: str + sample: "Action was successful" +""" CATEGORY_COMMANDS_ALL = { "Manager": ["SetTimeZone", "SetDNSserver", "SetDomainName", "SetNTPServers", "SetWINSReg"] } from ansible_collections.community.general.plugins.module_utils.ilo_redfish_utils import iLORedfishUtils +from ansible_collections.community.general.plugins.module_utils.redfish_utils import REDFISH_COMMON_ARGUMENT_SPEC from ansible.module_utils.basic import AnsibleModule from ansible.module_utils.common.text.converters import to_native def main(): result = {} + argument_spec = dict( + category=dict(required=True, choices=list( + CATEGORY_COMMANDS_ALL.keys())), + command=dict(required=True, type='list', elements='str'), + baseuri=dict(required=True), + username=dict(), + password=dict(no_log=True), + auth_token=dict(no_log=True), + attribute_name=dict(required=True), + attribute_value=dict(type='str'), + timeout=dict(type='int', default=10) + ) + argument_spec.update(REDFISH_COMMON_ARGUMENT_SPEC) module = AnsibleModule( - argument_spec=dict( - category=dict(required=True, choices=list( - CATEGORY_COMMANDS_ALL.keys())), - command=dict(required=True, type='list', elements='str'), - baseuri=dict(required=True), - username=dict(), - password=dict(no_log=True), - auth_token=dict(no_log=True), - attribute_name=dict(required=True), - attribute_value=dict(), - timeout=dict(type='int', default=10) - ), + argument_spec, required_together=[ ('username', 'password'), ], diff --git a/plugins/modules/remote_management/redfish/ilo_redfish_info.py b/plugins/modules/ilo_redfish_info.py similarity index 61% rename from plugins/modules/remote_management/redfish/ilo_redfish_info.py rename to plugins/modules/ilo_redfish_info.py index 5f5be4f835..6eb7d7b3f4 100644 --- a/plugins/modules/remote_management/redfish/ilo_redfish_info.py +++ b/plugins/modules/ilo_redfish_info.py @@ -1,19 +1,20 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- # Copyright (c) 2021-2022 Hewlett Packard Enterprise, Inc. 
All rights reserved. -# GNU General Public License v3.0+ (see LICENSE or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import absolute_import, division, print_function -__metaclass__ = type +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later +from __future__ import annotations -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: ilo_redfish_info short_description: Gathers server information through iLO using Redfish APIs version_added: 4.2.0 description: - - Builds Redfish URIs locally and sends them to iLO to - get information back. + - Builds Redfish URIs locally and sends them to iLO to get information back. - For use with HPE iLO operations that require Redfish OEM extensions. +extends_documentation_fragment: + - community.general.attributes + - community.general.attributes.info_module + - community.general.redfish options: category: required: true @@ -49,52 +50,58 @@ options: - Timeout in seconds for HTTP requests to iLO. default: 10 type: int + validate_certs: + version_added: 10.6.0 + ca_path: + version_added: 10.6.0 + ciphers: + version_added: 10.6.0 author: - - "Bhavya B (@bhavya06)" -''' + - "Bhavya B (@bhavya06)" +""" -EXAMPLES = ''' - - name: Get iLO Sessions - community.general.ilo_redfish_info: - category: Sessions - command: GetiLOSessions - baseuri: "{{ baseuri }}" - username: "{{ username }}" - password: "{{ password }}" - register: result_sessions -''' +EXAMPLES = r""" +- name: Get iLO Sessions + community.general.ilo_redfish_info: + category: Sessions + command: GetiLOSessions + baseuri: "{{ baseuri }}" + username: "{{ username }}" + password: "{{ password }}" + register: result_sessions +""" -RETURN = ''' +RETURN = r""" ilo_redfish_info: - description: Returns iLO sessions. - type: dict - contains: - GetiLOSessions: - description: Returns the iLO session msg and whether the function executed successfully. 
- type: dict - contains: - ret: - description: Check variable to see if the information was succesfully retrived. - type: bool - msg: - description: Information of all active iLO sessions. - type: list - elements: dict - contains: - Description: - description: Provides a description of the resource. - type: str - Id: - description: The sessionId. - type: str - Name: - description: The name of the resource. - type: str - UserName: - description: Name to use to log in to the management processor. - type: str - returned: always -''' + description: Returns iLO sessions. + type: dict + contains: + GetiLOSessions: + description: Returns the iLO session msg and whether the function executed successfully. + type: dict + contains: + ret: + description: Check variable to see if the information was successfully retrieved. + type: bool + msg: + description: Information of all active iLO sessions. + type: list + elements: dict + contains: + Description: + description: Provides a description of the resource. + type: str + Id: + description: The sessionId. + type: str + Name: + description: The name of the resource. + type: str + UserName: + description: Name to use to log in to the management processor. 
+ type: str + returned: always +""" CATEGORY_COMMANDS_ALL = { "Sessions": ["GetiLOSessions"] @@ -105,23 +112,25 @@ CATEGORY_COMMANDS_DEFAULT = { } from ansible.module_utils.basic import AnsibleModule -from ansible.module_utils.common.text.converters import to_native from ansible_collections.community.general.plugins.module_utils.ilo_redfish_utils import iLORedfishUtils +from ansible_collections.community.general.plugins.module_utils.redfish_utils import REDFISH_COMMON_ARGUMENT_SPEC def main(): result = {} category_list = [] + argument_spec = dict( + category=dict(required=True, type='list', elements='str'), + command=dict(required=True, type='list', elements='str'), + baseuri=dict(required=True), + username=dict(), + password=dict(no_log=True), + auth_token=dict(no_log=True), + timeout=dict(type='int', default=10) + ) + argument_spec.update(REDFISH_COMMON_ARGUMENT_SPEC) module = AnsibleModule( - argument_spec=dict( - category=dict(required=True, type='list', elements='str'), - command=dict(required=True, type='list', elements='str'), - baseuri=dict(required=True), - username=dict(), - password=dict(no_log=True), - auth_token=dict(no_log=True), - timeout=dict(type='int', default=10) - ), + argument_spec, required_together=[ ('username', 'password'), ], diff --git a/plugins/modules/remote_management/imc/imc_rest.py b/plugins/modules/imc_rest.py similarity index 69% rename from plugins/modules/remote_management/imc/imc_rest.py rename to plugins/modules/imc_rest.py index b685e96b82..ef543c62e0 100644 --- a/plugins/modules/remote_management/imc/imc_rest.py +++ b/plugins/modules/imc_rest.py @@ -1,98 +1,102 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- -# (c) 2017, Dag Wieers -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# Copyright (c) 2017, Dag Wieers +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -from 
__future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = r''' ---- +DOCUMENTATION = r""" module: imc_rest short_description: Manage Cisco IMC hardware through its REST API description: -- Provides direct access to the Cisco IMC REST API. -- Perform any configuration changes and actions that the Cisco IMC supports. -- More information about the IMC REST API is available from - U(http://www.cisco.com/c/en/us/td/docs/unified_computing/ucs/c/sw/api/3_0/b_Cisco_IMC_api_301.html) + - Provides direct access to the Cisco IMC REST API. + - Perform any configuration changes and actions that the Cisco IMC supports. + - More information about the IMC REST API is available from + U(http://www.cisco.com/c/en/us/td/docs/unified_computing/ucs/c/sw/api/3_0/b_Cisco_IMC_api_301.html). author: -- Dag Wieers (@dagwieers) + - Dag Wieers (@dagwieers) requirements: -- lxml -- xmljson >= 0.1.8 + - lxml + - xmljson >= 0.1.8 +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: none options: hostname: description: - - IP Address or hostname of Cisco IMC, resolvable by Ansible control host. + - IP Address or hostname of Cisco IMC, resolvable by Ansible control host. required: true - aliases: [ host, ip ] + aliases: [host, ip] type: str username: description: - - Username used to login to the switch. + - Username used to login to the switch. default: admin - aliases: [ user ] + aliases: [user] type: str password: description: - - The password to use for authentication. + - The password to use for authentication. default: password type: str path: description: - - Name of the absolute path of the filename that includes the body - of the http request being sent to the Cisco IMC REST API. - - Parameter C(path) is mutual exclusive with parameter C(content). 
- aliases: [ 'src', 'config_file' ] + - Name of the absolute path of the filename that includes the body of the http request being sent to the Cisco IMC REST + API. + - Parameter O(path) is mutual exclusive with parameter O(content). + aliases: ['src', 'config_file'] type: path content: description: - - When used instead of C(path), sets the content of the API requests directly. - - This may be convenient to template simple requests, for anything complex use the M(ansible.builtin.template) module. - - You can collate multiple IMC XML fragments and they will be processed sequentially in a single stream, - the Cisco IMC output is subsequently merged. - - Parameter C(content) is mutual exclusive with parameter C(path). + - When used instead of O(path), sets the content of the API requests directly. + - This may be convenient to template simple requests, for anything complex use the M(ansible.builtin.template) module. + - You can collate multiple IMC XML fragments and they are processed sequentially in a single stream, the Cisco IMC output + is subsequently merged. + - Parameter O(content) is mutual exclusive with parameter O(path). type: str protocol: description: - - Connection protocol to use. + - Connection protocol to use. default: https - choices: [ http, https ] + choices: [http, https] type: str timeout: description: - - The socket level timeout in seconds. - - This is the time that every single connection (every fragment) can spend. - If this C(timeout) is reached, the module will fail with a - C(Connection failure) indicating that C(The read operation timed out). + - The socket level timeout in seconds. + - This is the time that every single connection (every fragment) can spend. If this O(timeout) is reached, the module + fails with a C(Connection failure) indicating that C(The read operation timed out). default: 60 type: int validate_certs: description: - - If C(no), SSL certificates will not be validated. 
- - This should only set to C(no) used on personally controlled sites using self-signed certificates. + - If V(false), SSL certificates are not validated. + - This should only set to V(false) used on personally controlled sites using self-signed certificates. type: bool - default: 'yes' + default: true notes: -- The XML fragments don't need an authentication cookie, this is injected by the module automatically. -- The Cisco IMC XML output is being translated to JSON using the Cobra convention. -- Any configConfMo change requested has a return status of 'modified', even if there was no actual change - from the previous configuration. As a result, this module will always report a change on subsequent runs. - In case this behaviour is fixed in a future update to Cisco IMC, this module will automatically adapt. -- If you get a C(Connection failure) related to C(The read operation timed out) increase the C(timeout) - parameter. Some XML fragments can take longer than the default timeout. -- More information about the IMC REST API is available from - U(http://www.cisco.com/c/en/us/td/docs/unified_computing/ucs/c/sw/api/3_0/b_Cisco_IMC_api_301.html) -''' + - The XML fragments do not need an authentication cookie, this is injected by the module automatically. + - The Cisco IMC XML output is being translated to JSON using the Cobra convention. + - Any configConfMo change requested has a return status of C(modified), even if there was no actual change from the previous + configuration. As a result, this module always reports a change on subsequent runs. In case this behaviour is fixed in + a future update to Cisco IMC, this module is meant to automatically adapt. + - If you get a C(Connection failure) related to C(The read operation timed out) increase the O(timeout) parameter. Some + XML fragments can take longer than the default timeout. 
+ - More information about the IMC REST API is available from + U(http://www.cisco.com/c/en/us/td/docs/unified_computing/ucs/c/sw/api/3_0/b_Cisco_IMC_api_301.html). +""" -EXAMPLES = r''' +EXAMPLES = r""" - name: Power down server community.general.imc_rest: hostname: '{{ imc_hostname }}' username: '{{ imc_username }}' password: '{{ imc_password }}' - validate_certs: no + validate_certs: false # only do this when you trust the network! content: | @@ -104,7 +108,7 @@ EXAMPLES = r''' hostname: '{{ imc_hostname }}' username: '{{ imc_username }}' password: '{{ imc_password }}' - validate_certs: no + validate_certs: false # only do this when you trust the network! timeout: 120 content: | @@ -129,7 +133,7 @@ EXAMPLES = r''' hostname: '{{ imc_hostname }}' username: '{{ imc_username }}' password: '{{ imc_password }}' - validate_certs: no + validate_certs: false # only do this when you trust the network! content: | @@ -147,7 +151,7 @@ EXAMPLES = r''' hostname: '{{ imc_host }}' username: '{{ imc_username }}' password: '{{ imc_password }}' - validate_certs: no + validate_certs: false # only do this when you trust the network! content: | @@ -159,11 +163,11 @@ EXAMPLES = r''' hostname: '{{ imc_host }}' username: '{{ imc_username }}' password: '{{ imc_password }}' - validate_certs: no + validate_certs: false # only do this when you trust the network! content: | - - - + + + delegate_to: localhost - name: Disable HTTP and increase session timeout to max value 10800 secs @@ -171,22 +175,22 @@ EXAMPLES = r''' hostname: '{{ imc_host }}' username: '{{ imc_username }}' password: '{{ imc_password }}' - validate_certs: no + validate_certs: false # only do this when you trust the network! timeout: 120 content: | - - - + + + - - - + + + delegate_to: localhost -''' +""" -RETURN = r''' +RETURN = r""" aaLogin: - description: Cisco IMC XML output for the login, translated to JSON using Cobra convention + description: Cisco IMC XML output for the login, translated to JSON using Cobra convention. 
returned: success type: dict sample: | @@ -200,27 +204,27 @@ aaLogin: "response": "yes" } configConfMo: - description: Cisco IMC XML output for any configConfMo XML fragments, translated to JSON using Cobra convention + description: Cisco IMC XML output for any configConfMo XML fragments, translated to JSON using Cobra convention. returned: success type: dict sample: | elapsed: - description: Elapsed time in seconds + description: Elapsed time in seconds. returned: always type: int sample: 31 response: - description: HTTP response message, including content length + description: HTTP response message, including content length. returned: always type: str sample: OK (729 bytes) status: - description: The HTTP response status code + description: The HTTP response status code. returned: always type: dict sample: 200 error: - description: Cisco IMC XML error output for last request, translated to JSON using Cobra convention + description: Cisco IMC XML error output for last request, translated to JSON using Cobra convention. returned: failed type: dict sample: | @@ -232,24 +236,24 @@ error: "response": "yes" } error_code: - description: Cisco IMC error code + description: Cisco IMC error code. returned: failed type: str sample: ERR-xml-parse-error error_text: - description: Cisco IMC error message + description: Cisco IMC error message. returned: failed type: str sample: | XML PARSING ERROR: Element 'computeRackUnit', attribute 'admin_Power': The attribute 'admin_Power' is not allowed. input: - description: RAW XML input sent to the Cisco IMC, causing the error + description: RAW XML input sent to the Cisco IMC, causing the error. returned: failed type: str sample: | output: - description: RAW XML output received from the Cisco IMC, with error details + description: RAW XML output received from the Cisco IMC, with error details. 
returned: failed type: str sample: > @@ -257,13 +261,12 @@ output: response="yes" errorCode="ERR-xml-parse-error" invocationResult="594" - errorDescr="XML PARSING ERROR: Element 'computeRackUnit', attribute 'admin_Power': The attribute 'admin_Power' is not allowed.\n"/> -''' + errorDescr="XML PARSING ERROR: Element 'computeRackUnit', attribute 'admin_Power': The attribute 'admin_Power' is not allowed.\n" /> +""" -import datetime import os import traceback -from functools import partial +from itertools import zip_longest LXML_ETREE_IMP_ERR = None try: @@ -282,9 +285,12 @@ except ImportError: HAS_XMLJSON_COBRA = False from ansible.module_utils.basic import AnsibleModule, missing_required_lib -from ansible.module_utils.six.moves import zip_longest from ansible.module_utils.urls import fetch_url +from ansible_collections.community.general.plugins.module_utils.datetime import ( + now, +) + def imc_response(module, rawoutput, rawinput=''): ''' Handle IMC returned data ''' @@ -313,8 +319,7 @@ def merge(one, two): ''' Merge two complex nested datastructures into one''' if isinstance(one, dict) and isinstance(two, dict): copy = dict(one) - # copy.update({key: merge(one.get(key, None), two[key]) for key in two}) - copy.update(dict((key, merge(one.get(key, None), two[key])) for key in two)) + copy.update({key: merge(one.get(key, None), two[key]) for key in two}) return copy elif isinstance(one, list) and isinstance(two, list): @@ -368,14 +373,14 @@ def main(): else: module.fail_json(msg='Cannot find/access path:\n%s' % path) - start = datetime.datetime.utcnow() + start = now() # Perform login first url = '%s://%s/nuova' % (protocol, hostname) data = '' % (username, password) resp, auth = fetch_url(module, url, data=data, method='POST', timeout=timeout) if resp is None or auth['status'] != 200: - result['elapsed'] = (datetime.datetime.utcnow() - start).seconds + result['elapsed'] = (now() - start).seconds module.fail_json(msg='Task failed with error %(status)s: %(msg)s' % 
auth, **result) result.update(imc_response(module, resp.read())) @@ -408,7 +413,7 @@ def main(): # Perform actual request resp, info = fetch_url(module, url, data=data, method='POST', timeout=timeout) if resp is None or info['status'] != 200: - result['elapsed'] = (datetime.datetime.utcnow() - start).seconds + result['elapsed'] = (now() - start).seconds module.fail_json(msg='Task failed with error %(status)s: %(msg)s' % info, **result) # Merge results with previous results @@ -424,7 +429,7 @@ def main(): result['changed'] = ('modified' in results) # Report success - result['elapsed'] = (datetime.datetime.utcnow() - start).seconds + result['elapsed'] = (now() - start).seconds module.exit_json(**result) finally: logout(module, url, cookie, timeout) diff --git a/plugins/modules/cloud/smartos/imgadm.py b/plugins/modules/imgadm.py similarity index 74% rename from plugins/modules/cloud/smartos/imgadm.py rename to plugins/modules/imgadm.py index 18a67d014a..da016f8597 100644 --- a/plugins/modules/cloud/smartos/imgadm.py +++ b/plugins/modules/imgadm.py @@ -1,65 +1,66 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- -# (c) 2016, 2017 Jasper Lievisse Adriaanse -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# Copyright (c) 2016, 2017 Jasper Lievisse Adriaanse +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: imgadm short_description: Manage SmartOS images description: - - Manage SmartOS virtual machine images through imgadm(1M) + - Manage SmartOS virtual machine images through imgadm(1M). 
author: Jasper Lievisse Adriaanse (@jasperla) +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: none + diff_mode: + support: none options: - force: - required: false - type: bool - description: - - Force a given operation (where supported by imgadm(1M)). - pool: - required: false - default: zones - description: - - zpool to import to or delete images from. - type: str - source: - required: false - description: - - URI for the image source. - type: str - state: - required: true - choices: [ present, absent, deleted, imported, updated, vacuumed ] - description: - - State the object operated on should be in. C(imported) is an alias for - for C(present) and C(deleted) for C(absent). When set to C(vacuumed) - and C(uuid) to C(*), it will remove all unused images. - type: str + force: + required: false + type: bool + description: + - Force a given operation (where supported by imgadm(1M)). + pool: + required: false + default: zones + description: + - The zpool to import to or delete images from. + type: str + source: + required: false + description: + - URI for the image source. + type: str + state: + required: true + choices: [present, absent, deleted, imported, updated, vacuumed] + description: + - State the object operated on should be in. V(imported) is an alias for for V(present) and V(deleted) for V(absent). + When set to V(vacuumed) and O(uuid=*), it removes all unused images. + type: str - type: - required: false - choices: [ imgapi, docker, dsapi ] - default: imgapi - description: - - Type for image sources. - type: str + type: + required: false + choices: [imgapi, docker, dsapi] + default: imgapi + description: + - Type for image sources. + type: str - uuid: - required: false - description: - - Image UUID. Can either be a full UUID or C(*) for all images. - type: str + uuid: + required: false + description: + - Image UUID. Can either be a full UUID or V(*) for all images. 
+ type: str +""" -requirements: - - python >= 2.6 -''' - -EXAMPLES = ''' +EXAMPLES = r""" - name: Import an image community.general.imgadm: uuid: '70e3ae72-96b6-11e6-9056-9737fd4d0764' @@ -95,25 +96,25 @@ EXAMPLES = ''' community.general.imgadm: source: 'https://docker.io' state: absent -''' +""" -RETURN = ''' +RETURN = r""" source: - description: Source that is managed. - returned: When not managing an image. - type: str - sample: https://datasets.project-fifo.net + description: Source that is managed. + returned: When not managing an image. + type: str + sample: https://datasets.project-fifo.net uuid: - description: UUID for an image operated on. - returned: When not managing an image source. - type: str - sample: 70e3ae72-96b6-11e6-9056-9737fd4d0764 + description: UUID for an image operated on. + returned: When not managing an image source. + type: str + sample: 70e3ae72-96b6-11e6-9056-9737fd4d0764 state: - description: State of the target, after execution. - returned: success - type: str - sample: 'present' -''' + description: State of the target, after execution. + returned: success + type: str + sample: 'present' +""" import re @@ -134,7 +135,7 @@ class Imgadm(object): self.uuid = module.params['uuid'] # Since there are a number of (natural) aliases, prevent having to look - # them up everytime we operate on `state`. + # them up every time we operate on `state`. if self.params['state'] in ['present', 'imported', 'updated']: self.present = True else: @@ -155,9 +156,9 @@ class Imgadm(object): def update_images(self): if self.uuid == '*': - cmd = '{0} update'.format(self.cmd) + cmd = [self.cmd, 'update'] else: - cmd = '{0} update {1}'.format(self.cmd, self.uuid) + cmd = [self.cmd, 'update', self.uuid] (rc, stdout, stderr) = self.module.run_command(cmd) @@ -166,7 +167,7 @@ class Imgadm(object): # There is no feedback from imgadm(1M) to determine if anything # was actually changed. So treat this as an 'always-changes' operation. 
- # Note that 'imgadm -v' produces unparseable JSON... + # Note that 'imgadm -v' produces unparsable JSON... self.changed = True def manage_sources(self): @@ -174,13 +175,13 @@ class Imgadm(object): source = self.params['source'] imgtype = self.params['type'] - cmd = '{0} sources'.format(self.cmd) + cmd = [self.cmd, 'sources'] if force: - cmd += ' -f' + cmd = cmd + ['-f'] if self.present: - cmd = '{0} -a {1} -t {2}'.format(cmd, source, imgtype) + cmd = cmd + ['-a', source, '-t', imgtype] (rc, stdout, stderr) = self.module.run_command(cmd) if rc != 0: @@ -219,7 +220,7 @@ class Imgadm(object): if state == 'vacuumed': # Unconditionally pass '--force', otherwise we're prompted with 'y/N' - cmd = '{0} vacuum -f'.format(self.cmd) + cmd = [self.cmd, 'vacuum', '-f'] (rc, stdout, stderr) = self.module.run_command(cmd) @@ -231,8 +232,7 @@ class Imgadm(object): else: self.changed = True if self.present: - cmd = '{0} import -P {1} -q {2}'.format(self.cmd, pool, self.uuid) - + cmd = [self.cmd, 'import', '-P', pool, '-q'] + ([self.uuid] if self.uuid else []) (rc, stdout, stderr) = self.module.run_command(cmd) if rc != 0: @@ -250,8 +250,7 @@ class Imgadm(object): if re.match(regex, stdout.splitlines()[-1]): self.changed = True else: - cmd = '{0} delete -P {1} {2}'.format(self.cmd, pool, self.uuid) - + cmd = [self.cmd, 'delete', '-P', pool] + ([self.uuid] if self.uuid else []) (rc, stdout, stderr) = self.module.run_command(cmd) regex = '.*ImageNotInstalled.*' diff --git a/plugins/modules/net_tools/infinity/infinity.py b/plugins/modules/infinity.py similarity index 91% rename from plugins/modules/net_tools/infinity/infinity.py rename to plugins/modules/infinity.py index bd12f85af2..7f568faa0d 100644 --- a/plugins/modules/net_tools/infinity/infinity.py +++ b/plugins/modules/infinity.py @@ -1,19 +1,25 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- -# Copyright: (c) 2017, -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# Copyright (c) 
2017, +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = r''' +DOCUMENTATION = r""" module: infinity short_description: Manage Infinity IPAM using Rest API description: - Manage Infinity IPAM using REST API. author: - Meirong Liu (@MeganLiu) +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: none + diff_mode: + support: none options: server_ip: description: @@ -33,56 +39,50 @@ options: required: true action: description: - - Action to perform + - Action to perform. type: str required: true - choices: [add_network, delete_network, get_network, get_network_id, release_ip, release_network, reserve_network, reserve_next_available_ip ] + choices: [add_network, delete_network, get_network, get_network_id, release_ip, release_network, reserve_network, reserve_next_available_ip] network_id: description: - Network ID. type: str - default: '' ip_address: description: - IP Address for a reservation or a release. type: str - default: '' network_address: description: - - Network address with CIDR format (e.g., 192.168.310.0). + - Network address with CIDR format (for example V(192.168.310.0)). type: str - default: '' network_size: description: - - Network bitmask (e.g. 255.255.255.220) or CIDR format (e.g., /26). + - Network bitmask (for example V(255.255.255.220) or CIDR format V(/26)). type: str - default: '' network_name: description: - The name of a network. type: str - default: '' network_location: description: - - The parent network id for a given network. + - The parent network ID for a given network. type: int default: -1 network_type: description: - - Network type defined by Infinity + - Network type defined by Infinity. 
type: str - choices: [ lan, shared_lan, supernet ] + choices: [lan, shared_lan, supernet] default: lan network_family: description: - - Network family defined by Infinity, e.g. IPv4, IPv6 and Dual stack + - Network family defined by Infinity, for example V(IPv4), V(IPv6) and V(Dual stack). type: str - choices: [ '4', '6', dual ] + choices: ['4', '6', dual] default: '4' -''' +""" -EXAMPLES = r''' ---- +EXAMPLES = r""" - hosts: localhost connection: local strategy: debug @@ -99,28 +99,48 @@ EXAMPLES = r''' network_id: 1201 network_size: /28 register: infinity -''' +""" -RETURN = r''' +RETURN = r""" network_id: - description: id for a given network - returned: success - type: str - sample: '1501' + description: ID for a given network. + returned: success + type: str + sample: '1501' ip_info: - description: when reserve next available ip address from a network, the ip address info ) is returned. - returned: success - type: str - sample: '{"address": "192.168.10.3", "hostname": "", "FQDN": "", "domainname": "", "id": 3229}' + description: + - When reserve next available IP address from a network, the IP address info is returned. + - Please note that the value is a B(string) containing JSON data. + returned: success + type: str + sample: >- + { + "address": "192.168.10.3", + "hostname": "", + "FQDN": "", + "domainname": "", + "id": 3229 + } network_info: - description: when reserving a LAN network from a Infinity supernet by providing network_size, the information about the reserved network is returned. 
- returned: success - type: str - sample: {"network_address": "192.168.10.32/28","network_family": "4", "network_id": 3102, - "network_size": null,"description": null,"network_location": "3085", - "ranges": { "id": 0, "name": null,"first_ip": null,"type": null,"last_ip": null}, - "network_type": "lan","network_name": "'reserve_new_ansible_network'"} -''' + description: + - When reserving a LAN network from a Infinity supernet by providing network_size, the information about the reserved + network is returned. + - Please note that the value is a B(string) containing JSON data. + returned: success + type: str + sample: >- + { + "network_address": "192.168.10.32/28", + "network_family": "4", + "network_id": 3102, + "network_size": null, + "description": null, + "network_location": "3085", + "ranges": {"id": 0, "name": null, "first_ip": null, "type": null, "last_ip": null}, + "network_type": "lan", + "network_name": "'reserve_new_ansible_network'" + } +""" from ansible.module_utils.basic import AnsibleModule, json diff --git a/plugins/modules/database/influxdb/influxdb_database.py b/plugins/modules/influxdb_database.py similarity index 68% rename from plugins/modules/database/influxdb/influxdb_database.py rename to plugins/modules/influxdb_database.py index 6601b30124..600599ab0c 100644 --- a/plugins/modules/database/influxdb/influxdb_database.py +++ b/plugins/modules/influxdb_database.py @@ -1,67 +1,69 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- -# Copyright: (c) 2016, Kamil Szczygiel -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# Copyright (c) 2016, Kamil Szczygiel +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = r''' ---- +DOCUMENTATION = r""" module: 
influxdb_database short_description: Manage InfluxDB databases description: - - Manage InfluxDB databases. + - Manage InfluxDB databases. author: "Kamil Szczygiel (@kamsz)" requirements: - - "python >= 2.6" - - "influxdb >= 0.9" - - requests + - "influxdb >= 0.9" + - requests +attributes: + check_mode: + support: full + diff_mode: + support: none options: - database_name: - description: - - Name of the database. - required: true - type: str - state: - description: - - Determines if the database should be created or destroyed. - choices: [ absent, present ] - default: present - type: str + database_name: + description: + - Name of the database. + required: true + type: str + state: + description: + - Determines if the database should be created or destroyed. + choices: [absent, present] + default: present + type: str extends_documentation_fragment: -- community.general.influxdb + - community.general.influxdb + - community.general.attributes +""" -''' - -EXAMPLES = r''' +EXAMPLES = r""" # Example influxdb_database command from Ansible Playbooks - name: Create database community.general.influxdb_database: - hostname: "{{influxdb_ip_address}}" - database_name: "{{influxdb_database_name}}" + hostname: "{{influxdb_ip_address}}" + database_name: "{{influxdb_database_name}}" - name: Destroy database community.general.influxdb_database: - hostname: "{{influxdb_ip_address}}" - database_name: "{{influxdb_database_name}}" - state: absent + hostname: "{{influxdb_ip_address}}" + database_name: "{{influxdb_database_name}}" + state: absent - name: Create database using custom credentials community.general.influxdb_database: - hostname: "{{influxdb_ip_address}}" - username: "{{influxdb_username}}" - password: "{{influxdb_password}}" - database_name: "{{influxdb_database_name}}" - ssl: yes - validate_certs: yes -''' + hostname: "{{influxdb_ip_address}}" + username: "{{influxdb_username}}" + password: "{{influxdb_password}}" + database_name: "{{influxdb_database_name}}" + ssl: true + 
validate_certs: true +""" -RETURN = r''' +RETURN = r""" # only defaults -''' +""" try: import requests.exceptions diff --git a/plugins/modules/database/influxdb/influxdb_query.py b/plugins/modules/influxdb_query.py similarity index 84% rename from plugins/modules/database/influxdb/influxdb_query.py rename to plugins/modules/influxdb_query.py index bff6fa989b..1707d401f2 100644 --- a/plugins/modules/database/influxdb/influxdb_query.py +++ b/plugins/modules/influxdb_query.py @@ -1,22 +1,24 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- # Copyright (c) 2017, René Moser -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = r''' ---- +DOCUMENTATION = r""" module: influxdb_query short_description: Query data points from InfluxDB description: - Query data points from InfluxDB. author: "René Moser (@resmo)" requirements: - - "python >= 2.6" - "influxdb >= 0.9" +attributes: + check_mode: + support: full + diff_mode: + support: none options: query: description: @@ -29,11 +31,11 @@ options: required: true type: str extends_documentation_fragment: -- community.general.influxdb + - community.general.influxdb + - community.general.attributes +""" -''' - -EXAMPLES = r''' +EXAMPLES = r""" - name: Query connections community.general.influxdb_query: hostname: "{{ influxdb_ip_address }}" @@ -51,17 +53,17 @@ EXAMPLES = r''' - name: Print results from the query ansible.builtin.debug: var: connection.query_results -''' +""" -RETURN = r''' +RETURN = r""" query_results: - description: Result from the query + description: Result from the query. 
returned: success type: list sample: - mean: 1245.5333333333333 time: "1970-01-01T00:00:00Z" -''' +""" from ansible.module_utils.basic import AnsibleModule from ansible.module_utils.common.text.converters import to_native diff --git a/plugins/modules/database/influxdb/influxdb_retention_policy.py b/plugins/modules/influxdb_retention_policy.py similarity index 71% rename from plugins/modules/database/influxdb/influxdb_retention_policy.py rename to plugins/modules/influxdb_retention_policy.py index 6cb45229cd..c1848a4694 100644 --- a/plugins/modules/database/influxdb/influxdb_retention_policy.py +++ b/plugins/modules/influxdb_retention_policy.py @@ -1,140 +1,138 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- -# Copyright: (c) 2016, Kamil Szczygiel -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# Copyright (c) 2016, Kamil Szczygiel +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import absolute_import, division, print_function +from __future__ import annotations -__metaclass__ = type -DOCUMENTATION = r''' ---- +DOCUMENTATION = r""" module: influxdb_retention_policy short_description: Manage InfluxDB retention policies description: - - Manage InfluxDB retention policies. + - Manage InfluxDB retention policies. author: "Kamil Szczygiel (@kamsz)" requirements: - - "python >= 2.6" - - "influxdb >= 0.9" - - requests + - "influxdb >= 0.9" + - requests +attributes: + check_mode: + support: full + diff_mode: + support: none options: - database_name: - description: - - Name of the database. - required: true - type: str - policy_name: - description: - - Name of the retention policy. - required: true - type: str - state: - description: - - State of the retention policy. 
- choices: [ absent, present ] - default: present - type: str - version_added: 3.1.0 - duration: - description: - - Determines how long InfluxDB should keep the data. If specified, it - should be C(INF) or at least one hour. If not specified, C(INF) is - assumed. Supports complex duration expressions with multiple units. - - Required only if I(state) is set to C(present). - type: str - replication: - description: - - Determines how many independent copies of each point are stored in the cluster. - - Required only if I(state) is set to C(present). - type: int - default: - description: - - Sets the retention policy as default retention policy. - type: bool - default: false - shard_group_duration: - description: - - Determines the time range covered by a shard group. If specified it - must be at least one hour. If none, it's determined by InfluxDB by - the rentention policy's duration. Supports complex duration expressions - with multiple units. - type: str - version_added: '2.0.0' + database_name: + description: + - Name of the database. + required: true + type: str + policy_name: + description: + - Name of the retention policy. + required: true + type: str + state: + description: + - State of the retention policy. + choices: [absent, present] + default: present + type: str + version_added: 3.1.0 + duration: + description: + - Determines how long InfluxDB should keep the data. If specified, it should be V(INF) or at least one hour. If not + specified, V(INF) is assumed. Supports complex duration expressions with multiple units. + - Required only if O(state) is set to V(present). + type: str + replication: + description: + - Determines how many independent copies of each point are stored in the cluster. + - Required only if O(state) is set to V(present). + type: int + default: + description: + - Sets the retention policy as default retention policy. 
+ type: bool + default: false + shard_group_duration: + description: + - Determines the time range covered by a shard group. If specified it must be at least one hour. If not provided, it + is determined by InfluxDB by the rentention policy's duration. Supports complex duration expressions with multiple + units. + type: str + version_added: '2.0.0' extends_documentation_fragment: -- community.general.influxdb + - community.general.influxdb + - community.general.attributes +""" -''' - -EXAMPLES = r''' +EXAMPLES = r""" # Example influxdb_retention_policy command from Ansible Playbooks - name: Create 1 hour retention policy community.general.influxdb_retention_policy: - hostname: "{{ influxdb_ip_address }}" - database_name: "{{ influxdb_database_name }}" - policy_name: test - duration: 1h - replication: 1 - ssl: yes - validate_certs: yes - state: present + hostname: "{{ influxdb_ip_address }}" + database_name: "{{ influxdb_database_name }}" + policy_name: test + duration: 1h + replication: 1 + ssl: true + validate_certs: true + state: present - name: Create 1 day retention policy with 1 hour shard group duration community.general.influxdb_retention_policy: - hostname: "{{ influxdb_ip_address }}" - database_name: "{{ influxdb_database_name }}" - policy_name: test - duration: 1d - replication: 1 - shard_group_duration: 1h - state: present + hostname: "{{ influxdb_ip_address }}" + database_name: "{{ influxdb_database_name }}" + policy_name: test + duration: 1d + replication: 1 + shard_group_duration: 1h + state: present - name: Create 1 week retention policy with 1 day shard group duration community.general.influxdb_retention_policy: - hostname: "{{ influxdb_ip_address }}" - database_name: "{{ influxdb_database_name }}" - policy_name: test - duration: 1w - replication: 1 - shard_group_duration: 1d - state: present + hostname: "{{ influxdb_ip_address }}" + database_name: "{{ influxdb_database_name }}" + policy_name: test + duration: 1w + replication: 1 + 
shard_group_duration: 1d + state: present - name: Create infinite retention policy with 1 week of shard group duration community.general.influxdb_retention_policy: - hostname: "{{ influxdb_ip_address }}" - database_name: "{{ influxdb_database_name }}" - policy_name: test - duration: INF - replication: 1 - ssl: no - validate_certs: no - shard_group_duration: 1w - state: present + hostname: "{{ influxdb_ip_address }}" + database_name: "{{ influxdb_database_name }}" + policy_name: test + duration: INF + replication: 1 + ssl: false + shard_group_duration: 1w + state: present - name: Create retention policy with complex durations community.general.influxdb_retention_policy: - hostname: "{{ influxdb_ip_address }}" - database_name: "{{ influxdb_database_name }}" - policy_name: test - duration: 5d1h30m - replication: 1 - ssl: no - validate_certs: no - shard_group_duration: 1d10h30m - state: present + hostname: "{{ influxdb_ip_address }}" + database_name: "{{ influxdb_database_name }}" + policy_name: test + duration: 5d1h30m + replication: 1 + ssl: false + shard_group_duration: 1d10h30m + state: present - name: Drop retention policy community.general.influxdb_retention_policy: - hostname: "{{ influxdb_ip_address }}" - database_name: "{{ influxdb_database_name }}" - policy_name: test - state: absent -''' + hostname: "{{ influxdb_ip_address }}" + database_name: "{{ influxdb_database_name }}" + policy_name: test + state: absent +""" -RETURN = r''' +RETURN = r""" # only defaults -''' +""" import re @@ -183,7 +181,7 @@ def parse_duration_literal(value, extended=False): lookup = (EXTENDED_DURATION_REGEX if extended else DURATION_REGEX).findall(value) for duration_literal in lookup: - filtered_literal = list(filter(None, duration_literal)) + filtered_literal = [_f for _f in duration_literal if _f] duration_val = float(filtered_literal[0]) duration += duration_val * DURATION_UNIT_NANOSECS[filtered_literal[1]] diff --git a/plugins/modules/database/influxdb/influxdb_user.py 
b/plugins/modules/influxdb_user.py similarity index 89% rename from plugins/modules/database/influxdb/influxdb_user.py rename to plugins/modules/influxdb_user.py index 76524d8613..b6351a0c27 100644 --- a/plugins/modules/database/influxdb/influxdb_user.py +++ b/plugins/modules/influxdb_user.py @@ -1,29 +1,31 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- -# Copyright: (c) 2017, Vitaliy Zhhuta +# Copyright (c) 2017, Vitaliy Zhhuta # insipred by Kamil Szczygiel influxdb_database module -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import absolute_import, division, print_function +from __future__ import annotations -__metaclass__ = type -DOCUMENTATION = r''' ---- +DOCUMENTATION = r""" module: influxdb_user short_description: Manage InfluxDB users description: - Manage InfluxDB users. author: "Vitaliy Zhhuta (@zhhuta)" requirements: - - "python >= 2.6" - "influxdb >= 0.9" +attributes: + check_mode: + support: full + diff_mode: + support: none options: user_name: description: - Name of the user. - required: True + required: true type: str user_password: description: @@ -33,29 +35,29 @@ options: admin: description: - Whether the user should be in the admin role or not. - - Since version 2.8, the role will also be updated. - default: no + - Since version 2.8, the role is also updated. + default: false type: bool state: description: - State of the user. - choices: [ absent, present ] + choices: [absent, present] default: present type: str grants: description: - Privileges to grant to this user. - Takes a list of dicts containing the "database" and "privilege" keys. - - If this argument is not provided, the current grants will be left alone. - - If an empty list is provided, all grants for the user will be removed. 
+ - If this argument is not provided, the current grants are left alone. + - If an empty list is provided, all grants for the user are removed. type: list elements: dict extends_documentation_fragment: -- community.general.influxdb + - community.general.influxdb + - community.general.attributes +""" -''' - -EXAMPLES = r''' +EXAMPLES = r""" - name: Create a user on localhost using default login credentials community.general.influxdb_user: user_name: john @@ -72,7 +74,7 @@ EXAMPLES = r''' community.general.influxdb_user: user_name: john user_password: s3cr3t - admin: yes + admin: true hostname: "{{ influxdb_hostname }}" login_username: "{{ influxdb_username }}" login_password: "{{ influxdb_password }}" @@ -95,11 +97,9 @@ EXAMPLES = r''' login_username: "{{ influxdb_username }}" login_password: "{{ influxdb_password }}" state: absent -''' +""" -RETURN = r''' -#only defaults -''' +RETURN = r"""#""" import json @@ -167,8 +167,14 @@ def drop_user(module, client, user_name): def set_user_grants(module, client, user_name, grants): changed = False + current_grants = [] try: current_grants = client.get_list_privileges(user_name) + except influx.exceptions.InfluxDBClientError as e: + if not module.check_mode or 'user not found' not in e.content: + module.fail_json(msg=e.content) + + try: parsed_grants = [] # Fix privileges wording for i, v in enumerate(current_grants): @@ -209,7 +215,7 @@ def main(): argument_spec.update( state=dict(default='present', type='str', choices=['present', 'absent']), user_name=dict(required=True, type='str'), - user_password=dict(required=False, type='str', no_log=True), + user_password=dict(type='str', no_log=True), admin=dict(default='False', type='bool'), grants=dict(type='list', elements='dict'), ) diff --git a/plugins/modules/database/influxdb/influxdb_write.py b/plugins/modules/influxdb_write.py similarity index 65% rename from plugins/modules/database/influxdb/influxdb_write.py rename to plugins/modules/influxdb_write.py index 
e34fe9c2cf..d0348aca01 100644 --- a/plugins/modules/database/influxdb/influxdb_write.py +++ b/plugins/modules/influxdb_write.py @@ -1,22 +1,24 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- # Copyright (c) 2017, René Moser -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = r''' ---- +DOCUMENTATION = r""" module: influxdb_write short_description: Write data points into InfluxDB description: - Write data points into InfluxDB. author: "René Moser (@resmo)" requirements: - - "python >= 2.6" - "influxdb >= 0.9" +attributes: + check_mode: + support: none + diff_mode: + support: none options: data_points: description: @@ -30,35 +32,35 @@ options: required: true type: str extends_documentation_fragment: -- community.general.influxdb + - community.general.influxdb + - community.general.attributes +""" -''' - -EXAMPLES = r''' +EXAMPLES = r""" - name: Write points into database community.general.influxdb_write: - hostname: "{{influxdb_ip_address}}" - database_name: "{{influxdb_database_name}}" - data_points: - - measurement: connections - tags: - host: server01 - region: us-west - time: "{{ ansible_date_time.iso8601 }}" - fields: - value: 2000 - - measurement: connections - tags: - host: server02 - region: us-east - time: "{{ ansible_date_time.iso8601 }}" - fields: - value: 3000 -''' + hostname: "{{influxdb_ip_address}}" + database_name: "{{influxdb_database_name}}" + data_points: + - measurement: connections + tags: + host: server01 + region: us-west + time: "{{ ansible_date_time.iso8601 }}" + fields: + value: 2000 + - measurement: connections + tags: + host: server02 + region: us-east + time: "{{ ansible_date_time.iso8601 }}" + fields: 
+ value: 3000 +""" -RETURN = r''' +RETURN = r""" # only defaults -''' +""" from ansible.module_utils.basic import AnsibleModule from ansible.module_utils.common.text.converters import to_native diff --git a/plugins/modules/ini_file.py b/plugins/modules/ini_file.py new file mode 100644 index 0000000000..27b55c3bf4 --- /dev/null +++ b/plugins/modules/ini_file.py @@ -0,0 +1,660 @@ +#!/usr/bin/python + +# Copyright (c) 2012, Jan-Piet Mens +# Copyright (c) 2015, Ales Nosek +# Copyright (c) 2017, Ansible Project +# Copyright (c) 2023, Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + + +DOCUMENTATION = r""" +module: ini_file +short_description: Tweak settings in INI files +extends_documentation_fragment: + - files + - community.general.attributes +description: + - Manage (add, remove, change) individual settings in an INI-style file without having to manage the file as a whole with, + say, M(ansible.builtin.template) or M(ansible.builtin.assemble). + - Adds missing sections if they do not exist. + - This module adds missing ending newlines to files to keep in line with the POSIX standard, even when no other modifications + need to be applied. +attributes: + check_mode: + support: full + diff_mode: + support: full +options: + path: + description: + - Path to the INI-style file; this file is created if required. + type: path + required: true + aliases: [dest] + section: + description: + - Section name in INI file. This is added if O(state=present) automatically when a single value is being set. + - If being omitted, the O(option) is placed before the first O(section). + - Omitting O(section) is also required if the config format does not support sections. 
+ type: str + section_has_values: + type: list + elements: dict + required: false + suboptions: + option: + type: str + description: Matching O(section) must contain this option. + required: true + value: + type: str + description: Matching O(section_has_values[].option) must have this specific value. + values: + description: + - The string value to be associated with an O(section_has_values[].option). + - Mutually exclusive with O(section_has_values[].value). + - O(section_has_values[].value=v) is equivalent to O(section_has_values[].values=[v]). + type: list + elements: str + description: + - Among possibly multiple sections of the same name, select the first one that contains matching options and values. + - With O(state=present), if a suitable section is not found, a new section is added, including the required options. + - With O(state=absent), at most one O(section) is removed if it contains the values. + version_added: 8.6.0 + option: + description: + - If set (required for changing a O(value)), this is the name of the option. + - May be omitted if adding/removing a whole O(section). + type: str + value: + description: + - The string value to be associated with an O(option). + - May be omitted when removing an O(option). + - Mutually exclusive with O(values). + - O(value=v) is equivalent to O(values=[v]). + type: str + values: + description: + - The string value to be associated with an O(option). + - May be omitted when removing an O(option). + - Mutually exclusive with O(value). + - O(value=v) is equivalent to O(values=[v]). + type: list + elements: str + version_added: 3.6.0 + backup: + description: + - Create a backup file including the timestamp information so you can get the original file back if you somehow clobbered + it incorrectly. + type: bool + default: false + state: + description: + - If set to V(absent) and O(exclusive) set to V(true) all matching O(option) lines are removed. 
+ - If set to V(absent) and O(exclusive) set to V(false) the specified O(option=value) lines are removed, but the other + O(option)s with the same name are not touched. + - If set to V(present) and O(exclusive) set to V(false) the specified O(option=values) lines are added, but the other + O(option)s with the same name are not touched. + - If set to V(present) and O(exclusive) set to V(true) all given O(option=values) lines are added and the other O(option)s + with the same name are removed. + type: str + choices: [absent, present] + default: present + exclusive: + description: + - If set to V(true) (default), all matching O(option) lines are removed when O(state=absent), or replaced when O(state=present). + - If set to V(false), only the specified O(value)/O(values) are added when O(state=present), or removed when O(state=absent), + and existing ones are not modified. + type: bool + default: true + version_added: 3.6.0 + no_extra_spaces: + description: + - Do not insert spaces before and after '=' symbol. + type: bool + default: false + ignore_spaces: + description: + - Do not change a line if doing so would only add or remove spaces before or after the V(=) symbol. + type: bool + default: false + version_added: 7.5.0 + create: + description: + - If set to V(false), the module fails if the file does not already exist. + - By default it creates the file if it is missing. + type: bool + default: true + allow_no_value: + description: + - Allow option without value and without '=' symbol. + type: bool + default: false + modify_inactive_option: + description: + - By default the module replaces a commented line that matches the given option. + - Set this option to V(false) to avoid this. This is useful when you want to keep commented example C(key=value) pairs + for documentation purposes. + type: bool + default: true + version_added: 8.0.0 + follow: + description: + - This flag indicates that filesystem links, if they exist, should be followed. 
+ - O(follow=true) can modify O(path) when combined with parameters such as O(mode). + type: bool + default: false + version_added: 7.1.0 +notes: + - While it is possible to add an O(option) without specifying a O(value), this makes no sense. + - As of community.general 3.2.0, UTF-8 BOM markers are discarded when reading files. +author: + - Jan-Piet Mens (@jpmens) + - Ales Nosek (@noseka1) +""" + +EXAMPLES = r""" +- name: Ensure "fav=lemonade is in section "[drinks]" in specified file + community.general.ini_file: + path: /etc/conf + section: drinks + option: fav + value: lemonade + mode: '0600' + backup: true + +- name: Ensure "temperature=cold is in section "[drinks]" in specified file + community.general.ini_file: + path: /etc/anotherconf + section: drinks + option: temperature + value: cold + backup: true + +- name: Add "beverage=lemon juice" is in section "[drinks]" in specified file + community.general.ini_file: + path: /etc/conf + section: drinks + option: beverage + value: lemon juice + mode: '0600' + state: present + exclusive: false + +- name: Ensure multiple values "beverage=coke" and "beverage=pepsi" are in section "[drinks]" in specified file + community.general.ini_file: + path: /etc/conf + section: drinks + option: beverage + values: + - coke + - pepsi + mode: '0600' + state: present + +- name: Add "beverage=lemon juice" outside a section in specified file + community.general.ini_file: + path: /etc/conf + option: beverage + value: lemon juice + state: present + +- name: Remove the peer configuration for 10.128.0.11/32 + community.general.ini_file: + path: /etc/wireguard/wg0.conf + section: Peer + section_has_values: + - option: AllowedIps + value: 10.128.0.11/32 + mode: '0600' + state: absent + +- name: Add "beverage=lemon juice" outside a section in specified file + community.general.ini_file: + path: /etc/conf + option: beverage + value: lemon juice + state: present + +- name: Update the public key for peer 10.128.0.12/32 + 
community.general.ini_file: + path: /etc/wireguard/wg0.conf + section: Peer + section_has_values: + - option: AllowedIps + value: 10.128.0.12/32 + option: PublicKey + value: xxxxxxxxxxxxxxxxxxxx + mode: '0600' + state: present + +- name: Remove the peer configuration for 10.128.0.11/32 + community.general.ini_file: + path: /etc/wireguard/wg0.conf + section: Peer + section_has_values: + - option: AllowedIps + value: 10.4.0.11/32 + mode: '0600' + state: absent + +- name: Update the public key for peer 10.128.0.12/32 + community.general.ini_file: + path: /etc/wireguard/wg0.conf + section: Peer + section_has_values: + - option: AllowedIps + value: 10.4.0.12/32 + option: PublicKey + value: xxxxxxxxxxxxxxxxxxxx + mode: '0600' + state: present +""" + +import io +import os +import re +import tempfile +import traceback + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.common.text.converters import to_bytes, to_text + + +def match_opt(option, line): + option = re.escape(option) + return re.match('( |\t)*([#;]?)( |\t)*(%s)( |\t)*(=|$)( |\t)*(.*)' % option, line) + + +def match_active_opt(option, line): + option = re.escape(option) + return re.match('()()( |\t)*(%s)( |\t)*(=|$)( |\t)*(.*)' % option, line) + + +def update_section_line(option, changed, section_lines, index, changed_lines, ignore_spaces, newline, msg): + option_changed = None + if ignore_spaces: + old_match = match_opt(option, section_lines[index]) + if not old_match.group(2): + new_match = match_opt(option, newline) + option_changed = old_match.group(8) != new_match.group(8) + if option_changed is None: + option_changed = section_lines[index] != newline + if option_changed: + section_lines[index] = newline + changed = changed or option_changed + if option_changed: + msg = 'option changed' + changed_lines[index] = 1 + return (changed, msg) + + +def check_section_has_values(section_has_values, section_lines): + if section_has_values is not None: + for condition in 
section_has_values: + for line in section_lines: + match = match_opt(condition["option"], line) + if match and (len(condition["values"]) == 0 or match.group(8) in condition["values"]): + break + else: + return False + return True + + +def do_ini(module, filename, section=None, section_has_values=None, option=None, values=None, + state='present', exclusive=True, backup=False, no_extra_spaces=False, + ignore_spaces=False, create=True, allow_no_value=False, modify_inactive_option=True, follow=False): + + if section is not None: + section = to_text(section) + if option is not None: + option = to_text(option) + + # deduplicate entries in values + values_unique = [] + [values_unique.append(to_text(value)) for value in values if value not in values_unique and value is not None] + values = values_unique + + diff = dict( + before='', + after='', + before_header='%s (content)' % filename, + after_header='%s (content)' % filename, + ) + + if follow and os.path.islink(filename): + target_filename = os.path.realpath(filename) + else: + target_filename = filename + + if not os.path.exists(target_filename): + if not create: + module.fail_json(rc=257, msg='Destination %s does not exist!' 
% target_filename) + destpath = os.path.dirname(target_filename) + if not os.path.exists(destpath) and not module.check_mode: + os.makedirs(destpath) + ini_lines = [] + else: + with io.open(target_filename, 'r', encoding="utf-8-sig") as ini_file: + ini_lines = [to_text(line) for line in ini_file.readlines()] + + if module._diff: + diff['before'] = ''.join(ini_lines) + + changed = False + + # ini file could be empty + if not ini_lines: + ini_lines.append('\n') + + # last line of file may not contain a trailing newline + if ini_lines[-1] == "" or ini_lines[-1][-1] != '\n': + ini_lines[-1] += '\n' + changed = True + + # append fake section lines to simplify the logic + # At top: + # Fake random section to do not match any other in the file + # Using commit hash as fake section name + fake_section_name = "ad01e11446efb704fcdbdb21f2c43757423d91c5" + + # Insert it at the beginning + ini_lines.insert(0, '[%s]' % fake_section_name) + + # At bottom: + ini_lines.append('[') + + # If no section is defined, fake section is used + if not section: + section = fake_section_name + + within_section = not section + section_start = section_end = 0 + msg = 'OK' + if no_extra_spaces: + assignment_format = '%s=%s\n' + else: + assignment_format = '%s = %s\n' + + option_no_value_present = False + + non_blank_non_comment_pattern = re.compile(to_text(r'^[ \t]*([#;].*)?$')) + + before = after = [] + section_lines = [] + + section_pattern = re.compile(to_text(r'^\[\s*%s\s*]' % re.escape(section.strip()))) + + for index, line in enumerate(ini_lines): + # end of section: + if within_section and line.startswith('['): + if check_section_has_values( + section_has_values, ini_lines[section_start:index] + ): + section_end = index + break + else: + # look for another section + within_section = False + section_start = section_end = 0 + + # find start and end of section + if section_pattern.match(line): + within_section = True + section_start = index + + before = ini_lines[0:section_start] + 
section_lines = ini_lines[section_start:section_end] + after = ini_lines[section_end:len(ini_lines)] + + # Keep track of changed section_lines + changed_lines = [0] * len(section_lines) + + # Determine whether to consider using commented out/inactive options or only active ones + if modify_inactive_option: + match_function = match_opt + else: + match_function = match_active_opt + + # handling multiple instances of option=value when state is 'present' with/without exclusive is a bit complex + # + # 1. edit all lines where we have a option=value pair with a matching value in values[] + # 2. edit all the remaining lines where we have a matching option + # 3. delete remaining lines where we have a matching option + # 4. insert missing option line(s) at the end of the section + + if state == 'present' and option: + for index, line in enumerate(section_lines): + if match_function(option, line): + match = match_function(option, line) + if values and match.group(8) in values: + matched_value = match.group(8) + if not matched_value and allow_no_value: + # replace existing option with no value line(s) + newline = '%s\n' % option + option_no_value_present = True + else: + # replace existing option=value line(s) + newline = assignment_format % (option, matched_value) + (changed, msg) = update_section_line(option, changed, section_lines, index, changed_lines, ignore_spaces, newline, msg) + values.remove(matched_value) + elif not values and allow_no_value: + # replace existing option with no value line(s) + newline = '%s\n' % option + (changed, msg) = update_section_line(option, changed, section_lines, index, changed_lines, ignore_spaces, newline, msg) + option_no_value_present = True + break + + if state == 'present' and exclusive and not allow_no_value: + # override option with no value to option with value if not allow_no_value + if len(values) > 0: + for index, line in enumerate(section_lines): + if not changed_lines[index] and match_function(option, line): + newline = 
assignment_format % (option, values.pop(0)) + (changed, msg) = update_section_line(option, changed, section_lines, index, changed_lines, ignore_spaces, newline, msg) + if len(values) == 0: + break + # remove all remaining option occurrences from the rest of the section + for index in range(len(section_lines) - 1, 0, -1): + if not changed_lines[index] and match_function(option, section_lines[index]): + del section_lines[index] + del changed_lines[index] + changed = True + msg = 'option changed' + + if state == 'present': + # insert missing option line(s) at the end of the section + for index in range(len(section_lines), 0, -1): + # search backwards for previous non-blank or non-comment line + if not non_blank_non_comment_pattern.match(section_lines[index - 1]): + if option and values: + # insert option line(s) + for element in values[::-1]: + # items are added backwards, so traverse the list backwards to not confuse the user + # otherwise some of their options might appear in reverse order for whatever fancy reason ¯\_(ツ)_/¯ + if element is not None: + # insert option=value line + section_lines.insert(index, assignment_format % (option, element)) + msg = 'option added' + changed = True + elif element is None and allow_no_value: + # insert option with no value line + section_lines.insert(index, '%s\n' % option) + msg = 'option added' + changed = True + elif option and not values and allow_no_value and not option_no_value_present: + # insert option with no value line(s) + section_lines.insert(index, '%s\n' % option) + msg = 'option added' + changed = True + break + + if state == 'absent': + if option: + if exclusive: + # delete all option line(s) with given option and ignore value + new_section_lines = [line for line in section_lines if not (match_active_opt(option, line))] + if section_lines != new_section_lines: + changed = True + msg = 'option changed' + section_lines = new_section_lines + elif not exclusive and len(values) > 0: + # delete specified option=value 
line(s) + new_section_lines = [i for i in section_lines if not (match_active_opt(option, i) and match_active_opt(option, i).group(8) in values)] + if section_lines != new_section_lines: + changed = True + msg = 'option changed' + section_lines = new_section_lines + else: + # drop the entire section + if section_lines: + section_lines = [] + msg = 'section removed' + changed = True + + # reassemble the ini_lines after manipulation + ini_lines = before + section_lines + after + + # remove the fake section line + del ini_lines[0] + del ini_lines[-1:] + + if not within_section and state == 'present': + ini_lines.append('[%s]\n' % section) + msg = 'section and option added' + if section_has_values: + for condition in section_has_values: + if condition['option'] != option: + if len(condition['values']) > 0: + for value in condition['values']: + ini_lines.append(assignment_format % (condition['option'], value)) + elif allow_no_value: + ini_lines.append('%s\n' % condition['option']) + elif not exclusive: + for value in condition['values']: + if value not in values: + values.append(value) + if option and values: + for value in values: + ini_lines.append(assignment_format % (option, value)) + elif option and not values and allow_no_value: + ini_lines.append('%s\n' % option) + else: + msg = 'only section added' + changed = True + + if module._diff: + diff['after'] = ''.join(ini_lines) + + backup_file = None + if changed and not module.check_mode: + if backup: + backup_file = module.backup_local(target_filename) + + encoded_ini_lines = [to_bytes(line) for line in ini_lines] + try: + tmpfd, tmpfile = tempfile.mkstemp(dir=module.tmpdir) + f = os.fdopen(tmpfd, 'wb') + f.writelines(encoded_ini_lines) + f.close() + except IOError: + module.fail_json(msg="Unable to create temporary file %s", traceback=traceback.format_exc()) + + try: + module.atomic_move(tmpfile, os.path.abspath(target_filename)) + except IOError: + module.ansible.fail_json(msg='Unable to move temporary \ + file %s 
to %s, IOError' % (tmpfile, target_filename), traceback=traceback.format_exc()) + + return (changed, backup_file, diff, msg) + + +def main(): + + module = AnsibleModule( + argument_spec=dict( + path=dict(type='path', required=True, aliases=['dest']), + section=dict(type='str'), + section_has_values=dict(type='list', elements='dict', options=dict( + option=dict(type='str', required=True), + value=dict(type='str'), + values=dict(type='list', elements='str') + ), mutually_exclusive=[['value', 'values']]), + option=dict(type='str'), + value=dict(type='str'), + values=dict(type='list', elements='str'), + backup=dict(type='bool', default=False), + state=dict(type='str', default='present', choices=['absent', 'present']), + exclusive=dict(type='bool', default=True), + no_extra_spaces=dict(type='bool', default=False), + ignore_spaces=dict(type='bool', default=False), + allow_no_value=dict(type='bool', default=False), + modify_inactive_option=dict(type='bool', default=True), + create=dict(type='bool', default=True), + follow=dict(type='bool', default=False) + ), + mutually_exclusive=[ + ['value', 'values'] + ], + add_file_common_args=True, + supports_check_mode=True, + ) + + path = module.params['path'] + section = module.params['section'] + section_has_values = module.params['section_has_values'] + option = module.params['option'] + value = module.params['value'] + values = module.params['values'] + state = module.params['state'] + exclusive = module.params['exclusive'] + backup = module.params['backup'] + no_extra_spaces = module.params['no_extra_spaces'] + ignore_spaces = module.params['ignore_spaces'] + allow_no_value = module.params['allow_no_value'] + modify_inactive_option = module.params['modify_inactive_option'] + create = module.params['create'] + follow = module.params['follow'] + + if state == 'present' and not allow_no_value and value is None and not values: + module.fail_json(msg="Parameter 'value(s)' must be defined if state=present and allow_no_value=False.") 
+ + if value is not None: + values = [value] + elif values is None: + values = [] + + if section_has_values: + for condition in section_has_values: + if condition['value'] is not None: + condition['values'] = [condition['value']] + elif condition['values'] is None: + condition['values'] = [] +# raise Exception("section_has_values: {}".format(section_has_values)) + + (changed, backup_file, diff, msg) = do_ini( + module, path, section, section_has_values, option, values, state, exclusive, backup, + no_extra_spaces, ignore_spaces, create, allow_no_value, modify_inactive_option, follow) + + if not module.check_mode and os.path.exists(path): + file_args = module.load_file_common_arguments(module.params) + changed = module.set_fs_attributes_if_different(file_args, changed) + + results = dict( + changed=changed, + diff=diff, + msg=msg, + path=path, + ) + if backup_file is not None: + results['backup_file'] = backup_file + + # Mission complete + module.exit_json(**results) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/packaging/os/installp.py b/plugins/modules/installp.py similarity index 81% rename from plugins/modules/packaging/os/installp.py rename to plugins/modules/installp.py index af7a950afa..57f70db687 100644 --- a/plugins/modules/packaging/os/installp.py +++ b/plugins/modules/installp.py @@ -1,84 +1,89 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- -# Copyright: (c) 2017, Kairo Araujo -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# Copyright (c) 2017, Kairo Araujo +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = r''' ---- +DOCUMENTATION = r""" module: installp author: -- Kairo Araujo (@kairoaraujo) + - Kairo Araujo (@kairoaraujo) 
short_description: Manage packages on AIX description: - - Manage packages using 'installp' on AIX + - Manage packages using 'installp' on AIX. +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: none options: accept_license: description: - - Whether to accept the license for the package(s). + - Whether to accept the license for the package(s). type: bool - default: no + default: false name: description: - - One or more packages to install or remove. - - Use C(all) to install all packages available on informed C(repository_path). + - One or more packages to install or remove. + - Use V(all) to install all packages available on informed O(repository_path). type: list elements: str required: true - aliases: [ pkg ] + aliases: [pkg] repository_path: description: - - Path with AIX packages (required to install). + - Path with AIX packages (required to install). type: path state: description: - - Whether the package needs to be present on or absent from the system. + - Whether the package needs to be present on or absent from the system. type: str - choices: [ absent, present ] + choices: [absent, present] default: present notes: -- If the package is already installed, even the package/fileset is new, the module will not install it. -''' + - If the package is already installed, even the package/fileset is new, the module does not install it. 
+""" -EXAMPLES = r''' +EXAMPLES = r""" - name: Install package foo community.general.installp: name: foo repository_path: /repository/AIX71/installp/base - accept_license: yes + accept_license: true state: present - name: Install bos.sysmgt that includes bos.sysmgt.nim.master, bos.sysmgt.nim.spot community.general.installp: name: bos.sysmgt repository_path: /repository/AIX71/installp/base - accept_license: yes + accept_license: true state: present - name: Install bos.sysmgt.nim.master only community.general.installp: name: bos.sysmgt.nim.master repository_path: /repository/AIX71/installp/base - accept_license: yes + accept_license: true state: present - name: Install bos.sysmgt.nim.master and bos.sysmgt.nim.spot community.general.installp: name: bos.sysmgt.nim.master, bos.sysmgt.nim.spot repository_path: /repository/AIX71/installp/base - accept_license: yes + accept_license: true state: present - name: Remove packages bos.sysmgt.nim.master community.general.installp: name: bos.sysmgt.nim.master state: absent -''' +""" -RETURN = r''' # ''' +RETURN = r""" # """ import os import re @@ -98,7 +103,7 @@ def _check_new_pkg(module, package, repository_path): if os.path.isdir(repository_path): installp_cmd = module.get_bin_path('installp', True) - rc, package_result, err = module.run_command("%s -l -MR -d %s" % (installp_cmd, repository_path)) + rc, package_result, err = module.run_command([installp_cmd, "-l", "-MR", "-d", repository_path]) if rc != 0: module.fail_json(msg="Failed to run installp.", rc=rc, err=err) @@ -125,7 +130,7 @@ def _check_new_pkg(module, package, repository_path): def _check_installed_pkg(module, package, repository_path): """ Check the package on AIX. - It verifies if the package is installed and informations + It verifies if the package is installed and information :param module: Ansible module parameters spec. :param package: Package/fileset name. 
@@ -134,7 +139,7 @@ def _check_installed_pkg(module, package, repository_path): """ lslpp_cmd = module.get_bin_path('lslpp', True) - rc, lslpp_result, err = module.run_command("%s -lcq %s*" % (lslpp_cmd, package)) + rc, lslpp_result, err = module.run_command([lslpp_cmd, "-lcq", "%s*" % (package, )]) if rc == 1: package_state = ' '.join(err.split()[-2:]) @@ -165,7 +170,7 @@ def remove(module, installp_cmd, packages): if pkg_check: if not module.check_mode: - rc, remove_out, err = module.run_command("%s -u %s" % (installp_cmd, package)) + rc, remove_out, err = module.run_command([installp_cmd, "-u", package]) if rc != 0: module.fail_json(msg="Failed to run installp.", rc=rc, err=err) remove_count += 1 @@ -194,8 +199,8 @@ def install(module, installp_cmd, packages, repository_path, accept_license): already_installed_pkgs = {} accept_license_param = { - True: '-Y', - False: '', + True: ['-Y'], + False: [], } # Validate if package exists on repository path. @@ -222,7 +227,8 @@ def install(module, installp_cmd, packages, repository_path, accept_license): else: if not module.check_mode: - rc, out, err = module.run_command("%s -a %s -X -d %s %s" % (installp_cmd, accept_license_param[accept_license], repository_path, package)) + rc, out, err = module.run_command( + [installp_cmd, "-a"] + accept_license_param[accept_license] + ["-X", "-d", repository_path, package]) if rc != 0: module.fail_json(msg="Failed to run installp", rc=rc, err=err) installed_pkgs.append(package) diff --git a/plugins/modules/interfaces_file.py b/plugins/modules/interfaces_file.py new file mode 100644 index 0000000000..c7038d1008 --- /dev/null +++ b/plugins/modules/interfaces_file.py @@ -0,0 +1,418 @@ +#!/usr/bin/python +# +# Copyright (c) 2016, Roman Belyakovsky +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + + +DOCUMENTATION = r""" +module: 
interfaces_file +short_description: Tweak settings in C(/etc/network/interfaces) files +extends_documentation_fragment: + - ansible.builtin.files + - community.general.attributes +description: + - Manage (add, remove, change) individual interface options in an interfaces-style file without having to manage the file + as a whole with, say, M(ansible.builtin.template) or M(ansible.builtin.assemble). Interface has to be presented in a file. + - Read information about interfaces from interfaces-styled files. +attributes: + check_mode: + support: full + diff_mode: + support: none +options: + dest: + type: path + description: + - Path to the interfaces file. + default: /etc/network/interfaces + iface: + type: str + description: + - Name of the interface, required for value changes or option remove. + address_family: + type: str + description: + - Address family of the interface, useful if same interface name is used for both V(inet) and V(inet6). + option: + type: str + description: + - Name of the option, required for value changes or option remove. + value: + type: str + description: + - If O(option) is not presented for the O(iface) and O(state) is V(present), then O(option) is added. If O(option) already + exists and is not V(pre-up), V(up), V(post-up) or V(down), its value is updated. V(pre-up), V(up), V(post-up) and + V(down) options cannot be updated, only adding new options, removing existing ones or cleaning the whole option set + are supported. + backup: + description: + - Create a backup file including the timestamp information so you can get the original file back if you somehow clobbered + it incorrectly. + type: bool + default: false + state: + type: str + description: + - If set to V(absent) the option or section is removed if present instead of created. + default: "present" + choices: ["present", "absent"] + +notes: + - If option is defined multiple times last one is updated but all others are deleted in case of an O(state=absent). 
+requirements: [] +author: "Roman Belyakovsky (@hryamzik)" +""" + +RETURN = r""" +dest: + description: Destination file/path. + returned: success + type: str + sample: "/etc/network/interfaces" +ifaces: + description: Interfaces dictionary. + returned: success + type: dict + contains: + ifaces: + description: Interface dictionary. + returned: success + type: dict + contains: + eth0: + description: Name of the interface. + returned: success + type: dict + contains: + address_family: + description: Interface address family. + returned: success + type: str + sample: "inet" + method: + description: Interface method. + returned: success + type: str + sample: "manual" + mtu: + description: Other options, all values returned as strings. + returned: success + type: str + sample: "1500" + pre-up: + description: List of C(pre-up) scripts. + returned: success + type: list + elements: str + sample: + - "route add -net 10.10.10.0/24 gw 10.10.10.1 dev eth1" + - "route add -net 10.10.11.0/24 gw 10.10.11.1 dev eth2" + up: + description: List of C(up) scripts. + returned: success + type: list + elements: str + sample: + - "route add -net 10.10.10.0/24 gw 10.10.10.1 dev eth1" + - "route add -net 10.10.11.0/24 gw 10.10.11.1 dev eth2" + post-up: + description: List of C(post-up) scripts. + returned: success + type: list + elements: str + sample: + - "route add -net 10.10.10.0/24 gw 10.10.10.1 dev eth1" + - "route add -net 10.10.11.0/24 gw 10.10.11.1 dev eth2" + down: + description: List of C(down) scripts. 
+ returned: success + type: list + elements: str + sample: + - "route del -net 10.10.10.0/24 gw 10.10.10.1 dev eth1" + - "route del -net 10.10.11.0/24 gw 10.10.11.1 dev eth2" +""" + +EXAMPLES = r""" +- name: Set eth1 mtu configuration value to 8000 + community.general.interfaces_file: + dest: /etc/network/interfaces.d/eth1.cfg + iface: eth1 + option: mtu + value: 8000 + backup: true + state: present + register: eth1_cfg +""" + +import os +import re +import tempfile + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.common.text.converters import to_bytes + + +def lineDict(line): + return {'line': line, 'line_type': 'unknown'} + + +def optionDict(line, iface, option, value, address_family): + return {'line': line, 'iface': iface, 'option': option, 'value': value, 'line_type': 'option', 'address_family': address_family} + + +def getValueFromLine(s): + spaceRe = re.compile(r'\s+') + m = list(spaceRe.finditer(s))[-1] + valueEnd = m.start() + option = s.split()[0] + optionStart = s.find(option) + optionLen = len(option) + return s[optionLen + optionStart:].strip() + + +def read_interfaces_file(module, filename): + with open(filename, 'r') as f: + return read_interfaces_lines(module, f) + + +def read_interfaces_lines(module, line_strings): + lines = [] + ifaces = {} + currently_processing = None + i = 0 + for line in line_strings: + i += 1 + words = line.split() + if len(words) < 1: + lines.append(lineDict(line)) + continue + if words[0][0] == "#": + lines.append(lineDict(line)) + continue + if words[0] == "mapping": + # currmap = calloc(1, sizeof *currmap); + lines.append(lineDict(line)) + currently_processing = "MAPPING" + elif words[0] == "source": + lines.append(lineDict(line)) + currently_processing = "NONE" + elif words[0] == "source-dir": + lines.append(lineDict(line)) + currently_processing = "NONE" + elif words[0] == "source-directory": + lines.append(lineDict(line)) + currently_processing = "NONE" + elif words[0] == "iface": + 
currif = { + "pre-up": [], + "up": [], + "down": [], + "post-up": [] + } + iface_name = words[1] + try: + currif['address_family'] = words[2] + except IndexError: + currif['address_family'] = None + address_family = currif['address_family'] + try: + currif['method'] = words[3] + except IndexError: + currif['method'] = None + + ifaces[iface_name] = currif + lines.append({'line': line, 'iface': iface_name, 'line_type': 'iface', 'params': currif, 'address_family': address_family}) + currently_processing = "IFACE" + elif words[0] == "auto": + lines.append(lineDict(line)) + currently_processing = "NONE" + elif words[0].startswith("allow-"): + lines.append(lineDict(line)) + currently_processing = "NONE" + elif words[0] == "no-auto-down": + lines.append(lineDict(line)) + currently_processing = "NONE" + elif words[0] == "no-scripts": + lines.append(lineDict(line)) + currently_processing = "NONE" + else: + if currently_processing == "IFACE": + option_name = words[0] + value = getValueFromLine(line) + lines.append(optionDict(line, iface_name, option_name, value, address_family)) + if option_name in ["pre-up", "up", "down", "post-up"]: + currif[option_name].append(value) + else: + currif[option_name] = value + elif currently_processing == "MAPPING": + lines.append(lineDict(line)) + elif currently_processing == "NONE": + lines.append(lineDict(line)) + else: + module.fail_json(msg="misplaced option %s in line %d" % (line, i)) + return None, None + return lines, ifaces + + +def get_interface_options(iface_lines): + return [i for i in iface_lines if i['line_type'] == 'option'] + + +def get_target_options(iface_options, option): + return [i for i in iface_options if i['option'] == option] + + +def update_existing_option_line(target_option, value): + old_line = target_option['line'] + old_value = target_option['value'] + prefix_start = old_line.find(target_option["option"]) + optionLen = len(target_option["option"]) + old_value_position = re.search(r"\s+".join(map(re.escape, 
old_value.split())), old_line[prefix_start + optionLen:]) + start = old_value_position.start() + prefix_start + optionLen + end = old_value_position.end() + prefix_start + optionLen + line = old_line[:start] + value + old_line[end:] + return line + + +def set_interface_option(module, lines, iface, option, raw_value, state, address_family=None): + value = str(raw_value) + changed = False + + iface_lines = [item for item in lines if "iface" in item and item["iface"] == iface] + if address_family is not None: + iface_lines = [item for item in iface_lines + if "address_family" in item and item["address_family"] == address_family] + + if len(iface_lines) < 1: + # interface not found + module.fail_json(msg="Error: interface %s not found" % iface) + return changed, None + + iface_options = get_interface_options(iface_lines) + target_options = get_target_options(iface_options, option) + + if state == "present": + if len(target_options) < 1: + changed = True + # add new option + last_line_dict = iface_lines[-1] + changed, lines = addOptionAfterLine(option, value, iface, lines, last_line_dict, iface_options, address_family) + else: + if option in ["pre-up", "up", "down", "post-up"]: + if len([i for i in target_options if i['value'] == value]) < 1: + changed, lines = addOptionAfterLine(option, value, iface, lines, target_options[-1], iface_options, address_family) + else: + # if more than one option found edit the last one + if target_options[-1]['value'] != value: + changed = True + target_option = target_options[-1] + line = update_existing_option_line(target_option, value) + address_family = target_option['address_family'] + index = len(lines) - lines[::-1].index(target_option) - 1 + lines[index] = optionDict(line, iface, option, value, address_family) + elif state == "absent": + if len(target_options) >= 1: + if option in ["pre-up", "up", "down", "post-up"] and value is not None and value != "None": + for target_option in [ito for ito in target_options if ito['value'] == 
value]: + changed = True + lines = [ln for ln in lines if ln != target_option] + else: + changed = True + for target_option in target_options: + lines = [ln for ln in lines if ln != target_option] + else: + module.fail_json(msg="Error: unsupported state %s, has to be either present or absent" % state) + + return changed, lines + + +def addOptionAfterLine(option, value, iface, lines, last_line_dict, iface_options, address_family): + # Changing method of interface is not an addition + if option == 'method': + changed = False + for ln in lines: + if ln.get('line_type', '') == 'iface' and ln.get('iface', '') == iface and value != ln.get('params', {}).get('method', ''): + if address_family is not None and ln.get('address_family') != address_family: + continue + changed = True + ln['line'] = re.sub(ln.get('params', {}).get('method', '') + '$', value, ln.get('line')) + ln['params']['method'] = value + return changed, lines + + last_line = last_line_dict['line'] + prefix_start = last_line.find(last_line.split()[0]) + suffix_start = last_line.rfind(last_line.split()[-1]) + len(last_line.split()[-1]) + prefix = last_line[:prefix_start] + + if len(iface_options) < 1: + # interface has no options, ident + prefix += " " + + line = prefix + "%s %s" % (option, value) + last_line[suffix_start:] + option_dict = optionDict(line, iface, option, value, address_family) + index = len(lines) - lines[::-1].index(last_line_dict) + lines.insert(index, option_dict) + return True, lines + + +def write_changes(module, lines, dest): + + tmpfd, tmpfile = tempfile.mkstemp() + with os.fdopen(tmpfd, 'wb') as f: + f.write(to_bytes(''.join(lines), errors='surrogate_or_strict')) + module.atomic_move(tmpfile, os.path.realpath(dest)) + + +def main(): + module = AnsibleModule( + argument_spec=dict( + dest=dict(type='path', default='/etc/network/interfaces'), + iface=dict(type='str'), + address_family=dict(type='str'), + option=dict(type='str'), + value=dict(type='str'), + backup=dict(type='bool', 
default=False), + state=dict(type='str', default='present', choices=['absent', 'present']), + ), + add_file_common_args=True, + supports_check_mode=True, + required_by=dict( + option=('iface',), + ), + ) + + dest = module.params['dest'] + iface = module.params['iface'] + address_family = module.params['address_family'] + option = module.params['option'] + value = module.params['value'] + backup = module.params['backup'] + state = module.params['state'] + + if option is not None and state == "present" and value is None: + module.fail_json(msg="Value must be set if option is defined and state is 'present'") + + lines, ifaces = read_interfaces_file(module, dest) + + changed = False + + if option is not None: + changed, lines = set_interface_option(module, lines, iface, option, value, state, address_family) + + if changed: + dummy, ifaces = read_interfaces_lines(module, [d['line'] for d in lines if 'line' in d]) + + if changed and not module.check_mode: + if backup: + module.backup_local(dest) + write_changes(module, [d['line'] for d in lines if 'line' in d], dest) + + module.exit_json(dest=dest, changed=changed, ifaces=ifaces) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/net_tools/ip_netns.py b/plugins/modules/ip_netns.py similarity index 67% rename from plugins/modules/net_tools/ip_netns.py rename to plugins/modules/ip_netns.py index 00f1112bc0..0a3fa9f86d 100644 --- a/plugins/modules/net_tools/ip_netns.py +++ b/plugins/modules/ip_netns.py @@ -1,48 +1,40 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- -# (c) 2017, Arie Bregman -# -# This file is a module for Ansible that interacts with Network Manager -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. 
-# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . -from __future__ import absolute_import, division, print_function -__metaclass__ = type +# Copyright (c) 2017, Arie Bregman +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -DOCUMENTATION = ''' ---- +from __future__ import annotations + +DOCUMENTATION = r""" module: ip_netns author: "Arie Bregman (@bregman-arie)" short_description: Manage network namespaces -requirements: [ ip ] +requirements: [ip] description: - - Create or delete network namespaces using the ip command. + - Create or delete network namespaces using the C(ip) command. +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: none options: - name: - required: false - description: - - Name of the namespace - type: str - state: - required: false - default: "present" - choices: [ present, absent ] - description: - - Whether the namespace should exist - type: str -''' + name: + required: false + description: + - Name of the namespace. + type: str + state: + required: false + default: "present" + choices: [present, absent] + description: + - Whether the namespace should exist. 
+ type: str +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: Create a namespace named mario community.general.ip_netns: name: mario @@ -52,11 +44,11 @@ EXAMPLES = ''' community.general.ip_netns: name: luigi state: absent -''' +""" -RETURN = ''' +RETURN = r""" # Default return values -''' +""" from ansible.module_utils.basic import AnsibleModule from ansible.module_utils.common.text.converters import to_text diff --git a/plugins/modules/identity/ipa/ipa_config.py b/plugins/modules/ipa_config.py similarity index 84% rename from plugins/modules/identity/ipa/ipa_config.py rename to plugins/modules/ipa_config.py index 2b41dfb098..ffa035d6e9 100644 --- a/plugins/modules/identity/ipa/ipa_config.py +++ b/plugins/modules/ipa_config.py @@ -1,18 +1,21 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- -# Copyright: (c) 2018, Fran Fitzpatrick -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# Copyright (c) 2018, Fran Fitzpatrick +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = r''' ---- +DOCUMENTATION = r""" module: ipa_config author: Fran Fitzpatrick (@fxfitz) short_description: Manage Global FreeIPA Configuration Settings description: -- Modify global configuration settings of a FreeIPA Server. + - Modify global configuration settings of a FreeIPA Server. +attributes: + check_mode: + support: full + diff_mode: + support: none options: ipaconfigstring: description: Extra hashes to generate in password plug-in. @@ -34,6 +37,12 @@ options: aliases: ["primarygroup"] type: str version_added: '2.5.0' + ipagroupobjectclasses: + description: A list of group objectclasses. 
+ aliases: ["groupobjectclasses"] + type: list + elements: str + version_added: '7.3.0' ipagroupsearchfields: description: A list of fields to search in when searching for groups. aliases: ["groupsearchfields"] @@ -79,12 +88,21 @@ options: elements: str version_added: '3.7.0' ipauserauthtype: - description: The authentication type to use by default. + description: + - The authentication type to use by default. + - The choice V(idp) has been added in community.general 7.3.0. + - The choice V(passkey) has been added in community.general 8.1.0. aliases: ["userauthtype"] - choices: ["password", "radius", "otp", "pkinit", "hardened", "disabled"] + choices: ["password", "radius", "otp", "pkinit", "hardened", "idp", "passkey", "disabled"] type: list elements: str version_added: '2.5.0' + ipauserobjectclasses: + description: A list of user objectclasses. + aliases: ["userobjectclasses"] + type: list + elements: str + version_added: '7.3.0' ipausersearchfields: description: A list of fields to search in when searching for users. aliases: ["usersearchfields"] @@ -92,11 +110,12 @@ options: elements: str version_added: '2.5.0' extends_documentation_fragment: -- community.general.ipa.documentation + - community.general.ipa.documentation + - community.general.ipa.connection_notes + - community.general.attributes +""" -''' - -EXAMPLES = r''' +EXAMPLES = r""" - name: Ensure password plugin features DC:Disable Last Success and KDC:Disable Lockout are enabled community.general.ipa_config: ipaconfigstring: ["KDC:Disable Last Success", "KDC:Disable Lockout"] @@ -199,14 +218,14 @@ EXAMPLES = r''' ipa_host: localhost ipa_user: admin ipa_pass: supersecret -''' +""" -RETURN = r''' +RETURN = r""" config: description: Configuration as returned by IPA API. 
returned: always type: dict -''' +""" import traceback @@ -228,11 +247,12 @@ class ConfigIPAClient(IPAClient): def get_config_dict(ipaconfigstring=None, ipadefaultloginshell=None, ipadefaultemaildomain=None, ipadefaultprimarygroup=None, - ipagroupsearchfields=None, ipahomesrootdir=None, - ipakrbauthzdata=None, ipamaxusernamelength=None, - ipapwdexpadvnotify=None, ipasearchrecordslimit=None, - ipasearchtimelimit=None, ipaselinuxusermaporder=None, - ipauserauthtype=None, ipausersearchfields=None): + ipagroupsearchfields=None, ipagroupobjectclasses=None, + ipahomesrootdir=None, ipakrbauthzdata=None, + ipamaxusernamelength=None, ipapwdexpadvnotify=None, + ipasearchrecordslimit=None, ipasearchtimelimit=None, + ipaselinuxusermaporder=None, ipauserauthtype=None, + ipausersearchfields=None, ipauserobjectclasses=None): config = {} if ipaconfigstring is not None: config['ipaconfigstring'] = ipaconfigstring @@ -242,6 +262,8 @@ def get_config_dict(ipaconfigstring=None, ipadefaultloginshell=None, config['ipadefaultemaildomain'] = ipadefaultemaildomain if ipadefaultprimarygroup is not None: config['ipadefaultprimarygroup'] = ipadefaultprimarygroup + if ipagroupobjectclasses is not None: + config['ipagroupobjectclasses'] = ipagroupobjectclasses if ipagroupsearchfields is not None: config['ipagroupsearchfields'] = ','.join(ipagroupsearchfields) if ipahomesrootdir is not None: @@ -260,6 +282,8 @@ def get_config_dict(ipaconfigstring=None, ipadefaultloginshell=None, config['ipaselinuxusermaporder'] = '$'.join(ipaselinuxusermaporder) if ipauserauthtype is not None: config['ipauserauthtype'] = ipauserauthtype + if ipauserobjectclasses is not None: + config['ipauserobjectclasses'] = ipauserobjectclasses if ipausersearchfields is not None: config['ipausersearchfields'] = ','.join(ipausersearchfields) @@ -276,6 +300,7 @@ def ensure(module, client): ipadefaultloginshell=module.params.get('ipadefaultloginshell'), ipadefaultemaildomain=module.params.get('ipadefaultemaildomain'), 
ipadefaultprimarygroup=module.params.get('ipadefaultprimarygroup'), + ipagroupobjectclasses=module.params.get('ipagroupobjectclasses'), ipagroupsearchfields=module.params.get('ipagroupsearchfields'), ipahomesrootdir=module.params.get('ipahomesrootdir'), ipakrbauthzdata=module.params.get('ipakrbauthzdata'), @@ -286,6 +311,7 @@ def ensure(module, client): ipaselinuxusermaporder=module.params.get('ipaselinuxusermaporder'), ipauserauthtype=module.params.get('ipauserauthtype'), ipausersearchfields=module.params.get('ipausersearchfields'), + ipauserobjectclasses=module.params.get('ipauserobjectclasses'), ) ipa_config = client.config_show() diff = get_config_diff(client, ipa_config, module_config) @@ -315,6 +341,8 @@ def main(): ipadefaultloginshell=dict(type='str', aliases=['loginshell']), ipadefaultemaildomain=dict(type='str', aliases=['emaildomain']), ipadefaultprimarygroup=dict(type='str', aliases=['primarygroup']), + ipagroupobjectclasses=dict(type='list', elements='str', + aliases=['groupobjectclasses']), ipagroupsearchfields=dict(type='list', elements='str', aliases=['groupsearchfields']), ipahomesrootdir=dict(type='str', aliases=['homesrootdir']), @@ -330,9 +358,11 @@ def main(): ipauserauthtype=dict(type='list', elements='str', aliases=['userauthtype'], choices=["password", "radius", "otp", "pkinit", - "hardened", "disabled"]), + "hardened", "idp", "passkey", "disabled"]), ipausersearchfields=dict(type='list', elements='str', aliases=['usersearchfields']), + ipauserobjectclasses=dict(type='list', elements='str', + aliases=['userobjectclasses']), ) module = AnsibleModule( diff --git a/plugins/modules/identity/ipa/ipa_dnsrecord.py b/plugins/modules/ipa_dnsrecord.py similarity index 69% rename from plugins/modules/identity/ipa/ipa_dnsrecord.py rename to plugins/modules/ipa_dnsrecord.py index 36f4cfdded..2507cc7f14 100644 --- a/plugins/modules/identity/ipa/ipa_dnsrecord.py +++ b/plugins/modules/ipa_dnsrecord.py @@ -1,87 +1,94 @@ #!/usr/bin/python -# -*- coding: utf-8 
-*- -# Copyright: (c) 2017, Abhijeet Kasurde (akasurde@redhat.com) +# Copyright (c) 2017, Abhijeet Kasurde (akasurde@redhat.com) # -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = r''' ---- +DOCUMENTATION = r""" module: ipa_dnsrecord author: Abhijeet Kasurde (@Akasurde) short_description: Manage FreeIPA DNS records description: -- Add, modify and delete an IPA DNS Record using IPA API. + - Add, modify and delete an IPA DNS Record using IPA API. +attributes: + check_mode: + support: full + diff_mode: + support: none options: zone_name: description: - - The DNS zone name to which DNS record needs to be managed. + - The DNS zone name to which DNS record needs to be managed. required: true type: str record_name: description: - - The DNS record name to manage. + - The DNS record name to manage. required: true aliases: ["name"] type: str record_type: description: - - The type of DNS record name. - - Currently, 'A', 'AAAA', 'A6', 'CNAME', 'DNAME', 'PTR', 'TXT', 'SRV' and 'MX' are supported. - - "'A6', 'CNAME', 'DNAME' and 'TXT' are added in version 2.5." - - "'SRV' and 'MX' are added in version 2.8." + - The type of DNS record name. + - Support for V(NS) was added in comunity.general 8.2.0. + - Support for V(SSHFP) was added in community.general 9.1.0. required: false default: 'A' - choices: ['A', 'AAAA', 'A6', 'CNAME', 'DNAME', 'MX', 'PTR', 'SRV', 'TXT'] + choices: ['A', 'AAAA', 'A6', 'CNAME', 'DNAME', 'MX', 'NS', 'PTR', 'SRV', 'TXT', 'SSHFP'] type: str record_value: description: - - Manage DNS record name with this value. 
- - Mutually exclusive with I(record_values), and exactly one of I(record_value) and I(record_values) has to be specified. - - Use I(record_values) if you need to specify multiple values. - - In the case of 'A' or 'AAAA' record types, this will be the IP address. - - In the case of 'A6' record type, this will be the A6 Record data. - - In the case of 'CNAME' record type, this will be the hostname. - - In the case of 'DNAME' record type, this will be the DNAME target. - - In the case of 'PTR' record type, this will be the hostname. - - In the case of 'TXT' record type, this will be a text. - - In the case of 'SRV' record type, this will be a service record. - - In the case of 'MX' record type, this will be a mail exchanger record. + - Manage DNS record name with this value. + - Mutually exclusive with O(record_values), and exactly one of O(record_value) and O(record_values) has to be specified. + - Use O(record_values) if you need to specify multiple values. + - In the case of V(A) or V(AAAA) record types, this is the IP address. + - In the case of V(A6) record type, this is the A6 Record data. + - In the case of V(CNAME) record type, this is the hostname. + - In the case of V(DNAME) record type, this is the DNAME target. + - In the case of V(NS) record type, this is the name server hostname. Hostname must already have a valid A or AAAA record. + - In the case of V(PTR) record type, this is the hostname. + - In the case of V(TXT) record type, this is a text. + - In the case of V(SRV) record type, this is a service record. + - In the case of V(MX) record type, this is a mail exchanger record. + - In the case of V(SSHFP) record type, this is an SSH fingerprint record. type: str record_values: description: - - Manage DNS record name with this value. - - Mutually exclusive with I(record_values), and exactly one of I(record_value) and I(record_values) has to be specified. - - In the case of 'A' or 'AAAA' record types, this will be the IP address. 
- - In the case of 'A6' record type, this will be the A6 Record data. - - In the case of 'CNAME' record type, this will be the hostname. - - In the case of 'DNAME' record type, this will be the DNAME target. - - In the case of 'PTR' record type, this will be the hostname. - - In the case of 'TXT' record type, this will be a text. - - In the case of 'SRV' record type, this will be a service record. - - In the case of 'MX' record type, this will be a mail exchanger record. + - Manage DNS record name with this value. + - Mutually exclusive with O(record_value), and exactly one of O(record_value) and O(record_values) has to be specified. + - In the case of V(A) or V(AAAA) record types, this is the IP address. + - In the case of V(A6) record type, this is the A6 Record data. + - In the case of V(CNAME) record type, this is the hostname. + - In the case of V(DNAME) record type, this is the DNAME target. + - In the case of V(NS) record type, this is the name server hostname. Hostname must already have a valid A or AAAA record. + - In the case of V(PTR) record type, this is the hostname. + - In the case of V(TXT) record type, this is a text. + - In the case of V(SRV) record type, this is a service record. + - In the case of V(MX) record type, this is a mail exchanger record. + - In the case of V(SSHFP) record type, this is an SSH fingerprint record. type: list elements: str record_ttl: description: - - Set the TTL for the record. - - Applies only when adding a new or changing the value of I(record_value) or I(record_values). + - Set the TTL for the record. + - Applies only when adding a new or changing the value of O(record_value) or O(record_values). required: false type: int state: - description: State to ensure + description: State to ensure. 
required: false default: present choices: ["absent", "present"] type: str extends_documentation_fragment: -- community.general.ipa.documentation + - community.general.ipa.documentation + - community.general.ipa.connection_notes + - community.general.attributes +""" -''' - -EXAMPLES = r''' +EXAMPLES = r""" - name: Ensure dns record is present community.general.ipa_dnsrecord: ipa_host: spider.example.com @@ -155,14 +162,38 @@ EXAMPLES = r''' ipa_user: admin ipa_pass: topsecret state: absent -''' -RETURN = r''' +- name: Ensure an NS record for a subdomain is present + community.general.ipa_dnsrecord: + name: subdomain + zone_name: example.com + record_type: 'NS' + record_value: 'ns1.subdomain.exmaple.com' + ipa_host: ipa.example.com + ipa_user: admin + ipa_pass: ChangeMe! + +- name: Retrieve the current sshfp fingerprints + ansible.builtin.command: ssh-keyscan -D localhost + register: ssh_hostkeys + +- name: Update the SSHFP records in DNS + community.general.ipa_dnsrecord: + name: "{{ inventory_hostname}}" + zone_name: example.com + record_type: 'SSHFP' + record_values: "{{ ssh_hostkeys.stdout.split('\n') | map('split', 'SSHFP ') | map('last') | list }}" + ipa_host: ipa.example.com + ipa_user: admin + ipa_pass: ChangeMe! +""" + +RETURN = r""" dnsrecord: description: DNS record as returned by IPA API. 
returned: always type: dict -''' +""" import traceback @@ -198,6 +229,8 @@ class DNSRecordIPAClient(IPAClient): item.update(cname_part_hostname=value) elif details['record_type'] == 'DNAME': item.update(dname_part_target=value) + elif details['record_type'] == 'NS': + item.update(ns_part_hostname=value) elif details['record_type'] == 'PTR': item.update(ptr_part_hostname=value) elif details['record_type'] == 'TXT': @@ -206,6 +239,8 @@ class DNSRecordIPAClient(IPAClient): item.update(srvrecord=value) elif details['record_type'] == 'MX': item.update(mxrecord=value) + elif details['record_type'] == 'SSHFP': + item.update(sshfprecord=value) self._post_json(method='dnsrecord_add', name=zone_name, item=item) @@ -234,6 +269,8 @@ def get_dnsrecord_dict(details=None): module_dnsrecord.update(cnamerecord=details['record_values']) elif details['record_type'] == 'DNAME' and details['record_values']: module_dnsrecord.update(dnamerecord=details['record_values']) + elif details['record_type'] == 'NS' and details['record_values']: + module_dnsrecord.update(nsrecord=details['record_values']) elif details['record_type'] == 'PTR' and details['record_values']: module_dnsrecord.update(ptrrecord=details['record_values']) elif details['record_type'] == 'TXT' and details['record_values']: @@ -242,6 +279,8 @@ def get_dnsrecord_dict(details=None): module_dnsrecord.update(srvrecord=details['record_values']) elif details['record_type'] == 'MX' and details['record_values']: module_dnsrecord.update(mxrecord=details['record_values']) + elif details['record_type'] == 'SSHFP' and details['record_values']: + module_dnsrecord.update(sshfprecord=details['record_values']) if details.get('record_ttl'): module_dnsrecord.update(dnsttl=details['record_ttl']) @@ -304,7 +343,7 @@ def ensure(module, client): def main(): - record_types = ['A', 'AAAA', 'A6', 'CNAME', 'DNAME', 'PTR', 'TXT', 'SRV', 'MX'] + record_types = ['A', 'AAAA', 'A6', 'CNAME', 'DNAME', 'NS', 'PTR', 'TXT', 'SRV', 'MX', 'SSHFP'] argument_spec 
= ipa_argument_spec() argument_spec.update( zone_name=dict(type='str', required=True), @@ -313,7 +352,7 @@ def main(): record_value=dict(type='str'), record_values=dict(type='list', elements='str'), state=dict(type='str', default='present', choices=['present', 'absent']), - record_ttl=dict(type='int', required=False), + record_ttl=dict(type='int'), ) module = AnsibleModule( diff --git a/plugins/modules/identity/ipa/ipa_dnszone.py b/plugins/modules/ipa_dnszone.py similarity index 84% rename from plugins/modules/identity/ipa/ipa_dnszone.py rename to plugins/modules/ipa_dnszone.py index 33ae59e9d0..57faaef955 100644 --- a/plugins/modules/identity/ipa/ipa_dnszone.py +++ b/plugins/modules/ipa_dnszone.py @@ -1,27 +1,30 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- -# Copyright: (c) 2017, Fran Fitzpatrick (francis.x.fitzpatrick@gmail.com) +# Copyright (c) 2017, Fran Fitzpatrick (francis.x.fitzpatrick@gmail.com) # Borrowed heavily from other work by Abhijeet Kasurde (akasurde@redhat.com) -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = r''' ---- +DOCUMENTATION = r""" module: ipa_dnszone author: Fran Fitzpatrick (@fxfitz) short_description: Manage FreeIPA DNS Zones description: -- Add and delete an IPA DNS Zones using IPA API + - Add and delete an IPA DNS Zones using IPA API. +attributes: + check_mode: + support: full + diff_mode: + support: none options: zone_name: description: - - The DNS zone name to which needs to be managed. + - The DNS zone name to which needs to be managed. required: true type: str state: - description: State to ensure + description: State to ensure. 
required: false default: present choices: ["absent", "present"] @@ -36,11 +39,12 @@ options: type: bool version_added: 4.3.0 extends_documentation_fragment: -- community.general.ipa.documentation + - community.general.ipa.documentation + - community.general.ipa.connection_notes + - community.general.attributes +""" -''' - -EXAMPLES = r''' +EXAMPLES = r""" - name: Ensure dns zone is present community.general.ipa_dnszone: ipa_host: spider.example.com @@ -71,14 +75,14 @@ EXAMPLES = r''' state: present zone_name: example.com allowsyncptr: true -''' +""" -RETURN = r''' +RETURN = r""" zone: description: DNS zone as returned by IPA API. returned: always type: dict -''' +""" from ansible.module_utils.basic import AnsibleModule from ansible_collections.community.general.plugins.module_utils.ipa import IPAClient, ipa_argument_spec @@ -145,7 +149,8 @@ def ensure(module, client): changed = True if not module.check_mode: client.dnszone_add(zone_name=zone_name, details={'idnsallowdynupdate': dynamicupdate, 'idnsallowsyncptr': allowsyncptr}) - elif ipa_dnszone['idnsallowdynupdate'][0] != str(dynamicupdate).upper() or ipa_dnszone['idnsallowsyncptr'][0] != str(allowsyncptr).upper(): + elif ipa_dnszone['idnsallowdynupdate'][0] != str(dynamicupdate).upper() or \ + ipa_dnszone.get('idnsallowsyncptr') and ipa_dnszone['idnsallowsyncptr'][0] != str(allowsyncptr).upper(): changed = True if not module.check_mode: client.dnszone_mod(zone_name=zone_name, details={'idnsallowdynupdate': dynamicupdate, 'idnsallowsyncptr': allowsyncptr}) @@ -167,8 +172,8 @@ def main(): argument_spec = ipa_argument_spec() argument_spec.update(zone_name=dict(type='str', required=True), state=dict(type='str', default='present', choices=['present', 'absent']), - dynamicupdate=dict(type='bool', required=False, default=False), - allowsyncptr=dict(type='bool', required=False, default=False), + dynamicupdate=dict(type='bool', default=False), + allowsyncptr=dict(type='bool', default=False), ) module = 
AnsibleModule(argument_spec=argument_spec, diff --git a/plugins/modules/ipa_getkeytab.py b/plugins/modules/ipa_getkeytab.py new file mode 100644 index 0000000000..0b4e102ac0 --- /dev/null +++ b/plugins/modules/ipa_getkeytab.py @@ -0,0 +1,243 @@ +#!/usr/bin/python +# Copyright (c) 2024 Alexander Bakanovskii +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + + +DOCUMENTATION = r""" +module: ipa_getkeytab +short_description: Manage keytab file in FreeIPA +version_added: 9.5.0 +description: + - Manage keytab file with C(ipa-getkeytab) utility. + - See U(https://manpages.ubuntu.com/manpages/jammy/man1/ipa-getkeytab.1.html) for reference. +author: "Alexander Bakanovskii (@abakanovskii)" +attributes: + check_mode: + support: full + diff_mode: + support: none +options: + path: + description: + - The base path where to put generated keytab file. + type: path + aliases: ["keytab"] + required: true + principal: + description: + - The non-realm part of the full principal name. + type: str + required: true + ipa_host: + description: + - The IPA server to retrieve the keytab from (FQDN). + type: str + ldap_uri: + description: + - LDAP URI. If V(ldap://) is specified, STARTTLS is initiated by default. + - Can not be used with the O(ipa_host) option. + type: str + bind_dn: + description: + - The LDAP DN to bind as when retrieving a keytab without Kerberos credentials. + - Generally used with the O(bind_pw) option. + type: str + bind_pw: + description: + - The LDAP password to use when not binding with Kerberos. + type: str + password: + description: + - Use this password for the key instead of one randomly generated. + type: str + ca_cert: + description: + - The path to the IPA CA certificate used to validate LDAPS/STARTTLS connections. 
+ type: path + sasl_mech: + description: + - SASL mechanism to use if O(bind_dn) and O(bind_pw) are not specified. + choices: ["GSSAPI", "EXTERNAL"] + type: str + retrieve_mode: + description: + - Retrieve an existing key from the server instead of generating a new one. + - This is incompatible with the O(password), and works only against a IPA server more recent than version 3.3. + - The user requesting the keytab must have access to the keys for this operation to succeed. + - Be aware that if set V(true), a new keytab is generated. + - This invalidates all previously retrieved keytabs for this service principal. + type: bool + encryption_types: + description: + - The list of encryption types to use to generate keys. + - It uses local client defaults if not provided. + - Valid values depend on the Kerberos library version and configuration. + type: str + state: + description: + - The state of the keytab file. + - V(present) only check for existence of a file, if you want to recreate keytab with other parameters you should set + O(force=true). + type: str + default: present + choices: ["present", "absent"] + force: + description: + - Force recreation if exists already. 
+ type: bool +requirements: + - freeipa-client + - Managed host is FreeIPA client +extends_documentation_fragment: + - community.general.attributes +""" + +EXAMPLES = r""" +- name: Get Kerberos ticket using default principal + community.general.krb_ticket: + password: "{{ aldpro_admin_password }}" + +- name: Create keytab + community.general.ipa_getkeytab: + path: /etc/ipa/test.keytab + principal: HTTP/freeipa-dc02.ipa.test + ipa_host: freeipa-dc01.ipa.test + +- name: Retrieve already existing keytab + community.general.ipa_getkeytab: + path: /etc/ipa/test.keytab + principal: HTTP/freeipa-dc02.ipa.test + ipa_host: freeipa-dc01.ipa.test + retrieve_mode: true + +- name: Force keytab recreation + community.general.ipa_getkeytab: + path: /etc/ipa/test.keytab + principal: HTTP/freeipa-dc02.ipa.test + ipa_host: freeipa-dc01.ipa.test + force: true +""" + +import os + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.cmd_runner import CmdRunner, cmd_runner_fmt + + +class IPAKeytab(object): + def __init__(self, module, **kwargs): + self.module = module + self.path = kwargs['path'] + self.state = kwargs['state'] + self.principal = kwargs['principal'] + self.ipa_host = kwargs['ipa_host'] + self.ldap_uri = kwargs['ldap_uri'] + self.bind_dn = kwargs['bind_dn'] + self.bind_pw = kwargs['bind_pw'] + self.password = kwargs['password'] + self.ca_cert = kwargs['ca_cert'] + self.sasl_mech = kwargs['sasl_mech'] + self.retrieve_mode = kwargs['retrieve_mode'] + self.encryption_types = kwargs['encryption_types'] + + self.runner = CmdRunner( + module, + command='ipa-getkeytab', + arg_formats=dict( + retrieve_mode=cmd_runner_fmt.as_bool('--retrieve'), + path=cmd_runner_fmt.as_opt_val('--keytab'), + ipa_host=cmd_runner_fmt.as_opt_val('--server'), + principal=cmd_runner_fmt.as_opt_val('--principal'), + ldap_uri=cmd_runner_fmt.as_opt_val('--ldapuri'), + bind_dn=cmd_runner_fmt.as_opt_val('--binddn'), + 
bind_pw=cmd_runner_fmt.as_opt_val('--bindpw'), + password=cmd_runner_fmt.as_opt_val('--password'), + ca_cert=cmd_runner_fmt.as_opt_val('--cacert'), + sasl_mech=cmd_runner_fmt.as_opt_val('--mech'), + encryption_types=cmd_runner_fmt.as_opt_val('--enctypes'), + ) + ) + + def _exec(self, check_rc=True): + with self.runner( + "retrieve_mode path ipa_host principal ldap_uri bind_dn bind_pw password ca_cert sasl_mech encryption_types", + check_rc=check_rc + ) as ctx: + rc, out, err = ctx.run() + return out + + +def main(): + arg_spec = dict( + path=dict(type='path', required=True, aliases=["keytab"]), + state=dict(default='present', choices=['present', 'absent']), + principal=dict(type='str', required=True), + ipa_host=dict(type='str'), + ldap_uri=dict(type='str'), + bind_dn=dict(type='str'), + bind_pw=dict(type='str'), + password=dict(type='str', no_log=True), + ca_cert=dict(type='path'), + sasl_mech=dict(type='str', choices=["GSSAPI", "EXTERNAL"]), + retrieve_mode=dict(type='bool'), + encryption_types=dict(type='str'), + force=dict(type='bool'), + ) + module = AnsibleModule( + argument_spec=arg_spec, + mutually_exclusive=[('ipa_host', 'ldap_uri'), ('retrieve_mode', 'password')], + supports_check_mode=True, + ) + + path = module.params['path'] + state = module.params['state'] + force = module.params['force'] + + keytab = IPAKeytab(module, + path=path, + state=state, + principal=module.params['principal'], + ipa_host=module.params['ipa_host'], + ldap_uri=module.params['ldap_uri'], + bind_dn=module.params['bind_dn'], + bind_pw=module.params['bind_pw'], + password=module.params['password'], + ca_cert=module.params['ca_cert'], + sasl_mech=module.params['sasl_mech'], + retrieve_mode=module.params['retrieve_mode'], + encryption_types=module.params['encryption_types'], + ) + + changed = False + if state == 'present': + if os.path.exists(path): + if force and not module.check_mode: + try: + os.remove(path) + except OSError as e: + module.fail_json(msg="Error deleting: %s - %s." 
% (e.filename, e.strerror)) + keytab._exec() + changed = True + if force and module.check_mode: + changed = True + else: + changed = True + keytab._exec() + + if state == 'absent': + if os.path.exists(path): + changed = True + if not module.check_mode: + try: + os.remove(path) + except OSError as e: + module.fail_json(msg="Error deleting: %s - %s." % (e.filename, e.strerror)) + + module.exit_json(changed=changed) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/identity/ipa/ipa_group.py b/plugins/modules/ipa_group.py similarity index 65% rename from plugins/modules/identity/ipa/ipa_group.py rename to plugins/modules/ipa_group.py index d6af57ba1f..2c004c8bb7 100644 --- a/plugins/modules/identity/ipa/ipa_group.py +++ b/plugins/modules/ipa_group.py @@ -1,80 +1,95 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- -# Copyright: (c) 2017, Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# Copyright (c) 2017, Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = r''' ---- +DOCUMENTATION = r""" module: ipa_group author: Thomas Krahn (@Nosmoht) short_description: Manage FreeIPA group description: -- Add, modify and delete group within IPA server + - Add, modify and delete group within IPA server. +attributes: + check_mode: + support: full + diff_mode: + support: none options: append: description: - - If C(yes), add the listed I(user) and I(group) to the group members. - - If C(no), only the listed I(user) and I(group) will be group members, removing any other members. - default: no + - If V(true), add the listed O(user) and O(group) to the group members. 
+ - If V(false), only the listed O(user) and O(group) are set as group members, removing any other members. + default: false type: bool version_added: 4.0.0 cn: description: - - Canonical name. - - Can not be changed as it is the unique identifier. + - Canonical name. + - Can not be changed as it is the unique identifier. required: true aliases: ['name'] type: str description: description: - - Description of the group. + - Description of the group. type: str external: description: - - Allow adding external non-IPA members from trusted domains. + - Allow adding external non-IPA members from trusted domains. type: bool gidnumber: description: - - GID (use this option to set it manually). + - GID (use this option to set it manually). aliases: ['gid'] type: str group: description: - - List of group names assigned to this group. - - If I(append=no) and an empty list is passed all groups will be removed from this group. - - Groups that are already assigned but not passed will be removed. - - If I(append=yes) the listed groups will be assigned without removing other groups. - - If option is omitted assigned groups will not be checked or changed. + - List of group names assigned to this group. + - If O(append=false) and an empty list is passed all groups are removed from this group. + - Groups that are already assigned but not passed are removed. + - If O(append=true) the listed groups are assigned without removing other groups. + - If option is omitted assigned groups are not checked or changed. type: list elements: str nonposix: description: - - Create as a non-POSIX group. + - Create as a non-POSIX group. type: bool user: description: - - List of user names assigned to this group. - - If I(append=no) and an empty list is passed all users will be removed from this group. - - Users that are already assigned but not passed will be removed. - - If I(append=yes) the listed users will be assigned without removing other users. 
- - If option is omitted assigned users will not be checked or changed. + - List of user names assigned to this group. + - If O(append=false) and an empty list is passed all users are removed from this group. + - Users that are already assigned but not passed are removed. + - If O(append=true) the listed users are assigned without removing other users. + - If option is omitted assigned users are not checked or changed. type: list elements: str + external_user: + description: + - List of external users assigned to this group. + - Behaves identically to O(user) with respect to O(append) attribute. + - List entries can be in V(DOMAIN\\\\username) or SID format. + - Unless SIDs are provided, the module always attempts to make changes even if the group already has all the users. + This is because only SIDs are returned by IPA query. + - O(external=true) is needed for this option to work. + type: list + elements: str + version_added: 6.3.0 state: description: - - State to ensure + - State to ensure. 
default: "present" choices: ["absent", "present"] type: str extends_documentation_fragment: -- community.general.ipa.documentation + - community.general.ipa.documentation + - community.general.ipa.connection_notes + - community.general.attributes +""" -''' - -EXAMPLES = r''' +EXAMPLES = r""" - name: Ensure group is present community.general.ipa_group: name: oinstall @@ -88,8 +103,8 @@ EXAMPLES = r''' community.general.ipa_group: name: ops group: - - sysops - - appops + - sysops + - appops ipa_host: ipa.example.com ipa_user: admin ipa_pass: topsecret @@ -98,8 +113,8 @@ EXAMPLES = r''' community.general.ipa_group: name: sysops user: - - linus - - larry + - linus + - larry ipa_host: ipa.example.com ipa_user: admin ipa_pass: topsecret @@ -108,13 +123,35 @@ EXAMPLES = r''' community.general.ipa_group: name: developers user: - - john - append: yes + - john + append: true state: present ipa_host: ipa.example.com ipa_user: admin ipa_pass: topsecret +- name: Add external user to a group + community.general.ipa_group: + name: developers + external: true + append: true + external_user: + - S-1-5-21-123-1234-12345-63421 + ipa_host: ipa.example.com + ipa_user: admin + ipa_pass: topsecret + +- name: Add a user from MYDOMAIN + community.general.ipa_group: + name: developers + external: true + append: true + external_user: + - MYDOMAIN\\john + ipa_host: ipa.example.com + ipa_user: admin + ipa_pass: topsecret + - name: Ensure group is absent community.general.ipa_group: name: sysops @@ -122,14 +159,14 @@ EXAMPLES = r''' ipa_host: ipa.example.com ipa_user: admin ipa_pass: topsecret -''' +""" -RETURN = r''' +RETURN = r""" group: - description: Group as returned by IPA API + description: Group as returned by IPA API. 
returned: always type: dict -''' +""" import traceback @@ -163,6 +200,9 @@ class GroupIPAClient(IPAClient): def group_add_member_user(self, name, item): return self.group_add_member(name=name, item={'user': item}) + def group_add_member_externaluser(self, name, item): + return self.group_add_member(name=name, item={'ipaexternalmember': item}) + def group_remove_member(self, name, item): return self._post_json(method='group_remove_member', name=name, item=item) @@ -172,6 +212,9 @@ class GroupIPAClient(IPAClient): def group_remove_member_user(self, name, item): return self.group_remove_member(name=name, item={'user': item}) + def group_remove_member_externaluser(self, name, item): + return self.group_remove_member(name=name, item={'ipaexternalmember': item}) + def get_group_dict(description=None, external=None, gid=None, nonposix=None): group = {} @@ -207,12 +250,19 @@ def ensure(module, client): name = module.params['cn'] group = module.params['group'] user = module.params['user'] + external = module.params['external'] + external_user = module.params['external_user'] append = module.params['append'] - module_group = get_group_dict(description=module.params['description'], external=module.params['external'], - gid=module.params['gidnumber'], nonposix=module.params['nonposix']) + module_group = get_group_dict(description=module.params['description'], + external=external, + gid=module.params['gidnumber'], + nonposix=module.params['nonposix']) ipa_group = client.group_find(name=name) + if not (external or external_user is None): + module.fail_json("external_user can only be set if external = True") + changed = False if state == 'present': if not ipa_group: @@ -241,6 +291,11 @@ def ensure(module, client): client.group_remove_member_user, append=append) or changed + if external_user is not None: + changed = client.modify_if_diff(name, ipa_group.get('ipaexternalmember', []), external_user, + client.group_add_member_externaluser, + client.group_remove_member_externaluser, + 
append=append) or changed else: if ipa_group: changed = True @@ -255,6 +310,7 @@ def main(): argument_spec.update(cn=dict(type='str', required=True, aliases=['name']), description=dict(type='str'), external=dict(type='bool'), + external_user=dict(type='list', elements='str'), gidnumber=dict(type='str', aliases=['gid']), group=dict(type='list', elements='str'), nonposix=dict(type='bool'), diff --git a/plugins/modules/identity/ipa/ipa_hbacrule.py b/plugins/modules/ipa_hbacrule.py similarity index 79% rename from plugins/modules/identity/ipa/ipa_hbacrule.py rename to plugins/modules/ipa_hbacrule.py index 5f0704d58b..67e39bbe98 100644 --- a/plugins/modules/identity/ipa/ipa_hbacrule.py +++ b/plugins/modules/ipa_hbacrule.py @@ -1,113 +1,117 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- -# Copyright: (c) 2017, Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# Copyright (c) 2017, Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = r''' ---- +DOCUMENTATION = r""" module: ipa_hbacrule author: Thomas Krahn (@Nosmoht) short_description: Manage FreeIPA HBAC rule description: -- Add, modify or delete an IPA HBAC rule using IPA API. + - Add, modify or delete an IPA HBAC rule using IPA API. +attributes: + check_mode: + support: full + diff_mode: + support: none options: cn: description: - - Canonical name. - - Can not be changed as it is the unique identifier. + - Canonical name. + - Can not be changed as it is the unique identifier. required: true aliases: ["name"] type: str description: - description: Description + description: Description. type: str host: description: - - List of host names to assign. 
- - If an empty list is passed all hosts will be removed from the rule. - - If option is omitted hosts will not be checked or changed. + - List of host names to assign. + - If an empty list is passed all hosts are removed from the rule. + - If option is omitted hosts are not checked or changed. required: false type: list elements: str hostcategory: - description: Host category + description: Host category. choices: ['all'] type: str hostgroup: description: - - List of hostgroup names to assign. - - If an empty list is passed all hostgroups will be removed. from the rule - - If option is omitted hostgroups will not be checked or changed. + - List of hostgroup names to assign. + - If an empty list is passed all hostgroups are removed from the rule. + - If option is omitted hostgroups are not checked or changed. type: list elements: str service: description: - - List of service names to assign. - - If an empty list is passed all services will be removed from the rule. - - If option is omitted services will not be checked or changed. + - List of service names to assign. + - If an empty list is passed all services are removed from the rule. + - If option is omitted services are not checked or changed. type: list elements: str servicecategory: - description: Service category + description: Service category. choices: ['all'] type: str servicegroup: description: - - List of service group names to assign. - - If an empty list is passed all assigned service groups will be removed from the rule. - - If option is omitted service groups will not be checked or changed. + - List of service group names to assign. + - If an empty list is passed all assigned service groups are removed from the rule. + - If option is omitted service groups are not checked or changed. type: list elements: str sourcehost: description: - - List of source host names to assign. - - If an empty list if passed all assigned source hosts will be removed from the rule. 
- - If option is omitted source hosts will not be checked or changed. + - List of source host names to assign. + - If an empty list is passed all assigned source hosts are removed from the rule. + - If option is omitted source hosts are not checked or changed. type: list elements: str sourcehostcategory: - description: Source host category + description: Source host category. choices: ['all'] type: str sourcehostgroup: description: - - List of source host group names to assign. - - If an empty list if passed all assigned source host groups will be removed from the rule. - - If option is omitted source host groups will not be checked or changed. + - List of source host group names to assign. + - If an empty list is passed all assigned source host groups are removed from the rule. + - If option is omitted source host groups are not checked or changed. type: list elements: str state: - description: State to ensure + description: State to ensure. default: "present" - choices: ["absent", "disabled", "enabled","present"] + choices: ["absent", "disabled", "enabled", "present"] type: str user: description: - - List of user names to assign. - - If an empty list if passed all assigned users will be removed from the rule. - - If option is omitted users will not be checked or changed. + - List of user names to assign. + - If an empty list is passed all assigned users are removed from the rule. + - If option is omitted users are not checked or changed. type: list elements: str usercategory: - description: User category + description: User category. choices: ['all'] type: str usergroup: description: - - List of user group names to assign. - - If an empty list if passed all assigned user groups will be removed from the rule. - - If option is omitted user groups will not be checked or changed. + - List of user group names to assign. + - If an empty list is passed all assigned user groups are removed from the rule. + - If option is omitted user groups are not checked or changed. 
type: list elements: str extends_documentation_fragment: -- community.general.ipa.documentation + - community.general.ipa.documentation + - community.general.ipa.connection_notes + - community.general.attributes +""" -''' - -EXAMPLES = r''' +EXAMPLES = r""" - name: Ensure rule to allow all users to access any host from any host community.general.ipa_hbacrule: name: allow_all @@ -125,9 +129,9 @@ EXAMPLES = r''' name: allow_all_developers_access_to_db description: Allow all developers to access any database from any host hostgroup: - - db-server + - db-server usergroup: - - developers + - developers state: present ipa_host: ipa.example.com ipa_user: admin @@ -140,20 +144,21 @@ EXAMPLES = r''' ipa_host: ipa.example.com ipa_user: admin ipa_pass: topsecret -''' +""" -RETURN = r''' +RETURN = r""" hbacrule: description: HBAC rule as returned by IPA API. returned: always type: dict -''' +""" import traceback from ansible.module_utils.basic import AnsibleModule from ansible_collections.community.general.plugins.module_utils.ipa import IPAClient, ipa_argument_spec from ansible.module_utils.common.text.converters import to_native +from ansible_collections.community.general.plugins.module_utils.version import LooseVersion class HBACRuleIPAClient(IPAClient): @@ -224,10 +229,17 @@ def ensure(module, client): name = module.params['cn'] state = module.params['state'] + ipa_version = client.get_ipa_version() if state in ['present', 'enabled']: - ipaenabledflag = 'TRUE' + if LooseVersion(ipa_version) < LooseVersion('4.9.10'): + ipaenabledflag = 'TRUE' + else: + ipaenabledflag = True else: - ipaenabledflag = 'FALSE' + if LooseVersion(ipa_version) < LooseVersion('4.9.10'): + ipaenabledflag = 'FALSE' + else: + ipaenabledflag = False host = module.params['host'] hostcategory = module.params['hostcategory'] diff --git a/plugins/modules/identity/ipa/ipa_host.py b/plugins/modules/ipa_host.py similarity index 62% rename from plugins/modules/identity/ipa/ipa_host.py rename to 
plugins/modules/ipa_host.py index 25c65f0b34..b3e796f1f5 100644 --- a/plugins/modules/identity/ipa/ipa_host.py +++ b/plugins/modules/ipa_host.py @@ -1,102 +1,126 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- -# Copyright: (c) 2017, Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# Copyright (c) 2017, Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = r''' ---- +DOCUMENTATION = r""" module: ipa_host author: Thomas Krahn (@Nosmoht) short_description: Manage FreeIPA host description: -- Add, modify and delete an IPA host using IPA API. + - Add, modify and delete an IPA host using IPA API. +attributes: + check_mode: + support: full + diff_mode: + support: none options: fqdn: description: - - Full qualified domain name. - - Can not be changed as it is the unique identifier. + - Full qualified domain name. + - Can not be changed as it is the unique identifier. required: true aliases: ["name"] type: str description: description: - - A description of this host. + - A description of this host. type: str + userclass: + description: + - Host category (semantics placed on this attribute are for local interpretation). + type: str + version_added: 12.0.0 force: description: - - Force host name even if not in DNS. + - Force host name even if not in DNS. required: false type: bool ip_address: description: - - Add the host to DNS with this IP address. + - Add the host to DNS with this IP address. type: str mac_address: description: - - List of Hardware MAC address(es) off this host. - - If option is omitted MAC addresses will not be checked or changed. - - If an empty list is passed all assigned MAC addresses will be removed. 
- - MAC addresses that are already assigned but not passed will be removed. + - List of Hardware MAC address(es) of this host. + - If option is omitted MAC addresses are not checked nor changed. + - If an empty list is passed all assigned MAC addresses are removed. + - MAC addresses that are already assigned but not passed are removed. aliases: ["macaddress"] type: list elements: str + l: + description: + - Host locality (for example V(Baltimore, MD)). + aliases: ["locality"] + type: str + version_added: 12.0.0 ns_host_location: description: - - Host location (e.g. "Lab 2") + - Host location (for example V(Lab 2)). aliases: ["nshostlocation"] type: str ns_hardware_platform: description: - - Host hardware platform (e.g. "Lenovo T61") + - Host hardware platform (for example V(Lenovo T61)). aliases: ["nshardwareplatform"] type: str ns_os_version: description: - - Host operating system and version (e.g. "Fedora 9") + - Host operating system and version (for example V(Fedora 9)). aliases: ["nsosversion"] type: str user_certificate: description: - - List of Base-64 encoded server certificates. - - If option is omitted certificates will not be checked or changed. - - If an empty list is passed all assigned certificates will be removed. - - Certificates already assigned but not passed will be removed. + - List of Base-64 encoded server certificates. + - If option is omitted certificates are not checked nor changed. + - If an empty list is passed all assigned certificates are removed. + - Certificates already assigned but not passed are removed. aliases: ["usercertificate"] type: list elements: str state: - description: State to ensure. + description: + - State to ensure. default: present choices: ["absent", "disabled", "enabled", "present"] type: str + force_creation: + description: + - Create host if O(state=disabled) or O(state=enabled) but not present. 
+ default: true + type: bool + version_added: 9.5.0 update_dns: description: - - If set C("True") with state as C("absent"), then removes DNS records of the host managed by FreeIPA DNS. - - This option has no effect for states other than "absent". + - If set V(true) with O(state=absent), then removes DNS records of the host managed by FreeIPA DNS. + - This option has no effect for states other than V(absent). type: bool random_password: description: Generate a random password to be used in bulk enrollment. type: bool extends_documentation_fragment: -- community.general.ipa.documentation + - community.general.ipa.documentation + - community.general.ipa.connection_notes + - community.general.attributes +""" -''' - -EXAMPLES = r''' +EXAMPLES = r""" - name: Ensure host is present community.general.ipa_host: name: host01.example.com description: Example host + userclass: Server ip_address: 192.168.0.123 + locality: Baltimore, MD ns_host_location: Lab ns_os_version: CentOS 7 ns_hardware_platform: Lenovo T61 mac_address: - - "08:00:27:E3:B1:2D" - - "52:54:00:BD:97:1E" + - "08:00:27:E3:B1:2D" + - "52:54:00:BD:97:1E" state: present ipa_host: ipa.example.com ipa_user: admin @@ -111,8 +135,7 @@ EXAMPLES = r''' ipa_host: ipa.example.com ipa_user: admin ipa_pass: topsecret - validate_certs: False - random_password: True + random_password: true - name: Ensure host is disabled community.general.ipa_host: @@ -145,19 +168,19 @@ EXAMPLES = r''' ipa_host: ipa.example.com ipa_user: admin ipa_pass: topsecret - update_dns: True -''' + update_dns: true +""" -RETURN = r''' +RETURN = r""" host: description: Host as returned by IPA API. returned: always type: dict host_diff: - description: List of options that differ and would be changed + description: List of options that differ and would be changed. 
returned: if check mode and a difference is found type: list -''' +""" import traceback @@ -189,15 +212,19 @@ class HostIPAClient(IPAClient): return self._post_json(method='host_disable', name=name) -def get_host_dict(description=None, force=None, ip_address=None, ns_host_location=None, ns_hardware_platform=None, +def get_host_dict(description=None, userclass=None, force=None, ip_address=None, l=None, ns_host_location=None, ns_hardware_platform=None, ns_os_version=None, user_certificate=None, mac_address=None, random_password=None): data = {} if description is not None: data['description'] = description + if userclass is not None: + data['userclass'] = userclass if force is not None: data['force'] = force if ip_address is not None: data['ip_address'] = ip_address + if l is not None: + data['l'] = l if ns_host_location is not None: data['nshostlocation'] = ns_host_location if ns_hardware_platform is not None: @@ -227,26 +254,33 @@ def get_host_diff(client, ipa_host, module_host): def ensure(module, client): name = module.params['fqdn'] state = module.params['state'] + force_creation = module.params['force_creation'] ipa_host = client.host_find(name=name) module_host = get_host_dict(description=module.params['description'], - force=module.params['force'], ip_address=module.params['ip_address'], + userclass=module.params['userclass'], + force=module.params['force'], + ip_address=module.params['ip_address'], + l=module.params['l'], ns_host_location=module.params['ns_host_location'], ns_hardware_platform=module.params['ns_hardware_platform'], ns_os_version=module.params['ns_os_version'], user_certificate=module.params['user_certificate'], mac_address=module.params['mac_address'], - random_password=module.params.get('random_password'), + random_password=module.params['random_password'], ) changed = False if state in ['present', 'enabled', 'disabled']: - if not ipa_host: + if not ipa_host and (force_creation or state == 'present'): changed = True if not module.check_mode: 
# OTP password generated by FreeIPA is visible only for host_add command # so, return directly from here. return changed, client.host_add(name=name, host=module_host) else: + if state in ['disabled', 'enabled']: + module.fail_json(msg="No host with name " + ipa_host + " found") + diff = get_host_diff(client, ipa_host, module_host) if len(diff) > 0: changed = True @@ -254,12 +288,15 @@ def ensure(module, client): data = {} for key in diff: data[key] = module_host.get(key) + if "usercertificate" not in data: + data["usercertificate"] = [ + cert['__base64__'] for cert in ipa_host.get("usercertificate", []) + ] ipa_host_show = client.host_show(name=name) - if ipa_host_show.get('has_keytab', False) and module.params.get('random_password'): + if ipa_host_show.get('has_keytab', True) and (state == 'disabled' or module.params.get('random_password')): client.host_disable(name=name) return changed, client.host_mod(name=name, host=data) - - else: + elif state == 'absent': if ipa_host: changed = True update_dns = module.params.get('update_dns', False) @@ -271,18 +308,23 @@ def ensure(module, client): def main(): argument_spec = ipa_argument_spec() - argument_spec.update(description=dict(type='str'), - fqdn=dict(type='str', required=True, aliases=['name']), - force=dict(type='bool'), - ip_address=dict(type='str'), - ns_host_location=dict(type='str', aliases=['nshostlocation']), - ns_hardware_platform=dict(type='str', aliases=['nshardwareplatform']), - ns_os_version=dict(type='str', aliases=['nsosversion']), - user_certificate=dict(type='list', aliases=['usercertificate'], elements='str'), - mac_address=dict(type='list', aliases=['macaddress'], elements='str'), - update_dns=dict(type='bool'), - state=dict(type='str', default='present', choices=['present', 'absent', 'enabled', 'disabled']), - random_password=dict(type='bool', no_log=False),) + argument_spec.update( + description=dict(type='str'), + fqdn=dict(type='str', required=True, aliases=['name']), + force=dict(type='bool'), 
+ ip_address=dict(type='str'), + l=dict(type='str', aliases=['locality']), + ns_host_location=dict(type='str', aliases=['nshostlocation']), + ns_hardware_platform=dict(type='str', aliases=['nshardwareplatform']), + ns_os_version=dict(type='str', aliases=['nsosversion']), + userclass=dict(type='str'), + user_certificate=dict(type='list', aliases=['usercertificate'], elements='str'), + mac_address=dict(type='list', aliases=['macaddress'], elements='str'), + update_dns=dict(type='bool'), + state=dict(type='str', default='present', choices=['present', 'absent', 'enabled', 'disabled']), + random_password=dict(type='bool', no_log=False), + force_creation=dict(type='bool', default=True) + ) module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True) diff --git a/plugins/modules/identity/ipa/ipa_hostgroup.py b/plugins/modules/ipa_hostgroup.py similarity index 71% rename from plugins/modules/identity/ipa/ipa_hostgroup.py rename to plugins/modules/ipa_hostgroup.py index 9d5c6f99c7..f4f40d0bd9 100644 --- a/plugins/modules/identity/ipa/ipa_hostgroup.py +++ b/plugins/modules/ipa_hostgroup.py @@ -1,67 +1,80 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- -# Copyright: (c) 2017, Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# Copyright (c) 2017, Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = r''' ---- +DOCUMENTATION = r""" module: ipa_hostgroup author: Thomas Krahn (@Nosmoht) short_description: Manage FreeIPA host-group description: -- Add, modify and delete an IPA host-group using IPA API. + - Add, modify and delete an IPA host-group using IPA API. 
+attributes: + check_mode: + support: full + diff_mode: + support: none options: + append: + description: + - If V(true), add the listed O(host) to the O(hostgroup). + - If V(false), only the listed O(host) is set in O(hostgroup), removing any other hosts. + default: false + type: bool + version_added: 6.6.0 cn: description: - - Name of host-group. - - Can not be changed as it is the unique identifier. + - Name of host-group. + - Can not be changed as it is the unique identifier. required: true aliases: ["name"] type: str description: description: - - Description. + - Description. type: str host: description: - - List of hosts that belong to the host-group. - - If an empty list is passed all hosts will be removed from the group. - - If option is omitted hosts will not be checked or changed. - - If option is passed all assigned hosts that are not passed will be unassigned from the group. + - List of hosts that belong to the host-group. + - If an empty list is passed all hosts are removed from the group. + - If option is omitted hosts are not checked nor changed. + - If option is passed all assigned hosts that are not passed are unassigned from the group. type: list elements: str hostgroup: description: - - List of host-groups than belong to that host-group. - - If an empty list is passed all host-groups will be removed from the group. - - If option is omitted host-groups will not be checked or changed. - - If option is passed all assigned hostgroups that are not passed will be unassigned from the group. + - List of host-groups than belong to that host-group. + - If an empty list is passed all host-groups are removed from the group. + - If option is omitted host-groups are not checked nor changed. + - If option is passed all assigned hostgroups that are not passed are unassigned from the group. type: list elements: str state: description: - - State to ensure. + - State to ensure. + - V("absent") and V("disabled") give the same results. 
+ - V("present") and V("enabled") give the same results. default: "present" choices: ["absent", "disabled", "enabled", "present"] type: str extends_documentation_fragment: -- community.general.ipa.documentation + - community.general.ipa.documentation + - community.general.ipa.connection_notes + - community.general.attributes +""" -''' - -EXAMPLES = r''' +EXAMPLES = r""" - name: Ensure host-group databases is present community.general.ipa_hostgroup: name: databases state: present host: - - db.example.com + - db.example.com hostgroup: - - mysql-server - - oracle-server + - mysql-server + - oracle-server ipa_host: ipa.example.com ipa_user: admin ipa_pass: topsecret @@ -73,14 +86,14 @@ EXAMPLES = r''' ipa_host: ipa.example.com ipa_user: admin ipa_pass: topsecret -''' +""" -RETURN = r''' +RETURN = r""" hostgroup: description: Hostgroup as returned by IPA API. returned: always type: dict -''' +""" import traceback @@ -140,12 +153,13 @@ def ensure(module, client): state = module.params['state'] host = module.params['host'] hostgroup = module.params['hostgroup'] + append = module.params['append'] ipa_hostgroup = client.hostgroup_find(name=name) module_hostgroup = get_hostgroup_dict(description=module.params['description']) changed = False - if state == 'present': + if state in ['present', 'enabled']: if not ipa_hostgroup: changed = True if not module.check_mode: @@ -161,14 +175,18 @@ def ensure(module, client): client.hostgroup_mod(name=name, item=data) if host is not None: - changed = client.modify_if_diff(name, ipa_hostgroup.get('member_host', []), [item.lower() for item in host], - client.hostgroup_add_host, client.hostgroup_remove_host) or changed + changed = client.modify_if_diff(name, ipa_hostgroup.get('member_host', []), + [item.lower() for item in host], + client.hostgroup_add_host, + client.hostgroup_remove_host, + append=append) or changed if hostgroup is not None: changed = client.modify_if_diff(name, ipa_hostgroup.get('member_hostgroup', []), [item.lower() for 
item in hostgroup], client.hostgroup_add_hostgroup, - client.hostgroup_remove_hostgroup) or changed + client.hostgroup_remove_hostgroup, + append=append) or changed else: if ipa_hostgroup: @@ -185,7 +203,8 @@ def main(): description=dict(type='str'), host=dict(type='list', elements='str'), hostgroup=dict(type='list', elements='str'), - state=dict(type='str', default='present', choices=['present', 'absent', 'enabled', 'disabled'])) + state=dict(type='str', default='present', choices=['present', 'absent', 'enabled', 'disabled']), + append=dict(type='bool', default=False)) module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True) diff --git a/plugins/modules/identity/ipa/ipa_otpconfig.py b/plugins/modules/ipa_otpconfig.py similarity index 87% rename from plugins/modules/identity/ipa/ipa_otpconfig.py rename to plugins/modules/ipa_otpconfig.py index 9a10baec0b..a260cc7a13 100644 --- a/plugins/modules/identity/ipa/ipa_otpconfig.py +++ b/plugins/modules/ipa_otpconfig.py @@ -1,20 +1,23 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- -# Copyright: (c) 2021, Ansible Project +# Copyright (c) 2021, Ansible Project # Heavily influenced from Fran Fitzpatrick ipa_config module -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = r''' ---- +DOCUMENTATION = r""" module: ipa_otpconfig author: justchris1 (@justchris1) short_description: Manage FreeIPA OTP Configuration Settings version_added: 2.5.0 description: -- Modify global configuration settings of a FreeIPA Server with respect to OTP (One Time Passwords). + - Modify global configuration settings of a FreeIPA Server with respect to OTP (One Time Passwords). 
+attributes: + check_mode: + support: full + diff_mode: + support: none options: ipatokentotpauthwindow: description: TOTP authentication window in seconds. @@ -33,11 +36,12 @@ options: aliases: ["hotpsyncwindow"] type: int extends_documentation_fragment: -- community.general.ipa.documentation + - community.general.ipa.documentation + - community.general.ipa.connection_notes + - community.general.attributes +""" -''' - -EXAMPLES = r''' +EXAMPLES = r""" - name: Ensure the TOTP authentication window is set to 300 seconds community.general.ipa_otpconfig: ipatokentotpauthwindow: '300' @@ -45,7 +49,7 @@ EXAMPLES = r''' ipa_user: admin ipa_pass: supersecret -- name: Ensure the TOTP syncronization window is set to 86400 seconds +- name: Ensure the TOTP synchronization window is set to 86400 seconds community.general.ipa_otpconfig: ipatokentotpsyncwindow: '86400' ipa_host: localhost @@ -59,20 +63,20 @@ EXAMPLES = r''' ipa_user: admin ipa_pass: supersecret -- name: Ensure the HOTP syncronization window is set to 100 hops +- name: Ensure the HOTP synchronization window is set to 100 hops community.general.ipa_otpconfig: ipatokenhotpsyncwindow: '100' ipa_host: localhost ipa_user: admin ipa_pass: supersecret -''' +""" -RETURN = r''' +RETURN = r""" otpconfig: description: OTP configuration as returned by IPA API. 
returned: always type: dict -''' +""" import traceback diff --git a/plugins/modules/identity/ipa/ipa_otptoken.py b/plugins/modules/ipa_otptoken.py similarity index 90% rename from plugins/modules/identity/ipa/ipa_otptoken.py rename to plugins/modules/ipa_otptoken.py index 4027a1c459..388ccfb4d9 100644 --- a/plugins/modules/identity/ipa/ipa_otptoken.py +++ b/plugins/modules/ipa_otptoken.py @@ -1,19 +1,22 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- -# Copyright: (c) 2017, Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# Copyright (c) 2017, Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = r''' ---- +DOCUMENTATION = r""" module: ipa_otptoken author: justchris1 (@justchris1) short_description: Manage FreeIPA OTPs version_added: 2.5.0 description: -- Add, modify, and delete One Time Passwords in IPA. + - Add, modify, and delete One Time Passwords in IPA. +attributes: + check_mode: + support: full + diff_mode: + support: none options: uniqueid: description: Unique ID of the token in IPA. @@ -21,41 +24,41 @@ options: aliases: ["name"] type: str newuniqueid: - description: If specified, the unique id specified will be changed to this. + description: If specified, the unique ID specified is changed to this. type: str otptype: description: - - Type of OTP. - - "B(Note:) Cannot be modified after OTP is created." + - Type of OTP. + - B(Note:) Cannot be modified after OTP is created. type: str - choices: [ totp, hotp ] + choices: [totp, hotp] secretkey: description: - - Token secret (Base64). - - If OTP is created and this is not specified, a random secret will be generated by IPA. - - "B(Note:) Cannot be modified after OTP is created." 
+ - Token secret (Base64). + - If OTP is created and this is not specified, a random secret is generated by IPA. + - B(Note:) Cannot be modified after OTP is created. type: str description: description: Description of the token (informational only). type: str owner: - description: Assigned user of the token. + description: Assigned user of the token. type: str enabled: - description: Mark the token as enabled (default C(true)). + description: Mark the token as enabled (default V(true)). default: true type: bool notbefore: description: - - First date/time the token can be used. - - In the format C(YYYYMMddHHmmss). - - For example, C(20180121182022) will allow the token to be used starting on 21 January 2018 at 18:20:22. + - First date/time the token can be used. + - In the format C(YYYYMMddHHmmss). + - For example, V(20180121182022) allows the token to be used starting on 21 January 2018 at 18:20:22. type: str notafter: description: - - Last date/time the token can be used. - - In the format C(YYYYMMddHHmmss). - - For example, C(20200121182022) will allow the token to be used until 21 January 2020 at 18:20:22. + - Last date/time the token can be used. + - In the format C(YYYYMMddHHmmss). + - For example, V(20200121182022) allows the token to be used until 21 January 2020 at 18:20:22. type: str vendor: description: Token vendor name (informational only). @@ -73,36 +76,38 @@ options: type: str algorithm: description: - - Token hash algorithm. - - "B(Note:) Cannot be modified after OTP is created." + - Token hash algorithm. + - B(Note:) Cannot be modified after OTP is created. choices: ['sha1', 'sha256', 'sha384', 'sha512'] type: str digits: description: - - Number of digits each token code will have. - - "B(Note:) Cannot be modified after OTP is created." - choices: [ 6, 8 ] + - Number of digits each token code has. + - B(Note:) Cannot be modified after OTP is created. + choices: [6, 8] type: int offset: description: - - TOTP token / IPA server time difference. 
- - "B(Note:) Cannot be modified after OTP is created." + - TOTP token / IPA server time difference. + - B(Note:) Cannot be modified after OTP is created. type: int interval: description: - - Length of TOTP token code validity in seconds. - - "B(Note:) Cannot be modified after OTP is created." + - Length of TOTP token code validity in seconds. + - B(Note:) Cannot be modified after OTP is created. type: int counter: description: - - Initial counter for the HOTP token. - - "B(Note:) Cannot be modified after OTP is created." + - Initial counter for the HOTP token. + - B(Note:) Cannot be modified after OTP is created. type: int extends_documentation_fragment: -- community.general.ipa.documentation -''' + - community.general.ipa.documentation + - community.general.ipa.connection_notes + - community.general.attributes +""" -EXAMPLES = r''' +EXAMPLES = r""" - name: Create a totp for pinky, allowing the IPA server to generate using defaults community.general.ipa_otptoken: uniqueid: Token123 @@ -154,14 +159,14 @@ EXAMPLES = r''' ipa_host: ipa.example.com ipa_user: admin ipa_pass: topsecret -''' +""" -RETURN = r''' +RETURN = r""" otptoken: - description: OTP Token as returned by IPA API + description: OTP Token as returned by IPA API. 
returned: always type: dict -''' +""" import base64 import traceback @@ -230,7 +235,7 @@ def get_otptoken_dict(ansible_to_ipa, uniqueid=None, newuniqueid=None, otptype=N if owner is not None: otptoken[ansible_to_ipa['owner']] = owner if enabled is not None: - otptoken[ansible_to_ipa['enabled']] = 'FALSE' if enabled else 'TRUE' + otptoken[ansible_to_ipa['enabled']] = False if enabled else True if notbefore is not None: otptoken[ansible_to_ipa['notbefore']] = notbefore + 'Z' if notafter is not None: @@ -385,9 +390,7 @@ def ensure(module, client): 'counter': 'ipatokenhotpcounter'} # Create inverse dictionary for mapping return values - ipa_to_ansible = {} - for (k, v) in ansible_to_ipa.items(): - ipa_to_ansible[v] = k + ipa_to_ansible = {v: k for k, v in ansible_to_ipa.items()} unmodifiable_after_creation = ['otptype', 'secretkey', 'algorithm', 'digits', 'offset', 'interval', 'counter'] @@ -445,8 +448,8 @@ def ensure(module, client): module_otptoken['all'] = True ipa_otptoken = client.otptoken_add(name=uniqueid, item=module_otptoken) else: - if not(validate_modifications(ansible_to_ipa, module, ipa_otptoken, - module_otptoken, unmodifiable_after_creation)): + if not validate_modifications(ansible_to_ipa, module, ipa_otptoken, + module_otptoken, unmodifiable_after_creation): module.fail_json(msg="Modifications requested in module are not valid") # IPA will reject 'modifications' that do not actually modify anything diff --git a/plugins/modules/ipa_pwpolicy.py b/plugins/modules/ipa_pwpolicy.py new file mode 100644 index 0000000000..10650a49dd --- /dev/null +++ b/plugins/modules/ipa_pwpolicy.py @@ -0,0 +1,307 @@ +#!/usr/bin/python +# Copyright (c) 2020, Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + +DOCUMENTATION = r""" +module: ipa_pwpolicy +author: Adralioh (@adralioh) +short_description: Manage 
FreeIPA password policies +description: + - Add, modify, or delete a password policy using the IPA API. +version_added: 2.0.0 +attributes: + check_mode: + support: full + diff_mode: + support: none +options: + group: + description: + - Name of the group that the policy applies to. + - If omitted, the global policy is used. + aliases: ["name"] + type: str + state: + description: State to ensure. + default: "present" + choices: ["absent", "present"] + type: str + maxpwdlife: + description: Maximum password lifetime (in days). + type: str + minpwdlife: + description: Minimum password lifetime (in hours). + type: str + historylength: + description: + - Number of previous passwords that are remembered. + - Users cannot reuse remembered passwords. + type: str + minclasses: + description: Minimum number of character classes. + type: str + minlength: + description: Minimum password length. + type: str + priority: + description: + - Priority of the policy. + - High number means lower priority. + - Required when C(cn) is not the global policy. + type: str + maxfailcount: + description: Maximum number of consecutive failures before lockout. + type: str + failinterval: + description: Period (in seconds) after which the number of failed login attempts is reset. + type: str + lockouttime: + description: Period (in seconds) for which users are locked out. + type: str + gracelimit: + description: Maximum number of LDAP logins after password expiration. + type: int + version_added: 8.2.0 + maxrepeat: + description: Maximum number of allowed same consecutive characters in the new password. + type: int + version_added: 8.2.0 + maxsequence: + description: Maximum length of monotonic character sequences in the new password. An example of a monotonic sequence of + length 5 is V(12345). + type: int + version_added: 8.2.0 + dictcheck: + description: Check whether the password (with possible modifications) matches a word in a dictionary (using cracklib). 
+ type: bool + version_added: 8.2.0 + usercheck: + description: Check whether the password (with possible modifications) contains the user name in some form (if the name + has > 3 characters). + type: bool + version_added: 8.2.0 +extends_documentation_fragment: + - community.general.ipa.documentation + - community.general.ipa.connection_notes + - community.general.attributes +""" + +EXAMPLES = r""" +- name: Modify the global password policy + community.general.ipa_pwpolicy: + maxpwdlife: '90' + minpwdlife: '1' + historylength: '8' + minclasses: '3' + minlength: '16' + maxfailcount: '6' + failinterval: '60' + lockouttime: '600' + ipa_host: ipa.example.com + ipa_user: admin + ipa_pass: topsecret + +- name: Ensure the password policy for the group admins is present + community.general.ipa_pwpolicy: + group: admins + state: present + maxpwdlife: '60' + minpwdlife: '24' + historylength: '16' + minclasses: '4' + priority: '10' + minlength: '6' + maxfailcount: '4' + failinterval: '600' + lockouttime: '1200' + gracelimit: 3 + maxrepeat: 3 + maxsequence: 3 + dictcheck: true + usercheck: true + ipa_host: ipa.example.com + ipa_user: admin + ipa_pass: topsecret + +- name: Ensure that the group sysops does not have a unique password policy + community.general.ipa_pwpolicy: + group: sysops + state: absent + ipa_host: ipa.example.com + ipa_user: admin + ipa_pass: topsecret +""" + +RETURN = r""" +pwpolicy: + description: Password policy as returned by IPA API. 
+ returned: always + type: dict + sample: + cn: ['admins'] + cospriority: ['10'] + dn: 'cn=admins,cn=EXAMPLE.COM,cn=kerberos,dc=example,dc=com' + krbmaxpwdlife: ['60'] + krbminpwdlife: ['24'] + krbpwdfailurecountinterval: ['600'] + krbpwdhistorylength: ['16'] + krbpwdlockoutduration: ['1200'] + krbpwdmaxfailure: ['4'] + krbpwdmindiffchars: ['4'] + objectclass: ['top', 'nscontainer', 'krbpwdpolicy'] +""" + +import traceback + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.ipa import IPAClient, ipa_argument_spec +from ansible.module_utils.common.text.converters import to_native + + +class PwPolicyIPAClient(IPAClient): + '''The global policy will be selected when `name` is `None`''' + def __init__(self, module, host, port, protocol): + super(PwPolicyIPAClient, self).__init__(module, host, port, protocol) + + def pwpolicy_find(self, name): + if name is None: + # Manually set the cn to the global policy because pwpolicy_find will return a random + # different policy if cn is `None` + name = 'global_policy' + return self._post_json(method='pwpolicy_find', name=None, item={'all': True, 'cn': name}) + + def pwpolicy_add(self, name, item): + return self._post_json(method='pwpolicy_add', name=name, item=item) + + def pwpolicy_mod(self, name, item): + return self._post_json(method='pwpolicy_mod', name=name, item=item) + + def pwpolicy_del(self, name): + return self._post_json(method='pwpolicy_del', name=name) + + +def get_pwpolicy_dict(maxpwdlife=None, minpwdlife=None, historylength=None, minclasses=None, + minlength=None, priority=None, maxfailcount=None, failinterval=None, + lockouttime=None, gracelimit=None, maxrepeat=None, maxsequence=None, dictcheck=None, usercheck=None): + pwpolicy = {} + pwpolicy_options = { + 'krbmaxpwdlife': maxpwdlife, + 'krbminpwdlife': minpwdlife, + 'krbpwdhistorylength': historylength, + 'krbpwdmindiffchars': minclasses, + 'krbpwdminlength': minlength, + 'cospriority': 
priority, + 'krbpwdmaxfailure': maxfailcount, + 'krbpwdfailurecountinterval': failinterval, + 'krbpwdlockoutduration': lockouttime, + 'passwordgracelimit': gracelimit, + 'ipapwdmaxrepeat': maxrepeat, + 'ipapwdmaxsequence': maxsequence, + } + + pwpolicy_boolean_options = { + 'ipapwddictcheck': dictcheck, + 'ipapwdusercheck': usercheck, + } + + for option, value in pwpolicy_options.items(): + if value is not None: + pwpolicy[option] = to_native(value) + + for option, value in pwpolicy_boolean_options.items(): + if value is not None: + pwpolicy[option] = bool(value) + + return pwpolicy + + +def get_pwpolicy_diff(client, ipa_pwpolicy, module_pwpolicy): + return client.get_diff(ipa_data=ipa_pwpolicy, module_data=module_pwpolicy) + + +def ensure(module, client): + state = module.params['state'] + name = module.params['group'] + + module_pwpolicy = get_pwpolicy_dict(maxpwdlife=module.params.get('maxpwdlife'), + minpwdlife=module.params.get('minpwdlife'), + historylength=module.params.get('historylength'), + minclasses=module.params.get('minclasses'), + minlength=module.params.get('minlength'), + priority=module.params.get('priority'), + maxfailcount=module.params.get('maxfailcount'), + failinterval=module.params.get('failinterval'), + lockouttime=module.params.get('lockouttime'), + gracelimit=module.params.get('gracelimit'), + maxrepeat=module.params.get('maxrepeat'), + maxsequence=module.params.get('maxsequence'), + dictcheck=module.params.get('dictcheck'), + usercheck=module.params.get('usercheck'), + ) + + ipa_pwpolicy = client.pwpolicy_find(name=name) + + changed = False + if state == 'present': + if not ipa_pwpolicy: + changed = True + if not module.check_mode: + ipa_pwpolicy = client.pwpolicy_add(name=name, item=module_pwpolicy) + else: + diff = get_pwpolicy_diff(client, ipa_pwpolicy, module_pwpolicy) + if len(diff) > 0: + changed = True + if not module.check_mode: + ipa_pwpolicy = client.pwpolicy_mod(name=name, item=module_pwpolicy) + else: + if ipa_pwpolicy: + 
changed = True + if not module.check_mode: + client.pwpolicy_del(name=name) + + return changed, ipa_pwpolicy + + +def main(): + argument_spec = ipa_argument_spec() + argument_spec.update(group=dict(type='str', aliases=['name']), + state=dict(type='str', default='present', choices=['present', 'absent']), + maxpwdlife=dict(type='str'), + minpwdlife=dict(type='str'), + historylength=dict(type='str'), + minclasses=dict(type='str'), + minlength=dict(type='str'), + priority=dict(type='str'), + maxfailcount=dict(type='str'), + failinterval=dict(type='str'), + lockouttime=dict(type='str'), + gracelimit=dict(type='int'), + maxrepeat=dict(type='int'), + maxsequence=dict(type='int'), + dictcheck=dict(type='bool'), + usercheck=dict(type='bool'), + ) + + module = AnsibleModule(argument_spec=argument_spec, + supports_check_mode=True) + + client = PwPolicyIPAClient(module=module, + host=module.params['ipa_host'], + port=module.params['ipa_port'], + protocol=module.params['ipa_prot']) + + try: + client.login(username=module.params['ipa_user'], + password=module.params['ipa_pass']) + changed, pwpolicy = ensure(module, client) + except Exception as e: + module.fail_json(msg=to_native(e), exception=traceback.format_exc()) + + module.exit_json(changed=changed, pwpolicy=pwpolicy) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/identity/ipa/ipa_role.py b/plugins/modules/ipa_role.py similarity index 77% rename from plugins/modules/identity/ipa/ipa_role.py rename to plugins/modules/ipa_role.py index c602614ef9..130036ebd1 100644 --- a/plugins/modules/identity/ipa/ipa_role.py +++ b/plugins/modules/ipa_role.py @@ -1,68 +1,71 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- -# Copyright: (c) 2017, Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# Copyright (c) 2017, Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# 
SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = r''' ---- +DOCUMENTATION = r""" module: ipa_role author: Thomas Krahn (@Nosmoht) short_description: Manage FreeIPA role description: -- Add, modify and delete a role within FreeIPA server using FreeIPA API. + - Add, modify and delete a role within FreeIPA server using FreeIPA API. +attributes: + check_mode: + support: full + diff_mode: + support: none options: cn: description: - - Role name. - - Can not be changed as it is the unique identifier. + - Role name. + - Can not be changed as it is the unique identifier. required: true aliases: ['name'] type: str description: description: - - A description of this role-group. + - A description of this role-group. type: str group: description: - - List of group names assign to this role. - - If an empty list is passed all assigned groups will be unassigned from the role. - - If option is omitted groups will not be checked or changed. - - If option is passed all assigned groups that are not passed will be unassigned from the role. + - List of group names assign to this role. + - If an empty list is passed all assigned groups are unassigned from the role. + - If option is omitted groups are not checked nor changed. + - If option is passed all assigned groups that are not passed are unassigned from the role. type: list elements: str host: description: - - List of host names to assign. - - If an empty list is passed all assigned hosts will be unassigned from the role. - - If option is omitted hosts will not be checked or changed. - - If option is passed all assigned hosts that are not passed will be unassigned from the role. + - List of host names to assign. + - If an empty list is passed all assigned hosts are unassigned from the role. + - If option is omitted hosts are not checked nor changed. 
+ - If option is passed all assigned hosts that are not passed are unassigned from the role. type: list elements: str hostgroup: description: - - List of host group names to assign. - - If an empty list is passed all assigned host groups will be removed from the role. - - If option is omitted host groups will not be checked or changed. - - If option is passed all assigned hostgroups that are not passed will be unassigned from the role. + - List of host group names to assign. + - If an empty list is passed all assigned host groups are removed from the role. + - If option is omitted host groups are not checked nor changed. + - If option is passed all assigned hostgroups that are not passed are unassigned from the role. type: list elements: str privilege: description: - - List of privileges granted to the role. - - If an empty list is passed all assigned privileges will be removed. - - If option is omitted privileges will not be checked or changed. - - If option is passed all assigned privileges that are not passed will be removed. + - List of privileges granted to the role. + - If an empty list is passed all assigned privileges are removed. + - If option is omitted privileges are not checked nor changed. + - If option is passed all assigned privileges that are not passed are removed. type: list elements: str service: description: - - List of service names to assign. - - If an empty list is passed all assigned services will be removed from the role. - - If option is omitted services will not be checked or changed. - - If option is passed all assigned services that are not passed will be removed from the role. + - List of service names to assign. + - If an empty list is passed all assigned services are removed from the role. + - If option is omitted services are not checked nor changed. + - If option is passed all assigned services that are not passed are removed from the role. 
type: list elements: str state: @@ -72,25 +75,26 @@ options: type: str user: description: - - List of user names to assign. - - If an empty list is passed all assigned users will be removed from the role. - - If option is omitted users will not be checked or changed. + - List of user names to assign. + - If an empty list is passed all assigned users are removed from the role. + - If option is omitted users are not checked nor changed. type: list elements: str extends_documentation_fragment: -- community.general.ipa.documentation + - community.general.ipa.documentation + - community.general.ipa.connection_notes + - community.general.attributes +""" -''' - -EXAMPLES = r''' +EXAMPLES = r""" - name: Ensure role is present community.general.ipa_role: name: dba description: Database Administrators state: present user: - - pinky - - brain + - pinky + - brain ipa_host: ipa.example.com ipa_user: admin ipa_pass: topsecret @@ -100,16 +104,16 @@ EXAMPLES = r''' name: another-role description: Just another role group: - - editors + - editors host: - - host01.example.com + - host01.example.com hostgroup: - - hostgroup01 + - hostgroup01 privilege: - - Group Administrators - - User Administrators + - Group Administrators + - User Administrators service: - - service01 + - service01 - name: Ensure role is absent community.general.ipa_role: @@ -118,14 +122,14 @@ EXAMPLES = r''' ipa_host: ipa.example.com ipa_user: admin ipa_pass: topsecret -''' +""" -RETURN = r''' +RETURN = r""" role: description: Role as returned by IPA API. 
returned: always type: dict -''' +""" import traceback diff --git a/plugins/modules/identity/ipa/ipa_service.py b/plugins/modules/ipa_service.py similarity index 84% rename from plugins/modules/identity/ipa/ipa_service.py rename to plugins/modules/ipa_service.py index 63c6d8216d..089d49fc88 100644 --- a/plugins/modules/identity/ipa/ipa_service.py +++ b/plugins/modules/ipa_service.py @@ -1,41 +1,44 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- -# Copyright: (c) 2018, Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# Copyright (c) 2018, Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = r''' ---- +DOCUMENTATION = r""" module: ipa_service author: Cédric Parent (@cprh) short_description: Manage FreeIPA service description: -- Add and delete an IPA service using IPA API. + - Add and delete an IPA service using IPA API. +attributes: + check_mode: + support: full + diff_mode: + support: none options: krbcanonicalname: description: - - Principal of the service. - - Can not be changed as it is the unique identifier. + - Principal of the service. + - Can not be changed as it is the unique identifier. required: true aliases: ["name"] type: str hosts: description: - - Defines the list of 'ManagedBy' hosts. + - Defines the list of C(ManagedBy) hosts. required: false type: list elements: str force: description: - - Force principal name even if host is not in DNS. + - Force principal name even if host is not in DNS. required: false type: bool skip_host_check: description: - - Force service to be created even when host object does not exist to manage it. - - This is only used on creation, not for updating existing services. 
+ - Force service to be created even when host object does not exist to manage it. + - This is only used on creation, not for updating existing services. required: false type: bool default: false @@ -47,11 +50,12 @@ options: choices: ["absent", "present"] type: str extends_documentation_fragment: -- community.general.ipa.documentation + - community.general.ipa.documentation + - community.general.ipa.connection_notes + - community.general.attributes +""" -''' - -EXAMPLES = r''' +EXAMPLES = r""" - name: Ensure service is present community.general.ipa_service: name: http/host01.example.com @@ -72,19 +76,19 @@ EXAMPLES = r''' community.general.ipa_service: name: http/host01.example.com hosts: - - host01.example.com - - host02.example.com + - host01.example.com + - host02.example.com ipa_host: ipa.example.com ipa_user: admin ipa_pass: topsecret -''' +""" -RETURN = r''' +RETURN = r""" service: description: Service as returned by IPA API. returned: always type: dict -''' +""" import traceback @@ -192,10 +196,10 @@ def main(): argument_spec = ipa_argument_spec() argument_spec.update( krbcanonicalname=dict(type='str', required=True, aliases=['name']), - force=dict(type='bool', required=False), - skip_host_check=dict(type='bool', default=False, required=False), - hosts=dict(type='list', required=False, elements='str'), - state=dict(type='str', required=False, default='present', + force=dict(type='bool'), + skip_host_check=dict(type='bool', default=False), + hosts=dict(type='list', elements='str'), + state=dict(type='str', default='present', choices=['present', 'absent'])) module = AnsibleModule(argument_spec=argument_spec, diff --git a/plugins/modules/identity/ipa/ipa_subca.py b/plugins/modules/ipa_subca.py similarity index 88% rename from plugins/modules/identity/ipa/ipa_subca.py rename to plugins/modules/ipa_subca.py index 387d63c513..1442f9d7ea 100644 --- a/plugins/modules/identity/ipa/ipa_subca.py +++ b/plugins/modules/ipa_subca.py @@ -1,48 +1,52 @@ #!/usr/bin/python -# 
-*- coding: utf-8 -*- # Copyright (c) 2017, Abhijeet Kasurde (akasurde@redhat.com) -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = r''' ---- +DOCUMENTATION = r""" module: ipa_subca author: Abhijeet Kasurde (@Akasurde) -short_description: Manage FreeIPA Lightweight Sub Certificate Authorities. +short_description: Manage FreeIPA Lightweight Sub Certificate Authorities description: -- Add, modify, enable, disable and delete an IPA Lightweight Sub Certificate Authorities using IPA API. + - Add, modify, enable, disable and delete an IPA Lightweight Sub Certificate Authorities using IPA API. +attributes: + check_mode: + support: full + diff_mode: + support: none options: subca_name: description: - - The Sub Certificate Authority name which needs to be managed. + - The Sub Certificate Authority name which needs to be managed. required: true aliases: ["name"] type: str subca_subject: description: - - The Sub Certificate Authority's Subject. e.g., 'CN=SampleSubCA1,O=testrelm.test'. + - The Sub Certificate Authority's Subject, for example V(CN=SampleSubCA1,O=testrelm.test). required: true type: str subca_desc: description: - - The Sub Certificate Authority's description. + - The Sub Certificate Authority's description. type: str state: description: - - State to ensure. - - State 'disable' and 'enable' is available for FreeIPA 4.4.2 version and onwards. + - State to ensure. + - States V(disable) and V(enable) are available for FreeIPA 4.4.2 version and onwards. 
required: false default: present choices: ["absent", "disabled", "enabled", "present"] type: str extends_documentation_fragment: -- community.general.ipa.documentation + - community.general.ipa.documentation + - community.general.ipa.connection_notes + - community.general.attributes +""" -''' - -EXAMPLES = ''' +EXAMPLES = r""" - name: Ensure IPA Sub CA is present community.general.ipa_subca: ipa_host: spider.example.com @@ -65,14 +69,14 @@ EXAMPLES = ''' ipa_pass: Passw0rd! state: disable subca_name: AnsibleSubCA1 -''' +""" -RETURN = r''' +RETURN = r""" subca: description: IPA Sub CA record as returned by IPA API. returned: always type: dict -''' +""" from ansible.module_utils.basic import AnsibleModule from ansible_collections.community.general.plugins.module_utils.ipa import IPAClient, ipa_argument_spec diff --git a/plugins/modules/identity/ipa/ipa_sudocmd.py b/plugins/modules/ipa_sudocmd.py similarity index 85% rename from plugins/modules/identity/ipa/ipa_sudocmd.py rename to plugins/modules/ipa_sudocmd.py index d75aff44ce..1aabeb07a3 100644 --- a/plugins/modules/identity/ipa/ipa_sudocmd.py +++ b/plugins/modules/ipa_sudocmd.py @@ -1,28 +1,31 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- -# Copyright: (c) 2017, Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# Copyright (c) 2017, Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = r''' ---- +DOCUMENTATION = r""" module: ipa_sudocmd author: Thomas Krahn (@Nosmoht) short_description: Manage FreeIPA sudo command description: -- Add, modify or delete sudo command within FreeIPA server using FreeIPA API. + - Add, modify or delete sudo command within FreeIPA server using FreeIPA API. 
+attributes: + check_mode: + support: full + diff_mode: + support: none options: sudocmd: description: - - Sudo command. + - Sudo command. aliases: ['name'] required: true type: str description: description: - - A description of this command. + - A description of this command. type: str state: description: State to ensure. @@ -30,11 +33,12 @@ options: choices: ['absent', 'disabled', 'enabled', 'present'] type: str extends_documentation_fragment: -- community.general.ipa.documentation + - community.general.ipa.documentation + - community.general.ipa.connection_notes + - community.general.attributes +""" -''' - -EXAMPLES = r''' +EXAMPLES = r""" - name: Ensure sudo command exists community.general.ipa_sudocmd: name: su @@ -50,14 +54,14 @@ EXAMPLES = r''' ipa_host: ipa.example.com ipa_user: admin ipa_pass: topsecret -''' +""" -RETURN = r''' +RETURN = r""" sudocmd: - description: Sudo command as return from IPA API + description: Sudo command as return from IPA API. returned: always type: dict -''' +""" import traceback diff --git a/plugins/modules/identity/ipa/ipa_sudocmdgroup.py b/plugins/modules/ipa_sudocmdgroup.py similarity index 85% rename from plugins/modules/identity/ipa/ipa_sudocmdgroup.py rename to plugins/modules/ipa_sudocmdgroup.py index 65fdd4f75f..af3f4c9547 100644 --- a/plugins/modules/identity/ipa/ipa_sudocmdgroup.py +++ b/plugins/modules/ipa_sudocmdgroup.py @@ -1,28 +1,31 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- -# Copyright: (c) 2017, Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# Copyright (c) 2017, Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = r''' ---- +DOCUMENTATION = r""" module: ipa_sudocmdgroup author: Thomas 
Krahn (@Nosmoht) short_description: Manage FreeIPA sudo command group description: -- Add, modify or delete sudo command group within IPA server using IPA API. + - Add, modify or delete sudo command group within IPA server using IPA API. +attributes: + check_mode: + support: full + diff_mode: + support: none options: cn: description: - - Sudo Command Group. + - Sudo Command Group. aliases: ['name'] required: true type: str description: description: - - Group description. + - Group description. type: str state: description: State to ensure. @@ -31,23 +34,24 @@ options: type: str sudocmd: description: - - List of sudo commands to assign to the group. - - If an empty list is passed all assigned commands will be removed from the group. - - If option is omitted sudo commands will not be checked or changed. + - List of sudo commands to assign to the group. + - If an empty list is passed all assigned commands are removed from the group. + - If option is omitted sudo commands are not checked nor changed. type: list elements: str extends_documentation_fragment: -- community.general.ipa.documentation + - community.general.ipa.documentation + - community.general.ipa.connection_notes + - community.general.attributes +""" -''' - -EXAMPLES = r''' +EXAMPLES = r""" - name: Ensure sudo command group exists community.general.ipa_sudocmdgroup: name: group01 description: Group of important commands sudocmd: - - su + - su ipa_host: ipa.example.com ipa_user: admin ipa_pass: topsecret @@ -59,14 +63,14 @@ EXAMPLES = r''' ipa_host: ipa.example.com ipa_user: admin ipa_pass: topsecret -''' +""" -RETURN = r''' +RETURN = r""" sudocmdgroup: - description: Sudo command group as returned by IPA API + description: Sudo command group as returned by IPA API. 
returned: always type: dict -''' +""" import traceback diff --git a/plugins/modules/identity/ipa/ipa_sudorule.py b/plugins/modules/ipa_sudorule.py similarity index 77% rename from plugins/modules/identity/ipa/ipa_sudorule.py rename to plugins/modules/ipa_sudorule.py index 2054599f9d..96ea6bfa30 100644 --- a/plugins/modules/identity/ipa/ipa_sudorule.py +++ b/plugins/modules/ipa_sudorule.py @@ -1,82 +1,101 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- -# Copyright: (c) 2017, Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# Copyright (c) 2017, Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = r''' ---- +DOCUMENTATION = r""" module: ipa_sudorule author: Thomas Krahn (@Nosmoht) short_description: Manage FreeIPA sudo rule description: -- Add, modify or delete sudo rule within IPA server using IPA API. + - Add, modify or delete sudo rule within IPA server using IPA API. +attributes: + check_mode: + support: full + diff_mode: + support: none options: cn: description: - - Canonical name. - - Can not be changed as it is the unique identifier. + - Canonical name. + - Can not be changed as it is the unique identifier. required: true aliases: ['name'] type: str cmdcategory: description: - - Command category the rule applies to. + - Command category the rule applies to. choices: ['all'] type: str cmd: description: - - List of commands assigned to the rule. - - If an empty list is passed all commands will be removed from the rule. - - If option is omitted commands will not be checked or changed. + - List of commands assigned to the rule. + - If an empty list is passed all commands are removed from the rule. 
+ - If option is omitted commands are not checked nor changed. type: list elements: str cmdgroup: description: - - List of command groups assigned to the rule. - - If an empty list is passed all command groups will be removed from the rule. - - If option is omitted command groups will not be checked or changed. + - List of command groups assigned to the rule. + - If an empty list is passed all command groups are removed from the rule. + - If option is omitted command groups are not checked nor changed. type: list elements: str version_added: 2.0.0 + deny_cmd: + description: + - List of denied commands assigned to the rule. + - If an empty list is passed all commands are removed from the rule. + - If option is omitted commands are not checked nor changed. + type: list + elements: str + version_added: 8.1.0 + deny_cmdgroup: + description: + - List of denied command groups assigned to the rule. + - If an empty list is passed all command groups are removed from the rule. + - If option is omitted command groups are not checked nor changed. + type: list + elements: str + version_added: 8.1.0 description: description: - - Description of the sudo rule. + - Description of the sudo rule. type: str host: description: - - List of hosts assigned to the rule. - - If an empty list is passed all hosts will be removed from the rule. - - If option is omitted hosts will not be checked or changed. - - Option C(hostcategory) must be omitted to assign hosts. + - List of hosts assigned to the rule. + - If an empty list is passed all hosts are removed from the rule. + - If option is omitted hosts are not checked nor changed. + - Option O(hostcategory) must be omitted to assign hosts. type: list elements: str hostcategory: description: - - Host category the rule applies to. - - If 'all' is passed one must omit C(host) and C(hostgroup). - - Option C(host) and C(hostgroup) must be omitted to assign 'all'. + - Host category the rule applies to. 
+ - If V(all) is passed one must omit O(host) and O(hostgroup). + - Option O(host) and O(hostgroup) must be omitted to assign V(all). choices: ['all'] type: str hostgroup: description: - - List of host groups assigned to the rule. - - If an empty list is passed all host groups will be removed from the rule. - - If option is omitted host groups will not be checked or changed. - - Option C(hostcategory) must be omitted to assign host groups. + - List of host groups assigned to the rule. + - If an empty list is passed all host groups are removed from the rule. + - If option is omitted host groups are not checked nor changed. + - Option O(hostcategory) must be omitted to assign host groups. type: list elements: str runasextusers: description: - - List of external RunAs users + - List of external RunAs users. type: list elements: str version_added: 2.3.0 runasusercategory: description: - - RunAs User category the rule applies to. + - RunAs User category the rule applies to. choices: ['all'] type: str runasgroupcategory: @@ -91,21 +110,21 @@ options: elements: str user: description: - - List of users assigned to the rule. - - If an empty list is passed all users will be removed from the rule. - - If option is omitted users will not be checked or changed. + - List of users assigned to the rule. + - If an empty list is passed all users are removed from the rule. + - If option is omitted users are not checked nor changed. type: list elements: str usercategory: description: - - User category the rule applies to. + - User category the rule applies to. choices: ['all'] type: str usergroup: description: - - List of user groups assigned to the rule. - - If an empty list is passed all user groups will be removed from the rule. - - If option is omitted user groups will not be checked or changed. + - List of user groups assigned to the rule. + - If an empty list is passed all user groups are removed from the rule. + - If option is omitted user groups are not checked nor changed. 
type: list elements: str state: @@ -114,19 +133,21 @@ options: choices: ['absent', 'disabled', 'enabled', 'present'] type: str extends_documentation_fragment: -- community.general.ipa.documentation + - community.general.ipa.documentation + - community.general.ipa.connection_notes + - community.general.attributes +""" -''' - -EXAMPLES = r''' -- name: Ensure sudo rule is present that's allows all every body to execute any command on any host without being asked for a password. +EXAMPLES = r""" +- name: Ensure sudo rule is present that's allows all every body to execute any command on any host without being asked + for a password. community.general.ipa_sudorule: name: sudo_all_nopasswd cmdcategory: all description: Allow to run every command with sudo without password hostcategory: all sudoopt: - - '!authenticate' + - '!authenticate' usercategory: all ipa_host: ipa.example.com ipa_user: admin @@ -138,13 +159,13 @@ EXAMPLES = r''' description: Allow developers to run every command with sudo on all database server cmdcategory: all host: - - db01.example.com + - db01.example.com hostgroup: - - db-server + - db-server sudoopt: - - '!authenticate' + - '!authenticate' usergroup: - - developers + - developers ipa_host: ipa.example.com ipa_user: admin ipa_pass: topsecret @@ -154,31 +175,32 @@ EXAMPLES = r''' name: sudo_operations_all description: Allow operators to run any commands that is part of operations-cmdgroup on any host as user root. cmdgroup: - - operations-cmdgroup + - operations-cmdgroup hostcategory: all runasextusers: - - root + - root sudoopt: - - '!authenticate' + - '!authenticate' usergroup: - - operators + - operators ipa_host: ipa.example.com ipa_user: admin ipa_pass: topsecret -''' +""" -RETURN = r''' +RETURN = r""" sudorule: - description: Sudorule as returned by IPA + description: Sudorule as returned by IPA. 
returned: always type: dict -''' +""" import traceback from ansible.module_utils.basic import AnsibleModule from ansible_collections.community.general.plugins.module_utils.ipa import IPAClient, ipa_argument_spec from ansible.module_utils.common.text.converters import to_native +from ansible_collections.community.general.plugins.module_utils.version import LooseVersion class SudoRuleIPAClient(IPAClient): @@ -239,6 +261,12 @@ class SudoRuleIPAClient(IPAClient): def sudorule_add_allow_command_group(self, name, item): return self._post_json(method='sudorule_add_allow_command', name=name, item={'sudocmdgroup': item}) + def sudorule_add_deny_command(self, name, item): + return self._post_json(method='sudorule_add_deny_command', name=name, item={'sudocmd': item}) + + def sudorule_add_deny_command_group(self, name, item): + return self._post_json(method='sudorule_add_deny_command', name=name, item={'sudocmdgroup': item}) + def sudorule_remove_allow_command(self, name, item): return self._post_json(method='sudorule_remove_allow_command', name=name, item=item) @@ -296,6 +324,8 @@ def ensure(module, client): cmd = module.params['cmd'] cmdgroup = module.params['cmdgroup'] cmdcategory = module.params['cmdcategory'] + deny_cmd = module.params['deny_cmd'] + deny_cmdgroup = module.params['deny_cmdgroup'] host = module.params['host'] hostcategory = module.params['hostcategory'] hostgroup = module.params['hostgroup'] @@ -303,10 +333,17 @@ def ensure(module, client): runasgroupcategory = module.params['runasgroupcategory'] runasextusers = module.params['runasextusers'] + ipa_version = client.get_ipa_version() if state in ['present', 'enabled']: - ipaenabledflag = 'TRUE' + if LooseVersion(ipa_version) < LooseVersion('4.9.10'): + ipaenabledflag = 'TRUE' + else: + ipaenabledflag = True else: - ipaenabledflag = 'FALSE' + if LooseVersion(ipa_version) < LooseVersion('4.9.10'): + ipaenabledflag = 'FALSE' + else: + ipaenabledflag = False sudoopt = module.params['sudoopt'] user = 
module.params['user'] @@ -352,6 +389,16 @@ def ensure(module, client): if not module.check_mode: client.sudorule_add_allow_command_group(name=name, item=cmdgroup) + if deny_cmd is not None: + changed = category_changed(module, client, 'cmdcategory', ipa_sudorule) or changed + if not module.check_mode: + client.sudorule_add_deny_command(name=name, item=deny_cmd) + + if deny_cmdgroup is not None: + changed = category_changed(module, client, 'cmdcategory', ipa_sudorule) or changed + if not module.check_mode: + client.sudorule_add_deny_command_group(name=name, item=deny_cmdgroup) + if runasusercategory is not None: changed = category_changed(module, client, 'iparunasusercategory', ipa_sudorule) or changed @@ -426,6 +473,8 @@ def main(): cmdgroup=dict(type='list', elements='str'), cmdcategory=dict(type='str', choices=['all']), cn=dict(type='str', required=True, aliases=['name']), + deny_cmd=dict(type='list', elements='str'), + deny_cmdgroup=dict(type='list', elements='str'), description=dict(type='str'), host=dict(type='list', elements='str'), hostcategory=dict(type='str', choices=['all']), @@ -440,7 +489,9 @@ def main(): runasextusers=dict(type='list', elements='str')) module = AnsibleModule(argument_spec=argument_spec, mutually_exclusive=[['cmdcategory', 'cmd'], + ['cmdcategory', 'deny_cmd'], ['cmdcategory', 'cmdgroup'], + ['cmdcategory', 'deny_cmdgroup'], ['hostcategory', 'host'], ['hostcategory', 'hostgroup'], ['usercategory', 'user'], diff --git a/plugins/modules/identity/ipa/ipa_user.py b/plugins/modules/ipa_user.py similarity index 83% rename from plugins/modules/identity/ipa/ipa_user.py rename to plugins/modules/ipa_user.py index 8a7b3abea2..4fbef766c5 100644 --- a/plugins/modules/identity/ipa/ipa_user.py +++ b/plugins/modules/ipa_user.py @@ -1,60 +1,67 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- -# Copyright: (c) 2017, Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# Copyright (c) 2017, Ansible 
Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = r''' ---- +DOCUMENTATION = r""" module: ipa_user author: Thomas Krahn (@Nosmoht) short_description: Manage FreeIPA users description: -- Add, modify and delete user within IPA server. + - Add, modify and delete user within IPA server. +attributes: + check_mode: + support: full + diff_mode: + support: none options: displayname: description: Display name. type: str update_password: description: - - Set password for a user. + - Set password for a user. type: str default: 'always' - choices: [ always, on_create ] + choices: [always, on_create] givenname: - description: First name. + description: + - First name. + - If user does not exist and O(state=present), the usage of O(givenname) is required. type: str krbpasswordexpiration: description: - - Date at which the user password will expire. - - In the format YYYYMMddHHmmss. - - e.g. 20180121182022 will expire on 21 January 2018 at 18:20:22. + - Date at which the user password expires. + - In the format YYYYMMddHHmmss. + - For example V(20180121182022) expires on 21 January 2018 at 18:20:22. type: str loginshell: description: Login shell. type: str mail: description: - - List of mail addresses assigned to the user. - - If an empty list is passed all assigned email addresses will be deleted. - - If None is passed email addresses will not be checked or changed. + - List of mail addresses assigned to the user. + - If an empty list is passed all assigned email addresses are deleted. + - If None is passed email addresses are not checked nor changed. type: list elements: str password: description: - - Password for a user. - - Will not be set for an existing user unless I(update_password=always), which is the default. 
+ - Password for a user. + - It is not set for an existing user unless O(update_password=always), which is the default. type: str sn: - description: Surname. + description: + - Surname. + - If user does not exist and O(state=present), the usage of O(sn) is required. type: str sshpubkey: description: - - List of public SSH key. - - If an empty list is passed all assigned public keys will be deleted. - - If None is passed SSH public keys will not be checked or changed. + - List of public SSH key. + - If an empty list is passed all assigned public keys are deleted. + - If None is passed SSH public keys are not checked nor changed. type: list elements: str state: @@ -64,48 +71,52 @@ options: type: str telephonenumber: description: - - List of telephone numbers assigned to the user. - - If an empty list is passed all assigned telephone numbers will be deleted. - - If None is passed telephone numbers will not be checked or changed. + - List of telephone numbers assigned to the user. + - If an empty list is passed all assigned telephone numbers are deleted. + - If None is passed telephone numbers are not checked nor changed. type: list elements: str title: description: Title. type: str uid: - description: uid of the user. + description: Uid of the user. required: true aliases: ["name"] type: str uidnumber: description: - - Account Settings UID/Posix User ID number. + - Account Settings UID/Posix User ID number. type: str gidnumber: description: - - Posix Group ID. + - Posix Group ID. type: str homedirectory: description: - - Default home directory of the user. + - Default home directory of the user. type: str version_added: '0.2.0' userauthtype: description: - - The authentication type to use for the user. - choices: ["password", "radius", "otp", "pkinit", "hardened"] + - The authentication type to use for the user. + - To remove all authentication types from the user, use an empty list V([]). + - The choice V(idp) and V(passkey) has been added in community.general 8.1.0. 
+ choices: ["password", "radius", "otp", "pkinit", "hardened", "idp", "passkey"] type: list elements: str version_added: '1.2.0' extends_documentation_fragment: -- community.general.ipa.documentation + - community.general.ipa.documentation + - community.general.ipa.connection_notes + - community.general.attributes requirements: -- base64 -- hashlib -''' + - base64 + - hashlib +""" -EXAMPLES = r''' +EXAMPLES = r""" - name: Ensure pinky is present and always reset password community.general.ipa_user: name: pinky @@ -114,12 +125,12 @@ EXAMPLES = r''' givenname: Pinky sn: Acme mail: - - pinky@acme.com + - pinky@acme.com telephonenumber: - - '+555123456' + - '+555123456' sshpubkey: - - ssh-rsa .... - - ssh-dsa .... + - ssh-rsa .... + - ssh-dsa .... uidnumber: '1001' gidnumber: '100' homedirectory: /home/pinky @@ -157,14 +168,14 @@ EXAMPLES = r''' ipa_host: ipa.example.com ipa_user: admin ipa_pass: topsecret -''' +""" -RETURN = r''' +RETURN = r""" user: - description: User as returned by IPA API + description: User as returned by IPA API. 
returned: always type: dict -''' +""" import base64 import hashlib @@ -256,7 +267,7 @@ def get_user_diff(client, ipa_user, module_user): if 'sshpubkeyfp' in ipa_user and ipa_user['sshpubkeyfp'][0][:7].upper() == 'SHA256:': hash_algo = 'sha256' module_user['sshpubkeyfp'] = [get_ssh_key_fingerprint(pubkey, hash_algo) for pubkey in module_user['ipasshpubkey']] - # Remove the ipasshpubkey element as it is not returned from IPA but save it's value to be used later on + # Remove the ipasshpubkey element as it is not returned from IPA but save its value to be used later on sshpubkey = module_user['ipasshpubkey'] del module_user['ipasshpubkey'] @@ -367,7 +378,7 @@ def main(): title=dict(type='str'), homedirectory=dict(type='str'), userauthtype=dict(type='list', elements='str', - choices=['password', 'radius', 'otp', 'pkinit', 'hardened'])) + choices=['password', 'radius', 'otp', 'pkinit', 'hardened', 'idp', 'passkey'])) module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True) diff --git a/plugins/modules/identity/ipa/ipa_vault.py b/plugins/modules/ipa_vault.py similarity index 74% rename from plugins/modules/identity/ipa/ipa_vault.py rename to plugins/modules/ipa_vault.py index 7a6a601fa9..54cbdce235 100644 --- a/plugins/modules/identity/ipa/ipa_vault.py +++ b/plugins/modules/ipa_vault.py @@ -1,83 +1,87 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- -# Copyright: (c) 2018, Juan Manuel Parrilla -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# Copyright (c) 2018, Juan Manuel Parrilla +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = r''' ---- +DOCUMENTATION = r""" module: ipa_vault author: Juan Manuel Parrilla (@jparrill) short_description: Manage FreeIPA 
vaults description: -- Add, modify and delete vaults and secret vaults. -- KRA service should be enabled to use this module. + - Add, modify and delete vaults and secret vaults. + - KRA service should be enabled to use this module. +attributes: + check_mode: + support: full + diff_mode: + support: none options: - cn: - description: - - Vault name. - - Can not be changed as it is the unique identifier. - required: true - aliases: ["name"] - type: str + cn: description: - description: - - Description. - type: str - ipavaulttype: - description: - - Vault types are based on security level. - default: "symmetric" - choices: ["asymmetric", "standard", "symmetric"] - aliases: ["vault_type"] - type: str - ipavaultpublickey: - description: - - Public key. - aliases: ["vault_public_key"] - type: str - ipavaultsalt: - description: - - Vault Salt. - aliases: ["vault_salt"] - type: str - username: - description: - - Any user can own one or more user vaults. - - Mutually exclusive with service. - aliases: ["user"] - type: list - elements: str - service: - description: - - Any service can own one or more service vaults. - - Mutually exclusive with user. - type: str - state: - description: - - State to ensure. - default: "present" - choices: ["absent", "present"] - type: str - replace: - description: - - Force replace the existant vault on IPA server. - type: bool - default: False - choices: ["True", "False"] - validate_certs: - description: - - Validate IPA server certificates. - type: bool - default: true + - Vault name. + - Can not be changed as it is the unique identifier. + required: true + aliases: ["name"] + type: str + description: + description: + - Description. + type: str + ipavaulttype: + description: + - Vault types are based on security level. + default: "symmetric" + choices: ["asymmetric", "standard", "symmetric"] + aliases: ["vault_type"] + type: str + ipavaultpublickey: + description: + - Public key. 
+ aliases: ["vault_public_key"] + type: str + ipavaultsalt: + description: + - Vault Salt. + aliases: ["vault_salt"] + type: str + username: + description: + - Any user can own one or more user vaults. + - Mutually exclusive with O(service). + aliases: ["user"] + type: list + elements: str + service: + description: + - Any service can own one or more service vaults. + - Mutually exclusive with O(user). + type: str + state: + description: + - State to ensure. + default: "present" + choices: ["absent", "present"] + type: str + replace: + description: + - Force replace the existent vault on IPA server. + type: bool + default: false + choices: ["True", "False"] + validate_certs: + description: + - Validate IPA server certificates. + type: bool + default: true extends_documentation_fragment: -- community.general.ipa.documentation + - community.general.ipa.documentation + - community.general.ipa.connection_notes + - community.general.attributes +""" -''' - -EXAMPLES = r''' +EXAMPLES = r""" - name: Ensure vault is present community.general.ipa_vault: name: vault01 @@ -86,7 +90,6 @@ EXAMPLES = r''' ipa_host: ipa.example.com ipa_user: admin ipa_pass: topsecret - validate_certs: false - name: Ensure vault is present for Admin user community.general.ipa_vault: @@ -114,7 +117,7 @@ EXAMPLES = r''' ipa_host: ipa.example.com ipa_user: admin ipa_pass: topsecret - replace: True + replace: true - name: Get vault info if already exists community.general.ipa_vault: @@ -122,14 +125,14 @@ EXAMPLES = r''' ipa_host: ipa.example.com ipa_user: admin ipa_pass: topsecret -''' +""" -RETURN = r''' +RETURN = r""" vault: - description: Vault as returned by IPA API + description: Vault as returned by IPA API. 
returned: always type: dict -''' +""" import traceback diff --git a/plugins/modules/ipbase_info.py b/plugins/modules/ipbase_info.py new file mode 100644 index 0000000000..e2d73333fa --- /dev/null +++ b/plugins/modules/ipbase_info.py @@ -0,0 +1,300 @@ +#!/usr/bin/python +# +# Copyright (c) 2023, Dominik Kukacka +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + +DOCUMENTATION = r""" +module: "ipbase_info" +version_added: "7.0.0" +short_description: "Retrieve IP geolocation and other facts of a host's IP address using the ipbase.com API" +description: + - Retrieve IP geolocation and other facts of a host's IP address using the ipbase.com API. +author: "Dominik Kukacka (@dominikkukacka)" +extends_documentation_fragment: + - "community.general.attributes" + - "community.general.attributes.info_module" +options: + ip: + description: + - The IP you want to get the info for. If not specified the API detects the IP automatically. + required: false + type: str + apikey: + description: + - The API key for the request if you need more requests. + required: false + type: str + hostname: + description: + - If the O(hostname) parameter is set to V(true), the API response contains the hostname of the IP. + required: false + type: bool + default: false + language: + description: + - An ISO Alpha 2 Language Code for localizing the IP data. + required: false + type: str + default: "en" +notes: + - Check U(https://ipbase.com/) for more information. 
+""" + +EXAMPLES = r""" +- name: "Get IP geolocation information of the primary outgoing IP" + community.general.ipbase_info: + register: my_ip_info + +- name: "Get IP geolocation information of a specific IP" + community.general.ipbase_info: + ip: "8.8.8.8" + register: my_ip_info + + +- name: "Get IP geolocation information of a specific IP with all other possible parameters" + community.general.ipbase_info: + ip: "8.8.8.8" + apikey: "xxxxxxxxxxxxxxxxxxxxxx" + hostname: true + language: "de" + register: my_ip_info +""" + +RETURN = r""" +data: + description: "JSON parsed response from ipbase.com. Please refer to U(https://ipbase.com/docs/info) for the detailed structure + of the response." + returned: success + type: dict + sample: + { + "ip": "1.1.1.1", + "hostname": "one.one.one.one", + "type": "v4", + "range_type": { + "type": "PUBLIC", + "description": "Public address" + }, + "connection": { + "asn": 13335, + "organization": "Cloudflare, Inc.", + "isp": "APNIC Research and Development", + "range": "1.1.1.1/32" + }, + "location": { + "geonames_id": 5332870, + "latitude": 34.053611755371094, + "longitude": -118.24549865722656, + "zip": "90012", + "continent": { + "code": "NA", + "name": "North America", + "name_translated": "North America" + }, + "country": { + "alpha2": "US", + "alpha3": "USA", + "calling_codes": [ + "+1" + ], + "currencies": [ + { + "symbol": "$", + "name": "US Dollar", + "symbol_native": "$", + "decimal_digits": 2, + "rounding": 0, + "code": "USD", + "name_plural": "US dollars" + } + ], + "emoji": "...", + "ioc": "USA", + "languages": [ + { + "name": "English", + "name_native": "English" + } + ], + "name": "United States", + "name_translated": "United States", + "timezones": [ + "America/New_York", + "America/Detroit", + "America/Kentucky/Louisville", + "America/Kentucky/Monticello", + "America/Indiana/Indianapolis", + "America/Indiana/Vincennes", + "America/Indiana/Winamac", + "America/Indiana/Marengo", + "America/Indiana/Petersburg", + 
"America/Indiana/Vevay", + "America/Chicago", + "America/Indiana/Tell_City", + "America/Indiana/Knox", + "America/Menominee", + "America/North_Dakota/Center", + "America/North_Dakota/New_Salem", + "America/North_Dakota/Beulah", + "America/Denver", + "America/Boise", + "America/Phoenix", + "America/Los_Angeles", + "America/Anchorage", + "America/Juneau", + "America/Sitka", + "America/Metlakatla", + "America/Yakutat", + "America/Nome", + "America/Adak", + "Pacific/Honolulu" + ], + "is_in_european_union": false, + "fips": "US", + "geonames_id": 6252001, + "hasc_id": "US", + "wikidata_id": "Q30" + }, + "city": { + "fips": "644000", + "alpha2": null, + "geonames_id": 5368753, + "hasc_id": null, + "wikidata_id": "Q65", + "name": "Los Angeles", + "name_translated": "Los Angeles" + }, + "region": { + "fips": "US06", + "alpha2": "US-CA", + "geonames_id": 5332921, + "hasc_id": "US.CA", + "wikidata_id": "Q99", + "name": "California", + "name_translated": "California" + } + }, + "tlds": [ + ".us" + ], + "timezone": { + "id": "America/Los_Angeles", + "current_time": "2023-05-04T04:30:28-07:00", + "code": "PDT", + "is_daylight_saving": true, + "gmt_offset": -25200 + }, + "security": { + "is_anonymous": false, + "is_datacenter": false, + "is_vpn": false, + "is_bot": false, + "is_abuser": true, + "is_known_attacker": true, + "is_proxy": false, + "is_spam": false, + "is_tor": false, + "is_icloud_relay": false, + "threat_score": 100 + }, + "domains": { + "count": 10943, + "domains": [ + "eliwise.academy", + "accountingprose.academy", + "pistola.academy", + "1and1-test-ntlds-fr.accountant", + "omnergy.africa" + ] + } + } +""" + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.urls import fetch_url +from urllib.parse import urlencode + + +USER_AGENT = 'ansible-community.general.ipbase_info/0.1.0' +BASE_URL = 'https://api.ipbase.com/v2/info' + + +class IpbaseInfo(object): + + def __init__(self, module): + self.module = module + + def _get_url_data(self, 
url): + response, info = fetch_url( + self.module, + url, + force=True, + timeout=10, + headers={ + 'Accept': 'application/json', + 'User-Agent': USER_AGENT, + }) + + if info['status'] != 200: + self.module.fail_json(msg='The API request to ipbase.com returned an error status code {0}'.format(info['status'])) + else: + try: + content = response.read() + result = self.module.from_json(content.decode('utf8')) + except ValueError: + self.module.fail_json( + msg='Failed to parse the ipbase.com response: ' + '{0} {1}'.format(url, content)) + else: + return result + + def info(self): + + ip = self.module.params['ip'] + apikey = self.module.params['apikey'] + hostname = self.module.params['hostname'] + language = self.module.params['language'] + + url = BASE_URL + + params = {} + if ip: + params['ip'] = ip + + if apikey: + params['apikey'] = apikey + + if hostname: + params['hostname'] = 1 + + if language: + params['language'] = language + + if params: + url += '?' + urlencode(params) + + return self._get_url_data(url) + + +def main(): + module_args = dict( + ip=dict(type='str', no_log=False), + apikey=dict(type='str', no_log=True), + hostname=dict(type='bool', no_log=False, default=False), + language=dict(type='str', no_log=False, default='en'), + ) + + module = AnsibleModule( + argument_spec=module_args, + supports_check_mode=True, + ) + + ipbase = IpbaseInfo(module) + module.exit_json(**ipbase.info()) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/net_tools/ipify_facts.py b/plugins/modules/ipify_facts.py similarity index 78% rename from plugins/modules/net_tools/ipify_facts.py rename to plugins/modules/ipify_facts.py index 2ae0348cb1..73a94db2c7 100644 --- a/plugins/modules/net_tools/ipify_facts.py +++ b/plugins/modules/ipify_facts.py @@ -1,26 +1,28 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- # -# Copyright: (c) 2015, René Moser -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# Copyright (c) 2015, 
René Moser +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = r''' ---- +DOCUMENTATION = r""" module: ipify_facts short_description: Retrieve the public IP of your internet gateway description: - If behind NAT and need to know the public IP of your internet gateway. author: -- René Moser (@resmo) + - René Moser (@resmo) +extends_documentation_fragment: + - community.general.attributes + - community.general.attributes.facts + - community.general.attributes.facts_module options: api_url: description: - URL of the ipify.org API service. - - C(?format=json) will be appended per default. + - C(?format=json) is appended by default. type: str default: https://api.ipify.org/ timeout: @@ -30,14 +32,14 @@ options: default: 10 validate_certs: description: - - When set to C(NO), SSL certificates will not be validated. + - When set to V(false), SSL certificates are not validated. type: bool - default: yes + default: true notes: - Visit https://www.ipify.org to get more information. -''' +""" -EXAMPLES = r''' +EXAMPLES = r""" # Gather IP facts from ipify.org - name: Get my public IP community.general.ipify_facts: @@ -47,16 +49,15 @@ EXAMPLES = r''' community.general.ipify_facts: api_url: http://api.example.com/ipify timeout: 20 -''' +""" -RETURN = r''' ---- +RETURN = r""" ipify_public_ip: description: Public IP of the internet gateway. 
returned: success type: str sample: 1.2.3.4 -''' +""" import json diff --git a/plugins/modules/net_tools/ipinfoio_facts.py b/plugins/modules/ipinfoio_facts.py similarity index 68% rename from plugins/modules/net_tools/ipinfoio_facts.py rename to plugins/modules/ipinfoio_facts.py index ee1d49f3ac..4d5d8b25a8 100644 --- a/plugins/modules/net_tools/ipinfoio_facts.py +++ b/plugins/modules/ipinfoio_facts.py @@ -1,82 +1,84 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- -# Copyright: (c) 2016, Aleksei Kostiuk -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# Copyright (c) 2016, Aleksei Kostiuk +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: ipinfoio_facts -short_description: "Retrieve IP geolocation facts of a host's IP address" +short_description: Retrieve IP geolocation facts of a host's IP address description: - - "Gather IP geolocation facts of a host's IP address using ipinfo.io API" + - Gather IP geolocation facts of a host's IP address using ipinfo.io API. author: "Aleksei Kostiuk (@akostyuk)" +extends_documentation_fragment: + - community.general.attributes + - community.general.attributes.facts + - community.general.attributes.facts_module options: timeout: description: - - HTTP connection timeout in seconds + - HTTP connection timeout in seconds. required: false default: 10 type: int http_agent: description: - - Set http user agent + - Set http user agent. required: false default: "ansible-ipinfoio-module/0.0.1" type: str notes: - - "Check http://ipinfo.io/ for more information" -''' + - Check U(http://ipinfo.io/) for more information. 
+""" -EXAMPLES = ''' +EXAMPLES = r""" # Retrieve geolocation data of a host's IP address - name: Get IP geolocation data community.general.ipinfoio_facts: -''' +""" -RETURN = ''' +RETURN = r""" ansible_facts: - description: "Dictionary of ip geolocation facts for a host's IP address" + description: "Dictionary of IP geolocation facts for a host's IP address." returned: changed type: complex contains: ip: - description: "Public IP address of a host" + description: "Public IP address of a host." type: str sample: "8.8.8.8" hostname: - description: Domain name + description: Domain name. type: str sample: "google-public-dns-a.google.com" country: - description: ISO 3166-1 alpha-2 country code + description: ISO 3166-1 alpha-2 country code. type: str sample: "US" region: - description: State or province name + description: State or province name. type: str sample: "California" city: - description: City name + description: City name. type: str sample: "Mountain View" loc: - description: Latitude and Longitude of the location + description: Latitude and Longitude of the location. type: str sample: "37.3860,-122.0838" org: - description: "organization's name" + description: "Organization's name." type: str sample: "AS3356 Level 3 Communications, Inc." postal: - description: Postal code + description: Postal code. 
type: str sample: "94035" -''' +""" from ansible.module_utils.basic import AnsibleModule from ansible.module_utils.urls import fetch_url diff --git a/plugins/modules/remote_management/ipmi/ipmi_boot.py b/plugins/modules/ipmi_boot.py similarity index 71% rename from plugins/modules/remote_management/ipmi/ipmi_boot.py rename to plugins/modules/ipmi_boot.py index f8cff0e7e0..30fcfb161d 100644 --- a/plugins/modules/remote_management/ipmi/ipmi_boot.py +++ b/plugins/modules/ipmi_boot.py @@ -1,23 +1,28 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- # -# Copyright: Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# Copyright Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: ipmi_boot short_description: Management of order of boot devices description: - - Use this module to manage order of boot devices + - Use this module to manage order of boot devices. +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: none options: name: description: - - Hostname or ip address of the BMC. + - Hostname or IP address of the BMC. required: true type: str port: @@ -43,15 +48,15 @@ options: version_added: 4.1.0 bootdev: description: - - Set boot device to use on next reboot - - "The choices for the device are: - - network -- Request network boot - - floppy -- Boot from floppy - - hd -- Boot from hard drive - - safe -- Boot from hard drive, requesting 'safe mode' - - optical -- boot from CD/DVD/BD drive - - setup -- Boot into setup utility - - default -- remove any IPMI directed boot device request" + - Set boot device to use on next reboot. 
+ - 'The choices for the device are:' + - V(network) -- Request network boot. + - V(floppy) -- Boot from floppy. + - V(hd) -- Boot from hard drive. + - V(safe) -- Boot from hard drive, requesting 'safe mode'. + - V(optical) -- boot from CD/DVD/BD drive. + - V(setup) -- Boot into setup utility. + - V(default) -- remove any IPMI directed boot device request. required: true choices: - network @@ -65,50 +70,46 @@ options: state: description: - Whether to ensure that boot devices is desired. - - "The choices for the state are: - - present -- Request system turn on - - absent -- Request system turn on" + - 'The choices for the state are: - present -- Request system turn on - absent -- Request system turn on.' default: present - choices: [ present, absent ] + choices: [present, absent] type: str persistent: description: - - If set, ask that system firmware uses this device beyond next boot. - Be aware many systems do not honor this. + - If set, ask that system firmware uses this device beyond next boot. Be aware many systems do not honor this. type: bool - default: 'no' + default: false uefiboot: description: - - If set, request UEFI boot explicitly. - Strictly speaking, the spec suggests that if not set, the system should BIOS boot and offers no "don't care" option. - In practice, this flag not being set does not preclude UEFI boot on any system I've encountered. + - If set, request UEFI boot explicitly. Strictly speaking, the spec suggests that if not set, the system should BIOS + boot and offers no "do not care" option. In practice, this flag not being set does not preclude UEFI boot on any system + I have encountered. type: bool - default: 'no' + default: false requirements: - - "python >= 2.6" - pyghmi author: "Bulat Gaifullin (@bgaifullin) " -''' +""" -RETURN = ''' +RETURN = r""" bootdev: - description: The boot device name which will be used beyond next boot. 
- returned: success - type: str - sample: default + description: The boot device name which is used beyond next boot. + returned: success + type: str + sample: default persistent: - description: If True, system firmware will use this device beyond next boot. - returned: success - type: bool - sample: false + description: If V(true), system firmware uses this device beyond next boot. + returned: success + type: bool + sample: false uefimode: - description: If True, system firmware will use UEFI boot explicitly beyond next boot. - returned: success - type: bool - sample: false -''' + description: If V(true), system firmware uses UEFI boot explicitly beyond next boot. + returned: success + type: bool + sample: false +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: Ensure bootdevice is HD community.general.ipmi_boot: name: test.testdomain.com @@ -124,7 +125,7 @@ EXAMPLES = ''' key: 1234567890AABBCCDEFF000000EEEE12 bootdev: network state: absent -''' +""" import traceback import binascii diff --git a/plugins/modules/remote_management/ipmi/ipmi_power.py b/plugins/modules/ipmi_power.py similarity index 75% rename from plugins/modules/remote_management/ipmi/ipmi_power.py rename to plugins/modules/ipmi_power.py index 9abf167f60..b88fba07be 100644 --- a/plugins/modules/remote_management/ipmi/ipmi_power.py +++ b/plugins/modules/ipmi_power.py @@ -1,23 +1,28 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- # -# Copyright: Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# Copyright Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: ipmi_power short_description: Power management for machine description: - - Use this module 
for power management + - Use this module for power management. +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: none options: name: description: - - Hostname or ip address of the BMC. + - Hostname or IP address of the BMC. required: true type: str port: @@ -44,13 +49,13 @@ options: state: description: - Whether to ensure that the machine in desired state. - - "The choices for state are: - - on -- Request system turn on - - off -- Request system turn off without waiting for OS to shutdown - - shutdown -- Have system request OS proper shutdown - - reset -- Request system reset without waiting for OS - - boot -- If system is off, then 'on', else 'reset'" - - Either this option or I(machine) is required. + - 'The choices for state are:' + - V(on) -- Request system turn on. + - V(off) -- Request system turn off without waiting for OS to shutdown. + - V(shutdown) -- Have system request OS proper shutdown. + - V(reset) -- Request system reset without waiting for OS. + - V(boot) -- If system is off, then V(on), else V(reset). + - Either this option or O(machine) is required. choices: ['on', 'off', shutdown, reset, boot] type: str timeout: @@ -60,9 +65,8 @@ options: type: int machine: description: - - Provide a list of the remote target address for the bridge IPMI request, - and the power status. - - Either this option or I(state) is required. + - Provide a list of the remote target address for the bridge IPMI request, and the power status. + - Either this option or O(state) is required. required: false type: list elements: dict @@ -75,63 +79,63 @@ options: required: true state: description: - - Whether to ensure that the machine specified by I(targetAddress) in desired state. - - If this option is not set, the power state is set by I(state). - - If both this option and I(state) are set, this option takes precedence over I(state). 
+ - Whether to ensure that the machine specified by O(machine[].targetAddress) in desired state. + - If this option is not set, the power state is set by O(state). + - If both this option and O(state) are set, this option takes precedence over O(state). choices: ['on', 'off', shutdown, reset, boot] type: str requirements: - - "python >= 2.6" - pyghmi author: "Bulat Gaifullin (@bgaifullin) " -''' +""" -RETURN = ''' +RETURN = r""" powerstate: - description: The current power state of the machine. - returned: success and I(machine) is not provided - type: str - sample: on + description: The current power state of the machine. + returned: success and O(machine) is not provided + type: str + sample: 'on' status: - description: The current power state of the machine when the machine option is set. - returned: success and I(machine) is provided - type: list - elements: dict - version_added: 4.3.0 - contains: - powerstate: - description: The current power state of the machine specified by I(targetAddress). - type: str - targetAddress: - description: The remote target address. - type: int - sample: [ - { - "powerstate": "on", - "targetAddress": 48, - }, - { - "powerstate": "on", - "targetAddress": 50, - }, + description: The current power state of the machine when the machine option is set. + returned: success and O(machine) is provided + type: list + elements: dict + version_added: 4.3.0 + contains: + powerstate: + description: The current power state of the machine specified by RV(status[].targetAddress). + type: str + targetAddress: + description: The remote target address. 
+ type: int + sample: + [ + { + "powerstate": "on", + "targetAddress": 48 + }, + { + "powerstate": "on", + "targetAddress": 50 + } ] -''' +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: Ensure machine is powered on community.general.ipmi_power: name: test.testdomain.com user: admin password: password - state: on + state: 'on' - name: Ensure machines of which remote target address is 48 and 50 are powered off community.general.ipmi_power: name: test.testdomain.com user: admin password: password - state: off + state: 'off' machine: - targetAddress: 48 - targetAddress: 50 @@ -143,10 +147,10 @@ EXAMPLES = ''' password: password machine: - targetAddress: 48 - state: on + state: 'on' - targetAddress: 50 - state: off -''' + state: 'off' +""" import traceback import binascii diff --git a/plugins/modules/system/iptables_state.py b/plugins/modules/iptables_state.py similarity index 81% rename from plugins/modules/system/iptables_state.py rename to plugins/modules/iptables_state.py index 1f35edc04b..0119465007 100644 --- a/plugins/modules/system/iptables_state.py +++ b/plugins/modules/iptables_state.py @@ -1,68 +1,64 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- -# Copyright: (c) 2020, quidame -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# Copyright (c) 2020, quidame +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = r''' ---- +DOCUMENTATION = r""" module: iptables_state short_description: Save iptables state into a file or restore it from a file version_added: '1.1.0' author: quidame (@quidame) +extends_documentation_fragment: + - community.general.attributes + - community.general.attributes.flow description: - - C(iptables) is used to set up, maintain, and inspect the tables of IP 
- packet filter rules in the Linux kernel. - - This module handles the saving and/or loading of rules. This is the same - as the behaviour of the C(iptables-save) and C(iptables-restore) (or - C(ip6tables-save) and C(ip6tables-restore) for IPv6) commands which this - module uses internally. - - Modifying the state of the firewall remotely may lead to loose access to - the host in case of mistake in new ruleset. This module embeds a rollback - feature to avoid this, by telling the host to restore previous rules if a - cookie is still there after a given delay, and all this time telling the - controller to try to remove this cookie on the host through a new - connection. + - C(iptables) is used to set up, maintain, and inspect the tables of IP packet filter rules in the Linux kernel. + - This module handles the saving and/or loading of rules. This is the same as the behaviour of the C(iptables-save) and + C(iptables-restore) (or C(ip6tables-save) and C(ip6tables-restore) for IPv6) commands which this module uses internally. + - Modifying the state of the firewall remotely may lead to loose access to the host in case of mistake in new ruleset. This + module embeds a rollback feature to avoid this, by telling the host to restore previous rules if a cookie is still there + after a given delay, and all this time telling the controller to try to remove this cookie on the host through a new connection. notes: - - The rollback feature is not a module option and depends on task's - attributes. To enable it, the module must be played asynchronously, i.e. - by setting task attributes I(poll) to C(0), and I(async) to a value less - or equal to C(ANSIBLE_TIMEOUT). If I(async) is greater, the rollback will - still happen if it shall happen, but you will experience a connection - timeout instead of more relevant info returned by the module after its - failure. - - This module supports I(check_mode). + - The rollback feature is not a module option and depends on task's attributes. 
To enable it, the module must be played + asynchronously, in other words by setting task attributes C(poll) to V(0), and C(async) to a value less or equal to C(ANSIBLE_TIMEOUT). + If C(async) is greater, the rollback still happens when needed, but you experience a connection timeout instead of more + relevant info returned by the module after its failure. +attributes: + check_mode: + support: full + diff_mode: + support: none + action: + support: full + async: + support: full options: counters: description: - Save or restore the values of all packet and byte counters. - - When C(true), the module is not idempotent. + - When V(true), the module is not idempotent. type: bool default: false ip_version: description: - Which version of the IP protocol this module should apply to. type: str - choices: [ ipv4, ipv6 ] + choices: [ipv4, ipv6] default: ipv4 modprobe: description: - - Specify the path to the C(modprobe) program internally used by iptables - related commands to load kernel modules. - - By default, C(/proc/sys/kernel/modprobe) is inspected to determine the - executable's path. + - Specify the path to the C(modprobe) program internally used by iptables related commands to load kernel modules. + - By default, V(/proc/sys/kernel/modprobe) is inspected to determine the executable's path. type: path noflush: description: - - For I(state=restored), ignored otherwise. - - If C(false), restoring iptables rules from a file flushes (deletes) - all previous contents of the respective table(s). If C(true), the - previous rules are left untouched (but policies are updated anyway, - for all built-in chains). + - For O(state=restored), ignored otherwise. + - If V(false), restoring iptables rules from a file flushes (deletes) all previous contents of the respective table(s). + If V(true), the previous rules are left untouched (but policies are updated anyway, for all built-in chains). 
type: bool default: false path: @@ -70,32 +66,29 @@ options: - The file the iptables state should be saved to. - The file the iptables state should be restored from. type: path - required: yes + required: true state: description: - - Whether the firewall state should be saved (into a file) or restored - (from a file). + - Whether the firewall state should be saved (into a file) or restored (from a file). type: str - choices: [ saved, restored ] - required: yes + choices: [saved, restored] + required: true table: description: - - When I(state=restored), restore only the named table even if the input - file contains other tables. Fail if the named table is not declared in - the file. - - When I(state=saved), restrict output to the specified table. If not - specified, output includes all active tables. + - When O(state=restored), restore only the named table even if the input file contains other tables. Fail if the named + table is not declared in the file. + - When O(state=saved), restrict output to the specified table. If not specified, output includes all active tables. type: str - choices: [ filter, nat, mangle, raw, security ] + choices: [filter, nat, mangle, raw, security] wait: description: - - Wait N seconds for the xtables lock to prevent instant failure in case - multiple instances of the program are running concurrently. + - Wait N seconds for the xtables lock to prevent instant failure in case multiple instances of the program are running + concurrently. type: int requirements: [iptables, ip6tables] -''' +""" -EXAMPLES = r''' +EXAMPLES = r""" # This will apply to all loaded/active IPv4 tables. 
- name: Save current state of the firewall in system file community.general.iptables_state: @@ -132,16 +125,16 @@ EXAMPLES = r''' community.general.iptables_state: state: saved path: /tmp/iptables - check_mode: yes + check_mode: true changed_when: false register: iptables_state - name: show current state of the firewall ansible.builtin.debug: var: iptables_state.initial_state -''' +""" -RETURN = r''' +RETURN = r""" applied: description: Whether or not the wanted state has been successfully restored. type: bool @@ -152,7 +145,8 @@ initial_state: type: list elements: str returned: always - sample: [ + sample: + [ "# Generated by xtables-save v1.8.2", "*filter", ":INPUT ACCEPT [0:0]", @@ -166,7 +160,8 @@ restored: type: list elements: str returned: always - sample: [ + sample: + [ "# Generated by xtables-save v1.8.2", "*filter", ":INPUT DROP [0:0]", @@ -185,7 +180,8 @@ saved: type: list elements: str returned: always - sample: [ + sample: + [ "# Generated by xtables-save v1.8.2", "*filter", ":INPUT ACCEPT [0:0]", @@ -195,7 +191,9 @@ saved: "# Completed" ] tables: - description: The iptables we have interest for when module starts. + description: + - The iptables on the system before the module has run, separated by table. + - If the option O(table) is used, only this table is included. 
type: dict contains: table: @@ -221,7 +219,7 @@ tables: ] } returned: always -''' +""" import re @@ -259,10 +257,7 @@ def read_state(b_path): ''' with open(b_path, 'r') as f: text = f.read() - lines = text.splitlines() - while '' in lines: - lines.remove('') - return lines + return [t for t in text.splitlines() if t != ''] def write_state(b_path, lines, changed): @@ -272,8 +267,7 @@ def write_state(b_path, lines, changed): # Populate a temporary file tmpfd, tmpfile = tempfile.mkstemp() with os.fdopen(tmpfd, 'w') as f: - for line in lines: - f.write('%s\n' % line) + f.write("{0}\n".format("\n".join(lines))) # Prepare to copy temporary file to the final destination if not os.path.exists(b_path): @@ -334,29 +328,31 @@ def filter_and_format_state(string): string = re.sub(r'((^|\n)# (Generated|Completed)[^\n]*) on [^\n]*', r'\1', string) if not module.params['counters']: string = re.sub(r'\[[0-9]+:[0-9]+\]', r'[0:0]', string) - lines = string.splitlines() - while '' in lines: - lines.remove('') + lines = [line for line in string.splitlines() if line != ''] return lines -def per_table_state(command, state): +def parse_per_table_state(all_states_dump): ''' Convert raw iptables-save output into usable datastructure, for reliable comparisons between initial and final states. 
''' + lines = filter_and_format_state(all_states_dump) tables = dict() - for t in TABLES: - COMMAND = list(command) - if '*%s' % t in state.splitlines(): - COMMAND.extend(['--table', t]) - dummy, out, dummy = module.run_command(COMMAND, check_rc=True) - out = re.sub(r'(^|\n)(# Generated|# Completed|[*]%s|COMMIT)[^\n]*' % t, r'', out) - out = re.sub(r' *\[[0-9]+:[0-9]+\] *', r'', out) - table = out.splitlines() - while '' in table: - table.remove('') - tables[t] = table + current_table = '' + current_list = list() + for line in lines: + if re.match(r'^[*](filter|mangle|nat|raw|security)$', line): + current_table = line[1:] + continue + if line == 'COMMIT': + tables[current_table] = current_list + current_table = '' + current_list = list() + continue + if line.startswith('# '): + continue + current_list.append(line) return tables @@ -416,7 +412,7 @@ def main(): COMMANDARGS.extend(['--table', table]) if wait is not None: - TESTCOMMAND.extend(['--wait', '%s' % wait]) + TESTCOMMAND.extend(['--wait', '%d' % wait]) if modprobe is not None: b_modprobe = to_bytes(modprobe, errors='surrogate_or_strict') @@ -447,6 +443,7 @@ def main(): if not os.access(b_path, os.R_OK): module.fail_json(msg="Source %s not readable" % path) state_to_restore = read_state(b_path) + cmd = None else: cmd = ' '.join(SAVECOMMAND) @@ -455,7 +452,7 @@ def main(): # The issue comes when wanting to restore state from empty iptable-save's # output... what happens when, say: # - no table is specified, and iptables-save's output is only nat table; - # - we give filter's ruleset to iptables-restore, that locks ourselve out + # - we give filter's ruleset to iptables-restore, that locks ourselves out # of the host; # then trying to roll iptables state back to the previous (working) setup # doesn't override current filter table because no filter table is stored @@ -483,7 +480,7 @@ def main(): # Depending on the value of 'table', initref_state may differ from # initial_state. 
(rc, stdout, stderr) = module.run_command(SAVECOMMAND, check_rc=True) - tables_before = per_table_state(SAVECOMMAND, stdout) + tables_before = parse_per_table_state(stdout) initref_state = filter_and_format_state(stdout) if state == 'saved': @@ -503,7 +500,7 @@ def main(): MAINCOMMAND.insert(0, bin_iptables_restore) if wait is not None: - MAINCOMMAND.extend(['--wait', '%s' % wait]) + MAINCOMMAND.extend(['--wait', '%d' % wait]) if _back is not None: b_back = to_bytes(_back, errors='surrogate_or_strict') @@ -547,8 +544,7 @@ def main(): if module.check_mode: tmpfd, tmpfile = tempfile.mkstemp() with os.fdopen(tmpfd, 'w') as f: - for line in initial_state: - f.write('%s\n' % line) + f.write("{0}\n".format("\n".join(initial_state))) if filecmp.cmp(tmpfile, b_path): restored_state = initial_state @@ -581,14 +577,17 @@ def main(): (rc, stdout, stderr) = module.run_command(SAVECOMMAND, check_rc=True) restored_state = filter_and_format_state(stdout) - + tables_after = parse_per_table_state('\n'.join(restored_state)) if restored_state not in (initref_state, initial_state): - if module.check_mode: - changed = True - else: - tables_after = per_table_state(SAVECOMMAND, stdout) - if tables_after != tables_before: + for table_name, table_content in tables_after.items(): + if table_name not in tables_before: + # Would initialize a table, which doesn't exist yet changed = True + break + if tables_before[table_name] != table_content: + # Content of some table changes + changed = True + break if _back is None or module.check_mode: module.exit_json( @@ -631,7 +630,7 @@ def main(): os.remove(b_back) (rc, stdout, stderr) = module.run_command(SAVECOMMAND, check_rc=True) - tables_rollback = per_table_state(SAVECOMMAND, stdout) + tables_rollback = parse_per_table_state(stdout) msg = ( "Failed to confirm state restored from %s after %ss. 
" diff --git a/plugins/modules/net_tools/ipwcli_dns.py b/plugins/modules/ipwcli_dns.py similarity index 65% rename from plugins/modules/net_tools/ipwcli_dns.py rename to plugins/modules/ipwcli_dns.py index 8a6122edff..909da24ddf 100644 --- a/plugins/modules/net_tools/ipwcli_dns.py +++ b/plugins/modules/ipwcli_dns.py @@ -1,124 +1,129 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- -# Copyright: (c) 2020, Christian Wollinger -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# Copyright (c) 2020, Christian Wollinger +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: ipwcli_dns -short_description: Manage DNS Records for Ericsson IPWorks via ipwcli +short_description: Manage DNS Records for Ericsson IPWorks using C(ipwcli) version_added: '0.2.0' description: - - "Manage DNS records for the Ericsson IPWorks DNS server. The module will use the ipwcli to deploy the DNS records." - + - Manage DNS records for the Ericsson IPWorks DNS server. The module uses the C(ipwcli) to deploy the DNS records. requirements: - - ipwcli (installed on Ericsson IPWorks) + - ipwcli (installed on Ericsson IPWorks) notes: - - To make the DNS record changes effective, you need to run C(update dnsserver) on the ipwcli. + - To make the DNS record changes effective, you need to run C(update dnsserver) on the ipwcli. +extends_documentation_fragment: + - community.general.attributes + +attributes: + check_mode: + support: full + diff_mode: + support: none options: - dnsname: - description: - - Name of the record. - required: true - type: str - type: - description: - - Type of the record. 
- required: true - type: str - choices: [ NAPTR, SRV, A, AAAA ] - container: - description: - - Sets the container zone for the record. - required: true - type: str - address: - description: - - The IP address for the A or AAAA record. - - Required for C(type=A) or C(type=AAAA) - type: str - ttl: - description: - - Sets the TTL of the record. - type: int - default: 3600 - state: - description: - - Whether the record should exist or not. - type: str - choices: [ absent, present ] - default: present - priority: - description: - - Sets the priority of the SRV record. - type: int - default: 10 - weight: - description: - - Sets the weight of the SRV record. - type: int - default: 10 - port: - description: - - Sets the port of the SRV record. - - Required for C(type=SRV) - type: int - target: - description: - - Sets the target of the SRV record. - - Required for C(type=SRV) - type: str - order: - description: - - Sets the order of the NAPTR record. - - Required for C(type=NAPTR) - type: int - preference: - description: - - Sets the preference of the NAPTR record. - - Required for C(type=NAPTR) - type: int - flags: - description: - - Sets one of the possible flags of NAPTR record. - - Required for C(type=NAPTR) - type: str - choices: ['S', 'A', 'U', 'P'] - service: - description: - - Sets the service of the NAPTR record. - - Required for C(type=NAPTR) - type: str - replacement: - description: - - Sets the replacement of the NAPTR record. - - Required for C(type=NAPTR) - type: str - username: - description: - - Username to login on ipwcli. - type: str - required: true - password: - description: - - Password to login on ipwcli. - type: str - required: true + dnsname: + description: + - Name of the record. + required: true + type: str + type: + description: + - Type of the record. + required: true + type: str + choices: [NAPTR, SRV, A, AAAA] + container: + description: + - Sets the container zone for the record. 
+ required: true + type: str + address: + description: + - The IP address for the A or AAAA record. + - Required for O(type=A) or O(type=AAAA). + type: str + ttl: + description: + - Sets the TTL of the record. + type: int + default: 3600 + state: + description: + - Whether the record should exist or not. + type: str + choices: [absent, present] + default: present + priority: + description: + - Sets the priority of the SRV record. + type: int + default: 10 + weight: + description: + - Sets the weight of the SRV record. + type: int + default: 10 + port: + description: + - Sets the port of the SRV record. + - Required for O(type=SRV). + type: int + target: + description: + - Sets the target of the SRV record. + - Required for O(type=SRV). + type: str + order: + description: + - Sets the order of the NAPTR record. + - Required for O(type=NAPTR). + type: int + preference: + description: + - Sets the preference of the NAPTR record. + - Required for O(type=NAPTR). + type: int + flags: + description: + - Sets one of the possible flags of NAPTR record. + - Required for O(type=NAPTR). + type: str + choices: ['S', 'A', 'U', 'P'] + service: + description: + - Sets the service of the NAPTR record. + - Required for O(type=NAPTR). + type: str + replacement: + description: + - Sets the replacement of the NAPTR record. + - Required for O(type=NAPTR). + type: str + username: + description: + - Username to login on ipwcli. + type: str + required: true + password: + description: + - Password to login on ipwcli. + type: str + required: true author: - - Christian Wollinger (@cwollinger) -''' + - Christian Wollinger (@cwollinger) +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: Create A record community.general.ipwcli_dns: dnsname: example.com @@ -147,17 +152,16 @@ EXAMPLES = ''' service: 'SIP+D2T' replacement: '_sip._tcp.test.example.com.' 
flags: S -''' +""" -RETURN = ''' +RETURN = r""" record: - description: The created record from the input params - type: str - returned: always -''' + description: The created record from the input params. + type: str + returned: always +""" from ansible.module_utils.basic import AnsibleModule -import os class ResourceRecord(object): @@ -265,18 +269,18 @@ def run_module(): dnsname=dict(type='str', required=True), type=dict(type='str', required=True, choices=['A', 'AAAA', 'SRV', 'NAPTR']), container=dict(type='str', required=True), - address=dict(type='str', required=False), - ttl=dict(type='int', required=False, default=3600), + address=dict(type='str'), + ttl=dict(type='int', default=3600), state=dict(type='str', default='present', choices=['absent', 'present']), - priority=dict(type='int', required=False, default=10), - weight=dict(type='int', required=False, default=10), - port=dict(type='int', required=False), - target=dict(type='str', required=False), - order=dict(type='int', required=False), - preference=dict(type='int', required=False), - flags=dict(type='str', required=False, choices=['S', 'A', 'U', 'P']), - service=dict(type='str', required=False), - replacement=dict(type='str', required=False), + priority=dict(type='int', default=10), + weight=dict(type='int', default=10), + port=dict(type='int'), + target=dict(type='str'), + order=dict(type='int'), + preference=dict(type='int'), + flags=dict(type='str', choices=['S', 'A', 'U', 'P']), + service=dict(type='str'), + replacement=dict(type='str'), username=dict(type='str', required=True), password=dict(type='str', required=True, no_log=True) ) diff --git a/plugins/modules/notification/irc.py b/plugins/modules/irc.py similarity index 67% rename from plugins/modules/notification/irc.py rename to plugins/modules/irc.py index 9b1b91f586..537b26e0bc 100644 --- a/plugins/modules/notification/irc.py +++ b/plugins/modules/irc.py @@ -1,29 +1,34 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- -# (c) 2013, Jan-Piet Mens -# 
GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# Copyright (c) 2013, Jan-Piet Mens +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: irc short_description: Send a message to an IRC channel or a nick description: - - Send a message to an IRC channel or a nick. This is a very simplistic implementation. + - Send a message to an IRC channel or a nick. This is a very simplistic implementation. +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: none options: server: type: str description: - - IRC server name/address + - IRC server name/address. default: localhost port: type: int description: - - IRC server port number + - IRC server port number. default: 6667 nick: type: str @@ -38,69 +43,102 @@ options: topic: type: str description: - - Set the channel topic + - Set the channel topic. color: type: str description: - - Text color for the message. ("none" is a valid option in 1.6 or later, in 1.6 and prior, the default color is black, not "none"). - Added 11 more colors in version 2.0. + - Text color for the message. default: "none" - choices: [ "none", "white", "black", "blue", "green", "red", "brown", "purple", "orange", "yellow", "light_green", "teal", "light_cyan", - "light_blue", "pink", "gray", "light_gray"] + choices: + - none + - white + - black + - blue + - green + - red + - brown + - purple + - orange + - yellow + - light_green + - teal + - light_cyan + - light_blue + - pink + - gray + - light_gray aliases: [colour] channel: type: str description: - - Channel name. One of nick_to or channel needs to be set. 
When both are set, the message will be sent to both of them. + - Channel name. One of nick_to or channel needs to be set. When both are set, the message is sent to both of them. nick_to: type: list elements: str description: - - A list of nicknames to send the message to. One of nick_to or channel needs to be set. When both are defined, the message will be sent to both of them. + - A list of nicknames to send the message to. One of nick_to or channel needs to be set. When both are defined, the + message is sent to both of them. key: type: str description: - - Channel key + - Channel key. passwd: type: str description: - - Server password + - Server password. timeout: type: int description: - - Timeout to use while waiting for successful registration and join - messages, this is to prevent an endless loop + - Timeout to use while waiting for successful registration and join messages, this is to prevent an endless loop. default: 30 - use_ssl: + use_tls: description: - - Designates whether TLS/SSL should be used when connecting to the IRC server + - Designates whether TLS/SSL should be used when connecting to the IRC server. + - O(use_tls) is available since community.general 8.1.0, before the option was exlusively called O(use_ssl). The latter + is now an alias of O(use_tls). + - B(Note:) for security reasons, you should always set O(use_tls=true) and O(validate_certs=true) whenever possible. + - The default of this option changed to V(true) in community.general 10.0.0. type: bool - default: 'no' + default: true + aliases: + - use_ssl part: description: - - Designates whether user should part from channel after sending message or not. - Useful for when using a faux bot and not wanting join/parts between messages. + - Designates whether user should part from channel after sending message or not. Useful for when using a mock bot and + not wanting join/parts between messages. 
type: bool - default: 'yes' + default: true style: type: str description: - - Text style for the message. Note italic does not work on some clients - choices: [ "bold", "underline", "reverse", "italic", "none" ] + - Text style for the message. Note italic does not work on some clients. + choices: ["bold", "underline", "reverse", "italic", "none"] default: none + validate_certs: + description: + - If set to V(false), the SSL certificates are not validated. + - This should always be set to V(true). Using V(false) is unsafe and should only be done if the network between between + Ansible and the IRC server is known to be safe. + - B(Note:) for security reasons, you should always set O(use_tls=true) and O(validate_certs=true) whenever possible. + - The default of this option changed to V(true) in community.general 10.0.0. + type: bool + default: true + version_added: 8.1.0 # informational: requirements for nodes -requirements: [ socket ] +requirements: [socket] author: - - "Jan-Piet Mens (@jpmens)" - - "Matt Martz (@sivel)" -''' + - "Jan-Piet Mens (@jpmens)" + - "Matt Martz (@sivel)" +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: Send a message to an IRC channel from nick ansible community.general.irc: server: irc.example.net - channel: #t1 + use_tls: true + validate_certs: true + channel: '#t1' msg: Hello world - name: Send a message to an IRC channel @@ -108,7 +146,9 @@ EXAMPLES = ''' module: irc port: 6669 server: irc.example.net - channel: #t1 + use_tls: true + validate_certs: true + channel: '#t1' msg: 'All finished at {{ ansible_date_time.iso8601 }}' color: red nick: ansibleIRC @@ -118,14 +158,16 @@ EXAMPLES = ''' module: irc port: 6669 server: irc.example.net - channel: #t1 + use_tls: true + validate_certs: true + channel: '#t1' nick_to: - nick1 - nick2 msg: 'All finished at {{ ansible_date_time.iso8601 }}' color: red nick: ansibleIRC -''' +""" # =========================================== # IRC module support methods. 
@@ -142,7 +184,8 @@ from ansible.module_utils.basic import AnsibleModule def send_msg(msg, server='localhost', port='6667', channel=None, nick_to=None, key=None, topic=None, - nick="ansible", color='none', passwd=False, timeout=30, use_ssl=False, part=True, style=None): + nick="ansible", color='none', passwd=False, timeout=30, use_tls=False, validate_certs=True, + part=True, style=None): '''send message to IRC''' nick_to = [] if nick_to is None else nick_to @@ -186,8 +229,15 @@ def send_msg(msg, server='localhost', port='6667', channel=None, nick_to=None, k message = styletext + colortext + msg irc = socket.socket(socket.AF_INET, socket.SOCK_STREAM) - if use_ssl: - irc = ssl.wrap_socket(irc) + if use_tls: + kwargs = {} + if validate_certs: + context = ssl.create_default_context() + kwargs["server_hostname"] = server + else: + context = ssl.SSLContext(ssl.PROTOCOL_TLS) + context.verify_mode = ssl.CERT_NONE + irc = context.wrap_socket(irc, **kwargs) irc.connect((server, int(port))) if passwd: @@ -252,7 +302,7 @@ def main(): server=dict(default='localhost'), port=dict(type='int', default=6667), nick=dict(default='ansible'), - nick_to=dict(required=False, type='list', elements='str'), + nick_to=dict(type='list', elements='str'), msg=dict(required=True), color=dict(default="none", aliases=['colour'], choices=["white", "black", "blue", "green", "red", "brown", @@ -261,13 +311,14 @@ def main(): "light_blue", "pink", "gray", "light_gray", "none"]), style=dict(default="none", choices=["underline", "reverse", "bold", "italic", "none"]), - channel=dict(required=False), + channel=dict(), key=dict(no_log=True), topic=dict(), passwd=dict(no_log=True), timeout=dict(type='int', default=30), part=dict(type='bool', default=True), - use_ssl=dict(type='bool', default=False) + use_tls=dict(type='bool', default=True, aliases=['use_ssl']), + validate_certs=dict(type='bool', default=True), ), supports_check_mode=True, required_one_of=[['channel', 'nick_to']] @@ -286,12 +337,13 @@ def 
main(): key = module.params["key"] passwd = module.params["passwd"] timeout = module.params["timeout"] - use_ssl = module.params["use_ssl"] + use_tls = module.params["use_tls"] part = module.params["part"] style = module.params["style"] + validate_certs = module.params["validate_certs"] try: - send_msg(msg, server, port, channel, nick_to, key, topic, nick, color, passwd, timeout, use_ssl, part, style) + send_msg(msg, server, port, channel, nick_to, key, topic, nick, color, passwd, timeout, use_tls, validate_certs, part, style) except Exception as e: module.fail_json(msg="unable to send to IRC: %s" % to_native(e), exception=traceback.format_exc()) diff --git a/plugins/modules/files/iso_create.py b/plugins/modules/iso_create.py similarity index 65% rename from plugins/modules/files/iso_create.py rename to plugins/modules/iso_create.py index 3fa456339e..8d11bb2248 100644 --- a/plugins/modules/files/iso_create.py +++ b/plugins/modules/iso_create.py @@ -1,81 +1,87 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- -# Copyright: (c) 2020, Ansible Project -# Copyright: (c) 2020, VMware, Inc. All Rights Reserved. -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# Copyright (c) 2020, Ansible Project +# Copyright (c) 2020, VMware, Inc. All Rights Reserved. +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = r''' ---- +DOCUMENTATION = r""" module: iso_create short_description: Generate ISO file with specified files or folders description: - - This module is used to generate ISO file with specified path of files. + - This module is used to generate ISO file with specified path of files. 
author: - - Diane Wang (@Tomorrow9) + - Diane Wang (@Tomorrow9) requirements: -- "pycdlib" -- "python >= 2.7" + - "pycdlib" version_added: '0.2.0' -options: - src_files: - description: - - This is a list of absolute paths of source files or folders which will be contained in the new generated ISO file. - - Will fail if specified file or folder in C(src_files) does not exist on local machine. - - 'Note: With all ISO9660 levels from 1 to 3, all file names are restricted to uppercase letters, numbers and - underscores (_). File names are limited to 31 characters, directory nesting is limited to 8 levels, and path - names are limited to 255 characters.' - type: list - required: yes - elements: path - dest_iso: - description: - - The absolute path with file name of the new generated ISO file on local machine. - - Will create intermediate folders when they does not exist. - type: path - required: yes - interchange_level: - description: - - The ISO9660 interchange level to use, it dictates the rules on the names of files. - - Levels and valid values C(1), C(2), C(3), C(4) are supported. - - The default value is level C(1), which is the most conservative, level C(3) is recommended. - - ISO9660 file names at interchange level C(1) cannot have more than 8 characters or 3 characters in the extension. - type: int - default: 1 - choices: [1, 2, 3, 4] - vol_ident: - description: - - The volume identification string to use on the new generated ISO image. - type: str - rock_ridge: - description: - - Whether to make this ISO have the Rock Ridge extensions or not. - - Valid values are C(1.09), C(1.10) or C(1.12), means adding the specified Rock Ridge version to the ISO. - - If unsure, set C(1.09) to ensure maximum compatibility. - - If not specified, then not add Rock Ridge extension to the ISO. - type: str - choices: ['1.09', '1.10', '1.12'] - joliet: - description: - - Support levels and valid values are C(1), C(2), or C(3). - - Level C(3) is by far the most common. 
- - If not specified, then no Joliet support is added. - type: int - choices: [1, 2, 3] - udf: - description: - - Whether to add UDF support to this ISO. - - If set to C(True), then version 2.60 of the UDF spec is used. - - If not specified or set to C(False), then no UDF support is added. - type: bool - default: False -''' +extends_documentation_fragment: + - community.general.attributes -EXAMPLES = r''' +attributes: + check_mode: + support: full + diff_mode: + support: none + +options: + src_files: + description: + - This is a list of absolute paths of source files or folders to be contained in the new generated ISO file. + - The module fails if specified file or folder in O(src_files) does not exist on local machine. + - 'Note: With all ISO9660 levels from 1 to 3, all file names are restricted to uppercase letters, numbers and underscores + (_). File names are limited to 31 characters, directory nesting is limited to 8 levels, and path names are limited + to 255 characters.' + type: list + required: true + elements: path + dest_iso: + description: + - The absolute path with file name of the new generated ISO file on local machine. + - It creates intermediate folders when they do not exist. + type: path + required: true + interchange_level: + description: + - The ISO9660 interchange level to use, it dictates the rules on the names of files. + - Levels and valid values V(1), V(2), V(3), V(4) are supported. + - The default value is level V(1), which is the most conservative, level V(3) is recommended. + - ISO9660 file names at interchange level V(1) cannot have more than 8 characters or 3 characters in the extension. + type: int + default: 1 + choices: [1, 2, 3, 4] + vol_ident: + description: + - The volume identification string to use on the new generated ISO image. + type: str + rock_ridge: + description: + - Whether to make this ISO have the Rock Ridge extensions or not. 
+ - Valid values are V(1.09), V(1.10) or V(1.12), means adding the specified Rock Ridge version to the ISO. + - If unsure, set V(1.09) to ensure maximum compatibility. + - If not specified, then not add Rock Ridge extension to the ISO. + type: str + choices: ['1.09', '1.10', '1.12'] + joliet: + description: + - Support levels and valid values are V(1), V(2), or V(3). + - Level V(3) is by far the most common. + - If not specified, then no Joliet support is added. + type: int + choices: [1, 2, 3] + udf: + description: + - Whether to add UDF support to this ISO. + - If set to V(true), then version 2.60 of the UDF spec is used. + - If not specified or set to V(false), then no UDF support is added. + type: bool + default: false +""" + +EXAMPLES = r""" - name: Create an ISO file community.general.iso_create: src_files: @@ -100,46 +106,46 @@ EXAMPLES = r''' interchange_level: 3 joliet: 3 vol_ident: WIN_AUTOINSTALL -''' +""" -RETURN = r''' +RETURN = r""" source_file: - description: Configured source files or directories list. - returned: on success - type: list - elements: path - sample: ["/path/to/file.txt", "/path/to/folder"] + description: Configured source files or directories list. + returned: on success + type: list + elements: path + sample: ["/path/to/file.txt", "/path/to/folder"] created_iso: - description: Created iso file path. - returned: on success - type: str - sample: "/path/to/test.iso" + description: Created iso file path. + returned: on success + type: str + sample: "/path/to/test.iso" interchange_level: - description: Configured interchange level. - returned: on success - type: int - sample: 3 + description: Configured interchange level. + returned: on success + type: int + sample: 3 vol_ident: - description: Configured volume identification string. - returned: on success - type: str - sample: "OEMDRV" + description: Configured volume identification string. 
+ returned: on success + type: str + sample: "OEMDRV" joliet: - description: Configured Joliet support level. - returned: on success - type: int - sample: 3 + description: Configured Joliet support level. + returned: on success + type: int + sample: 3 rock_ridge: - description: Configured Rock Ridge version. - returned: on success - type: str - sample: "1.09" + description: Configured Rock Ridge version. + returned: on success + type: str + sample: "1.09" udf: - description: Configured UDF support. - returned: on success - type: bool - sample: False -''' + description: Configured UDF support. + returned: on success + type: bool + sample: false +""" import os import traceback @@ -187,9 +193,9 @@ def add_directory(module, iso_file=None, dir_path=None, rock_ridge=None, use_jol if rock_ridge: rr_name = os.path.basename(dir_path) if use_joliet: - joliet_path = iso_dir_path + joliet_path = dir_path if use_udf: - udf_path = iso_dir_path + udf_path = dir_path try: iso_file.add_directory(iso_path=iso_dir_path, rr_name=rr_name, joliet_path=joliet_path, udf_path=udf_path) except Exception as err: @@ -253,7 +259,7 @@ def main(): udf=use_udf ) if not module.check_mode: - iso_file = pycdlib.PyCdlib() + iso_file = pycdlib.PyCdlib(always_consistent=True) iso_file.new(interchange_level=inter_level, vol_ident=volume_id, rock_ridge=rock_ridge, joliet=use_joliet, udf=use_udf) for src_file in src_file_list: diff --git a/plugins/modules/iso_customize.py b/plugins/modules/iso_customize.py new file mode 100644 index 0000000000..7e64f949bd --- /dev/null +++ b/plugins/modules/iso_customize.py @@ -0,0 +1,342 @@ +#!/usr/bin/python + +# Copyright (c) 2022, Ansible Project +# Copyright (c) 2022, VMware, Inc. All Rights Reserved. 
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + +DOCUMENTATION = r""" +module: iso_customize +short_description: Add/remove/change files in ISO file +description: + - This module is used to add/remove/change files in ISO file. + - The file inside ISO is overwritten if it exists by option O(add_files). +author: + - Yuhua Zou (@ZouYuhua) +requirements: + - "pycdlib" +version_added: '5.8.0' + +extends_documentation_fragment: + - community.general.attributes + +attributes: + check_mode: + support: full + diff_mode: + support: none + +options: + src_iso: + description: + - This is the path of source ISO file. + type: path + required: true + dest_iso: + description: + - The path of the customized ISO file. + type: path + required: true + delete_files: + description: + - Absolute paths for files inside the ISO file that should be removed. + type: list + required: false + elements: str + default: [] + add_files: + description: + - Allows to add and replace files in the ISO file. + - It creates intermediate folders inside the ISO file when they do not exist. + type: list + required: false + elements: dict + default: [] + suboptions: + src_file: + description: + - The path with file name on the machine the module is executed on. + type: path + required: true + dest_file: + description: + - The absolute path of the file inside the ISO file. + type: str + required: true +notes: + - The C(pycdlib) library states it supports Python 2.7 and 3.4+. + - The function C(add_file) in pycdlib is designed to overwrite the existing file in ISO with type ISO9660 / Rock Ridge 1.12 + / Joliet / UDF. But it does not overwrite the existing file in ISO with Rock Ridge 1.09 / 1.10. So we take workaround + "delete the existing file and then add file for ISO with Rock Ridge". 
+""" + +EXAMPLES = r""" +- name: "Customize ISO file" + community.general.iso_customize: + src_iso: "/path/to/ubuntu-22.04-desktop-amd64.iso" + dest_iso: "/path/to/ubuntu-22.04-desktop-amd64-customized.iso" + delete_files: + - "/boot.catalog" + add_files: + - src_file: "/path/to/grub.cfg" + dest_file: "/boot/grub/grub.cfg" + - src_file: "/path/to/ubuntu.seed" + dest_file: "/preseed/ubuntu.seed" + register: customize_iso_result +""" + +RETURN = r""" +src_iso: + description: Path of source ISO file. + returned: on success + type: str + sample: "/path/to/file.iso" +dest_iso: + description: Path of the customized ISO file. + returned: on success + type: str + sample: "/path/to/customized.iso" +""" + +import os + +from ansible_collections.community.general.plugins.module_utils import deps +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.common.text.converters import to_native + +with deps.declare("pycdlib"): + import pycdlib + + +# The upper dir exist, we only add subdirectoy +def iso_add_dir(module, opened_iso, iso_type, dir_path): + parent_dir, check_dirname = dir_path.rsplit("/", 1) + if not parent_dir.strip(): + parent_dir = "/" + check_dirname = check_dirname.strip() + + for dirname, dirlist, dummy_filelist in opened_iso.walk(iso_path=parent_dir.upper()): + if dirname == parent_dir.upper(): + if check_dirname.upper() in dirlist: + return + + if parent_dir == "/": + current_dirpath = "/%s" % check_dirname + else: + current_dirpath = "%s/%s" % (parent_dir, check_dirname) + + current_dirpath_upper = current_dirpath.upper() + try: + if iso_type == "iso9660": + opened_iso.add_directory(current_dirpath_upper) + elif iso_type == "rr": + opened_iso.add_directory(current_dirpath_upper, rr_name=check_dirname) + elif iso_type == "joliet": + opened_iso.add_directory(current_dirpath_upper, joliet_path=current_dirpath) + elif iso_type == "udf": + opened_iso.add_directory(current_dirpath_upper, udf_path=current_dirpath) + except Exception as err: + 
msg = "Failed to create dir %s with error: %s" % (current_dirpath, to_native(err)) + module.fail_json(msg=msg) + + +def iso_add_dirs(module, opened_iso, iso_type, dir_path): + dirnames = dir_path.strip().split("/") + + current_dirpath = "/" + for item in dirnames: + if not item.strip(): + continue + if current_dirpath == "/": + current_dirpath = "/%s" % item + else: + current_dirpath = "%s/%s" % (current_dirpath, item) + + iso_add_dir(module, opened_iso, iso_type, current_dirpath) + + +def iso_check_file_exists(opened_iso, dest_file): + file_dir = os.path.dirname(dest_file).strip() + file_name = os.path.basename(dest_file) + dirnames = file_dir.strip().split("/") + + parent_dir = "/" + for item in dirnames: + if not item.strip(): + continue + + for dirname, dirlist, dummy_filelist in opened_iso.walk(iso_path=parent_dir.upper()): + if dirname != parent_dir.upper(): + break + + if item.upper() not in dirlist: + return False + + if parent_dir == "/": + parent_dir = "/%s" % item + else: + parent_dir = "%s/%s" % (parent_dir, item) + + if '.' not in file_name: + file_in_iso_path = file_name.upper() + '.;1' + else: + file_in_iso_path = file_name.upper() + ';1' + + for dirname, dummy_dirlist, filelist in opened_iso.walk(iso_path=parent_dir.upper()): + if dirname != parent_dir.upper(): + return False + + return file_name.upper() in filelist or file_in_iso_path in filelist + + +def iso_add_file(module, opened_iso, iso_type, src_file, dest_file): + dest_file = dest_file.strip() + if dest_file[0] != "/": + dest_file = "/%s" % dest_file + + file_local = src_file.strip() + + file_dir = os.path.dirname(dest_file).strip() + file_name = os.path.basename(dest_file) + if '.' 
not in file_name: + file_in_iso_path = dest_file.upper() + '.;1' + else: + file_in_iso_path = dest_file.upper() + ';1' + + if file_dir and file_dir != "/": + iso_add_dirs(module, opened_iso, iso_type, file_dir) + + try: + if iso_type == "iso9660": + opened_iso.add_file(file_local, iso_path=file_in_iso_path) + elif iso_type == "rr": + # For ISO with Rock Ridge 1.09 / 1.10, it won't overwrite the existing file + # So we take workaround here: delete the existing file and then add file + if iso_check_file_exists(opened_iso, dest_file): + opened_iso.rm_file(iso_path=file_in_iso_path) + opened_iso.add_file(file_local, iso_path=file_in_iso_path, rr_name=file_name) + elif iso_type == "joliet": + opened_iso.add_file(file_local, iso_path=file_in_iso_path, joliet_path=dest_file) + elif iso_type == "udf": + # For ISO with UDF, it won't always succeed to overwrite the existing file + # So we take workaround here: delete the existing file and then add file + if iso_check_file_exists(opened_iso, dest_file): + opened_iso.rm_file(udf_path=dest_file) + opened_iso.add_file(file_local, iso_path=file_in_iso_path, udf_path=dest_file) + except Exception as err: + msg = "Failed to add local file %s to ISO with error: %s" % (file_local, to_native(err)) + module.fail_json(msg=msg) + + +def iso_delete_file(module, opened_iso, iso_type, dest_file): + dest_file = dest_file.strip() + if dest_file[0] != "/": + dest_file = "/%s" % dest_file + file_name = os.path.basename(dest_file) + + if not iso_check_file_exists(opened_iso, dest_file): + module.fail_json(msg="The file %s does not exist." % dest_file) + + if '.' 
not in file_name: + file_in_iso_path = dest_file.upper() + '.;1' + else: + file_in_iso_path = dest_file.upper() + ';1' + + try: + if iso_type == "iso9660": + opened_iso.rm_file(iso_path=file_in_iso_path) + elif iso_type == "rr": + opened_iso.rm_file(iso_path=file_in_iso_path) + elif iso_type == "joliet": + opened_iso.rm_file(joliet_path=dest_file) + elif iso_type == "udf": + opened_iso.rm_file(udf_path=dest_file) + except Exception as err: + msg = "Failed to delete iso file %s with error: %s" % (dest_file, to_native(err)) + module.fail_json(msg=msg) + + +def iso_rebuild(module, src_iso, dest_iso, delete_files_list, add_files_list): + iso = None + iso_type = "iso9660" + + try: + iso = pycdlib.PyCdlib(always_consistent=True) + iso.open(src_iso) + if iso.has_rock_ridge(): + iso_type = "rr" + elif iso.has_joliet(): + iso_type = "joliet" + elif iso.has_udf(): + iso_type = "udf" + + for item in delete_files_list: + iso_delete_file(module, iso, iso_type, item) + + for item in add_files_list: + iso_add_file(module, iso, iso_type, item['src_file'], item['dest_file']) + + iso.write(dest_iso) + except Exception as err: + msg = "Failed to rebuild ISO %s with error: %s" % (src_iso, to_native(err)) + module.fail_json(msg=msg) + finally: + if iso: + iso.close() + + +def main(): + argument_spec = dict( + src_iso=dict(type='path', required=True), + dest_iso=dict(type='path', required=True), + delete_files=dict(type='list', elements='str', default=[]), + add_files=dict( + type='list', elements='dict', default=[], + options=dict( + src_file=dict(type='path', required=True), + dest_file=dict(type='str', required=True), + ), + ), + ) + module = AnsibleModule( + argument_spec=argument_spec, + required_one_of=[('delete_files', 'add_files'), ], + supports_check_mode=True, + ) + deps.validate(module) + + src_iso = module.params['src_iso'] + if not os.path.exists(src_iso): + module.fail_json(msg="ISO file %s does not exist." 
% src_iso) + + dest_iso = module.params['dest_iso'] + dest_iso_dir = os.path.dirname(dest_iso) + if dest_iso_dir and not os.path.exists(dest_iso_dir): + module.fail_json(msg="The dest directory %s does not exist" % dest_iso_dir) + + delete_files_list = [s.strip() for s in module.params['delete_files']] + add_files_list = module.params['add_files'] + if add_files_list: + for item in add_files_list: + if not os.path.exists(item['src_file']): + module.fail_json(msg="The file %s does not exist." % item['src_file']) + + result = dict( + src_iso=src_iso, + customized_iso=dest_iso, + delete_files=delete_files_list, + add_files=add_files_list, + changed=True, + ) + + if not module.check_mode: + iso_rebuild(module, src_iso, dest_iso, delete_files_list, add_files_list) + + result['changed'] = True + module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/files/iso_extract.py b/plugins/modules/iso_extract.py similarity index 62% rename from plugins/modules/files/iso_extract.py rename to plugins/modules/iso_extract.py index 81fe6b662f..11897744a8 100644 --- a/plugins/modules/files/iso_extract.py +++ b/plugins/modules/iso_extract.py @@ -1,85 +1,93 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- -# Copyright: (c) 2013, Jeroen Hoekx -# Copyright: (c) 2016, Matt Robinson -# Copyright: (c) 2017, Dag Wieers -# Copyright: (c) 2017, Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# Copyright (c) 2013, Jeroen Hoekx +# Copyright (c) 2016, Matt Robinson +# Copyright (c) 2017, Dag Wieers +# Copyright (c) 2017, Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = r''' ---- +DOCUMENTATION = r""" author: -- Jeroen Hoekx (@jhoekx) 
-- Matt Robinson (@ribbons) -- Dag Wieers (@dagwieers) + - Jeroen Hoekx (@jhoekx) + - Matt Robinson (@ribbons) + - Dag Wieers (@dagwieers) module: iso_extract short_description: Extract files from an ISO image description: -- This module has two possible ways of operation. -- If 7zip is installed on the system, this module extracts files from an ISO - into a temporary directory and copies files to a given destination, - if needed. -- If the user has mount-capabilities (CAP_SYS_ADMIN on Linux) this module - mounts the ISO image to a temporary location, and copies files to a given - destination, if needed. + - This module has two possible ways of operation. + - If 7zip is installed on the system, this module extracts files from an ISO into a temporary directory and copies files + to a given destination, if needed. + - If the user has mount-capabilities (CAP_SYS_ADMIN on Linux) this module mounts the ISO image to a temporary location, + and copies files to a given destination, if needed. requirements: -- Either 7z (from I(7zip) or I(p7zip) package) -- Or mount capabilities (root-access, or CAP_SYS_ADMIN capability on Linux) + - Either 7z (from C(7zip) or C(p7zip) package) + - Or mount capabilities (root-access, or CAP_SYS_ADMIN capability on Linux) +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: none options: image: description: - - The ISO image to extract files from. + - The ISO image to extract files from. type: path - required: yes - aliases: [ path, src ] + required: true + aliases: [path, src] dest: description: - - The destination directory to extract files to. + - The destination directory to extract files to. type: path - required: yes + required: true files: description: - - A list of files to extract from the image. - - Extracting directories does not work. + - A list of files to extract from the image. + - Extracting directories does not work. 
type: list elements: str - required: yes + required: true force: description: - - If C(yes), which will replace the remote file when contents are different than the source. - - If C(no), the file will only be extracted and copied if the destination does not already exist. + - If V(true), it replaces the remote file when contents are different than the source. + - If V(false), the file is only extracted and copied if the destination does not already exist. type: bool - default: yes + default: true executable: description: - - The path to the C(7z) executable to use for extracting files from the ISO. - - If not provided, it will assume the value C(7z). + - The path to the C(7z) executable to use for extracting files from the ISO. + - If not provided, it assumes the value V(7z). type: path + password: + description: + - Password used to decrypt files from the ISO. + - It is only used if C(7z) is used. + - The password is used as a command line argument to 7z. This is a B(potential security risk) that allows passwords + to be revealed if someone else can list running processes on the same machine in the right moment. + type: str + version_added: 10.1.0 notes: -- Only the file checksum (content) is taken into account when extracting files - from the ISO image. If C(force=no), only checks the presence of the file. -- In Ansible 2.3 this module was using C(mount) and C(umount) commands only, - requiring root access. This is no longer needed with the introduction of 7zip - for extraction. -''' + - Only the file checksum (content) is taken into account when extracting files from the ISO image. If O(force=false), only + checks the presence of the file. 
+""" -EXAMPLES = r''' +EXAMPLES = r""" - name: Extract kernel and ramdisk from a LiveCD community.general.iso_extract: image: /tmp/rear-test.iso dest: /tmp/virt-rear/ files: - - isolinux/kernel - - isolinux/initrd.cgz -''' + - isolinux/kernel + - isolinux/initrd.cgz +""" -RETURN = r''' +RETURN = r""" # -''' +""" import os.path import shutil @@ -95,6 +103,7 @@ def main(): dest=dict(type='path', required=True), files=dict(type='list', elements='str', required=True), force=dict(type='bool', default=True), + password=dict(type='str', no_log=True), executable=dict(type='path'), # No default on purpose ), supports_check_mode=True, @@ -103,6 +112,7 @@ def main(): dest = module.params['dest'] files = module.params['files'] force = module.params['force'] + password = module.params['password'] executable = module.params['executable'] result = dict( @@ -149,7 +159,10 @@ def main(): # Use 7zip when we have a binary, otherwise try to mount if binary: - cmd = [binary, 'x', image, '-o%s' % tmp_dir] + extract_files + cmd = [binary, 'x', image, '-o%s' % tmp_dir] + if password: + cmd += ["-p%s" % password] + cmd += extract_files else: cmd = [module.get_bin_path('mount'), '-o', 'loop,ro', image, tmp_dir] diff --git a/plugins/modules/notification/jabber.py b/plugins/modules/jabber.py similarity index 81% rename from plugins/modules/notification/jabber.py rename to plugins/modules/jabber.py index 9b6811b3fa..096a9c6ef2 100644 --- a/plugins/modules/notification/jabber.py +++ b/plugins/modules/jabber.py @@ -1,34 +1,39 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- # -# (c) 2015, Brian Coca -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# Copyright (c) 2015, Brian Coca +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import 
annotations -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: jabber short_description: Send a message to jabber user or chat room description: - - Send a message to jabber + - Send a message to jabber. +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: none options: user: type: str description: - - User as which to connect + - User as which to connect. required: true password: type: str description: - - password for user to connect + - Password for user to connect. required: true to: type: str description: - - user ID or name of the room, when using room use a slash to indicate your nick. + - User ID or name of the room, when using room use a slash to indicate your nick. required: true msg: type: str @@ -38,24 +43,22 @@ options: host: type: str description: - - host to connect, overrides user info + - Host to connect, overrides user info. port: type: int description: - - port to connect to, overrides default + - Port to connect to, overrides default. default: 5222 encoding: type: str description: - - message encoding - -# informational: requirements for nodes + - Message encoding. 
requirements: - - python xmpp (xmpppy) + - python xmpp (xmpppy) author: "Brian Coca (@bcoca)" -''' +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: Send a message to a user community.general.jabber: user: mybot@example.net @@ -78,7 +81,7 @@ EXAMPLES = ''' password: secret to: mychaps@example.net msg: Ansible task finished -''' +""" import time import traceback @@ -103,9 +106,9 @@ def main(): password=dict(required=True, no_log=True), to=dict(required=True), msg=dict(required=True), - host=dict(required=False), - port=dict(required=False, default=5222, type='int'), - encoding=dict(required=False), + host=dict(), + port=dict(default=5222, type='int'), + encoding=dict(), ), supports_check_mode=True ) diff --git a/plugins/modules/system/java_cert.py b/plugins/modules/java_cert.py similarity index 76% rename from plugins/modules/system/java_cert.py rename to plugins/modules/java_cert.py index afeab9d9e7..2f1f33f782 100644 --- a/plugins/modules/system/java_cert.py +++ b/plugins/modules/java_cert.py @@ -1,37 +1,49 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- -# Copyright: (c) 2013, RSD Services S.A -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# Copyright (c) 2013, RSD Services S.A +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = r''' ---- +DOCUMENTATION = r""" module: java_cert short_description: Uses keytool to import/remove certificate to/from java keystore (cacerts) description: - - This is a wrapper module around keytool, which can be used to import certificates - and optionally private keys to a given java keystore, or remove them from it. 
+ - This is a wrapper module around keytool, which can be used to import certificates and optionally private keys to a given + java keystore, or remove them from it. +extends_documentation_fragment: + - community.general.attributes + - ansible.builtin.files +attributes: + check_mode: + support: full + diff_mode: + support: full options: cert_url: description: - Basic URL to fetch SSL certificate from. - - Exactly one of C(cert_url), C(cert_path) or C(pkcs12_path) is required to load certificate. + - Exactly one of O(cert_url), O(cert_path), O(cert_content), or O(pkcs12_path) is required to load certificate. type: str cert_port: description: - Port to connect to URL. - - This will be used to create server URL:PORT. + - This is used to create server URL:PORT. type: int default: 443 cert_path: description: - Local path to load certificate from. - - Exactly one of C(cert_url), C(cert_path) or C(pkcs12_path) is required to load certificate. + - Exactly one of O(cert_url), O(cert_path), O(cert_content), or O(pkcs12_path) is required to load certificate. type: path + cert_content: + description: + - Content of the certificate used to create the keystore. + - Exactly one of O(cert_url), O(cert_path), O(cert_content), or O(pkcs12_path) is required to load certificate. + type: str + version_added: 8.6.0 cert_alias: description: - Imported certificate alias. @@ -41,21 +53,19 @@ options: description: - Trust imported cert as CAcert. type: bool - default: False + default: false version_added: '0.2.0' pkcs12_path: description: - Local path to load PKCS12 keystore from. - - Unlike C(cert_url) and C(cert_path), the PKCS12 keystore embeds the private key matching - the certificate, and is used to import both the certificate and its private key into the - java keystore. - - Exactly one of C(cert_url), C(cert_path) or C(pkcs12_path) is required to load certificate. 
+ - Unlike O(cert_url), O(cert_path) and O(cert_content), the PKCS12 keystore embeds the private key matching the certificate, + and is used to import both the certificate and its private key into the java keystore. + - Exactly one of O(cert_url), O(cert_path), O(cert_content), or O(pkcs12_path) is required to load certificate. type: path pkcs12_password: description: - Password for importing from PKCS12 keystore. type: str - default: '' pkcs12_alias: description: - Alias in the PKCS12 keystore. @@ -86,17 +96,35 @@ options: state: description: - Defines action which can be either certificate import or removal. - - When state is present, the certificate will always idempotently be inserted - into the keystore, even if there already exists a cert alias that is different. + - When O(state=present), the certificate is always inserted into the keystore, even if there already exists a cert alias + that is different. type: str - choices: [ absent, present ] + choices: [absent, present] default: present + mode: + version_added: 8.5.0 + owner: + version_added: 8.5.0 + group: + version_added: 8.5.0 + seuser: + version_added: 8.5.0 + serole: + version_added: 8.5.0 + setype: + version_added: 8.5.0 + selevel: + version_added: 8.5.0 + unsafe_writes: + version_added: 8.5.0 + attributes: + version_added: 8.5.0 requirements: [openssl, keytool] author: -- Adam Hamsik (@haad) -''' + - Adam Hamsik (@haad) +""" -EXAMPLES = r''' +EXAMPLES = r""" - name: Import SSL certificate from google.com to a given cacerts keystore community.general.java_cert: cert_url: google.com @@ -118,17 +146,30 @@ EXAMPLES = r''' cert_path: /opt/certs/rootca.crt keystore_path: /tmp/cacerts keystore_pass: changeit - keystore_create: yes + keystore_create: true state: present cert_alias: LE_RootCA - trust_cacert: True + trust_cacert: true + +- name: Import trusted CA from the SSL certificate stored in the cert_content variable + community.general.java_cert: + cert_content: | + -----BEGIN CERTIFICATE----- + ... 
+ -----END CERTIFICATE----- + keystore_path: /tmp/cacerts + keystore_pass: changeit + keystore_create: true + state: present + cert_alias: LE_RootCA + trust_cacert: true - name: Import SSL certificate from google.com to a keystore, create it if it doesn't exist community.general.java_cert: cert_url: google.com keystore_path: /tmp/cacerts keystore_pass: changeit - keystore_create: yes + keystore_create: true state: present - name: Import a pkcs12 keystore with a specified alias, create it if it doesn't exist @@ -137,7 +178,7 @@ EXAMPLES = r''' cert_alias: default keystore_path: /opt/wildfly/standalone/configuration/defaultkeystore.jks keystore_pass: changeit - keystore_create: yes + keystore_create: true state: present - name: Import SSL certificate to JCEKS keystore @@ -149,41 +190,26 @@ EXAMPLES = r''' keystore_path: /opt/someapp/security/keystore.jceks keystore_type: "JCEKS" keystore_pass: changeit - keystore_create: yes + keystore_create: true state: present -''' - -RETURN = r''' -msg: - description: Output from stdout of keytool command after execution of given command. - returned: success - type: str - sample: "Module require existing keystore at keystore_path '/tmp/test/cacerts'" - -rc: - description: Keytool command execution return value. - returned: success - type: int - sample: "0" +""" +RETURN = r""" cmd: description: Executed command to get action done. 
returned: success type: str sample: "keytool -importcert -noprompt -keystore" -''' +""" import os import tempfile -import random -import string import re - +from urllib.parse import urlparse +from urllib.request import getproxies # import module snippets from ansible.module_utils.basic import AnsibleModule -from ansible.module_utils.six.moves.urllib.parse import urlparse -from ansible.module_utils.six.moves.urllib.request import getproxies def _get_keystore_type_keytool_parameters(keystore_type): @@ -237,7 +263,7 @@ def _get_first_certificate_from_x509_file(module, pem_certificate_file, pem_cert (extract_rc, dummy, extract_stderr) = module.run_command(extract_cmd, check_rc=False) if extract_rc != 0: - # this time it's a real failure + # this time it is a real failure module.fail_json(msg="Internal module failure, cannot extract certificate, error: %s" % extract_stderr, rc=extract_rc, cmd=extract_cmd) @@ -274,16 +300,18 @@ def _export_public_cert_from_pkcs12(module, executable, pkcs_file, alias, passwo "-noprompt", "-keystore", pkcs_file, - "-alias", - alias, "-storetype", "pkcs12", "-rfc" ] + # Append optional alias + if alias: + export_cmd.extend(["-alias", alias]) (export_rc, export_stdout, export_err) = module.run_command(export_cmd, data=password, check_rc=False) if export_rc != 0: - module.fail_json(msg="Internal module failure, cannot extract public certificate from pkcs12, error: %s" % export_stdout, + module.fail_json(msg="Internal module failure, cannot extract public certificate from PKCS12, message: %s" % export_stdout, + stderr=export_err, rc=export_rc) with open(dest, 'w') as f: @@ -325,6 +353,12 @@ def build_proxy_options(): return proxy_opts +def _update_permissions(module, keystore_path): + """ Updates keystore file attributes as necessary """ + file_args = module.load_file_common_arguments(module.params, path=keystore_path) + return module.set_fs_attributes_if_different(file_args, False) + + def _download_cert_url(module, executable, url, port): 
""" Fetches the certificate from the remote URL using `keytool -printcert...` The PEM formatted string is returned """ @@ -345,6 +379,10 @@ def import_pkcs12_path(module, executable, pkcs12_path, pkcs12_pass, pkcs12_alia keystore_path, keystore_pass, keystore_alias, keystore_type): ''' Import pkcs12 from path into keystore located on keystore_path as alias ''' + optional_aliases = { + "-destalias": keystore_alias, + "-srcalias": pkcs12_alias + } import_cmd = [ executable, "-importkeystore", @@ -353,13 +391,14 @@ def import_pkcs12_path(module, executable, pkcs12_path, pkcs12_pass, pkcs12_alia "pkcs12", "-srckeystore", pkcs12_path, - "-srcalias", - pkcs12_alias, "-destkeystore", keystore_path, - "-destalias", - keystore_alias ] + # Append optional aliases + for flag, value in optional_aliases.items(): + if value: + import_cmd.extend([flag, value]) + import_cmd += _get_keystore_type_keytool_parameters(keystore_type) secret_data = "%s\n%s" % (keystore_pass, pkcs12_pass) @@ -369,15 +408,15 @@ def import_pkcs12_path(module, executable, pkcs12_path, pkcs12_pass, pkcs12_alia # Use local certificate from local path and import it to a java keystore (import_rc, import_out, import_err) = module.run_command(import_cmd, data=secret_data, check_rc=False) - diff = {'before': '\n', 'after': '%s\n' % keystore_alias} - if import_rc == 0 and os.path.exists(keystore_path): - module.exit_json(changed=True, msg=import_out, - rc=import_rc, cmd=import_cmd, stdout=import_out, - error=import_err, diff=diff) - else: + + if import_rc != 0 or not os.path.exists(keystore_path): module.fail_json(msg=import_out, rc=import_rc, cmd=import_cmd, error=import_err) + return dict(changed=True, msg=import_out, + rc=import_rc, cmd=import_cmd, stdout=import_out, + error=import_err, diff=diff) + def import_cert_path(module, executable, path, keystore_path, keystore_pass, alias, keystore_type, trust_cacert): ''' Import certificate from path into keystore located on @@ -402,17 +441,17 @@ def 
import_cert_path(module, executable, path, keystore_path, keystore_pass, ali (import_rc, import_out, import_err) = module.run_command(import_cmd, data="%s\n%s" % (keystore_pass, keystore_pass), check_rc=False) - diff = {'before': '\n', 'after': '%s\n' % alias} - if import_rc == 0: - module.exit_json(changed=True, msg=import_out, - rc=import_rc, cmd=import_cmd, stdout=import_out, - error=import_err, diff=diff) - else: - module.fail_json(msg=import_out, rc=import_rc, cmd=import_cmd) + + if import_rc != 0: + module.fail_json(msg=import_out, rc=import_rc, cmd=import_cmd, error=import_err) + + return dict(changed=True, msg=import_out, + rc=import_rc, cmd=import_cmd, stdout=import_out, + error=import_err, diff=diff) -def delete_cert(module, executable, keystore_path, keystore_pass, alias, keystore_type, exit_after=True): +def delete_cert(module, executable, keystore_path, keystore_pass, alias, keystore_type): ''' Delete certificate identified with alias from keystore on keystore_path ''' del_cmd = [ executable, @@ -428,13 +467,13 @@ def delete_cert(module, executable, keystore_path, keystore_pass, alias, keystor # Delete SSL certificate from keystore (del_rc, del_out, del_err) = module.run_command(del_cmd, data=keystore_pass, check_rc=True) + diff = {'before': '%s\n' % alias, 'after': None} - if exit_after: - diff = {'before': '%s\n' % alias, 'after': None} + if del_rc != 0: + module.fail_json(msg=del_out, rc=del_rc, cmd=del_cmd, error=del_err) - module.exit_json(changed=True, msg=del_out, - rc=del_rc, cmd=del_cmd, stdout=del_out, - error=del_err, diff=diff) + return dict(changed=True, msg=del_out, rc=del_rc, cmd=del_cmd, + stdout=del_out, error=del_err, diff=diff) def test_keytool(module, executable): @@ -456,6 +495,7 @@ def main(): argument_spec = dict( cert_url=dict(type='str'), cert_path=dict(type='path'), + cert_content=dict(type='str'), pkcs12_path=dict(type='path'), pkcs12_password=dict(type='str', no_log=True), pkcs12_alias=dict(type='str'), @@ -472,17 +512,19 @@ 
def main(): module = AnsibleModule( argument_spec=argument_spec, - required_if=[['state', 'present', ('cert_path', 'cert_url', 'pkcs12_path'), True], + required_if=[['state', 'present', ('cert_path', 'cert_url', 'cert_content', 'pkcs12_path'), True], ['state', 'absent', ('cert_url', 'cert_alias'), True]], required_together=[['keystore_path', 'keystore_pass']], mutually_exclusive=[ - ['cert_url', 'cert_path', 'pkcs12_path'] + ['cert_url', 'cert_path', 'cert_content', 'pkcs12_path'] ], supports_check_mode=True, + add_file_common_args=True, ) url = module.params.get('cert_url') path = module.params.get('cert_path') + content = module.params.get('cert_content') port = module.params.get('cert_port') pkcs12_path = module.params.get('pkcs12_path') @@ -520,12 +562,14 @@ def main(): module.add_cleanup_file(new_certificate) module.add_cleanup_file(old_certificate) + result = dict() + if state == 'absent' and alias_exists: if module.check_mode: module.exit_json(changed=True) - # delete and exit - delete_cert(module, executable, keystore_path, keystore_pass, cert_alias, keystore_type) + # delete + result = delete_cert(module, executable, keystore_path, keystore_pass, cert_alias, keystore_type) # dump certificate to enroll in the keystore on disk and compute digest if state == 'present': @@ -548,6 +592,10 @@ def main(): # certificate to stdout so we don't need to do any transformations. 
new_certificate = path + elif content: + with open(new_certificate, "w") as f: + f.write(content) + elif url: # Getting the X509 digest from a URL is the same as from a path, we just have # to download the cert first @@ -563,16 +611,20 @@ def main(): if alias_exists: # The certificate in the keystore does not match with the one we want to be present # The existing certificate must first be deleted before we insert the correct one - delete_cert(module, executable, keystore_path, keystore_pass, cert_alias, keystore_type, exit_after=False) + delete_cert(module, executable, keystore_path, keystore_pass, cert_alias, keystore_type) if pkcs12_path: - import_pkcs12_path(module, executable, pkcs12_path, pkcs12_pass, pkcs12_alias, - keystore_path, keystore_pass, cert_alias, keystore_type) + result = import_pkcs12_path(module, executable, pkcs12_path, pkcs12_pass, pkcs12_alias, + keystore_path, keystore_pass, cert_alias, keystore_type) else: - import_cert_path(module, executable, new_certificate, keystore_path, - keystore_pass, cert_alias, keystore_type, trust_cacert) + result = import_cert_path(module, executable, new_certificate, keystore_path, + keystore_pass, cert_alias, keystore_type, trust_cacert) - module.exit_json(changed=False) + if os.path.exists(keystore_path): + changed_permissions = _update_permissions(module, keystore_path) + result['changed'] = result.get('changed', False) or changed_permissions + + module.exit_json(**result) if __name__ == "__main__": diff --git a/plugins/modules/system/java_keystore.py b/plugins/modules/java_keystore.py similarity index 83% rename from plugins/modules/system/java_keystore.py rename to plugins/modules/java_keystore.py index 772d3a69b1..6cb063e883 100644 --- a/plugins/modules/system/java_keystore.py +++ b/plugins/modules/java_keystore.py @@ -1,55 +1,55 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- -# Copyright: (c) 2021, quidame -# Copyright: (c) 2016, Guillaume Grossetie -# GNU General Public License v3.0+ (see COPYING or 
https://www.gnu.org/licenses/gpl-3.0.txt) +# Copyright (c) 2021, quidame +# Copyright (c) 2016, Guillaume Grossetie +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: java_keystore short_description: Create a Java keystore in JKS format description: - Bundle a x509 certificate and its private key into a Java Keystore in JKS format. +attributes: + check_mode: + support: full + diff_mode: + support: none options: name: description: - Name of the certificate in the keystore. - - If the provided name does not exist in the keystore, the module - will re-create the keystore. This behavior changed in community.general 3.0.0, - before that the module would fail when the name did not match. + - If the provided name does not exist in the keystore, the module re-creates the keystore. This behavior changed in + community.general 3.0.0, before that the module would fail when the name did not match. type: str required: true certificate: description: - Content of the certificate used to create the keystore. - - If the fingerprint of the provided certificate does not match the - fingerprint of the certificate bundled in the keystore, the keystore - is regenerated with the provided certificate. - - Exactly one of I(certificate) or I(certificate_path) is required. + - If the fingerprint of the provided certificate does not match the fingerprint of the certificate bundled in the keystore, + the keystore is regenerated with the provided certificate. + - Exactly one of O(certificate) or O(certificate_path) is required. type: str certificate_path: description: - Location of the certificate used to create the keystore. 
- - If the fingerprint of the provided certificate does not match the - fingerprint of the certificate bundled in the keystore, the keystore - is regenerated with the provided certificate. - - Exactly one of I(certificate) or I(certificate_path) is required. + - If the fingerprint of the provided certificate does not match the fingerprint of the certificate bundled in the keystore, + the keystore is regenerated with the provided certificate. + - Exactly one of O(certificate) or O(certificate_path) is required. type: path version_added: '3.0.0' private_key: description: - Content of the private key used to create the keystore. - - Exactly one of I(private_key) or I(private_key_path) is required. + - Exactly one of O(private_key) or O(private_key_path) is required. type: str private_key_path: description: - Location of the private key used to create the keystore. - - Exactly one of I(private_key) or I(private_key_path) is required. + - Exactly one of O(private_key) or O(private_key_path) is required. type: path version_added: '3.0.0' private_key_passphrase: @@ -60,10 +60,8 @@ options: password: description: - Password that should be used to secure the keystore. - - If the provided password fails to unlock the keystore, the module - will re-create the keystore with the new passphrase. This behavior - changed in community.general 3.0.0, before that the module would fail - when the password did not match. + - If the provided password fails to unlock the keystore, the module re-creates the keystore with the new passphrase. + This behavior changed in community.general 3.0.0, before that the module would fail when the password did not match. type: str required: true dest: @@ -75,7 +73,7 @@ options: description: - Keystore is created even if it already exists. type: bool - default: 'no' + default: false owner: description: - Name of the user that should own jks file. @@ -100,56 +98,50 @@ options: keystore_type: description: - Type of the Java keystore. 
- - When this option is omitted and the keystore doesn't already exist, the - behavior follows C(keytool)'s default store type which depends on - Java version; C(pkcs12) since Java 9 and C(jks) prior (may also - be C(pkcs12) if new default has been backported to this version). - - When this option is omitted and the keystore already exists, the current - type is left untouched, unless another option leads to overwrite the - keystore (in that case, this option behaves like for keystore creation). - - When I(keystore_type) is set, the keystore is created with this type if - it doesn't already exist, or is overwritten to match the given type in - case of mismatch. + - When this option is omitted and the keystore does not already exist, the behavior follows C(keytool)'s default store + type which depends on Java version; V(pkcs12) since Java 9 and V(jks) prior (may also be V(pkcs12) if new default + has been backported to this version). + - When this option is omitted and the keystore already exists, the current type is left untouched, unless another option + leads to overwrite the keystore (in that case, this option behaves like for keystore creation). + - When O(keystore_type) is set, the keystore is created with this type if it does not already exist, or is overwritten + to match the given type in case of mismatch. 
type: str choices: - jks - pkcs12 version_added: 3.3.0 requirements: - - openssl in PATH (when I(ssl_backend=openssl)) + - openssl in PATH (when O(ssl_backend=openssl)) - keytool in PATH - - cryptography >= 3.0 (when I(ssl_backend=cryptography)) + - cryptography >= 3.0 (when O(ssl_backend=cryptography)) author: - Guillaume Grossetie (@Mogztter) - quidame (@quidame) extends_documentation_fragment: - - files + - ansible.builtin.files + - community.general.attributes seealso: - module: community.crypto.openssl_pkcs12 - module: community.general.java_cert notes: - - I(certificate) and I(private_key) require that their contents are available - on the controller (either inline in a playbook, or with the C(file) lookup), - while I(certificate_path) and I(private_key_path) require that the files are - available on the target host. - - By design, any change of a value of options I(keystore_type), I(name) or - I(password), as well as changes of key or certificate materials will cause - the existing I(dest) to be overwritten. -''' + - O(certificate) and O(private_key) require that their contents are available on the controller (either inline in a playbook, + or with the P(ansible.builtin.file#lookup) lookup), while O(certificate_path) and O(private_key_path) require that the + files are available on the target host. + - By design, any change of a value of options O(keystore_type), O(name) or O(password), as well as changes of key or certificate + materials causes the existing O(dest) to be overwritten. +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: Create a keystore for the given certificate/private key pair (inline) community.general.java_keystore: name: example certificate: | -----BEGIN CERTIFICATE----- - h19dUZ2co2fI/ibYiwxWk4aeNE6KWvCaTQOMQ8t6Uo2XKhpL/xnjoAgh1uCQN/69 - MG+34+RhUWzCfdZH7T8/qDxJw2kEPKluaYh7KnMsba+5jHjmtzix5QIDAQABo4IB + h19dUZ2co2f... 
-----END CERTIFICATE----- private_key: | -----BEGIN RSA PRIVATE KEY----- - DBVFTEVDVFJJQ0lURSBERSBGUkFOQ0UxFzAVBgNVBAsMDjAwMDIgNTUyMDgxMzE3 - GLlDNMw/uHyME7gHFsqJA7O11VY6O5WQ4IDP3m/s5ZV6s+Nn6Lerz17VZ99 + DBVFTEVDVFJ... -----END RSA PRIVATE KEY----- password: changeit dest: /etc/security/keystore.jks @@ -169,40 +161,27 @@ EXAMPLES = ''' private_key_path: /etc/ssl/private/ssl-cert-snakeoil.key password: changeit dest: /etc/security/keystore.jks -''' - -RETURN = ''' -msg: - description: Output from stdout of keytool/openssl command after execution of given command or an error. - returned: changed and failure - type: str - sample: "Unable to find the current certificate fingerprint in ..." +""" +RETURN = r""" err: description: Output from stderr of keytool/openssl command after error of given command. returned: failure type: str sample: "Keystore password is too short - must be at least 6 characters\n" -rc: - description: keytool/openssl command execution return value - returned: changed and failure - type: int - sample: "0" - cmd: - description: Executed command to get action done + description: Executed command to get action done. 
returned: changed and failure type: str sample: "/usr/bin/openssl x509 -noout -in /tmp/user/1000/tmp8jd_lh23 -fingerprint -sha256" -''' +""" import os import re import tempfile -from ansible.module_utils.six import PY2 from ansible.module_utils.basic import AnsibleModule, missing_required_lib from ansible.module_utils.common.text.converters import to_bytes, to_native @@ -280,7 +259,7 @@ class JavaKeystore: except (OSError, ValueError) as e: self.module.fail_json(msg="Unable to read the provided certificate: %s" % to_native(e)) - fp = hex_decode(cert.fingerprint(hashes.SHA256())).upper() + fp = cert.fingerprint(hashes.SHA256()).hex().upper() fingerprint = ':'.join([fp[i:i + 2] for i in range(0, len(fp), 2)]) else: current_certificate_fingerprint_cmd = [ @@ -465,7 +444,7 @@ class JavaKeystore: if self.keystore_type == 'pkcs12': # Preserve properties of the destination file, if any. - self.module.atomic_move(keystore_p12_path, self.keystore_path) + self.module.atomic_move(os.path.abspath(keystore_p12_path), os.path.abspath(self.keystore_path)) self.update_permissions() self.result['changed'] = True return self.result @@ -528,12 +507,6 @@ def create_file(content): return tmpfile -def hex_decode(s): - if PY2: - return s.decode('hex') - return s.hex() - - def main(): choose_between = (['certificate', 'certificate_path'], ['private_key', 'private_key_path']) diff --git a/plugins/modules/web_infrastructure/jboss.py b/plugins/modules/jboss.py similarity index 85% rename from plugins/modules/web_infrastructure/jboss.py rename to plugins/modules/jboss.py index 5512e10ee4..c26e0188a1 100644 --- a/plugins/modules/web_infrastructure/jboss.py +++ b/plugins/modules/jboss.py @@ -1,18 +1,24 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- -# (c) 2013, Jeroen Hoekx -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# Copyright (c) 2013, Jeroen Hoekx +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or 
https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = r''' +DOCUMENTATION = r""" module: jboss short_description: Deploy applications to JBoss description: - Deploy applications to JBoss standalone using the filesystem. +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: none options: deployment: required: true @@ -22,8 +28,8 @@ options: src: description: - The remote path of the application ear or war to deploy. - - Required when I(state=present). - - Ignored when I(state=absent). + - Required when O(state=present). + - Ignored when O(state=absent). type: path deploy_path: default: /var/lib/jbossas/standalone/deployments @@ -31,23 +37,23 @@ options: - The location in the filesystem where the deployment scanner listens. type: path state: - choices: [ present, absent ] + choices: [present, absent] default: "present" description: - Whether the application should be deployed or undeployed. type: str notes: - - The JBoss standalone deployment-scanner has to be enabled in standalone.xml - - The module can wait until I(deployment) file is deployed/undeployed by deployment-scanner. - Duration of waiting time depends on scan-interval parameter from standalone.xml. - - Ensure no identically named application is deployed through the JBoss CLI + - The JBoss standalone deployment-scanner has to be enabled in C(standalone.xml). + - The module can wait until O(deployment) file is deployed/undeployed by deployment-scanner. Duration of waiting time depends + on scan-interval parameter from C(standalone.xml). + - Ensure no identically named application is deployed through the JBoss CLI. seealso: -- name: WildFly reference - description: Complete reference of the WildFly documentation. 
- link: https://docs.wildfly.org + - name: WildFly reference + description: Complete reference of the WildFly documentation. + link: https://docs.wildfly.org author: - Jeroen Hoekx (@jhoekx) -''' +""" EXAMPLES = r""" - name: Deploy a hello world application to the default deploy_path @@ -72,7 +78,6 @@ EXAMPLES = r""" RETURN = r""" # """ import os -import shutil import time from ansible.module_utils.basic import AnsibleModule diff --git a/plugins/modules/web_infrastructure/jenkins_build.py b/plugins/modules/jenkins_build.py similarity index 76% rename from plugins/modules/web_infrastructure/jenkins_build.py rename to plugins/modules/jenkins_build.py index 0141185342..a088ce7dae 100644 --- a/plugins/modules/web_infrastructure/jenkins_build.py +++ b/plugins/modules/jenkins_build.py @@ -1,24 +1,30 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- # -# Copyright: (c) Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# Copyright (c) Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: jenkins_build short_description: Manage jenkins builds version_added: 2.2.0 description: - - Manage Jenkins builds with Jenkins REST API. + - Manage Jenkins builds with Jenkins REST API. requirements: - "python-jenkins >= 0.4.12" author: - Brett Milford (@brettmilford) - Tong He (@unnecessary-username) + - Juan Casanova (@juanmcasanova) +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: none + diff_mode: + support: none options: args: description: @@ -40,7 +46,7 @@ options: state: description: - Attribute that specifies if the build is to be created, deleted or stopped. 
- - The C(stopped) state has been added in community.general 3.3.0. + - The V(stopped) state has been added in community.general 3.3.0. default: present choices: ['present', 'absent', 'stopped'] type: str @@ -55,11 +61,24 @@ options: type: str user: description: - - User to authenticate with the Jenkins server. + - User to authenticate with the Jenkins server. type: str -''' + detach: + description: + - Enable detached mode to not wait for the build end. + default: false + type: bool + version_added: 7.4.0 + time_between_checks: + description: + - Time in seconds to wait between requests to the Jenkins server. + - This times must be higher than the configured quiet time for the job. + default: 10 + type: int + version_added: 7.4.0 +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: Create a jenkins build using basic authentication community.general.jenkins_build: name: "test-check" @@ -78,6 +97,16 @@ EXAMPLES = ''' state: stopped url: http://localhost:8080 +- name: Trigger Jenkins build in detached mode + community.general.jenkins_build: + name: "detached-build" + state: present + user: admin + token: abcdefghijklmnopqrstuvwxyz123456 + url: http://localhost:8080 + detach: true + time_between_checks: 20 + - name: Delete a jenkins build using token authentication community.general.jenkins_build: name: "delete-experiment" @@ -86,10 +115,9 @@ EXAMPLES = ''' user: Jenkins token: abcdefghijklmnopqrstuvwxyz123456 url: http://localhost:8080 -''' +""" -RETURN = ''' ---- +RETURN = r""" name: description: Name of the jenkins job. returned: success @@ -106,7 +134,7 @@ user: type: str sample: admin url: - description: Url to connect to the Jenkins server. + description: URL to connect to the Jenkins server. returned: success type: str sample: https://jenkins.mydomain.com @@ -114,7 +142,7 @@ build_info: description: Build info of the jenkins job. 
returned: success type: dict -''' +""" import traceback from time import sleep @@ -144,6 +172,8 @@ class JenkinsBuild: self.user = module.params.get('user') self.jenkins_url = module.params.get('url') self.build_number = module.params.get('build_number') + self.detach = module.params.get('detach') + self.time_between_checks = module.params.get('time_between_checks') self.server = self.get_jenkins_connection() self.result = { @@ -158,11 +188,11 @@ class JenkinsBuild: def get_jenkins_connection(self): try: - if (self.user and self.password): + if self.user and self.password: return jenkins.Jenkins(self.jenkins_url, self.user, self.password) - elif (self.user and self.token): + elif self.user and self.token: return jenkins.Jenkins(self.jenkins_url, self.user, self.token) - elif (self.user and not (self.password or self.token)): + elif self.user and not (self.password or self.token): return jenkins.Jenkins(self.jenkins_url, self.user) else: return jenkins.Jenkins(self.jenkins_url) @@ -182,7 +212,10 @@ class JenkinsBuild: try: response = self.server.get_build_info(self.name, self.build_number) return response - + except jenkins.JenkinsException as e: + response = {} + response["result"] = "ABSENT" + return response except Exception as e: self.module.fail_json(msg='Unable to fetch build information, %s' % to_native(e), exception=traceback.format_exc()) @@ -224,13 +257,23 @@ class JenkinsBuild: build_status = self.get_build_status() if build_status['result'] is None: - sleep(10) + # If detached mode is active mark as success, we wouldn't be able to get here if it didn't exist + if self.detach: + result['changed'] = True + result['build_info'] = build_status + + return result + + sleep(self.time_between_checks) self.get_result() else: if self.state == "stopped" and build_status['result'] == "ABORTED": result['changed'] = True result['build_info'] = build_status - elif build_status['result'] == "SUCCESS": + elif self.state == "absent" and build_status['result'] == "ABSENT": 
+ result['changed'] = True + result['build_info'] = build_status + elif self.state != "absent" and build_status['result'] == "SUCCESS": result['changed'] = True result['build_info'] = build_status else: @@ -259,6 +302,8 @@ def main(): token=dict(no_log=True), url=dict(default="http://localhost:8080"), user=dict(), + detach=dict(type='bool', default=False), + time_between_checks=dict(type='int', default=10), ), mutually_exclusive=[['password', 'token']], required_if=[['state', 'absent', ['build_number'], True], ['state', 'stopped', ['build_number'], True]], @@ -274,7 +319,7 @@ def main(): else: jenkins_build.absent_build() - sleep(10) + sleep(jenkins_build.time_between_checks) result = jenkins_build.get_result() module.exit_json(**result) diff --git a/plugins/modules/jenkins_build_info.py b/plugins/modules/jenkins_build_info.py new file mode 100644 index 0000000000..1e032af423 --- /dev/null +++ b/plugins/modules/jenkins_build_info.py @@ -0,0 +1,206 @@ +#!/usr/bin/python +# +# Copyright (c) Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + +DOCUMENTATION = r""" +module: jenkins_build_info +short_description: Get information about Jenkins builds +version_added: 7.4.0 +description: + - Get information about Jenkins builds with Jenkins REST API. +requirements: + - "python-jenkins >= 0.4.12" +author: + - Juan Casanova (@juanmcasanova) +extends_documentation_fragment: + - community.general.attributes + - community.general.attributes.info_module +options: + name: + description: + - Name of the Jenkins job to which the build belongs. + required: true + type: str + build_number: + description: + - An integer which specifies a build of a job. + - If not specified the last build information is returned. + type: int + password: + description: + - Password to authenticate with the Jenkins server. 
+ type: str + token: + description: + - API token used to authenticate with the Jenkins server. + type: str + url: + description: + - URL of the Jenkins server. + default: http://localhost:8080 + type: str + user: + description: + - User to authenticate with the Jenkins server. + type: str +""" + +EXAMPLES = r""" +- name: Get information about a jenkins build using basic authentication + community.general.jenkins_build_info: + name: "test-check" + build_number: 1 + user: admin + password: asdfg + url: http://localhost:8080 + +- name: Get information about a jenkins build anonymously + community.general.jenkins_build_info: + name: "stop-check" + build_number: 3 + url: http://localhost:8080 + +- name: Get information about a jenkins build using token authentication + community.general.jenkins_build_info: + name: "delete-experiment" + build_number: 30 + user: Jenkins + token: abcdefghijklmnopqrstuvwxyz123456 + url: http://localhost:8080 +""" + +RETURN = r""" +name: + description: Name of the jenkins job. + returned: success + type: str + sample: "test-job" +state: + description: State of the jenkins job. + returned: success + type: str + sample: present +user: + description: User used for authentication. + returned: success + type: str + sample: admin +url: + description: URL to connect to the Jenkins server. + returned: success + type: str + sample: https://jenkins.mydomain.com +build_info: + description: Build info of the jenkins job. 
+ returned: success + type: dict +""" + +import traceback + +JENKINS_IMP_ERR = None +try: + import jenkins + python_jenkins_installed = True +except ImportError: + JENKINS_IMP_ERR = traceback.format_exc() + python_jenkins_installed = False + +from ansible.module_utils.basic import AnsibleModule, missing_required_lib +from ansible.module_utils.common.text.converters import to_native + + +class JenkinsBuildInfo: + + def __init__(self, module): + self.module = module + + self.name = module.params.get('name') + self.password = module.params.get('password') + self.token = module.params.get('token') + self.user = module.params.get('user') + self.jenkins_url = module.params.get('url') + self.build_number = module.params.get('build_number') + self.server = self.get_jenkins_connection() + + self.result = { + 'changed': False, + 'url': self.jenkins_url, + 'name': self.name, + 'user': self.user, + } + + def get_jenkins_connection(self): + try: + if self.user and self.password: + return jenkins.Jenkins(self.jenkins_url, self.user, self.password) + elif self.user and self.token: + return jenkins.Jenkins(self.jenkins_url, self.user, self.token) + elif self.user and not (self.password or self.token): + return jenkins.Jenkins(self.jenkins_url, self.user) + else: + return jenkins.Jenkins(self.jenkins_url) + except Exception as e: + self.module.fail_json(msg='Unable to connect to Jenkins server, %s' % to_native(e)) + + def get_build_status(self): + try: + if self.build_number is None: + job_info = self.server.get_job_info(self.name) + self.build_number = job_info['lastBuild']['number'] + + return self.server.get_build_info(self.name, self.build_number) + except jenkins.JenkinsException as e: + response = {} + response["result"] = "ABSENT" + return response + except Exception as e: + self.module.fail_json(msg='Unable to fetch build information, %s' % to_native(e), + exception=traceback.format_exc()) + + def get_result(self): + result = self.result + build_status = 
self.get_build_status() + + if build_status['result'] == "ABSENT": + result['failed'] = True + result['build_info'] = build_status + + return result + + +def test_dependencies(module): + if not python_jenkins_installed: + module.fail_json( + msg=missing_required_lib("python-jenkins", + url="https://python-jenkins.readthedocs.io/en/latest/install.html"), + exception=JENKINS_IMP_ERR) + + +def main(): + module = AnsibleModule( + argument_spec=dict( + build_number=dict(type='int'), + name=dict(required=True), + password=dict(no_log=True), + token=dict(no_log=True), + url=dict(default="http://localhost:8080"), + user=dict(), + ), + mutually_exclusive=[['password', 'token']], + supports_check_mode=True, + ) + + test_dependencies(module) + jenkins_build_info = JenkinsBuildInfo(module) + + result = jenkins_build_info.get_result() + module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/jenkins_credential.py b/plugins/modules/jenkins_credential.py new file mode 100644 index 0000000000..b40c3546ea --- /dev/null +++ b/plugins/modules/jenkins_credential.py @@ -0,0 +1,861 @@ +#!/usr/bin/python +# +# Copyright (c) Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + + +DOCUMENTATION = r""" +module: jenkins_credential +short_description: Manage Jenkins credentials and domains through API +version_added: 11.1.0 +description: + - This module allows managing Jenkins credentials and domain scopes through the Jenkins HTTP API. + - Create, update, and delete different credential types such as C(username/password), C(secret text), C(SSH key), C(certificates), + C(GitHub App), and domains. + - For scoped domains (O(type=scope)), it supports restrictions based on V(hostname), V(hostname:port), V(path), and V(scheme). 
+requirements: + - urllib3 >= 1.26.0 +author: + - Youssef Ali (@YoussefKhalidAli) +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: none +options: + id: + description: + - The ID of the Jenkins credential or domain. + type: str + type: + description: + - Type of the credential or action. + choices: + - user_and_pass + - file + - text + - github_app + - ssh_key + - certificate + - scope + - token + type: str + state: + description: + - The state of the credential. + choices: + - present + - absent + default: present + type: str + scope: + description: + - Jenkins credential domain scope. + - Deleting a domain scope deletes all credentials within it. + type: str + default: '_' + force: + description: + - Force update if the credential already exists, used with O(state=present). + - If set to V(true), it deletes the existing credential before creating a new one. + - Always returns RV(ignore:changed=true). + type: bool + default: false + url: + description: + - Jenkins server URL. + type: str + default: http://localhost:8080 + jenkins_user: + description: + - Jenkins user for authentication. + required: true + type: str + jenkins_password: + description: + - Jenkins password for token creation. Required if O(type=token). + type: str + token: + description: + - Jenkins API token. Required unless O(type=token). + type: str + description: + description: + - Description of the credential or domain. + default: '' + type: str + location: + description: + - Location of the credential. Either V(system) or V(folder). + - If O(location=folder) then O(url) must be set to V(/job/). + choices: + - system + - folder + default: 'system' + type: str + name: + description: + - Name of the token to generate. Required if O(type=token). + - When generating a new token, do not pass O(id). It is generated automatically. 
+ - Creating two tokens with the same name generates two distinct tokens with different RV(token_uuid) values. + - Replacing a token with another one of the same name requires deleting the original first using O(force=True). + type: str + username: + description: + - Username for credentials types that require it (for example O(type=ssh_key) or O(type=user_and_pass)). + type: str + password: + description: + - Password for credentials types that require it (for example O(type=user_and_passs) or O(type=certificate)). + type: str + secret: + description: + - Secret text (used when O(type=text)). + type: str + appID: + description: + - GitHub App ID. + type: str + api_uri: + description: + - Link to Github API. + default: 'https://api.github.com' + type: str + owner: + description: + - GitHub App owner. + type: str + file_path: + description: + - File path to secret file (for example O(type=file) or O(type=certificate)). + - For O(type=certificate), this can be a V(.p12) or V(.pem) file. + type: path + private_key_path: + description: + - Path to private key file for PEM certificates or GitHub Apps. + type: path + passphrase: + description: + - SSH passphrase if needed. + type: str + inc_hostname: + description: + - List of hostnames to include in scope. + type: list + elements: str + exc_hostname: + description: + - List of hostnames to exclude from scope. + - If a hostname appears in both this list and O(inc_hostname), the hostname is excluded. + type: list + elements: str + inc_hostname_port: + description: + - List of V(host:port) to include in scope. + type: list + elements: str + exc_hostname_port: + description: + - List of host:port to exclude from scope. + - If a hostname and port appears in both this list and O(inc_hostname_port), it is excluded. + type: list + elements: str + inc_path: + description: + - List of URL paths to include when matching credentials to domains. 
+ - 'B(Matching is hierarchical): subpaths of excluded paths are also excluded, even if explicitly included.' + type: list + elements: str + exc_path: + description: + - List of URL paths to exclude. + - If a path is also matched by O(exc_path), it is excluded. + - If you exclude a subpath of a path previously included, that subpath alone is excluded. + type: list + elements: str + schemes: + description: + - List of schemes (for example V(http) or V(https)) to match. + type: list + elements: str +""" + +EXAMPLES = r""" +- name: Generate token + community.general.jenkins_credential: + id: "test-token" + jenkins_user: "admin" + jenkins_password: "password" + type: "token" + register: token_result + +- name: Add CUSTOM scope credential + community.general.jenkins_credential: + id: "CUSTOM" + type: "scope" + jenkins_user: "admin" + token: "{{ token }}" + description: "Custom scope credential" + inc_path: + - "include/path" + - "include/path2" + exc_path: + - "exclude/path" + - "exclude/path2" + inc_hostname: + - "included-hostname" + - "included-hostname2" + exc_hostname: + - "excluded-hostname" + - "excluded-hostname2" + schemes: + - "http" + - "https" + inc_hostname_port: + - "included-hostname:7000" + - "included-hostname2:7000" + exc_hostname_port: + - "excluded-hostname:7000" + - "excluded-hostname2:7000" + +- name: Add user_and_pass credential + community.general.jenkins_credential: + id: "userpass-id" + type: "user_and_pass" + jenkins_user: "admin" + token: "{{ token }}" + description: "User and password credential" + username: "user1" + password: "pass1" + +- name: Add file credential to custom scope + community.general.jenkins_credential: + id: "file-id" + type: "file" + jenkins_user: "admin" + token: "{{ token }}" + scope: "CUSTOM" + description: "File credential" + file_path: "../vars/my-secret.pem" + +- name: Add text credential to folder + community.general.jenkins_credential: + id: "text-id" + type: "text" + jenkins_user: "admin" + token: "{{ token }}" + 
description: "Text credential" + secret: "mysecrettext" + location: "folder" + url: "http://localhost:8080/job/test" + +- name: Add githubApp credential + community.general.jenkins_credential: + id: "githubapp-id" + type: "github_app" + jenkins_user: "admin" + token: "{{ token }}" + description: "GitHub app credential" + appID: "12345" + file_path: "../vars/github.pem" + owner: "github_owner" + +- name: Add sshKey credential + community.general.jenkins_credential: + id: "sshkey-id" + type: "ssh_key" + jenkins_user: "admin" + token: "{{ token }}" + description: "SSH key credential" + username: "sshuser" + file_path: "../vars/ssh_key" + passphrase: 1234 + +- name: Add certificate credential (p12) + community.general.jenkins_credential: + id: "certificate-id" + type: "certificate" + jenkins_user: "admin" + token: "{{ token }}" + description: "Certificate credential" + password: "12345678901234" + file_path: "../vars/certificate.p12" + +- name: Add certificate credential (pem) + community.general.jenkins_credential: + id: "certificate-id-pem" + type: "certificate" + jenkins_user: "admin" + token: "{{ token }}" + description: "Certificate credential (pem)" + file_path: "../vars/cert.pem" + private_key_path: "../vars/private.key" +""" +RETURN = r""" +details: + description: Return more details in case of errors. + type: str + returned: failed +token: + description: + - The generated API token if O(type=token). + - This is needed to authenticate API calls later. + - This should be stored securely, as it is the only time it is returned. + type: str + returned: success +token_uuid: + description: + - The generated ID of the token. + - You pass this value back to the module as O(id) to edit or revoke the token later. + - This should be stored securely, as it is the only time it is returned. 
+ type: str + returned: success +""" + +from urllib.parse import urlencode +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.urls import fetch_url, basic_auth_header +from ansible_collections.community.general.plugins.module_utils import deps + +import json +import os +import base64 + +with deps.declare("urllib3", reason="urllib3 is required to embed files into requests"): + import urllib3 + + +# Function to validate file paths exist on disk +def validate_file_exist(module, path): + + if path and not os.path.exists(path): + module.fail_json(msg="File not found: {}".format(path)) + + +# Gets the Jenkins crumb for CSRF protection which is required for API calls +def get_jenkins_crumb(module, headers): + type = module.params["type"] + url = module.params["url"] + + if "/job" in url: + url = url.split("/job")[0] + + crumb_url = "{}/crumbIssuer/api/json".format(url) + + response, info = fetch_url(module, crumb_url, headers=headers) + + if info["status"] != 200: + module.fail_json(msg="Failed to fetch Jenkins crumb. 
Confirm token is real.") + + # Cookie is needed to generate API token + cookie = info.get("set-cookie", "") + session_cookie = cookie.split(";")[0] if cookie else None + + try: + data = response.read() + json_data = json.loads(data) + crumb_request_field = json_data["crumbRequestField"] + crumb = json_data["crumb"] + headers[crumb_request_field] = crumb # Set the crumb in headers + headers["Content-Type"] = ( + "application/x-www-form-urlencoded" # Set Content-Type for form data + ) + if type == "token": + headers["Cookie"] = ( + session_cookie # Set session cookie for token operations + ) + return crumb_request_field, crumb, session_cookie # Return for test purposes + + except Exception: + return None + + +# Function to clean the data sent via API by removing unwanted keys and None values +def clean_data(data): + # Keys to remove (including those with None values) + keys_to_remove = { + "url", + "token", + "jenkins_user", + "jenkins_password", + "file_path", + "private_key_path", + "type", + "state", + "force", + "name", + "scope", + "location", + "api_uri", + } + + # Filter out None values and unwanted keys + cleaned_data = { + key: value + for key, value in data.items() + if value is not None and key not in keys_to_remove + } + + return cleaned_data + + +# Function to check if credentials/domain exists +def target_exists(module, check_domain=False): + url = module.params["url"] + location = module.params["location"] + scope = module.params["scope"] + name = module.params["id"] + user = module.params["jenkins_user"] + token = module.params["token"] + + headers = {"Authorization": basic_auth_header(user, token)} + + if module.params["type"] == "scope" or check_domain: + target_url = "{}/credentials/store/{}/domain/{}/api/json".format( + url, location, scope if check_domain else name + ) + elif module.params["type"] == "token": + return False # Can't check token + else: + target_url = "{}/credentials/store/{}/domain/{}/credential/{}/api/json".format( + url, 
location, scope, name + ) + + response, info = fetch_url(module, target_url, headers=headers) + status = info.get("status", 0) + + if status == 200: + return True + elif status == 404: + return False + else: + module.fail_json( + msg="Unexpected status code {} when checking {} existence.".format( + status, name + ) + ) + + +# Function to delete the scope or credential provided +def delete_target(module, headers): + user = module.params["jenkins_user"] + type = module.params["type"] + url = module.params["url"] + location = module.params["location"] + id = module.params["id"] + scope = module.params["scope"] + + body = False + + try: + + if type == "token": + delete_url = "{}/user/{}/descriptorByName/jenkins.security.ApiTokenProperty/revoke".format( + url, user + ) + body = urlencode({"tokenUuid": id}) + + elif type == "scope": + delete_url = "{}/credentials/store/{}/domain/{}/doDelete".format( + url, location, id + ) + + else: + delete_url = ( + "{}/credentials/store/{}/domain/{}/credential/{}/doDelete".format( + url, location, scope, id + ) + ) + + response, info = fetch_url( + module, + delete_url, + headers=headers, + data=body if body else None, + method="POST", + ) + + status = info.get("status", 0) + if not status == 200: + module.fail_json( + msg="Failed to delete: HTTP {}, {}, {}".format( + status, response, headers + ) + ) + + except Exception as e: + module.fail_json(msg="Exception during delete: {}".format(str(e))) + + +# Function to read the private key for types texts and ssh_key +def read_privateKey(module): + try: + with open(module.params["private_key_path"], "r") as f: + private_key = f.read().strip() + return private_key + except Exception as e: + module.fail_json(msg="Failed to read private key file: {}".format(str(e))) + + +# Function to builds multipart form-data body and content-type header for file credential upload. 
+# Returns: +# body (bytes): Encoded multipart data +# content_type (str): Content-Type header including boundary +def embed_file_into_body(module, file_path, credentials): + + filename = os.path.basename(file_path) + + try: + with open(file_path, "rb") as f: + file_bytes = f.read() + except Exception as e: + module.fail_json(msg="Failed to read file: {}".format(str(e))) + return "", "" # Return for test purposes + + credentials.update( + { + "file": "file0", + "fileName": filename, + } + ) + + payload = {"credentials": credentials} + + fields = {"file0": (filename, file_bytes), "json": json.dumps(payload)} + + body, content_type = urllib3.encode_multipart_formdata(fields) + return body, content_type + + +# Main function to run the Ansible module +def run_module(): + + module = AnsibleModule( + argument_spec=dict( + id=dict(type="str"), + type=dict( + type="str", + choices=[ + "user_and_pass", + "file", + "text", + "github_app", + "ssh_key", + "certificate", + "scope", + "token", + ], + ), + state=dict(type="str", default="present", choices=["present", "absent"]), + force=dict(type="bool", default=False), + scope=dict(type="str", default="_"), + url=dict(type="str", default="http://localhost:8080"), + jenkins_user=dict(type="str", required=True), + jenkins_password=dict(type="str", no_log=True), + token=dict(type="str", no_log=True), + description=dict(type="str", default=""), + location=dict(type="str", default="system", choices=["system", "folder"]), + name=dict(type="str"), + username=dict(type="str"), + password=dict(type="str", no_log=True), + file_path=dict(type="path"), + secret=dict(type="str", no_log=True), + appID=dict(type="str"), + api_uri=dict(type="str", default="https://api.github.com"), + owner=dict(type="str"), + passphrase=dict(type="str", no_log=True), + private_key_path=dict(type="path", no_log=True), + # Scope specifications parameters + inc_hostname=dict(type="list", elements="str"), + exc_hostname=dict(type="list", elements="str"), + 
inc_hostname_port=dict(type="list", elements="str"), + exc_hostname_port=dict(type="list", elements="str"), + inc_path=dict(type="list", elements="str"), + exc_path=dict(type="list", elements="str"), + schemes=dict(type="list", elements="str"), + ), + supports_check_mode=True, + required_if=[ + ("state", "present", ["type"]), + ("state", "absent", ["id"]), + ("type", "token", ["name", "jenkins_password"]), + ("type", "user_and_pass", ["username", "password", "id", "token"]), + ("type", "file", ["file_path", "id", "token"]), + ("type", "text", ["secret", "id", "token"]), + ("type", "github_app", ["appID", "private_key_path", "id", "token"]), + ("type", "ssh_key", ["username", "private_key_path", "id", "token"]), + ("type", "certificate", ["file_path", "id", "token"]), + ("type", "scope", ["id", "token"]), + ], + ) + + # Parameters + id = module.params["id"] + type = module.params["type"] + state = module.params["state"] + force = module.params["force"] + scope = module.params["scope"] + url = module.params["url"] + jenkins_user = module.params["jenkins_user"] + jenkins_password = module.params["jenkins_password"] + name = module.params["name"] + token = module.params["token"] + description = module.params["description"] + location = module.params["location"] + filePath = module.params["file_path"] + private_key_path = module.params["private_key_path"] + api_uri = module.params["api_uri"] + inc_hostname = module.params["inc_hostname"] + exc_hostname = module.params["exc_hostname"] + inc_hostname_port = module.params["inc_hostname_port"] + exc_hostname_port = module.params["exc_hostname_port"] + inc_path = module.params["inc_path"] + exc_path = module.params["exc_path"] + schemes = module.params["schemes"] + + deps.validate(module) + + headers = { + "Authorization": basic_auth_header(jenkins_user, token or jenkins_password), + } + + # Get the crumb for CSRF protection + get_jenkins_crumb(module, headers) + + result = dict( + changed=False, + msg="", + ) + + 
credentials = clean_data(module.params) + + does_exist = target_exists(module) + + # Check if the credential/domain doesn't exist and the user wants to delete + if not does_exist and state == "absent" and not type == "token": + result["changed"] = False + result["msg"] = "{} does not exist.".format(id) + module.exit_json(**result) + + if state == "present": + + # If updating, we need to delete the existing credential/domain first based on force parameter + if force and (does_exist or type == "token"): + delete_target(module, headers) + elif does_exist and not force: + result["changed"] = False + result["msg"] = "{} already exists. Use force=True to update.".format(id) + module.exit_json(**result) + + if type == "token": + + post_url = "{}/user/{}/descriptorByName/jenkins.security.ApiTokenProperty/generateNewToken".format( + url, jenkins_user + ) + + body = "newTokenName={}".format(name) + + elif type == "scope": + + post_url = "{}/credentials/store/{}/createDomain".format(url, location) + + specifications = [] + + # Create a domain in Jenkins + if inc_hostname or exc_hostname: + specifications.append( + { + "stapler-class": "com.cloudbees.plugins.credentials.domains.HostnameSpecification", + "includes": ",".join(inc_hostname), + "excludes": ",".join(exc_hostname), + } + ) + + if inc_hostname_port or exc_hostname_port: + specifications.append( + { + "stapler-class": "com.cloudbees.plugins.credentials.domains.HostnamePortSpecification", + "includes": ",".join(inc_hostname_port), + "excludes": ",".join(exc_hostname_port), + } + ) + + if schemes: + specifications.append( + { + "stapler-class": "com.cloudbees.plugins.credentials.domains.SchemeSpecification", + "schemes": ",".join(schemes), + }, + ) + + if inc_path or exc_path: + specifications.append( + { + "stapler-class": "com.cloudbees.plugins.credentials.domains.PathSpecification", + "includes": ",".join(inc_path), + "excludes": ",".join(exc_path), + } + ) + + payload = { + "name": id, + "description": description, 
+ "specifications": specifications, + } + + else: + if filePath: + validate_file_exist(module, filePath) + elif private_key_path: + validate_file_exist(module, private_key_path) + + post_url = "{}/credentials/store/{}/domain/{}/createCredentials".format( + url, location, scope + ) + + cred_class = { + "user_and_pass": "com.cloudbees.plugins.credentials.impl.UsernamePasswordCredentialsImpl", + "file": "org.jenkinsci.plugins.plaincredentials.impl.FileCredentialsImpl", + "text": "org.jenkinsci.plugins.plaincredentials.impl.StringCredentialsImpl", + "github_app": "org.jenkinsci.plugins.github_branch_source.GitHubAppCredentials", + "ssh_key": "com.cloudbees.jenkins.plugins.sshcredentials.impl.BasicSSHUserPrivateKey", + "certificate": "com.cloudbees.plugins.credentials.impl.CertificateCredentialsImpl", + } + credentials.update({"$class": cred_class[type]}) + + if type == "file": + + # Build multipart body and content-type + body, content_type = embed_file_into_body(module, filePath, credentials) + headers["Content-Type"] = content_type + + elif type == "github_app": + + private_key = read_privateKey(module) + + credentials.update( + { + "privateKey": private_key, + "apiUri": api_uri, + } + ) + + elif type == "ssh_key": + + private_key = read_privateKey(module) + + credentials.update( + { + "privateKeySource": { + "stapler-class": "com.cloudbees.jenkins.plugins.sshcredentials.impl.BasicSSHUserPrivateKey$DirectEntryPrivateKeySource", + "privateKey": private_key, + }, + } + ) + + elif type == "certificate": + + name, ext = os.path.splitext(filePath) + + if ext.lower() in [".p12", ".pfx"]: + try: + with open(filePath, "rb") as f: + file_content = f.read() + uploaded_keystore = base64.b64encode(file_content).decode( + "utf-8" + ) + except Exception as e: + module.fail_json( + msg="Failed to read or encode keystore file: {}".format( + str(e) + ) + ) + + credentials.update( + { + "keyStoreSource": { + "$class": 
"com.cloudbees.plugins.credentials.impl.CertificateCredentialsImpl$UploadedKeyStoreSource", + "uploadedKeystore": uploaded_keystore, + }, + } + ) + + elif ext.lower() in [".pem", ".crt"]: # PEM mode + try: + with open(filePath, "r") as f: + cert_chain = f.read() + with open(private_key_path, "r") as f: + private_key = f.read() + except Exception as e: + module.fail_json( + msg="Failed to read PEM files: {}".format(str(e)) + ) + + credentials.update( + { + "keyStoreSource": { + "$class": "com.cloudbees.plugins.credentials.impl.CertificateCredentialsImpl$PEMEntryKeyStoreSource", + "certChain": cert_chain, + "privateKey": private_key, + }, + } + ) + + else: + module.fail_json( + msg="Unsupported certificate file type. Only .p12, .pfx, .pem or .crt are supported." + ) + + payload = {"credentials": credentials} + + if not type == "file" and not type == "token": + body = urlencode({"json": json.dumps(payload)}) + + else: # Delete + + delete_target(module, headers) + + module.exit_json(changed=True, msg="{} deleted successfully.".format(id)) + + if ( + not type == "scope" and not scope == "_" + ): # Check if custom scope exists if adding to a custom scope + if not target_exists(module, True): + module.fail_json(msg="Domain {} doesn't exists".format(scope)) + + try: + response, info = fetch_url( + module, post_url, headers=headers, data=body, method="POST" + ) + except Exception as e: + module.fail_json(msg="Request to {} failed: {}".format(post_url, str(e))) + + status = info.get("status", 0) + + if not status == 200: + body = response.read() if response else b"" + module.fail_json( + msg="Failed to {} credential".format( + "add/update" if state == "present" else "delete" + ), + details=body.decode("utf-8", errors="ignore"), + ) + + if type == "token": + response_data = json.loads(response.read()) + result["token"] = response_data["data"]["tokenValue"] + result["token_uuid"] = response_data["data"]["tokenUuid"] + + result["changed"] = True + result["msg"] = 
response.read().decode("utf-8") + + module.exit_json(**result) + + +if __name__ == "__main__": + run_module() diff --git a/plugins/modules/web_infrastructure/jenkins_job.py b/plugins/modules/jenkins_job.py similarity index 87% rename from plugins/modules/web_infrastructure/jenkins_job.py rename to plugins/modules/jenkins_job.py index 88a8766133..ec8941ea93 100644 --- a/plugins/modules/web_infrastructure/jenkins_job.py +++ b/plugins/modules/jenkins_job.py @@ -1,35 +1,40 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- # -# Copyright: (c) Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# Copyright (c) Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: jenkins_job short_description: Manage jenkins jobs description: - - Manage Jenkins jobs by using Jenkins REST API. + - Manage Jenkins jobs by using Jenkins REST API. requirements: - "python-jenkins >= 0.4.12" author: "Sergio Millan Rodriguez (@sermilrod)" +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: full options: config: type: str description: - - config in XML format. + - Config in XML format. - Required if job does not yet exist. - - Mutually exclusive with C(enabled). - - Considered if C(state=present). + - Mutually exclusive with O(enabled). + - Considered if O(state=present). required: false enabled: description: - Whether the job should be enabled or disabled. - - Mutually exclusive with C(config). - - Considered if C(state=present). + - Mutually exclusive with O(config). + - Considered if O(state=present). 
type: bool required: false name: @@ -63,20 +68,19 @@ options: user: type: str description: - - User to authenticate with the Jenkins server. + - User to authenticate with the Jenkins server. required: false validate_certs: type: bool - default: yes + default: true description: - - If set to C(no), the SSL certificates will not be validated. - This should only set to C(no) used on personally controlled sites - using self-signed certificates as it avoids verifying the source site. - - The C(python-jenkins) library only handles this by using the environment variable C(PYTHONHTTPSVERIFY). + - If set to V(false), the SSL certificates are not validated. This should only set to V(false) used on personally controlled + sites using self-signed certificates as it avoids verifying the source site. + - The C(python-jenkins) library only handles this by using the environment variable E(PYTHONHTTPSVERIFY). version_added: 2.3.0 -''' +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: Create a jenkins job using basic authentication community.general.jenkins_job: config: "{{ lookup('file', 'templates/test.xml') }}" @@ -113,7 +117,7 @@ EXAMPLES = ''' community.general.jenkins_job: name: test password: admin - enabled: False + enabled: false url: http://localhost:8080 user: admin @@ -121,13 +125,12 @@ EXAMPLES = ''' community.general.jenkins_job: name: test token: asdfasfasfasdfasdfadfasfasdfasdfc - enabled: False + enabled: false url: http://localhost:8080 user: admin -''' +""" -RETURN = ''' ---- +RETURN = r""" name: description: Name of the jenkins job. returned: success @@ -149,11 +152,11 @@ user: type: str sample: admin url: - description: Url to connect to the Jenkins server. + description: URL to connect to the Jenkins server. 
returned: success type: str sample: https://jenkins.mydomain.com -''' +""" import os import traceback @@ -345,14 +348,14 @@ def job_config_to_string(xml_str): def main(): module = AnsibleModule( argument_spec=dict( - config=dict(type='str', required=False), + config=dict(type='str'), name=dict(type='str', required=True), - password=dict(type='str', required=False, no_log=True), - state=dict(type='str', required=False, choices=['present', 'absent'], default="present"), - enabled=dict(required=False, type='bool'), - token=dict(type='str', required=False, no_log=True), - url=dict(type='str', required=False, default="http://localhost:8080"), - user=dict(type='str', required=False), + password=dict(type='str', no_log=True), + state=dict(type='str', choices=['present', 'absent'], default="present"), + enabled=dict(type='bool'), + token=dict(type='str', no_log=True), + url=dict(type='str', default="http://localhost:8080"), + user=dict(type='str'), validate_certs=dict(type='bool', default=True), ), mutually_exclusive=[ diff --git a/plugins/modules/web_infrastructure/jenkins_job_info.py b/plugins/modules/jenkins_job_info.py similarity index 84% rename from plugins/modules/web_infrastructure/jenkins_job_info.py rename to plugins/modules/jenkins_job_info.py index 503fbbf159..7c3feafee2 100644 --- a/plugins/modules/web_infrastructure/jenkins_job_info.py +++ b/plugins/modules/jenkins_job_info.py @@ -1,22 +1,22 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- # -# Copyright: (c) Ansible Project +# Copyright (c) Ansible Project # -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: jenkins_job_info 
short_description: Get information about Jenkins jobs description: - This module can be used to query information about which Jenkins jobs which already exists. - - This module was called C(jenkins_job_info) before Ansible 2.9. The usage did not change. requirements: - "python-jenkins >= 0.4.12" +extends_documentation_fragment: + - community.general.attributes + - community.general.attributes.info_module options: name: type: str @@ -34,12 +34,12 @@ options: type: str description: - Password to authenticate with the Jenkins server. - - This is mutually exclusive with I(token). + - This is mutually exclusive with O(token). token: type: str description: - API token used to authenticate with the Jenkins server. - - This is mutually exclusive with I(password). + - This is mutually exclusive with O(password). url: type: str description: @@ -48,18 +48,18 @@ options: user: type: str description: - - User to authenticate with the Jenkins server. + - User to authenticate with the Jenkins server. validate_certs: description: - - If set to C(False), the SSL certificates will not be validated. - - This should only set to C(False) used on personally controlled sites using self-signed certificates. + - If set to V(false), the SSL certificates are not validated. + - This should only set to V(false) used on personally controlled sites using self-signed certificates. default: true type: bool author: - "Chris St. Pierre (@stpierre)" -''' +""" -EXAMPLES = ''' +EXAMPLES = r""" # Get all Jenkins jobs anonymously - community.general.jenkins_job_info: user: admin @@ -118,26 +118,24 @@ EXAMPLES = ''' user: admin token: 126df5c60d66c66e3b75b11104a16a8a url: https://jenkins.example.com - validate_certs: False register: my_jenkins_job_info -''' +""" -RETURN = ''' ---- +RETURN = r""" jobs: - description: All jobs found matching the specified criteria + description: All jobs found matching the specified criteria. 
returned: success type: list sample: [ - { - "name": "test-job", - "fullname": "test-folder/test-job", - "url": "http://localhost:8080/job/test-job/", - "color": "blue" - }, + { + "name": "test-job", + "fullname": "test-folder/test-job", + "url": "http://localhost:8080/job/test-job/", + "color": "blue" + } ] -''' +""" import ssl import fnmatch @@ -210,8 +208,8 @@ def get_jobs(module): jobs = all_jobs # python-jenkins includes the internal Jenkins class used for each job # in its return value; we strip that out because the leading underscore - # (and the fact that it's not documented in the python-jenkins docs) - # indicates that it's not part of the dependable public interface. + # (and the fact that it is not documented in the python-jenkins docs) + # indicates that it is not part of the dependable public interface. for job in jobs: if "_class" in job: del job["_class"] diff --git a/plugins/modules/jenkins_node.py b/plugins/modules/jenkins_node.py new file mode 100644 index 0000000000..2ebcdf967d --- /dev/null +++ b/plugins/modules/jenkins_node.py @@ -0,0 +1,484 @@ +#!/usr/bin/python +# +# Copyright (c) Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + +DOCUMENTATION = r""" +module: jenkins_node +short_description: Manage Jenkins nodes +version_added: 10.0.0 +description: + - Manage Jenkins nodes with Jenkins REST API. +requirements: + - "python-jenkins >= 0.4.12" +author: + - Connor Newton (@phyrwork) +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: partial + details: + - Check mode is unable to show configuration changes for a node that is not yet present. + diff_mode: + support: none +options: + url: + description: + - URL of the Jenkins server. 
+ default: http://localhost:8080 + type: str + name: + description: + - Name of the Jenkins node to manage. + required: true + type: str + user: + description: + - User to authenticate with the Jenkins server. + type: str + token: + description: + - API token to authenticate with the Jenkins server. + type: str + state: + description: + - Specifies whether the Jenkins node should be V(present) (created), V(absent) (deleted), V(enabled) (online) or V(disabled) + (offline). + default: present + choices: ['enabled', 'disabled', 'present', 'absent'] + type: str + num_executors: + description: + - When specified, sets the Jenkins node executor count. + type: int + labels: + description: + - When specified, sets the Jenkins node labels. + type: list + elements: str + offline_message: + description: + - Specifies the offline reason message to be set when configuring the Jenkins node state. + - If O(offline_message) is given and requested O(state) is not V(disabled), an error is raised. + - Internally O(offline_message) is set using the V(toggleOffline) API, so updating the message when the node is already + offline (current state V(disabled)) is not possible. In this case, a warning is issued. + type: str + version_added: 10.0.0 +""" + +EXAMPLES = r""" +- name: Create a Jenkins node using token authentication + community.general.jenkins_node: + url: http://localhost:8080 + user: jenkins + token: 11eb751baabb66c4d1cb8dc4e0fb142cde + name: my-node + state: present + +- name: Set number of executors on Jenkins node + community.general.jenkins_node: + name: my-node + state: present + num_executors: 4 + +- name: Set labels on Jenkins node + community.general.jenkins_node: + name: my-node + state: present + labels: + - label-1 + - label-2 + - label-3 + +- name: Set Jenkins node offline with offline message. + community.general.jenkins_node: + name: my-node + state: disabled + offline_message: >- + This node is offline for some reason. 
+""" + +RETURN = r""" +url: + description: URL used to connect to the Jenkins server. + returned: success + type: str + sample: https://jenkins.mydomain.com +user: + description: User used for authentication. + returned: success + type: str + sample: jenkins +name: + description: Name of the Jenkins node. + returned: success + type: str + sample: my-node +state: + description: State of the Jenkins node. + returned: success + type: str + sample: present +created: + description: Whether or not the Jenkins node was created by the task. + returned: success + type: bool +deleted: + description: Whether or not the Jenkins node was deleted by the task. + returned: success + type: bool +disabled: + description: Whether or not the Jenkins node was disabled by the task. + returned: success + type: bool +enabled: + description: Whether or not the Jenkins node was enabled by the task. + returned: success + type: bool +configured: + description: Whether or not the Jenkins node was configured by the task. 
+ returned: success + type: bool +""" + +import sys +import traceback +from xml.etree import ElementTree as et + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.common.text.converters import to_native +from ansible_collections.community.general.plugins.module_utils import deps + +with deps.declare( + "python-jenkins", + reason="python-jenkins is required to interact with Jenkins", + url="https://opendev.org/jjb/python-jenkins", +): + import jenkins + + +IS_PYTHON_2 = sys.version_info[0] <= 2 + + +class JenkinsNode: + def __init__(self, module): + self.module = module + + self.name = module.params['name'] + self.state = module.params['state'] + self.token = module.params['token'] + self.user = module.params['user'] + self.url = module.params['url'] + self.num_executors = module.params['num_executors'] + self.labels = module.params['labels'] + self.offline_message = module.params['offline_message'] # type: str | None + + if self.offline_message is not None: + self.offline_message = self.offline_message.strip() + + if self.state != "disabled": + self.module.fail_json("can not set offline message when state is not disabled") + + if self.labels is not None: + for label in self.labels: + if " " in label: + self.module.fail_json("labels must not contain spaces: got invalid label {}".format(label)) + + self.instance = self.get_jenkins_instance() + self.result = { + 'changed': False, + 'url': self.url, + 'user': self.user, + 'name': self.name, + 'state': self.state, + 'created': False, + 'deleted': False, + 'disabled': False, + 'enabled': False, + 'configured': False, + 'warnings': [], + } + + def get_jenkins_instance(self): + try: + if self.user and self.token: + return jenkins.Jenkins(self.url, self.user, self.token) + elif self.user and not self.token: + return jenkins.Jenkins(self.url, self.user) + else: + return jenkins.Jenkins(self.url) + except Exception as e: + self.module.fail_json(msg='Unable to connect to Jenkins server, %s' % 
to_native(e)) + + def configure_node(self, present): + if not present: + # Node would only not be present if in check mode and if not present there + # is no way to know what would and would not be changed. + if not self.module.check_mode: + raise Exception("configure_node present is False outside of check mode") + return + + configured = False + + data = self.instance.get_node_config(self.name) + root = et.fromstring(data) + + if self.num_executors is not None: + elem = root.find('numExecutors') + if elem is None: + elem = et.SubElement(root, 'numExecutors') + if elem.text is None or int(elem.text) != self.num_executors: + elem.text = str(self.num_executors) + configured = True + + if self.labels is not None: + elem = root.find('label') + if elem is None: + elem = et.SubElement(root, 'label') + labels = [] + if elem.text: + labels = elem.text.split() + if labels != self.labels: + elem.text = " ".join(self.labels) + configured = True + + if configured: + if IS_PYTHON_2: + data = et.tostring(root) + else: + data = et.tostring(root, encoding="unicode") + + self.instance.reconfig_node(self.name, data) + + self.result['configured'] = configured + if configured: + self.result['changed'] = True + + def present_node(self, configure=True): # type: (bool) -> bool + """Assert node present. + + Args: + configure: If True, run node configuration after asserting node present. + + Returns: + True if the node is present, False otherwise (i.e. is check mode). + """ + def create_node(): + try: + self.instance.create_node(self.name, launcher=jenkins.LAUNCHER_SSH) + except jenkins.JenkinsException as e: + # Some versions of python-jenkins < 1.8.3 has an authorization bug when + # handling redirects returned when posting to resources. If the node is + # created OK then can ignore the error. + if not self.instance.node_exists(self.name): + self.module.fail_json(msg="Create node failed: %s" % to_native(e), exception=traceback.format_exc()) + + # TODO: Remove authorization workaround. 
+ self.result['warnings'].append( + "suppressed 401 Not Authorized on redirect after node created: see https://review.opendev.org/c/jjb/python-jenkins/+/931707" + ) + + present = self.instance.node_exists(self.name) + created = False + if not present: + if not self.module.check_mode: + create_node() + present = True + + created = True + + if configure: + self.configure_node(present) + + self.result['created'] = created + if created: + self.result['changed'] = True + + return present # Used to gate downstream queries when in check mode. + + def absent_node(self): + def delete_node(): + try: + self.instance.delete_node(self.name) + except jenkins.JenkinsException as e: + # Some versions of python-jenkins < 1.8.3 has an authorization bug when + # handling redirects returned when posting to resources. If the node is + # deleted OK then can ignore the error. + if self.instance.node_exists(self.name): + self.module.fail_json(msg="Delete node failed: %s" % to_native(e), exception=traceback.format_exc()) + + # TODO: Remove authorization workaround. + self.result['warnings'].append( + "suppressed 401 Not Authorized on redirect after node deleted: see https://review.opendev.org/c/jjb/python-jenkins/+/931707" + ) + + present = self.instance.node_exists(self.name) + deleted = False + if present: + if not self.module.check_mode: + delete_node() + + deleted = True + + self.result['deleted'] = deleted + if deleted: + self.result['changed'] = True + + def enabled_node(self): + def get_offline(): # type: () -> bool + return self.instance.get_node_info(self.name)["offline"] + + present = self.present_node() + + enabled = False + + if present: + def enable_node(): + try: + self.instance.enable_node(self.name) + except jenkins.JenkinsException as e: + # Some versions of python-jenkins < 1.8.3 has an authorization bug when + # handling redirects returned when posting to resources. If the node is + # disabled OK then can ignore the error. 
+ offline = get_offline() + + if offline: + self.module.fail_json(msg="Enable node failed: %s" % to_native(e), exception=traceback.format_exc()) + + # TODO: Remove authorization workaround. + self.result['warnings'].append( + "suppressed 401 Not Authorized on redirect after node enabled: see https://review.opendev.org/c/jjb/python-jenkins/+/931707" + ) + + offline = get_offline() + + if offline: + if not self.module.check_mode: + enable_node() + + enabled = True + else: + # Would have created node with initial state enabled therefore would not have + # needed to enable therefore not enabled. + if not self.module.check_mode: + raise Exception("enabled_node present is False outside of check mode") + enabled = False + + self.result['enabled'] = enabled + if enabled: + self.result['changed'] = True + + def disabled_node(self): + def get_offline_info(): + info = self.instance.get_node_info(self.name) + + offline = info["offline"] + offline_message = info["offlineCauseReason"] + + return offline, offline_message + + # Don't configure until after disabled, in case the change in configuration + # causes the node to pick up a job. + present = self.present_node(False) + + disabled = False + changed = False + + if present: + offline, offline_message = get_offline_info() + + if self.offline_message is not None and self.offline_message != offline_message: + if offline: + # n.b. Internally disable_node uses toggleOffline gated by a not + # offline condition. This means that disable_node can not be used to + # update an offline message if the node is already offline. + # + # Toggling the node online to set the message when toggling offline + # again is not an option as during this transient online time jobs + # may be scheduled on the node which is not acceptable. 
+ self.result["warnings"].append( + "unable to change offline message when already offline" + ) + else: + offline_message = self.offline_message + changed = True + + def disable_node(): + try: + self.instance.disable_node(self.name, offline_message) + except jenkins.JenkinsException as e: + # Some versions of python-jenkins < 1.8.3 has an authorization bug when + # handling redirects returned when posting to resources. If the node is + # disabled OK then can ignore the error. + offline, _offline_message = get_offline_info() + + if not offline: + self.module.fail_json(msg="Disable node failed: %s" % to_native(e), exception=traceback.format_exc()) + + # TODO: Remove authorization workaround. + self.result['warnings'].append( + "suppressed 401 Not Authorized on redirect after node disabled: see https://review.opendev.org/c/jjb/python-jenkins/+/931707" + ) + + if not offline: + if not self.module.check_mode: + disable_node() + + disabled = True + + else: + # Would have created node with initial state enabled therefore would have + # needed to disable therefore disabled. 
+ if not self.module.check_mode: + raise Exception("disabled_node present is False outside of check mode") + disabled = True + + if disabled: + changed = True + + self.result['disabled'] = disabled + + if changed: + self.result['changed'] = True + + self.configure_node(present) + + +def main(): + module = AnsibleModule( + argument_spec=dict( + name=dict(required=True, type='str'), + url=dict(default='http://localhost:8080'), + user=dict(), + token=dict(no_log=True), + state=dict(choices=['enabled', 'disabled', 'present', 'absent'], default='present'), + num_executors=dict(type='int'), + labels=dict(type='list', elements='str'), + offline_message=dict(type='str'), + ), + supports_check_mode=True, + ) + + deps.validate(module) + + jenkins_node = JenkinsNode(module) + + state = module.params.get('state') + if state == 'enabled': + jenkins_node.enabled_node() + elif state == 'disabled': + jenkins_node.disabled_node() + elif state == 'present': + jenkins_node.present_node() + else: + jenkins_node.absent_node() + + module.exit_json(**jenkins_node.result) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/web_infrastructure/jenkins_plugin.py b/plugins/modules/jenkins_plugin.py similarity index 64% rename from plugins/modules/web_infrastructure/jenkins_plugin.py rename to plugins/modules/jenkins_plugin.py index 6adb348156..9f38668037 100644 --- a/plugins/modules/web_infrastructure/jenkins_plugin.py +++ b/plugins/modules/jenkins_plugin.py @@ -1,26 +1,29 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- -# (c) 2016, Jiri Tyr -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# Copyright (c) 2016, Jiri Tyr +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = ''' ---- 
+DOCUMENTATION = r""" module: jenkins_plugin author: Jiri Tyr (@jtyr) short_description: Add or remove Jenkins plugin description: - Ansible module which helps to manage Jenkins plugins. +attributes: + check_mode: + support: full + diff_mode: + support: none options: group: type: str description: - - Name of the Jenkins group on the OS. + - GID or name of the Jenkins group on the OS. default: jenkins jenkins_home: type: path @@ -36,18 +39,17 @@ options: type: str description: - Plugin name. - required: yes + required: true owner: type: str description: - - Name of the Jenkins user on the OS. + - UID or name of the Jenkins user on the OS. default: jenkins state: type: str description: - Desired plugin state. - - If the C(latest) is set, the check for new version will be performed - every time. This is suitable to keep the plugin up-to-date. + - If set to V(latest), the check for new version is performed every time. This is suitable to keep the plugin up-to-date. choices: [absent, present, pinned, unpinned, enabled, disabled, latest] default: present timeout: @@ -58,39 +60,56 @@ options: updates_expiration: type: int description: - - Number of seconds after which a new copy of the I(update-center.json) - file is downloaded. This is used to avoid the need to download the - plugin to calculate its checksum when C(latest) is specified. - - Set it to C(0) if no cache file should be used. In that case, the - plugin file will always be downloaded to calculate its checksum when - C(latest) is specified. + - Number of seconds after which a new copy of the C(update-center.json) file is downloaded. This is used to avoid the + need to download the plugin to calculate its checksum when O(state=latest) is specified. + - Set it to V(0) if no cache file should be used. In that case, the plugin file is always downloaded to calculate its + checksum when O(state=latest) is specified. 
default: 86400 updates_url: type: list elements: str description: - - A list of base URL(s) to retrieve I(update-center.json), and direct plugin files from. + - A list of base URL(s) to retrieve C(update-center.json), and direct plugin files from. - This can be a list since community.general 3.3.0. default: ['https://updates.jenkins.io', 'http://mirrors.jenkins.io'] + updates_url_username: + description: + - If using a custom O(updates_url), set this as the username of the user with access to the URL. + - If the custom O(updates_url) does not require authentication, this can be left empty. + type: str + version_added: 11.2.0 + updates_url_password: + description: + - If using a custom O(updates_url), set this as the password of the user with access to the URL. + - If the custom O(updates_url) does not require authentication, this can be left empty. + type: str + version_added: 11.2.0 update_json_url_segment: type: list elements: str description: - - A list of URL segment(s) to retrieve the update center json file from. + - A list of URL segment(s) to retrieve the update center JSON file from. default: ['update-center.json', 'updates/update-center.json'] version_added: 3.3.0 + plugin_versions_url_segment: + type: list + elements: str + description: + - A list of URL segment(s) to retrieve the plugin versions JSON file from. + default: ['plugin-versions.json', 'current/plugin-versions.json'] + version_added: 11.2.0 latest_plugins_url_segments: type: list elements: str description: - - Path inside the I(updates_url) to get latest plugins from. + - Path inside the O(updates_url) to get latest plugins from. default: ['latest'] version_added: 3.3.0 versioned_plugins_url_segments: type: list elements: str description: - - Path inside the I(updates_url) to get specific version of plugins from. + - Path inside the O(updates_url) to get specific version of plugins from. 
default: ['download/plugins', 'plugins'] version_added: 3.3.0 url: @@ -102,37 +121,37 @@ options: type: str description: - Plugin version number. - - If this option is specified, all plugin dependencies must be installed - manually. - - It might take longer to verify that the correct version is installed. - This is especially true if a specific version number is specified. - - Quote the version to prevent the value to be interpreted as float. For - example if C(1.20) would be unquoted, it would become C(1.2). + - If this option is specified, all plugin dependencies must be installed manually. + - It might take longer to verify that the correct version is installed. This is especially true if a specific version + number is specified. + - Quote the version to prevent the value to be interpreted as float. For example if V(1.20) would be unquoted, it would + become V(1.2). with_dependencies: description: - Defines whether to install plugin dependencies. - - This option takes effect only if the I(version) is not defined. + - In earlier versions, this option had no effect when a specific O(version) was set. + Since community.general 11.2.0, dependencies are also installed for versioned plugins. type: bool - default: yes + default: true notes: - - Plugin installation should be run under root or the same user which owns - the plugin files on the disk. Only if the plugin is not installed yet and - no version is specified, the API installation is performed which requires - only the Web UI credentials. - - It's necessary to notify the handler or call the I(service) module to - restart the Jenkins service after a new plugin was installed. - - Pinning works only if the plugin is installed and Jenkins service was - successfully restarted after the plugin installation. - - It is not possible to run the module remotely by changing the I(url) - parameter to point to the Jenkins server. 
The module must be used on the - host where Jenkins runs as it needs direct access to the plugin files. + - Plugin installation should be run under root or the same user which owns the plugin files on the disk. Only if the plugin + is not installed yet and no version is specified, the API installation is performed which requires only the Web UI credentials. + - It is necessary to notify the handler or call the M(ansible.builtin.service) module to restart the Jenkins service after + a new plugin was installed. + - Pinning works only if the plugin is installed and Jenkins service was successfully restarted after the plugin installation. + - It is not possible to run the module remotely by changing the O(url) parameter to point to the Jenkins server. The module + must be used on the host where Jenkins runs as it needs direct access to the plugin files. + - If using a custom O(updates_url), ensure that the URL provides a C(plugin-versions.json) file. + This file must include metadata for all available plugin versions to support version compatibility resolution. + The file should be in the same format as the one provided by Jenkins update center (https://updates.jenkins.io/current/plugin-versions.json). 
extends_documentation_fragment: - - url - - files -''' + - ansible.builtin.url + - ansible.builtin.files + - community.general.attributes +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: Install plugin community.general.jenkins_plugin: name: build-pipeline-plugin @@ -140,7 +159,7 @@ EXAMPLES = ''' - name: Install plugin without its dependencies community.general.jenkins_plugin: name: build-pipeline-plugin - with_dependencies: no + with_dependencies: false - name: Make sure the plugin is always up-to-date community.general.jenkins_plugin: @@ -187,6 +206,29 @@ EXAMPLES = ''' url_password: p4ssw0rd url: http://localhost:8888 +# +# Example of how to authenticate with serverless deployment +# +- name: Update plugins on ECS Fargate Jenkins instance + community.general.jenkins_plugin: + # plugin name and version + name: ws-cleanup + version: '0.45' + # Jenkins home path mounted on ec2-helper VM (example) + jenkins_home: "/mnt/{{ jenkins_instance }}" + # matching the UID/GID to one in official Jenkins image + owner: 1000 + group: 1000 + # Jenkins instance URL and admin credentials + url: "https://{{ jenkins_instance }}.com/" + url_username: admin + url_password: p4ssw0rd + # make module work from EC2 which has local access + # to EFS mount as well as Jenkins URL + delegate_to: ec2-helper + vars: + jenkins_instance: foobar + # # Example of a Play which handles Jenkins restarts during the state changes # @@ -195,11 +237,11 @@ EXAMPLES = ''' vars: my_jenkins_plugins: token-macro: - enabled: yes + enabled: true build-pipeline-plugin: version: "1.4.9" - pinned: no - enabled: yes + pinned: false + enabled: true tasks: - name: Install plugins without a specific version community.general.jenkins_plugin: @@ -220,17 +262,17 @@ EXAMPLES = ''' - name: Initiate the fact ansible.builtin.set_fact: - jenkins_restart_required: no + jenkins_restart_required: false - name: Check if restart is required by any of the versioned plugins ansible.builtin.set_fact: - jenkins_restart_required: yes + 
jenkins_restart_required: true when: item.changed with_items: "{{ my_jenkins_plugin_versioned.results }}" - name: Check if restart is required by any of the unversioned plugins ansible.builtin.set_fact: - jenkins_restart_required: yes + jenkins_restart_required: true when: item.changed with_items: "{{ my_jenkins_plugin_unversioned.results }}" @@ -250,13 +292,13 @@ EXAMPLES = ''' retries: 60 delay: 5 until: > - 'status' in jenkins_service_status and - jenkins_service_status['status'] == 200 + 'status' in jenkins_service_status and + jenkins_service_status['status'] == 200 when: jenkins_restart_required - name: Reset the fact ansible.builtin.set_fact: - jenkins_restart_required: no + jenkins_restart_required: false when: jenkins_restart_required - name: Plugin pinning @@ -274,34 +316,36 @@ EXAMPLES = ''' when: > 'enabled' in item.value with_dict: "{{ my_jenkins_plugins }}" -''' +""" -RETURN = ''' +RETURN = r""" plugin: - description: plugin name - returned: success - type: str - sample: build-pipeline-plugin + description: Plugin name. + returned: success + type: str + sample: build-pipeline-plugin state: - description: state of the target, after execution - returned: success - type: str - sample: "present" -''' + description: State of the target, after execution. 
+ returned: success + type: str + sample: "present" +""" -from ansible.module_utils.basic import AnsibleModule, to_bytes -from ansible.module_utils.six.moves import http_cookiejar as cookiejar -from ansible.module_utils.six.moves.urllib.parse import urlencode -from ansible.module_utils.urls import fetch_url, url_argument_spec -from ansible.module_utils.six import text_type, binary_type -from ansible.module_utils.common.text.converters import to_native -import base64 import hashlib import io import json import os import tempfile import time +from collections import OrderedDict +from http import cookiejar +from urllib.parse import urlencode + +from ansible.module_utils.basic import AnsibleModule, to_bytes +from ansible.module_utils.urls import fetch_url, url_argument_spec, basic_auth_header +from ansible.module_utils.common.text.converters import to_native + +from ansible_collections.community.general.plugins.module_utils.jenkins import download_updates_file class FailedInstallingWithPluginManager(Exception): @@ -318,14 +362,24 @@ class JenkinsPlugin(object): self.url = self.params['url'] self.timeout = self.params['timeout'] + # Authentication for non-Jenkins calls + self.updates_url_credentials = {} + if self.params.get('updates_url_username') and self.params.get('updates_url_password'): + self.updates_url_credentials["Authorization"] = basic_auth_header(self.params['updates_url_username'], self.params['updates_url_password']) + # Crumb self.crumb = {} + + # Authentication for Jenkins calls + if self.params.get('url_username') and self.params.get('url_password'): + self.crumb["Authorization"] = basic_auth_header(self.params['url_username'], self.params['url_password']) + # Cookie jar for crumb session self.cookies = None if self._csrf_enabled(): self.cookies = cookiejar.LWPCookieJar() - self.crumb = self._get_crumb() + self._get_crumb() # Get list of installed plugins self._get_installed_plugins() @@ -368,10 +422,14 @@ class JenkinsPlugin(object): err_msg = None 
try: self.module.debug("fetching url: %s" % url) + + is_jenkins_call = url.startswith(self.url) + self.module.params['force_basic_auth'] = is_jenkins_call + response, info = fetch_url( self.module, url, timeout=self.timeout, cookies=self.cookies, - headers=self.crumb, **kwargs) - + headers=self.crumb if is_jenkins_call else self.updates_url_credentials or self.crumb, + **kwargs) if info['status'] == 200: return response else: @@ -400,9 +458,13 @@ class JenkinsPlugin(object): # Get the URL data try: + is_jenkins_call = url.startswith(self.url) + self.module.params['force_basic_auth'] = is_jenkins_call + response, info = fetch_url( self.module, url, timeout=self.timeout, cookies=self.cookies, - headers=self.crumb, **kwargs) + headers=self.crumb if is_jenkins_call else self.updates_url_credentials or self.crumb, + **kwargs) if info['status'] != 200: if dont_fail: @@ -422,16 +484,12 @@ class JenkinsPlugin(object): "%s/%s" % (self.url, "crumbIssuer/api/json"), 'Crumb') if 'crumbRequestField' in crumb_data and 'crumb' in crumb_data: - ret = { - crumb_data['crumbRequestField']: crumb_data['crumb'] - } + self.crumb[crumb_data['crumbRequestField']] = crumb_data['crumb'] else: self.module.fail_json( msg="Required fields not found in the Crum response.", details=crumb_data) - return ret - def _get_installed_plugins(self): plugins_data = self._get_json_data( "%s/%s" % (self.url, "pluginManager/api/json?depth=1"), @@ -445,6 +503,7 @@ class JenkinsPlugin(object): self.is_installed = False self.is_pinned = False self.is_enabled = False + self.installed_plugins = plugins_data['plugins'] for p in plugins_data['plugins']: if p['shortName'] == self.params['name']: @@ -458,6 +517,40 @@ class JenkinsPlugin(object): break + def _install_dependencies(self): + dependencies = self._get_versioned_dependencies() + self.dependencies_states = [] + + for dep_name, dep_version in dependencies.items(): + if not any(p['shortName'] == dep_name and p['version'] == dep_version for p in 
self.installed_plugins): + dep_params = self.params.copy() + dep_params['name'] = dep_name + dep_params['version'] = dep_version + dep_module = AnsibleModule( + argument_spec=self.module.argument_spec, + supports_check_mode=self.module.check_mode + ) + dep_module.params = dep_params + dep_plugin = JenkinsPlugin(dep_module) + if not dep_plugin.install(): + self.dependencies_states.append( + { + 'name': dep_name, + 'version': dep_version, + 'state': 'absent'}) + else: + self.dependencies_states.append( + { + 'name': dep_name, + 'version': dep_version, + 'state': 'present'}) + else: + self.dependencies_states.append( + { + 'name': dep_name, + 'version': dep_version, + 'state': 'present'}) + def _install_with_plugin_manager(self): if not self.module.check_mode: # Install the plugin (with dependencies) @@ -518,6 +611,10 @@ class JenkinsPlugin(object): plugin_content = plugin_fh.read() checksum_old = hashlib.sha1(plugin_content).hexdigest() + # Install dependencies + if self.params['with_dependencies']: + self._install_dependencies() + if self.params['version'] in [None, 'latest']: # Take latest version plugin_urls = self._get_latest_plugin_urls() @@ -590,6 +687,58 @@ class JenkinsPlugin(object): urls.append("{0}/{1}/{2}.hpi".format(base_url, update_segment, self.params['name'])) return urls + def _get_latest_compatible_plugin_version(self, plugin_name=None): + if not hasattr(self, 'jenkins_version'): + self.module.params['force_basic_auth'] = True + resp, info = fetch_url(self.module, self.url) + raw_version = info.get("x-jenkins") + self.jenkins_version = self.parse_version(raw_version) + name = plugin_name or self.params['name'] + cache_path = "{}/ansible_jenkins_plugin_cache.json".format(self.params['jenkins_home']) + plugin_version_urls = [] + for base_url in self.params['updates_url']: + for update_json in self.params['plugin_versions_url_segment']: + plugin_version_urls.append("{}/{}".format(base_url, update_json)) + + try: # Check if file is saved locally + if 
os.path.exists(cache_path): + file_mtime = os.path.getmtime(cache_path) + else: + file_mtime = 0 + + now = time.time() + if now - file_mtime >= 86400: + response = self._get_urls_data(plugin_version_urls, what="plugin-versions.json") + plugin_data = json.loads(to_native(response.read()), object_pairs_hook=OrderedDict) + + # Save it to file for next time + with open(cache_path, "w") as f: + json.dump(plugin_data, f) + + with open(cache_path, "r") as f: + plugin_data = json.load(f) + + except Exception as e: + if os.path.exists(cache_path): + os.remove(cache_path) + self.module.fail_json(msg="Failed to parse plugin-versions.json", details=to_native(e)) + + plugin_versions = plugin_data.get("plugins", {}).get(name) + if not plugin_versions: + self.module.fail_json(msg="Plugin '{}' not found.".format(name)) + + sorted_versions = list(reversed(plugin_versions.items())) + + for idx, (version_title, version_info) in enumerate(sorted_versions): + required_core = version_info.get("requiredCore", "0.0") + if self.parse_version(required_core) <= self.jenkins_version: + return 'latest' if idx == 0 else version_title + + self.module.warn( + "No compatible version found for plugin '{}'. 
" + "Installing latest version.".format(name)) + return 'latest' + def _get_versioned_plugin_urls(self): urls = [] for base_url in self.params['updates_url']: @@ -604,22 +753,25 @@ class JenkinsPlugin(object): urls.append("{0}/{1}".format(base_url, update_json)) return urls + def _get_versioned_dependencies(self): + # Get dependencies for the specified plugin version + plugin_data = self._download_updates()['dependencies'] + + dependencies_info = { + dep["name"]: self._get_latest_compatible_plugin_version(dep["name"]) + for dep in plugin_data + if not dep.get("optional", False) + } + + return dependencies_info + def _download_updates(self): - updates_filename = 'jenkins-plugin-cache.json' - updates_dir = os.path.expanduser('~/.ansible/tmp') - updates_file = "%s/%s" % (updates_dir, updates_filename) - download_updates = True - - # Check if we need to download new updates file - if os.path.isfile(updates_file): - # Get timestamp when the file was changed last time - ts_file = os.stat(updates_file).st_mtime - ts_now = time.time() - - if ts_now - ts_file < self.params['updates_expiration']: - download_updates = False - - updates_file_orig = updates_file + try: + updates_file, download_updates = download_updates_file(self.params['updates_expiration']) + except OSError as e: + self.module.fail_json( + msg="Cannot create temporal directory.", + details=to_native(e)) # Download the updates file if needed if download_updates: @@ -632,56 +784,41 @@ class JenkinsPlugin(object): msg_exception="Updates download failed.") # Write the updates file - update_fd, updates_file = tempfile.mkstemp() - os.write(update_fd, r.read()) + tmp_update_fd, tmp_updates_file = tempfile.mkstemp() + os.write(tmp_update_fd, r.read()) try: - os.close(update_fd) + os.close(tmp_update_fd) except IOError as e: self.module.fail_json( - msg="Cannot close the tmp updates file %s." % updates_file, + msg="Cannot close the tmp updates file %s." 
% tmp_updates_file, details=to_native(e)) + else: + tmp_updates_file = updates_file # Open the updates file try: - f = io.open(updates_file, encoding='utf-8') + f = io.open(tmp_updates_file, encoding='utf-8') + + # Read only the second line + dummy = f.readline() + data = json.loads(f.readline()) except IOError as e: self.module.fail_json( - msg="Cannot open temporal updates file.", + msg="Cannot open%s updates file." % (" temporary" if tmp_updates_file != updates_file else ""), + details=to_native(e)) + except Exception as e: + self.module.fail_json( + msg="Cannot load JSON data from the%s updates file." % (" temporary" if tmp_updates_file != updates_file else ""), details=to_native(e)) - i = 0 - for line in f: - # Read only the second line - if i == 1: - try: - data = json.loads(line) - except Exception as e: - self.module.fail_json( - msg="Cannot load JSON data from the tmp updates file.", - details=to_native(e)) - - break - - i += 1 - # Move the updates file to the right place if we could read it - if download_updates: - # Make sure the destination directory exists - if not os.path.isdir(updates_dir): - try: - os.makedirs(updates_dir, int('0700', 8)) - except OSError as e: - self.module.fail_json( - msg="Cannot create temporal directory.", - details=to_native(e)) - - self.module.atomic_move(updates_file, updates_file_orig) + if tmp_updates_file != updates_file: + self.module.atomic_move(os.path.abspath(tmp_updates_file), os.path.abspath(updates_file)) # Check if we have the plugin data available - if 'plugins' not in data or self.params['name'] not in data['plugins']: - self.module.fail_json( - msg="Cannot find plugin data in the updates file.") + if not data.get('plugins', {}).get(self.params['name']): + self.module.fail_json(msg="Cannot find plugin data in the updates file.") return data['plugins'][self.params['name']] @@ -697,7 +834,7 @@ class JenkinsPlugin(object): # Store the plugin into a temp file and then move it tmp_f_fd, tmp_f = tempfile.mkstemp() - 
if isinstance(data, (text_type, binary_type)): + if isinstance(data, (str, bytes)): os.write(tmp_f_fd, data) else: os.write(tmp_f_fd, data.read()) @@ -710,7 +847,7 @@ class JenkinsPlugin(object): details=to_native(e)) # Move the file onto the right place - self.module.atomic_move(tmp_f, f) + self.module.atomic_move(os.path.abspath(tmp_f), os.path.abspath(f)) def uninstall(self): changed = False @@ -781,6 +918,10 @@ class JenkinsPlugin(object): msg_exception="%s has failed." % msg, method="POST") + @staticmethod + def parse_version(version_str): + return tuple(int(x) for x in version_str.split('.')) + def main(): # Module arguments @@ -805,8 +946,12 @@ def main(): updates_expiration=dict(default=86400, type="int"), updates_url=dict(type="list", elements="str", default=['https://updates.jenkins.io', 'http://mirrors.jenkins.io']), + updates_url_username=dict(type="str"), + updates_url_password=dict(type="str", no_log=True), update_json_url_segment=dict(type="list", elements="str", default=['update-center.json', 'updates/update-center.json']), + plugin_versions_url_segment=dict(type="list", elements="str", default=['plugin-versions.json', + 'current/plugin-versions.json']), latest_plugins_url_segments=dict(type="list", elements="str", default=['latest']), versioned_plugins_url_segments=dict(type="list", elements="str", default=['download/plugins', 'plugins']), url=dict(default='http://localhost:8080'), @@ -821,9 +966,6 @@ def main(): supports_check_mode=True, ) - # Force basic authentication - module.params['force_basic_auth'] = True - # Convert timeout to float try: module.params['timeout'] = float(module.params['timeout']) @@ -831,11 +973,17 @@ def main(): module.fail_json( msg='Cannot convert %s to float.' 
% module.params['timeout'], details=to_native(e)) + # Instantiate the JenkinsPlugin object + jp = JenkinsPlugin(module) # Set version to latest if state is latest if module.params['state'] == 'latest': module.params['state'] = 'present' - module.params['version'] = 'latest' + module.params['version'] = jp._get_latest_compatible_plugin_version() + + # Set version to latest compatible version if version is latest + if module.params['version'] == 'latest': + module.params['version'] = jp._get_latest_compatible_plugin_version() # Create some shortcuts name = module.params['name'] @@ -844,9 +992,6 @@ def main(): # Initial change state of the task changed = False - # Instantiate the JenkinsPlugin object - jp = JenkinsPlugin(module) - # Perform action depending on the requested state if state == 'present': changed = jp.install() @@ -862,7 +1007,7 @@ def main(): changed = jp.disable() # Print status of the change - module.exit_json(changed=changed, plugin=name, state=state) + module.exit_json(changed=changed, plugin=name, state=state, dependencies=jp.dependencies_states if hasattr(jp, 'dependencies_states') else None) if __name__ == '__main__': diff --git a/plugins/modules/web_infrastructure/jenkins_script.py b/plugins/modules/jenkins_script.py similarity index 69% rename from plugins/modules/web_infrastructure/jenkins_script.py rename to plugins/modules/jenkins_script.py index 3ad51a9703..eda3a49f2e 100644 --- a/plugins/modules/web_infrastructure/jenkins_script.py +++ b/plugins/modules/jenkins_script.py @@ -1,42 +1,46 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- -# (c) 2016, James Hogarth -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# Copyright (c) 2016, James Hogarth +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from 
__future__ import annotations -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" author: James Hogarth (@hogarthj) module: jenkins_script short_description: Executes a groovy script in the jenkins instance description: - - The C(jenkins_script) module takes a script plus a dict of values - to use within the script and returns the result of the script being run. + - The C(jenkins_script) module takes a script plus a dict of values to use within the script and returns the result of the + script being run. +extends_documentation_fragment: + - community.general.attributes + +attributes: + check_mode: + support: none + diff_mode: + support: none options: script: type: str description: - - The groovy script to be executed. - This gets passed as a string Template if args is defined. + - The groovy script to be executed. This gets passed as a string Template if args is defined. required: true url: type: str description: - - The jenkins server to execute the script against. The default is a local - jenkins instance that is not being proxied through a webserver. + - The jenkins server to execute the script against. The default is a local jenkins instance that is not being proxied + through a webserver. default: http://localhost:8080 validate_certs: description: - - If set to C(no), the SSL certificates will not be validated. - This should only set to C(no) used on personally controlled sites - using self-signed certificates as it avoids verifying the source site. + - If set to V(false), the SSL certificates are not validated. This should only set to V(false) used on personally controlled + sites using self-signed certificates as it avoids verifying the source site. type: bool - default: 'yes' + default: true user: type: str description: @@ -48,21 +52,18 @@ options: timeout: type: int description: - - The request timeout in seconds + - The request timeout in seconds. 
default: 10 args: type: dict description: - A dict of key-value pairs used in formatting the script using string.Template (see https://docs.python.org/2/library/string.html#template-strings). - notes: - - Since the script can do anything this does not report on changes. - Knowing the script is being run it's important to set changed_when - for the ansible output to be clear on any alterations made. + - Since the script can do anything this does not report on changes. Knowing the script is being run it is important to set + C(changed_when) for the ansible output to be clear on any alterations made. +""" -''' - -EXAMPLES = ''' +EXAMPLES = r""" - name: Obtaining a list of plugins community.general.jenkins_script: script: 'println(Jenkins.instance.pluginManager.plugins)' @@ -72,10 +73,10 @@ EXAMPLES = ''' - name: Setting master using a variable to hold a more complicate script ansible.builtin.set_fact: setmaster_mode: | - import jenkins.model.* - instance = Jenkins.getInstance() - instance.setMode(${jenkins_mode}) - instance.save() + import jenkins.model.* + instance = Jenkins.getInstance() + instance.setMode(${jenkins_mode}) + instance.save() - name: Use the variable as the script community.general.jenkins_script: @@ -89,22 +90,22 @@ EXAMPLES = ''' user: admin password: admin url: https://localhost - validate_certs: no -''' + validate_certs: false # only do this when you trust the network! +""" -RETURN = ''' +RETURN = r""" output: - description: Result of script - returned: success - type: str - sample: 'Result: true' -''' + description: Result of script. 
+ returned: success + type: str + sample: 'Result: true' +""" import json +from http import cookiejar +from urllib.parse import urlencode from ansible.module_utils.basic import AnsibleModule -from ansible.module_utils.six.moves import http_cookiejar as cookiejar -from ansible.module_utils.six.moves.urllib.parse import urlencode from ansible.module_utils.urls import fetch_url from ansible.module_utils.common.text.converters import to_native @@ -139,12 +140,12 @@ def main(): module = AnsibleModule( argument_spec=dict( script=dict(required=True, type="str"), - url=dict(required=False, type="str", default="http://localhost:8080"), - validate_certs=dict(required=False, type="bool", default=True), - user=dict(required=False, type="str", default=None), - password=dict(required=False, no_log=True, type="str", default=None), - timeout=dict(required=False, type="int", default=10), - args=dict(required=False, type="dict", default=None) + url=dict(type="str", default="http://localhost:8080"), + validate_certs=dict(type="bool", default=True), + user=dict(type="str"), + password=dict(no_log=True, type="str"), + timeout=dict(type="int", default=10), + args=dict(type="dict") ) ) diff --git a/plugins/modules/web_infrastructure/jira.py b/plugins/modules/jira.py similarity index 69% rename from plugins/modules/web_infrastructure/jira.py rename to plugins/modules/jira.py index d6c7653835..34d1cc3a8a 100644 --- a/plugins/modules/web_infrastructure/jira.py +++ b/plugins/modules/jira.py @@ -1,23 +1,31 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- -# (c) 2014, Steve Smith +# Copyright (c) 2014, Steve Smith # Atlassian open-source approval reference OSR-76. 
# -# (c) 2020, Per Abildgaard Toft Search and update function -# (c) 2021, Brandon McNama Issue attachment functionality +# Copyright (c) 2020, Per Abildgaard Toft Search and update function +# Copyright (c) 2021, Brandon McNama Issue attachment functionality +# Copyright (c) 2022, Hugo Prudente Worklog functionality # -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations DOCUMENTATION = r""" module: jira -short_description: create and modify issues in a JIRA instance +short_description: Create and modify issues in a JIRA instance description: - Create and modify issues in a JIRA instance. +extends_documentation_fragment: + - community.general.attributes + +attributes: + check_mode: + support: none + diff_mode: + support: none options: uri: @@ -25,89 +33,95 @@ options: required: true description: - Base URI for the JIRA instance. - operation: type: str required: true - aliases: [ command ] - choices: [ attach, comment, create, edit, fetch, link, search, transition, update ] + aliases: [command] + choices: [attach, comment, create, edit, fetch, link, search, transition, update, worklog] description: - The operation to perform. - + - V(worklog) was added in community.general 6.5.0. username: type: str description: - The username to log-in with. - - Must be used with I(password). Mutually exclusive with I(token). - + - Must be used with O(password). Mutually exclusive with O(token). password: type: str description: - The password to log-in with. - - Must be used with I(username). Mutually exclusive with I(token). - + - Must be used with O(username). Mutually exclusive with O(token). 
token: type: str description: - The personal access token to log-in with. - - Mutually exclusive with I(username) and I(password). + - Mutually exclusive with O(username) and O(password). version_added: 4.2.0 + client_cert: + type: path + description: + - Client certificate if required. + - In addition to O(username) and O(password) or O(token). Not mutually exclusive. + version_added: 10.4.0 + client_key: + type: path + description: + - Client certificate key if required. + - In addition to O(username) and O(password) or O(token). Not mutually exclusive. + version_added: 10.4.0 project: type: str required: false description: - The project for this operation. Required for issue creation. - summary: type: str required: false description: - - The issue summary, where appropriate. - - Note that JIRA may not allow changing field values on specific transitions or states. - + - The issue summary, where appropriate. + - Note that JIRA may not allow changing field values on specific transitions or states. description: type: str required: false description: - - The issue description, where appropriate. - - Note that JIRA may not allow changing field values on specific transitions or states. - + - The issue description, where appropriate. + - Note that JIRA may not allow changing field values on specific transitions or states. issuetype: type: str required: false description: - - The issue type, for issue creation. - + - The issue type, for issue creation. issue: type: str required: false description: - - An existing issue key to operate on. + - An existing issue key to operate on. aliases: ['ticket'] comment: type: str required: false description: - - The comment text to add. - - Note that JIRA may not allow changing field values on specific transitions or states. - + - The comment text to add. + - Note that JIRA may not allow changing field values on specific transitions or states. comment_visibility: type: dict description: - - Used to specify comment comment visibility. 
- - See U(https://developer.atlassian.com/cloud/jira/platform/rest/v2/api-group-issue-comments/#api-rest-api-2-issue-issueidorkey-comment-post) for details. + - Used to specify comment comment visibility. + - See + U(https://developer.atlassian.com/cloud/jira/platform/rest/v2/api-group-issue-comments/#api-rest-api-2-issue-issueidorkey-comment-post) + for details. suboptions: type: description: - - Use type to specify which of the JIRA visibility restriction types will be used. + - Use O(comment_visibility.type) to specify which of the JIRA visibility restriction types is used. type: str required: true choices: [group, role] value: description: - - Use value to specify value corresponding to the type of visibility restriction. For example name of the group or role. + - Specify value corresponding to the type of visibility restriction. For example name of the group or role. type: str required: true version_added: '3.2.0' @@ -116,63 +130,67 @@ options: type: str required: false description: - - Only used when I(operation) is C(transition), and a bit of a misnomer, it actually refers to the transition name. - + - Only used when O(operation) is V(transition), and a bit of a misnomer, it actually refers to the transition name. + - This is mutually exclusive with O(status_id). + status_id: + type: str + required: false + description: + - Only used when O(operation) is V(transition), and refers to the transition ID. + - This is mutually exclusive with O(status). + version_added: 10.3.0 assignee: type: str required: false description: - - Sets the the assignee when I(operation) is C(create), C(transition) or C(edit). - - Recent versions of JIRA no longer accept a user name as a user identifier. In that case, use I(account_id) instead. - - Note that JIRA may not allow changing field values on specific transitions or states. - + - Sets the assignee when O(operation) is V(create), V(transition), or V(edit). 
+ - Recent versions of JIRA no longer accept a user name as a user identifier. In that case, use O(account_id) instead. + - Note that JIRA may not allow changing field values on specific transitions or states. account_id: type: str description: - - Sets the account identifier for the assignee when I(operation) is C(create), C(transition) or C(edit). - - Note that JIRA may not allow changing field values on specific transitions or states. + - Sets the account identifier for the assignee when O(operation) is V(create), V(transition), or V(edit). + - Note that JIRA may not allow changing field values on specific transitions or states. version_added: 2.5.0 linktype: type: str required: false description: - - Set type of link, when action 'link' selected. - + - Set type of link, when action 'link' selected. inwardissue: type: str required: false description: - - Set issue from which link will be created. - + - Set issue from which link is created. outwardissue: type: str required: false description: - - Set issue to which link will be created. - + - Set issue to which link is created. fields: type: dict required: false description: - - This is a free-form data structure that can contain arbitrary data. This is passed directly to the JIRA REST API - (possibly after merging with other required data, as when passed to create). See examples for more information, - and the JIRA REST API for the structure required for various fields. - - When passed to comment, the data structure is merged at the first level since community.general 4.6.0. Useful to add JIRA properties for example. - - Note that JIRA may not allow changing field values on specific transitions or states. - + - This is a free-form data structure that can contain arbitrary data. This is passed directly to the JIRA REST API (possibly + after merging with other required data, as when passed to create). See examples for more information, and the JIRA + REST API for the structure required for various fields. 
+ - When passed to comment, the data structure is merged at the first level since community.general 4.6.0. Useful to add + JIRA properties for example. + - Note that JIRA may not allow changing field values on specific transitions or states. + default: {} jql: required: false description: - - Query JIRA in JQL Syntax, e.g. 'CMDB Hostname'='test.example.com'. + - Query JIRA in JQL Syntax, for example V("CMDB Hostname" = test.example.com). type: str version_added: '0.2.0' maxresults: required: false description: - - Limit the result of I(operation=search). If no value is specified, the default jira limit will be used. - - Used when I(operation=search) only, ignored otherwise. + - Limit the result of O(operation=search). If no value is specified, the default JIRA limit is used. + - Used when O(operation=search) only, ignored otherwise. type: int version_added: '0.2.0' @@ -186,7 +204,7 @@ options: validate_certs: required: false description: - - Require valid SSL certificates (set to `false` if you'd like to use self-signed certificates) + - Require valid SSL certificates (set to V(false) if you would like to use self-signed certificates). default: true type: bool @@ -200,27 +218,24 @@ options: required: true type: path description: - - The path to the file to upload (from the remote node) or, if I(content) is specified, - the filename to use for the attachment. + - The path to the file to upload (from the remote node) or, if O(attachment.content) is specified, the filename + to use for the attachment. content: type: str description: - - The Base64 encoded contents of the file to attach. If not specified, the contents of I(filename) will be + - The Base64 encoded contents of the file to attach. If not specified, the contents of O(attachment.filename) is used instead. mimetype: type: str description: - - The MIME type to supply for the upload. If not specified, best-effort detection will be - done. - + - The MIME type to supply for the upload. 
If not specified, best-effort detection is performed. notes: - - "Currently this only works with basic-auth, or tokens." - - "To use with JIRA Cloud, pass the login e-mail as the I(username) and the API token as I(password)." - + - Currently this only works with basic-auth, or tokens. + - To use with JIRA Cloud, pass the login e-mail as the O(username) and the API token as O(password). author: -- "Steve Smith (@tarka)" -- "Per Abildgaard Toft (@pertoft)" -- "Brandon McNama (@DWSR)" + - "Steve Smith (@tarka)" + - "Per Abildgaard Toft (@pertoft)" + - "Brandon McNama (@DWSR)" """ EXAMPLES = r""" @@ -237,8 +252,8 @@ EXAMPLES = r""" issuetype: Task args: fields: - customfield_13225: "test" - customfield_12931: {"value": "Test"} + customfield_13225: "test" + customfield_12931: {"value": "Test"} register: issue - name: Comment on issue @@ -276,6 +291,47 @@ EXAMPLES = r""" value: internal: true +# Add a worklog to an existing issue +- name: Worklog on issue + community.general.jira: + uri: '{{ server }}' + username: '{{ user }}' + password: '{{ pass }}' + issue: '{{ issue.meta.key }}' + operation: worklog + comment: A worklog added by Ansible + fields: + timeSpentSeconds: 12000 + +- name: Worklog on issue with comment restricted visibility + community.general.jira: + uri: '{{ server }}' + username: '{{ user }}' + password: '{{ pass }}' + issue: '{{ issue.meta.key }}' + operation: worklog + comment: A worklog added by Ansible + comment_visibility: + type: role + value: Developers + fields: + timeSpentSeconds: 12000 + +- name: Worklog on issue with comment property to mark it internal + community.general.jira: + uri: '{{ server }}' + username: '{{ user }}' + password: '{{ pass }}' + issue: '{{ issue.meta.key }}' + operation: worklog + comment: A worklog added by Ansible + fields: + properties: + - key: 'sd.public.comment' + value: + internal: true + timeSpentSeconds: 12000 + # Assign an existing issue using edit - name: Assign an issue using free-form fields 
community.general.jira: @@ -309,9 +365,9 @@ EXAMPLES = r""" operation: edit args: fields: - labels: - - autocreated - - ansible + labels: + - autocreated + - ansible # Updating a field using operations: add, set & remove - name: Change the value of a Select dropdown @@ -323,8 +379,8 @@ EXAMPLES = r""" operation: update args: fields: - customfield_12931: [ {'set': {'value': 'Virtual'}} ] - customfield_13820: [ {'set': {'value':'Manually'}} ] + customfield_12931: ['set': {'value': 'Virtual'}] + customfield_13820: ['set': {'value': 'Manually'}] register: cmdb_issue delegate_to: localhost @@ -353,7 +409,7 @@ EXAMPLES = r""" jql: project=cmdb AND cf[13225]="test" args: fields: - lastViewed: null + lastViewed: register: issue - name: Create a unix account for the reporter @@ -399,6 +455,23 @@ EXAMPLES = r""" operation: attach attachment: filename: topsecretreport.xlsx + +# Use username, password and client certificate authentication +- name: Create an issue + community.general.jira: + uri: '{{ server }}' + username: '{{ user }}' + password: '{{ pass }}' + client_cert: '{{ path/to/client-cert }}' + client_key: '{{ path/to/client-key }}' + +# Use token and client certificate authentication +- name: Create an issue + community.general.jira: + uri: '{{ server }}' + token: '{{ token }}' + client_cert: '{{ path/to/client-cert }}' + client_key: '{{ path/to/client-key }}' """ import base64 @@ -409,9 +482,9 @@ import os import random import string import traceback +from urllib.request import pathname2url from ansible_collections.community.general.plugins.module_utils.module_helper import StateModuleHelper, cause_changes -from ansible.module_utils.six.moves.urllib.request import pathname2url from ansible.module_utils.common.text.converters import to_text, to_bytes, to_native from ansible.module_utils.urls import fetch_url @@ -427,12 +500,14 @@ class JIRA(StateModuleHelper): uri=dict(type='str', required=True), operation=dict( type='str', - choices=['attach', 'create', 
'comment', 'edit', 'update', 'fetch', 'transition', 'link', 'search'], + choices=['attach', 'create', 'comment', 'edit', 'update', 'fetch', 'transition', 'link', 'search', 'worklog'], aliases=['command'], required=True ), username=dict(type='str'), password=dict(type='str', no_log=True), token=dict(type='str', no_log=True), + client_cert=dict(type='path'), + client_key=dict(type='path'), project=dict(type='str', ), summary=dict(type='str', ), description=dict(type='str', ), @@ -444,6 +519,7 @@ class JIRA(StateModuleHelper): value=dict(type='str', required=True) )), status=dict(type='str', ), + status_id=dict(type='str', ), assignee=dict(type='str', ), fields=dict(default={}, type='dict'), linktype=dict(type='str', ), @@ -459,9 +535,11 @@ class JIRA(StateModuleHelper): ['username', 'token'], ['password', 'token'], ['assignee', 'account_id'], + ['status', 'status_id'] ], required_together=[ ['username', 'password'], + ['client_cert', 'client_key'] ], required_one_of=[ ['username', 'token'], @@ -470,14 +548,15 @@ class JIRA(StateModuleHelper): ('operation', 'attach', ['issue', 'attachment']), ('operation', 'create', ['project', 'issuetype', 'summary']), ('operation', 'comment', ['issue', 'comment']), + ('operation', 'worklog', ['issue', 'comment']), ('operation', 'fetch', ['issue']), - ('operation', 'transition', ['issue', 'status']), + ('operation', 'transition', ['issue']), + ('operation', 'transition', ['status', 'status_id'], True), ('operation', 'link', ['linktype', 'inwardissue', 'outwardissue']), ('operation', 'search', ['jql']), ), supports_check_mode=False ) - state_param = 'operation' def __init_module__(self): @@ -490,7 +569,7 @@ class JIRA(StateModuleHelper): self.vars.uri = self.vars.uri.strip('/') self.vars.set('restbase', self.vars.uri + '/rest/api/2') - @cause_changes(on_success=True) + @cause_changes(when="success") def operation_create(self): createfields = { 'project': {'key': self.vars.project}, @@ -508,7 +587,7 @@ class JIRA(StateModuleHelper): 
url = self.vars.restbase + '/issue/' self.vars.meta = self.post(url, data) - @cause_changes(on_success=True) + @cause_changes(when="success") def operation_comment(self): data = { 'body': self.vars.comment @@ -524,7 +603,23 @@ class JIRA(StateModuleHelper): url = self.vars.restbase + '/issue/' + self.vars.issue + '/comment' self.vars.meta = self.post(url, data) - @cause_changes(on_success=True) + @cause_changes(when="success") + def operation_worklog(self): + data = { + 'comment': self.vars.comment + } + # if comment_visibility is specified restrict visibility + if self.vars.comment_visibility is not None: + data['visibility'] = self.vars.comment_visibility + + # Use 'fields' to merge in any additional data + if self.vars.fields: + data.update(self.vars.fields) + + url = self.vars.restbase + '/issue/' + self.vars.issue + '/worklog' + self.vars.meta = self.post(url, data) + + @cause_changes(when="success") def operation_edit(self): data = { 'fields': self.vars.fields @@ -532,7 +627,7 @@ class JIRA(StateModuleHelper): url = self.vars.restbase + '/issue/' + self.vars.issue self.vars.meta = self.put(url, data) - @cause_changes(on_success=True) + @cause_changes(when="success") def operation_update(self): data = { "update": self.vars.fields, @@ -554,20 +649,33 @@ class JIRA(StateModuleHelper): self.vars.meta = self.get(url) - @cause_changes(on_success=True) + @cause_changes(when="success") def operation_transition(self): # Find the transition id turl = self.vars.restbase + '/issue/' + self.vars.issue + "/transitions" tmeta = self.get(turl) - target = self.vars.status tid = None + target = None + + if self.vars.status is not None: + target = self.vars.status.strip() + elif self.vars.status_id is not None: + tid = self.vars.status_id.strip() + for t in tmeta['transitions']: - if t['name'] == target: - tid = t['id'] - break + if target is not None: + if t['name'] == target: + tid = t['id'] + break + else: + if tid == t['id']: + break else: - raise ValueError("Failed find 
valid transition for '%s'" % target) + if target is not None: + raise ValueError("Failed find valid transition for '%s'" % target) + else: + raise ValueError("Failed find valid transition for ID '%s'" % tid) fields = dict(self.vars.fields) if self.vars.summary is not None: @@ -587,7 +695,7 @@ class JIRA(StateModuleHelper): url = self.vars.restbase + '/issue/' + self.vars.issue + "/transitions" self.vars.meta = self.post(url, data) - @cause_changes(on_success=True) + @cause_changes(when="success") def operation_link(self): data = { 'type': {'name': self.vars.linktype}, @@ -597,7 +705,7 @@ class JIRA(StateModuleHelper): url = self.vars.restbase + '/issueLink/' self.vars.meta = self.post(url, data) - @cause_changes(on_success=True) + @cause_changes(when="success") def operation_attach(self): v = self.vars filename = v.attachment.get('filename') @@ -729,7 +837,7 @@ class JIRA(StateModuleHelper): if msg: self.module.fail_json(msg=', '.join(msg)) self.module.fail_json(msg=to_native(error)) - # Fallback print body, if it cant be decoded + # Fallback print body, if it can't be decoded self.module.fail_json(msg=to_native(info['body'])) body = response.read() diff --git a/plugins/modules/kdeconfig.py b/plugins/modules/kdeconfig.py new file mode 100644 index 0000000000..c0d5b80b70 --- /dev/null +++ b/plugins/modules/kdeconfig.py @@ -0,0 +1,273 @@ +#!/usr/bin/python + +# Copyright (c) 2023, Salvatore Mesoraca +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later +from __future__ import annotations + + +DOCUMENTATION = r""" +module: kdeconfig +short_description: Manage KDE configuration files +version_added: "6.5.0" +description: + - Add or change individual settings in KDE configuration files. + - It uses B(kwriteconfig) under the hood. +options: + path: + description: + - Path to the config file. If the file does not exist it is created. 
+ type: path + required: true + kwriteconfig_path: + description: + - Path to the kwriteconfig executable. If not specified, Ansible tries to discover it. + type: path + values: + description: + - List of values to set. + type: list + elements: dict + suboptions: + group: + description: + - The option's group. One between this and O(values[].groups) is required. + type: str + groups: + description: + - List of the option's groups. One between this and O(values[].group) is required. + type: list + elements: str + key: + description: + - The option's name. + type: str + required: true + value: + description: + - The option's value. One between this and O(values[].bool_value) is required. + type: str + bool_value: + description: + - Boolean value. + - One between this and O(values[].value) is required. + type: bool + required: true + backup: + description: + - Create a backup file. + type: bool + default: false +extends_documentation_fragment: + - files + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: full +requirements: + - kwriteconfig +author: + - Salvatore Mesoraca (@smeso) +""" + +EXAMPLES = r""" +- name: Ensure "Homepage=https://www.ansible.com/" in group "Branding" + community.general.kdeconfig: + path: /etc/xdg/kickoffrc + values: + - group: Branding + key: Homepage + value: https://www.ansible.com/ + mode: '0644' + +- name: Ensure "KEY=true" in groups "Group" and "Subgroup", and "KEY=VALUE" in Group2 + community.general.kdeconfig: + path: /etc/xdg/someconfigrc + values: + - groups: [Group, Subgroup] + key: KEY + bool_value: true + - group: Group2 + key: KEY + value: VALUE + backup: true +""" + +RETURN = r""" # """ + +import os +import shutil +import tempfile +import traceback + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.common.text.converters import to_bytes, to_text + + +class TemporaryDirectory(object): + """Basic backport of tempfile.TemporaryDirectory""" + + def 
__init__(self, suffix="", prefix="tmp", dir=None): + self.name = None + self.name = tempfile.mkdtemp(suffix, prefix, dir) + + def __enter__(self): + return self.name + + def rm(self): + if self.name: + shutil.rmtree(self.name, ignore_errors=True) + self.name = None + + def __exit__(self, exc, value, tb): + self.rm() + + def __del__(self): + self.rm() + + +def run_kwriteconfig(module, cmd, path, groups, key, value): + """Invoke kwriteconfig with arguments""" + args = [cmd, '--file', path, '--key', key] + for group in groups: + args.extend(['--group', group]) + if isinstance(value, bool): + args.extend(['--type', 'bool']) + if value: + args.append('true') + else: + args.append('false') + else: + args.extend(['--', value]) + module.run_command(args, check_rc=True) + + +def run_module(module, tmpdir, kwriteconfig): + result = dict(changed=False, msg='OK', path=module.params['path']) + b_path = to_bytes(module.params['path']) + tmpfile = os.path.join(tmpdir, 'file') + b_tmpfile = to_bytes(tmpfile) + diff = dict( + before='', + after='', + before_header=result['path'], + after_header=result['path'], + ) + try: + with open(b_tmpfile, 'wb') as dst: + try: + with open(b_path, 'rb') as src: + b_data = src.read() + except IOError: + result['changed'] = True + else: + dst.write(b_data) + try: + diff['before'] = to_text(b_data) + except UnicodeError: + diff['before'] = repr(b_data) + except IOError: + module.fail_json(msg='Unable to create temporary file', traceback=traceback.format_exc()) + + for row in module.params['values']: + groups = row['groups'] + if groups is None: + groups = [row['group']] + key = row['key'] + value = row['bool_value'] + if value is None: + value = row['value'] + run_kwriteconfig(module, kwriteconfig, tmpfile, groups, key, value) + + with open(b_tmpfile, 'rb') as tmpf: + b_data = tmpf.read() + try: + diff['after'] = to_text(b_data) + except UnicodeError: + diff['after'] = repr(b_data) + + result['changed'] = result['changed'] or diff['after'] != 
diff['before'] + + file_args = module.load_file_common_arguments(module.params) + + if module.check_mode: + if not result['changed']: + shutil.copystat(b_path, b_tmpfile) + uid, gid = module.user_and_group(b_path) + os.chown(b_tmpfile, uid, gid) + if module._diff: + diff = {} + else: + diff = None + result['changed'] = module.set_fs_attributes_if_different(file_args, result['changed'], diff=diff) + if module._diff: + result['diff'] = diff + module.exit_json(**result) + + if result['changed']: + if module.params['backup'] and os.path.exists(b_path): + result['backup_file'] = module.backup_local(result['path']) + try: + module.atomic_move(b_tmpfile, os.path.abspath(b_path)) + except IOError: + module.ansible.fail_json(msg='Unable to move temporary file %s to %s, IOError' % (tmpfile, result['path']), traceback=traceback.format_exc()) + + if result['changed']: + module.set_fs_attributes_if_different(file_args, result['changed']) + else: + if module._diff: + diff = {} + else: + diff = None + result['changed'] = module.set_fs_attributes_if_different(file_args, result['changed'], diff=diff) + if module._diff: + result['diff'] = diff + module.exit_json(**result) + + +def main(): + single_value_arg = dict(group=dict(type='str'), + groups=dict(type='list', elements='str'), + key=dict(type='str', required=True, no_log=False), + value=dict(type='str'), + bool_value=dict(type='bool')) + required_alternatives = [('group', 'groups'), ('value', 'bool_value')] + module_args = dict( + values=dict(type='list', + elements='dict', + options=single_value_arg, + mutually_exclusive=required_alternatives, + required_one_of=required_alternatives, + required=True), + path=dict(type='path', required=True), + kwriteconfig_path=dict(type='path'), + backup=dict(type='bool', default=False), + ) + + module = AnsibleModule( + argument_spec=module_args, + add_file_common_args=True, + supports_check_mode=True, + ) + + kwriteconfig = None + if module.params['kwriteconfig_path'] is not None: + 
kwriteconfig = module.get_bin_path(module.params['kwriteconfig_path'], required=True) + else: + for progname in ('kwriteconfig6', 'kwriteconfig5', 'kwriteconfig', 'kwriteconfig4'): + kwriteconfig = module.get_bin_path(progname) + if kwriteconfig is not None: + break + if kwriteconfig is None: + module.fail_json(msg='kwriteconfig is not installed') + for v in module.params['values']: + if not v['key']: + module.fail_json(msg="'key' cannot be empty") + with TemporaryDirectory(dir=module.tmpdir) as tmpdir: + run_module(module, tmpdir, kwriteconfig) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/system/kernel_blacklist.py b/plugins/modules/kernel_blacklist.py similarity index 60% rename from plugins/modules/system/kernel_blacklist.py rename to plugins/modules/kernel_blacklist.py index ad0241b31a..a0bad12b83 100644 --- a/plugins/modules/system/kernel_blacklist.py +++ b/plugins/modules/kernel_blacklist.py @@ -1,51 +1,54 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- -# Copyright: (c) 2021, Alexei Znamensky (@russoz) -# Copyright: (c) 2013, Matthias Vogelgesang -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# Copyright (c) 2021, Alexei Znamensky (@russoz) +# Copyright (c) 2013, Matthias Vogelgesang +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: kernel_blacklist author: -- Matthias Vogelgesang (@matze) + - Matthias Vogelgesang (@matze) short_description: Blacklist kernel modules description: - - Add or remove kernel modules from blacklist. + - Add or remove kernel modules from blacklist. 
+extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: full options: - name: - type: str - description: - - Name of kernel module to black- or whitelist. - required: true - state: - type: str - description: - - Whether the module should be present in the blacklist or absent. - choices: [ absent, present ] - default: present - blacklist_file: - type: str - description: - - If specified, use this blacklist file instead of - C(/etc/modprobe.d/blacklist-ansible.conf). - default: /etc/modprobe.d/blacklist-ansible.conf -''' + name: + type: str + description: + - Name of kernel module to black- or whitelist. + required: true + state: + type: str + description: + - Whether the module should be present in the blacklist or absent. + choices: [absent, present] + default: present + blacklist_file: + type: str + description: + - If specified, use this blacklist file instead of C(/etc/modprobe.d/blacklist-ansible.conf). + default: /etc/modprobe.d/blacklist-ansible.conf +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: Blacklist the nouveau driver module community.general.kernel_blacklist: name: nouveau state: present -''' +""" import os import re -import tempfile from ansible_collections.community.general.plugins.module_utils.module_helper import StateModuleHelper @@ -98,16 +101,10 @@ class Blacklist(StateModuleHelper): def __quit_module__(self): if self.has_changed() and not self.module.check_mode: - dummy, tmpfile = tempfile.mkstemp() - try: - os.remove(tmpfile) - self.module.preserved_copy(self.vars.filename, tmpfile) # ensure right perms/ownership - with open(tmpfile, 'w') as fd: - fd.writelines(["{0}\n".format(x) for x in self.vars.lines]) - self.module.atomic_move(tmpfile, self.vars.filename) - finally: - if os.path.exists(tmpfile): - os.remove(tmpfile) + bkp = self.module.backup_local(self.vars.filename) + with open(self.vars.filename, "w") as fd: + fd.writelines(["{0}\n".format(x) for x in 
self.vars.lines]) + self.module.add_cleanup_file(bkp) def main(): diff --git a/plugins/modules/identity/keycloak/keycloak_authentication.py b/plugins/modules/keycloak_authentication.py similarity index 63% rename from plugins/modules/identity/keycloak/keycloak_authentication.py rename to plugins/modules/keycloak_authentication.py index c7bf5bc01f..6e84a6adfd 100644 --- a/plugins/modules/identity/keycloak/keycloak_authentication.py +++ b/plugins/modules/keycloak_authentication.py @@ -1,222 +1,207 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- -# Copyright: (c) 2019, INSPQ -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# Copyright (c) 2019, INSPQ +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: keycloak_authentication short_description: Configure authentication in Keycloak description: - - This module actually can only make a copy of an existing authentication flow, add an execution to it and configure it. - - It can also delete the flow. - + - This module actually can only make a copy of an existing authentication flow, add an execution to it and configure it. + - It can also delete the flow. version_added: "3.3.0" -options: - realm: - description: - - The name of the realm in which is the authentication. - required: true - type: str - alias: - description: - - Alias for the authentication flow. - required: true - type: str - description: - description: - - Description of the flow. - type: str - providerId: - description: - - C(providerId) for the new flow when not copied from an existing flow. - type: str - copyFrom: - description: - - C(flowAlias) of the authentication flow to use for the copy. 
- type: str - authenticationExecutions: - description: - - Configuration structure for the executions. - type: list - elements: dict - suboptions: - providerId: - description: - - C(providerID) for the new flow when not copied from an existing flow. - type: str - displayName: - description: - - Name of the execution or subflow to create or update. - type: str - requirement: - description: - - Control status of the subflow or execution. - choices: [ "REQUIRED", "ALTERNATIVE", "DISABLED", "CONDITIONAL" ] - type: str - flowAlias: - description: - - Alias of parent flow. - type: str - authenticationConfig: - description: - - Describe the config of the authentication. - type: dict - index: - description: - - Priority order of the execution. - type: int - state: - description: - - Control if the authentication flow must exists or not. - choices: [ "present", "absent" ] - default: present - type: str - force: - type: bool - default: false - description: - - If C(true), allows to remove the authentication flow and recreate it. +attributes: + check_mode: + support: full + diff_mode: + support: full + action_group: + version_added: 10.2.0 +options: + realm: + description: + - The name of the realm in which is the authentication. + required: true + type: str + alias: + description: + - Alias for the authentication flow. + required: true + type: str + description: + description: + - Description of the flow. + type: str + providerId: + description: + - C(providerId) for the new flow when not copied from an existing flow. + choices: ["basic-flow", "client-flow"] + type: str + copyFrom: + description: + - C(flowAlias) of the authentication flow to use for the copy. + type: str + authenticationExecutions: + description: + - Configuration structure for the executions. + type: list + elements: dict + suboptions: + providerId: + description: + - C(providerID) for the new flow when not copied from an existing flow. 
+ type: str + displayName: + description: + - Name of the execution or subflow to create or update. + type: str + requirement: + description: + - Control status of the subflow or execution. + choices: ["REQUIRED", "ALTERNATIVE", "DISABLED", "CONDITIONAL"] + type: str + flowAlias: + description: + - Alias of parent flow. + type: str + authenticationConfig: + description: + - Describe the config of the authentication. + type: dict + index: + description: + - Priority order of the execution. + type: int + subFlowType: + description: + - For new subflows, optionally specify the type. + - Is only used at creation. + choices: ["basic-flow", "form-flow"] + default: "basic-flow" + type: str + version_added: 6.6.0 + state: + description: + - Control if the authentication flow must exists or not. + choices: ["present", "absent"] + default: present + type: str + force: + type: bool + default: false + description: + - If V(true), allows to remove the authentication flow and recreate it. extends_documentation_fragment: -- community.general.keycloak + - community.general.keycloak + - community.general.keycloak.actiongroup_keycloak + - community.general.attributes author: - - Philippe Gauthier (@elfelip) - - Gaëtan Daubresse (@Gaetan2907) -''' + - Philippe Gauthier (@elfelip) + - Gaëtan Daubresse (@Gaetan2907) +""" -EXAMPLES = ''' - - name: Create an authentication flow from first broker login and add an execution to it. 
- community.general.keycloak_authentication: - auth_keycloak_url: http://localhost:8080/auth - auth_realm: master - auth_username: admin - auth_password: password - realm: master - alias: "Copy of first broker login" - copyFrom: "first broker login" - authenticationExecutions: - - providerId: "test-execution1" - requirement: "REQUIRED" - authenticationConfig: - alias: "test.execution1.property" - config: - test1.property: "value" - - providerId: "test-execution2" - requirement: "REQUIRED" - authenticationConfig: - alias: "test.execution2.property" - config: - test2.property: "value" - state: present +EXAMPLES = r""" +- name: Create an authentication flow from first broker login and add an execution to it. + community.general.keycloak_authentication: + auth_keycloak_url: http://localhost:8080/auth + auth_realm: master + auth_username: admin + auth_password: password + realm: master + alias: "Copy of first broker login" + copyFrom: "first broker login" + authenticationExecutions: + - providerId: "test-execution1" + requirement: "REQUIRED" + authenticationConfig: + alias: "test.execution1.property" + config: + test1.property: "value" + - providerId: "test-execution2" + requirement: "REQUIRED" + authenticationConfig: + alias: "test.execution2.property" + config: + test2.property: "value" + state: present - - name: Re-create the authentication flow - community.general.keycloak_authentication: - auth_keycloak_url: http://localhost:8080/auth - auth_realm: master - auth_username: admin - auth_password: password - realm: master - alias: "Copy of first broker login" - copyFrom: "first broker login" - authenticationExecutions: - - providerId: "test-provisioning" - requirement: "REQUIRED" - authenticationConfig: - alias: "test.provisioning.property" - config: - test.provisioning.property: "value" - state: present - force: true +- name: Re-create the authentication flow + community.general.keycloak_authentication: + auth_keycloak_url: http://localhost:8080/auth + auth_realm: 
master + auth_username: admin + auth_password: password + realm: master + alias: "Copy of first broker login" + copyFrom: "first broker login" + authenticationExecutions: + - providerId: "test-provisioning" + requirement: "REQUIRED" + authenticationConfig: + alias: "test.provisioning.property" + config: + test.provisioning.property: "value" + state: present + force: true - - name: Create an authentication flow with subflow containing an execution. - community.general.keycloak_authentication: - auth_keycloak_url: http://localhost:8080/auth - auth_realm: master - auth_username: admin - auth_password: password - realm: master - alias: "Copy of first broker login" - copyFrom: "first broker login" - authenticationExecutions: - - providerId: "test-execution1" - requirement: "REQUIRED" - - displayName: "New Subflow" - requirement: "REQUIRED" - - providerId: "auth-cookie" - requirement: "REQUIRED" - flowAlias: "New Sublow" - state: present +- name: Create an authentication flow with subflow containing an execution. + community.general.keycloak_authentication: + auth_keycloak_url: http://localhost:8080/auth + auth_realm: master + auth_username: admin + auth_password: password + realm: master + alias: "Copy of first broker login" + copyFrom: "first broker login" + authenticationExecutions: + - providerId: "test-execution1" + requirement: "REQUIRED" + - displayName: "New Subflow" + requirement: "REQUIRED" + - providerId: "auth-cookie" + requirement: "REQUIRED" + flowAlias: "New Sublow" + state: present - - name: Remove authentication. - community.general.keycloak_authentication: - auth_keycloak_url: http://localhost:8080/auth - auth_realm: master - auth_username: admin - auth_password: password - realm: master - alias: "Copy of first broker login" - state: absent -''' +- name: Remove authentication. 
+ community.general.keycloak_authentication: + auth_keycloak_url: http://localhost:8080/auth + auth_realm: master + auth_username: admin + auth_password: password + realm: master + alias: "Copy of first broker login" + state: absent +""" -RETURN = ''' +RETURN = r""" msg: - description: Message as to what action was taken. - returned: always - type: str - -flow: - description: - - JSON representation for the authentication. - - Deprecated return value, it will be removed in community.general 6.0.0. Please use the return value I(end_state) instead. - returned: on success - type: dict - sample: { - "alias": "Copy of first broker login", - "authenticationExecutions": [ - { - "alias": "review profile config", - "authenticationConfig": { - "alias": "review profile config", - "config": { "update.profile.on.first.login": "missing" }, - "id": "6f09e4fb-aad4-496a-b873-7fa9779df6d7" - }, - "configurable": true, - "displayName": "Review Profile", - "id": "8f77dab8-2008-416f-989e-88b09ccf0b4c", - "index": 0, - "level": 0, - "providerId": "idp-review-profile", - "requirement": "REQUIRED", - "requirementChoices": [ "REQUIRED", "ALTERNATIVE", "DISABLED" ] - } - ], - "builtIn": false, - "description": "Actions taken after first broker login with identity provider account, which is not yet linked to any Keycloak account", - "id": "bc228863-5887-4297-b898-4d988f8eaa5c", - "providerId": "basic-flow", - "topLevel": true - } + description: Message as to what action was taken. + returned: always + type: str end_state: - description: Representation of the authentication after module execution. - returned: on success - type: dict - sample: { + description: Representation of the authentication after module execution. 
+ returned: on success + type: dict + sample: + { "alias": "Copy of first broker login", "authenticationExecutions": [ { "alias": "review profile config", "authenticationConfig": { "alias": "review profile config", - "config": { "update.profile.on.first.login": "missing" }, + "config": { + "update.profile.on.first.login": "missing" + }, "id": "6f09e4fb-aad4-496a-b873-7fa9779df6d7" }, "configurable": true, @@ -226,7 +211,11 @@ end_state: "level": 0, "providerId": "idp-review-profile", "requirement": "REQUIRED", - "requirementChoices": [ "REQUIRED", "ALTERNATIVE", "DISABLED" ] + "requirementChoices": [ + "REQUIRED", + "ALTERNATIVE", + "DISABLED" + ] } ], "builtIn": false, @@ -235,10 +224,10 @@ end_state: "providerId": "basic-flow", "topLevel": true } -''' +""" from ansible_collections.community.general.plugins.module_utils.identity.keycloak.keycloak \ - import KeycloakAPI, camel, keycloak_argument_spec, get_token, KeycloakError, is_struct_included + import KeycloakAPI, keycloak_argument_spec, get_token, KeycloakError, is_struct_included from ansible.module_utils.basic import AnsibleModule @@ -262,7 +251,7 @@ def create_or_update_executions(kc, config, realm='master'): """ Create or update executions for an authentication flow. :param kc: Keycloak API access. - :param config: Representation of the authentication flow including it's executions. + :param config: Representation of the authentication flow including its executions. 
:param realm: Realm :return: tuple (changed, dict(before, after) WHERE @@ -273,6 +262,7 @@ def create_or_update_executions(kc, config, realm='master'): changed = False after = "" before = "" + execution = None if "authenticationExecutions" in config: # Get existing executions on the Keycloak server for this alias existing_executions = kc.get_executions_representation(config, realm=realm) @@ -289,44 +279,50 @@ def create_or_update_executions(kc, config, realm='master'): exec_index = find_exec_in_executions(new_exec, existing_executions) if exec_index != -1: # Remove key that doesn't need to be compared with existing_exec - exclude_key = ["flowAlias"] + exclude_key = ["flowAlias", "subFlowType"] for index_key, key in enumerate(new_exec, start=0): if new_exec[key] is None: exclude_key.append(key) # Compare the executions to see if it need changes if not is_struct_included(new_exec, existing_executions[exec_index], exclude_key) or exec_index != new_exec_index: exec_found = True + if new_exec['index'] is None: + new_exec_index = exec_index before += str(existing_executions[exec_index]) + '\n' - id_to_update = existing_executions[exec_index]["id"] + execution = existing_executions[exec_index].copy() # Remove exec from list in case 2 exec with same name existing_executions[exec_index].clear() elif new_exec["providerId"] is not None: kc.create_execution(new_exec, flowAlias=flow_alias_parent, realm=realm) + execution = kc.get_executions_representation(config, realm=realm)[exec_index] exec_found = True exec_index = new_exec_index - id_to_update = kc.get_executions_representation(config, realm=realm)[exec_index]["id"] after += str(new_exec) + '\n' elif new_exec["displayName"] is not None: - kc.create_subflow(new_exec["displayName"], flow_alias_parent, realm=realm) + kc.create_subflow(new_exec["displayName"], flow_alias_parent, realm=realm, flowType=new_exec["subFlowType"]) + execution = kc.get_executions_representation(config, realm=realm)[exec_index] exec_found = True 
exec_index = new_exec_index - id_to_update = kc.get_executions_representation(config, realm=realm)[exec_index]["id"] after += str(new_exec) + '\n' if exec_found: changed = True if exec_index != -1: # Update the existing execution updated_exec = { - "id": id_to_update + "id": execution["id"] } # add the execution configuration if new_exec["authenticationConfig"] is not None: + if "authenticationConfig" in execution and "id" in execution["authenticationConfig"]: + kc.delete_authentication_config(execution["authenticationConfig"]["id"], realm=realm) kc.add_authenticationConfig_to_execution(updated_exec["id"], new_exec["authenticationConfig"], realm=realm) for key in new_exec: # remove unwanted key for the next API call - if key != "flowAlias" and key != "authenticationConfig": + if key not in ("flowAlias", "authenticationConfig", "subFlowType"): updated_exec[key] = new_exec[key] if new_exec["requirement"] is not None: + if "priority" in execution: + updated_exec["priority"] = execution["priority"] kc.update_authentication_executions(flow_alias_parent, updated_exec, realm=realm) diff = exec_index - new_exec_index kc.change_execution_priority(updated_exec["id"], diff, realm=realm) @@ -348,7 +344,7 @@ def main(): meta_args = dict( realm=dict(type='str', required=True), alias=dict(type='str', required=True), - providerId=dict(type='str'), + providerId=dict(type='str', choices=["basic-flow", "client-flow"]), description=dict(type='str'), copyFrom=dict(type='str'), authenticationExecutions=dict(type='list', elements='dict', @@ -359,6 +355,7 @@ def main(): flowAlias=dict(type='str'), authenticationConfig=dict(type='dict'), index=dict(type='int'), + subFlowType=dict(choices=["basic-flow", "form-flow"], default='basic-flow', type='str'), )), state=dict(choices=["absent", "present"], default='present'), force=dict(type='bool', default=False), @@ -368,8 +365,9 @@ def main(): module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True, - 
required_one_of=([['token', 'auth_realm', 'auth_username', 'auth_password']]), - required_together=([['auth_realm', 'auth_username', 'auth_password']]) + required_one_of=([['token', 'auth_realm', 'auth_username', 'auth_password', 'auth_client_id', 'auth_client_secret']]), + required_together=([['auth_username', 'auth_password']]), + required_by={'refresh_token': 'auth_realm'}, ) result = dict(changed=False, msg='', flow={}) @@ -406,7 +404,6 @@ def main(): result['diff'] = dict(before='', after='') result['changed'] = False result['end_state'] = {} - result['flow'] = result['end_state'] result['msg'] = new_auth_repr["alias"] + ' absent' module.exit_json(**result) @@ -439,7 +436,6 @@ def main(): if exec_repr is not None: auth_repr["authenticationExecutions"] = exec_repr result['end_state'] = auth_repr - result['flow'] = result['end_state'] else: if state == 'present': @@ -477,7 +473,6 @@ def main(): if exec_repr is not None: auth_repr["authenticationExecutions"] = exec_repr result['end_state'] = auth_repr - result['flow'] = result['end_state'] else: # Process a deletion (because state was not 'present') diff --git a/plugins/modules/keycloak_authentication_required_actions.py b/plugins/modules/keycloak_authentication_required_actions.py new file mode 100644 index 0000000000..61672721bd --- /dev/null +++ b/plugins/modules/keycloak_authentication_required_actions.py @@ -0,0 +1,456 @@ +#!/usr/bin/python + +# Copyright (c) 2017, Eike Frost +# Copyright (c) 2021, Christophe Gilles +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or +# https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later +from __future__ import annotations + +DOCUMENTATION = r""" +module: keycloak_authentication_required_actions + +short_description: Allows administration of Keycloak authentication required actions + +description: + - This module can register, update and delete required actions. 
+ - It also filters out any duplicate required actions by their alias. The first occurrence is preserved. +version_added: 7.1.0 + +attributes: + check_mode: + support: full + diff_mode: + support: full + action_group: + version_added: 10.2.0 + +options: + realm: + description: + - The name of the realm in which are the authentication required actions. + required: true + type: str + required_actions: + elements: dict + description: + - Authentication required action. + suboptions: + alias: + description: + - Unique name of the required action. + required: true + type: str + config: + description: + - Configuration for the required action. + type: dict + defaultAction: + description: + - Indicates whether new users have the required action assigned to them. + type: bool + enabled: + description: + - Indicates, if the required action is enabled or not. + type: bool + name: + description: + - Displayed name of the required action. Required for registration. + type: str + priority: + description: + - Priority of the required action. + type: int + providerId: + description: + - Provider ID of the required action. Required for registration. + type: str + type: list + state: + choices: ["absent", "present"] + description: + - Control if the realm authentication required actions are going to be registered/updated (V(present)) or deleted (V(absent)). + required: true + type: str + +extends_documentation_fragment: + - community.general.keycloak + - community.general.keycloak.actiongroup_keycloak + - community.general.attributes + +author: + - Skrekulko (@Skrekulko) +""" + +EXAMPLES = r""" +- name: Register a new required action. 
+ community.general.keycloak_authentication_required_actions: + auth_client_id: "admin-cli" + auth_keycloak_url: "http://localhost:8080" + auth_password: "password" + auth_realm: "master" + auth_username: "admin" + realm: "master" + required_action: + - alias: "TERMS_AND_CONDITIONS" + name: "Terms and conditions" + providerId: "TERMS_AND_CONDITIONS" + enabled: true + state: "present" + +- name: Update the newly registered required action. + community.general.keycloak_authentication_required_actions: + auth_client_id: "admin-cli" + auth_keycloak_url: "http://localhost:8080" + auth_password: "password" + auth_realm: "master" + auth_username: "admin" + realm: "master" + required_action: + - alias: "TERMS_AND_CONDITIONS" + enabled: false + state: "present" + +- name: Delete the updated registered required action. + community.general.keycloak_authentication_required_actions: + auth_client_id: "admin-cli" + auth_keycloak_url: "http://localhost:8080" + auth_password: "password" + auth_realm: "master" + auth_username: "admin" + realm: "master" + required_action: + - alias: "TERMS_AND_CONDITIONS" + state: "absent" +""" + +RETURN = r""" +msg: + description: Message as to what action was taken. + returned: always + type: str + +end_state: + description: Representation of the authentication required actions after module execution. + returned: on success + type: complex + contains: + alias: + description: + - Unique name of the required action. + sample: test-provider-id + type: str + config: + description: + - Configuration for the required action. + sample: {} + type: dict + defaultAction: + description: + - Indicates whether new users have the required action assigned to them. + sample: false + type: bool + enabled: + description: + - Indicates, if the required action is enabled or not. + sample: false + type: bool + name: + description: + - Displayed name of the required action. Required for registration. 
+ sample: Test provider ID + type: str + priority: + description: + - Priority of the required action. + sample: 90 + type: int + providerId: + description: + - Provider ID of the required action. Required for registration. + sample: test-provider-id + type: str +""" + +from ansible_collections.community.general.plugins.module_utils.identity.keycloak.keycloak import KeycloakAPI, \ + keycloak_argument_spec, get_token, KeycloakError +from ansible.module_utils.basic import AnsibleModule + + +def sanitize_required_actions(objects): + for obj in objects: + alias = obj['alias'] + name = obj['name'] + provider_id = obj['providerId'] + + if not name: + obj['name'] = alias + + if provider_id != alias: + obj['providerId'] = alias + + return objects + + +def filter_duplicates(objects): + filtered_objects = {} + + for obj in objects: + alias = obj["alias"] + + if alias not in filtered_objects: + filtered_objects[alias] = obj + + return list(filtered_objects.values()) + + +def main(): + """ + Module execution + + :return: + """ + argument_spec = keycloak_argument_spec() + + meta_args = dict( + realm=dict(type='str', required=True), + required_actions=dict( + type='list', + elements='dict', + options=dict( + alias=dict(type='str', required=True), + config=dict(type='dict'), + defaultAction=dict(type='bool'), + enabled=dict(type='bool'), + name=dict(type='str'), + priority=dict(type='int'), + providerId=dict(type='str') + ) + ), + state=dict(type='str', choices=['present', 'absent'], required=True) + ) + + argument_spec.update(meta_args) + + module = AnsibleModule( + argument_spec=argument_spec, + supports_check_mode=True, + required_one_of=([['token', 'auth_realm', 'auth_username', 'auth_password', 'auth_client_id', 'auth_client_secret']]), + required_together=([['auth_username', 'auth_password']]), + required_by={'refresh_token': 'auth_realm'}, + ) + + result = dict(changed=False, msg='', end_state={}, diff=dict(before={}, after={})) + + # Obtain access token, initialize API + 
try: + connection_header = get_token(module.params) + except KeycloakError as e: + module.fail_json(msg=str(e)) + + kc = KeycloakAPI(module, connection_header) + + # Convenience variables + realm = module.params.get('realm') + desired_required_actions = module.params.get('required_actions') + state = module.params.get('state') + + # Sanitize required actions + desired_required_actions = sanitize_required_actions(desired_required_actions) + + # Filter out duplicate required actions + desired_required_actions = filter_duplicates(desired_required_actions) + + # Get required actions + before_required_actions = kc.get_required_actions(realm=realm) + + if state == 'present': + # Initialize empty lists to hold the required actions that need to be + # registered, updated, and original ones of the updated one + register_required_actions = [] + before_updated_required_actions = [] + updated_required_actions = [] + + # Loop through the desired required actions and check if they exist in the before required actions + for desired_required_action in desired_required_actions: + found = False + + # Loop through the before required actions and check if the aliases match + for before_required_action in before_required_actions: + if desired_required_action['alias'] == before_required_action['alias']: + update_required = False + + # Fill in the parameters + for k, v in before_required_action.items(): + if k not in desired_required_action or desired_required_action[k] is None: + desired_required_action[k] = v + + # Loop through the keys of the desired and before required actions + # and check if there are any differences between them + for key in desired_required_action.keys(): + if key in before_required_action and desired_required_action[key] != before_required_action[key]: + update_required = True + break + + # If there are differences, add the before and desired required actions + # to their respective lists for updating + if update_required: + 
before_updated_required_actions.append(before_required_action) + updated_required_actions.append(desired_required_action) + found = True + break + # If the desired required action is not found in the before required actions, + # add it to the list of required actions to register + if not found: + # Check if name is provided + if 'name' not in desired_required_action or desired_required_action['name'] is None: + module.fail_json( + msg='Unable to register required action %s in realm %s: name not included' + % (desired_required_action['alias'], realm) + ) + + # Check if provider ID is provided + if 'providerId' not in desired_required_action or desired_required_action['providerId'] is None: + module.fail_json( + msg='Unable to register required action %s in realm %s: providerId not included' + % (desired_required_action['alias'], realm) + ) + + register_required_actions.append(desired_required_action) + + # Handle diff + if module._diff: + diff_required_actions = updated_required_actions.copy() + diff_required_actions.extend(register_required_actions) + + result['diff'] = dict( + before=before_updated_required_actions, + after=diff_required_actions + ) + + # Handle changed + if register_required_actions or updated_required_actions: + result['changed'] = True + + # Handle check mode + if module.check_mode: + if register_required_actions or updated_required_actions: + result['change'] = True + result['msg'] = 'Required actions would be registered/updated' + else: + result['change'] = False + result['msg'] = 'Required actions would not be registered/updated' + + module.exit_json(**result) + + # Register required actions + if register_required_actions: + for register_required_action in register_required_actions: + kc.register_required_action(realm=realm, rep=register_required_action) + kc.update_required_action(alias=register_required_action['alias'], realm=realm, rep=register_required_action) + + # Update required actions + if updated_required_actions: + for 
updated_required_action in updated_required_actions: + kc.update_required_action(alias=updated_required_action['alias'], realm=realm, rep=updated_required_action) + + # Initialize the final list of required actions + final_required_actions = [] + + # Iterate over the before_required_actions + for before_required_action in before_required_actions: + # Check if there is an updated_required_action with the same alias + updated_required_action_found = False + + for updated_required_action in updated_required_actions: + if updated_required_action['alias'] == before_required_action['alias']: + # Merge the two dictionaries, favoring the values from updated_required_action + merged_dict = {} + for key in before_required_action.keys(): + if key in updated_required_action: + merged_dict[key] = updated_required_action[key] + else: + merged_dict[key] = before_required_action[key] + + for key in updated_required_action.keys(): + if key not in before_required_action: + merged_dict[key] = updated_required_action[key] + + # Add the merged dictionary to the final list of required actions + final_required_actions.append(merged_dict) + + # Mark the updated_required_action as found + updated_required_action_found = True + + # Stop looking for updated_required_action + break + + # If no matching updated_required_action was found, add the before_required_action to the final list of required actions + if not updated_required_action_found: + final_required_actions.append(before_required_action) + + # Append any remaining updated_required_actions that were not merged + for updated_required_action in updated_required_actions: + if not any(updated_required_action['alias'] == action['alias'] for action in final_required_actions): + final_required_actions.append(updated_required_action) + + # Append newly registered required actions + final_required_actions.extend(register_required_actions) + + # Handle message and end state + result['msg'] = 'Required actions registered/updated' + 
result['end_state'] = final_required_actions + else: + # Filter out the deleted required actions + final_required_actions = [] + delete_required_actions = [] + + for before_required_action in before_required_actions: + delete_action = False + + for desired_required_action in desired_required_actions: + if before_required_action['alias'] == desired_required_action['alias']: + delete_action = True + break + + if not delete_action: + final_required_actions.append(before_required_action) + else: + delete_required_actions.append(before_required_action) + + # Handle diff + if module._diff: + result['diff'] = dict( + before=before_required_actions, + after=final_required_actions + ) + + # Handle changed + if delete_required_actions: + result['changed'] = True + + # Handle check mode + if module.check_mode: + if final_required_actions: + result['change'] = True + result['msg'] = 'Required actions would be deleted' + else: + result['change'] = False + result['msg'] = 'Required actions would not be deleted' + + module.exit_json(**result) + + # Delete required actions + if delete_required_actions: + for delete_required_action in delete_required_actions: + kc.delete_required_action(alias=delete_required_action['alias'], realm=realm) + + # Handle message and end state + result['msg'] = 'Required actions deleted' + result['end_state'] = final_required_actions + + module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/keycloak_authz_authorization_scope.py b/plugins/modules/keycloak_authz_authorization_scope.py new file mode 100644 index 0000000000..ad7ada6719 --- /dev/null +++ b/plugins/modules/keycloak_authz_authorization_scope.py @@ -0,0 +1,277 @@ +#!/usr/bin/python + +# Copyright (c) 2017, Eike Frost +# Copyright (c) 2021, Christophe Gilles +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or +# https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later +from __future__ import 
annotations + +DOCUMENTATION = r""" +module: keycloak_authz_authorization_scope + +short_description: Allows administration of Keycloak client authorization scopes using Keycloak API + +version_added: 6.6.0 + +description: + - This module allows the administration of Keycloak client Authorization Scopes using the Keycloak REST API. Authorization + Scopes are only available if a client has Authorization enabled. + - This module requires access to the REST API using OpenID Connect; the user connecting and the realm being used must have + the requisite access rights. In a default Keycloak installation, admin-cli and an admin user would work, as would a separate + realm definition with the scope tailored to your needs and a user having the expected roles. + - The names of module options are snake_cased versions of the camelCase options used by Keycloak. The Authorization Services + paths and payloads have not officially been documented by the Keycloak project. + U(https://www.puppeteers.net/blog/keycloak-authorization-services-rest-api-paths-and-payload/). +attributes: + check_mode: + support: full + diff_mode: + support: full + action_group: + version_added: 10.2.0 + +options: + state: + description: + - State of the authorization scope. + - On V(present), the authorization scope is created (or updated if it exists already). + - On V(absent), the authorization scope is removed if it exists. + choices: ['present', 'absent'] + default: 'present' + type: str + name: + description: + - Name of the authorization scope to create. + type: str + required: true + display_name: + description: + - The display name of the authorization scope. + type: str + required: false + icon_uri: + description: + - The icon URI for the authorization scope. + type: str + required: false + client_id: + description: + - The C(clientId) of the Keycloak client that should have the authorization scope. + - This is usually a human-readable name of the Keycloak client. 
+ type: str + required: true + realm: + description: + - The name of the Keycloak realm the Keycloak client is in. + type: str + required: true + +extends_documentation_fragment: + - community.general.keycloak + - community.general.keycloak.actiongroup_keycloak + - community.general.attributes + +author: + - Samuli Seppänen (@mattock) +""" + +EXAMPLES = r""" +- name: Manage Keycloak file:delete authorization scope + keycloak_authz_authorization_scope: + name: file:delete + state: present + display_name: File delete + client_id: myclient + realm: myrealm + auth_keycloak_url: http://localhost:8080/auth + auth_username: keycloak + auth_password: keycloak + auth_realm: master +""" + +RETURN = r""" +msg: + description: Message as to what action was taken. + returned: always + type: str + +end_state: + description: Representation of the authorization scope after module execution. + returned: on success + type: complex + contains: + id: + description: ID of the authorization scope. + type: str + returned: when O(state=present) + sample: a6ab1cf2-1001-40ec-9f39-48f23b6a0a41 + name: + description: Name of the authorization scope. + type: str + returned: when O(state=present) + sample: file:delete + display_name: + description: Display name of the authorization scope. + type: str + returned: when O(state=present) + sample: File delete + icon_uri: + description: Icon URI for the authorization scope. 
+ type: str + returned: when O(state=present) + sample: http://localhost/icon.png +""" + +from ansible_collections.community.general.plugins.module_utils.identity.keycloak.keycloak import KeycloakAPI, \ + keycloak_argument_spec, get_token, KeycloakError +from ansible.module_utils.basic import AnsibleModule + + +def main(): + """ + Module execution + + :return: + """ + argument_spec = keycloak_argument_spec() + + meta_args = dict( + state=dict(type='str', default='present', + choices=['present', 'absent']), + name=dict(type='str', required=True), + display_name=dict(type='str'), + icon_uri=dict(type='str'), + client_id=dict(type='str', required=True), + realm=dict(type='str', required=True) + ) + + argument_spec.update(meta_args) + + module = AnsibleModule(argument_spec=argument_spec, + supports_check_mode=True, + required_one_of=( + [['token', 'auth_realm', 'auth_username', 'auth_password', 'auth_client_id', 'auth_client_secret']]), + required_together=([['auth_username', 'auth_password']]), + required_by={'refresh_token': 'auth_realm'}, + ) + + result = dict(changed=False, msg='', end_state={}, diff=dict(before={}, after={})) + + # Obtain access token, initialize API + try: + connection_header = get_token(module.params) + except KeycloakError as e: + module.fail_json(msg=str(e)) + + kc = KeycloakAPI(module, connection_header) + + # Convenience variables + state = module.params.get('state') + name = module.params.get('name') + display_name = module.params.get('display_name') + icon_uri = module.params.get('icon_uri') + client_id = module.params.get('client_id') + realm = module.params.get('realm') + + # Get the "id" of the client based on the usually more human-readable + # "clientId" + cid = kc.get_client_id(client_id, realm=realm) + if not cid: + module.fail_json(msg='Invalid client %s for realm %s' % + (client_id, realm)) + + # Get current state of the Authorization Scope using its name as the search + # filter. This returns False if it is not found. 
+ before_authz_scope = kc.get_authz_authorization_scope_by_name( + name=name, client_id=cid, realm=realm) + + # Generate a JSON payload for Keycloak Admin API. This is needed for + # "create" and "update" operations. + desired_authz_scope = {} + desired_authz_scope['name'] = name + desired_authz_scope['displayName'] = display_name + desired_authz_scope['iconUri'] = icon_uri + + # Add "id" to payload for modify operations + if before_authz_scope: + desired_authz_scope['id'] = before_authz_scope['id'] + + # Ensure that undefined (null) optional parameters are presented as empty + # strings in the desired state. This makes comparisons with current state + # much easier. + for k, v in desired_authz_scope.items(): + if not v: + desired_authz_scope[k] = '' + + # Do the above for the current state + if before_authz_scope: + for k in ['displayName', 'iconUri']: + if k not in before_authz_scope: + before_authz_scope[k] = '' + + if before_authz_scope and state == 'present': + changes = False + for k, v in desired_authz_scope.items(): + if before_authz_scope[k] != v: + changes = True + # At this point we know we have to update the object anyways, + # so there's no need to do more work. 
+ break + + if changes: + if module._diff: + result['diff'] = dict(before=before_authz_scope, after=desired_authz_scope) + + if module.check_mode: + result['changed'] = True + result['msg'] = 'Authorization scope would be updated' + module.exit_json(**result) + else: + kc.update_authz_authorization_scope( + payload=desired_authz_scope, id=before_authz_scope['id'], client_id=cid, realm=realm) + result['changed'] = True + result['msg'] = 'Authorization scope updated' + else: + result['changed'] = False + result['msg'] = 'Authorization scope not updated' + + result['end_state'] = desired_authz_scope + elif not before_authz_scope and state == 'present': + if module._diff: + result['diff'] = dict(before={}, after=desired_authz_scope) + + if module.check_mode: + result['changed'] = True + result['msg'] = 'Authorization scope would be created' + module.exit_json(**result) + else: + kc.create_authz_authorization_scope( + payload=desired_authz_scope, client_id=cid, realm=realm) + result['changed'] = True + result['msg'] = 'Authorization scope created' + result['end_state'] = desired_authz_scope + elif before_authz_scope and state == 'absent': + if module._diff: + result['diff'] = dict(before=before_authz_scope, after={}) + + if module.check_mode: + result['changed'] = True + result['msg'] = 'Authorization scope would be removed' + module.exit_json(**result) + else: + kc.remove_authz_authorization_scope( + id=before_authz_scope['id'], client_id=cid, realm=realm) + result['changed'] = True + result['msg'] = 'Authorization scope removed' + elif not before_authz_scope and state == 'absent': + result['changed'] = False + else: + module.fail_json(msg='Unable to determine what to do with authorization scope %s of client %s in realm %s' % ( + name, client_id, realm)) + + module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/keycloak_authz_custom_policy.py b/plugins/modules/keycloak_authz_custom_policy.py new file mode 100644 index 
0000000000..87b8fde834 --- /dev/null +++ b/plugins/modules/keycloak_authz_custom_policy.py @@ -0,0 +1,208 @@ +#!/usr/bin/python + +# Copyright (c) 2017, Eike Frost +# Copyright (c) 2021, Christophe Gilles +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or +# https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later +from __future__ import annotations + +DOCUMENTATION = r""" +module: keycloak_authz_custom_policy + +short_description: Allows administration of Keycloak client custom Javascript policies using Keycloak API + +version_added: 7.5.0 + +description: + - This module allows the administration of Keycloak client custom Javascript using the Keycloak REST API. Custom Javascript + policies are only available if a client has Authorization enabled and if they have been deployed to the Keycloak server + as JAR files. + - This module requires access to the REST API using OpenID Connect; the user connecting and the realm being used must have + the requisite access rights. In a default Keycloak installation, admin-cli and an admin user would work, as would a separate + realm definition with the scope tailored to your needs and a user having the expected roles. + - The names of module options are snake_cased versions of the camelCase options used by Keycloak. The Authorization Services + paths and payloads have not officially been documented by the Keycloak project. + U(https://www.puppeteers.net/blog/keycloak-authorization-services-rest-api-paths-and-payload/). +attributes: + check_mode: + support: full + diff_mode: + support: none + action_group: + version_added: 10.2.0 + +options: + state: + description: + - State of the custom policy. + - On V(present), the custom policy is created (or updated if it exists already). + - On V(absent), the custom policy is removed if it exists. + choices: ['present', 'absent'] + default: 'present' + type: str + name: + description: + - Name of the custom policy to create. 
+ type: str + required: true + policy_type: + description: + - The type of the policy. This must match the name of the custom policy deployed to the server. + - Multiple policies pointing to the same policy type can be created, but their names have to differ. + type: str + required: true + client_id: + description: + - The V(clientId) of the Keycloak client that should have the custom policy attached to it. + - This is usually a human-readable name of the Keycloak client. + type: str + required: true + realm: + description: + - The name of the Keycloak realm the Keycloak client is in. + type: str + required: true + +extends_documentation_fragment: + - community.general.keycloak + - community.general.keycloak.actiongroup_keycloak + - community.general.attributes + +author: + - Samuli Seppänen (@mattock) +""" + +EXAMPLES = r""" +- name: Manage Keycloak custom authorization policy + community.general.keycloak_authz_custom_policy: + name: OnlyOwner + state: present + policy_type: script-policy.js + client_id: myclient + realm: myrealm + auth_keycloak_url: http://localhost:8080/auth + auth_username: keycloak + auth_password: keycloak + auth_realm: master +""" + +RETURN = r""" +msg: + description: Message as to what action was taken. + returned: always + type: str + +end_state: + description: Representation of the custom policy after module execution. + returned: on success + type: dict + contains: + name: + description: Name of the custom policy. + type: str + returned: when I(state=present) + sample: file:delete + policy_type: + description: Type of custom policy. 
+ type: str + returned: when I(state=present) + sample: File delete +""" + +from ansible_collections.community.general.plugins.module_utils.identity.keycloak.keycloak import KeycloakAPI, \ + keycloak_argument_spec, get_token, KeycloakError +from ansible.module_utils.basic import AnsibleModule + + +def main(): + """ + Module execution + + :return: + """ + argument_spec = keycloak_argument_spec() + + meta_args = dict( + state=dict(type='str', default='present', + choices=['present', 'absent']), + name=dict(type='str', required=True), + policy_type=dict(type='str', required=True), + client_id=dict(type='str', required=True), + realm=dict(type='str', required=True) + ) + + argument_spec.update(meta_args) + + module = AnsibleModule(argument_spec=argument_spec, + supports_check_mode=True, + required_one_of=( + [['token', 'auth_realm', 'auth_username', 'auth_password', 'auth_client_id', 'auth_client_secret']]), + required_together=([['auth_username', 'auth_password']]), + required_by={'refresh_token': 'auth_realm'}, + ) + + result = dict(changed=False, msg='', end_state={}) + + # Obtain access token, initialize API + try: + connection_header = get_token(module.params) + except KeycloakError as e: + module.fail_json(msg=str(e)) + + kc = KeycloakAPI(module, connection_header) + + # Convenience variables + state = module.params.get('state') + name = module.params.get('name') + policy_type = module.params.get('policy_type') + client_id = module.params.get('client_id') + realm = module.params.get('realm') + + cid = kc.get_client_id(client_id, realm=realm) + if not cid: + module.fail_json(msg='Invalid client %s for realm %s' % + (client_id, realm)) + + before_authz_custom_policy = kc.get_authz_policy_by_name( + name=name, client_id=cid, realm=realm) + + desired_authz_custom_policy = {} + desired_authz_custom_policy['name'] = name + desired_authz_custom_policy['type'] = policy_type + + # Modifying existing custom policies is not possible + if before_authz_custom_policy and state 
== 'present': + result['msg'] = "Custom policy %s already exists" % (name) + result['changed'] = False + result['end_state'] = desired_authz_custom_policy + elif not before_authz_custom_policy and state == 'present': + if module.check_mode: + result['msg'] = "Would create custom policy %s" % (name) + else: + kc.create_authz_custom_policy( + payload=desired_authz_custom_policy, policy_type=policy_type, client_id=cid, realm=realm) + result['msg'] = "Custom policy %s created" % (name) + + result['changed'] = True + result['end_state'] = desired_authz_custom_policy + elif before_authz_custom_policy and state == 'absent': + if module.check_mode: + result['msg'] = "Would remove custom policy %s" % (name) + else: + kc.remove_authz_custom_policy( + policy_id=before_authz_custom_policy['id'], client_id=cid, realm=realm) + result['msg'] = "Custom policy %s removed" % (name) + + result['changed'] = True + result['end_state'] = {} + elif not before_authz_custom_policy and state == 'absent': + result['msg'] = "Custom policy %s does not exist" % (name) + result['changed'] = False + result['end_state'] = {} + + module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/keycloak_authz_permission.py b/plugins/modules/keycloak_authz_permission.py new file mode 100644 index 0000000000..b36db802cb --- /dev/null +++ b/plugins/modules/keycloak_authz_permission.py @@ -0,0 +1,429 @@ +#!/usr/bin/python + +# Copyright (c) 2017, Eike Frost +# Copyright (c) 2021, Christophe Gilles +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or +# https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later +from __future__ import annotations + +DOCUMENTATION = r""" +module: keycloak_authz_permission + +version_added: 7.2.0 + +short_description: Allows administration of Keycloak client authorization permissions using Keycloak API + +description: + - This module allows the administration of Keycloak client authorization 
permissions using the Keycloak REST API. Authorization + permissions are only available if a client has Authorization enabled. + - There are some peculiarities in JSON paths and payloads for authorization permissions. In particular POST and PUT operations + are targeted at permission endpoints, whereas GET requests go to policies endpoint. To make matters more interesting the + JSON responses from GET requests return data in a different format than what is expected for POST and PUT. The end result + is that it is not possible to detect changes to things like policies, scopes or resources - at least not without a large + number of additional API calls. Therefore this module always updates authorization permissions instead of attempting to + determine if changes are truly needed. + - This module requires access to the REST API using OpenID Connect; the user connecting and the realm being used must have + the requisite access rights. In a default Keycloak installation, admin-cli and an admin user would work, as would a separate + realm definition with the scope tailored to your needs and a user having the expected roles. + - The names of module options are snake_cased versions of the camelCase options used by Keycloak. The Authorization Services + paths and payloads have not officially been documented by the Keycloak project. + U(https://www.puppeteers.net/blog/keycloak-authorization-services-rest-api-paths-and-payload/). +attributes: + check_mode: + support: full + diff_mode: + support: none + action_group: + version_added: 10.2.0 + +options: + state: + description: + - State of the authorization permission. + - On V(present), the authorization permission is created (or updated if it exists already). + - On V(absent), the authorization permission is removed if it exists. + choices: ['present', 'absent'] + default: 'present' + type: str + name: + description: + - Name of the authorization permission to create. 
+ type: str + required: true + description: + description: + - The description of the authorization permission. + type: str + required: false + permission_type: + description: + - The type of authorization permission. + - On V(scope) create a scope-based permission. + - On V(resource) create a resource-based permission. + type: str + required: true + choices: + - resource + - scope + decision_strategy: + description: + - The decision strategy to use with this permission. + type: str + default: UNANIMOUS + required: false + choices: + - UNANIMOUS + - AFFIRMATIVE + - CONSENSUS + resources: + description: + - Resource names to attach to this permission. + - Scope-based permissions can only include one resource. + - Resource-based permissions can include multiple resources. + type: list + elements: str + default: [] + required: false + scopes: + description: + - Scope names to attach to this permission. + - Resource-based permissions cannot have scopes attached to them. + type: list + elements: str + default: [] + required: false + policies: + description: + - Policy names to attach to this permission. + type: list + elements: str + default: [] + required: false + client_id: + description: + - The clientId of the keycloak client that should have the authorization scope. + - This is usually a human-readable name of the Keycloak client. + type: str + required: true + realm: + description: + - The name of the Keycloak realm the Keycloak client is in. 
+ type: str + required: true + +extends_documentation_fragment: + - community.general.keycloak + - community.general.keycloak.actiongroup_keycloak + - community.general.attributes + +author: + - Samuli Seppänen (@mattock) +""" + +EXAMPLES = r""" +- name: Manage scope-based Keycloak authorization permission + community.general.keycloak_authz_permission: + name: ScopePermission + state: present + description: Scope permission + permission_type: scope + scopes: + - file:delete + policies: + - Default Policy + client_id: myclient + realm: myrealm + auth_keycloak_url: http://localhost:8080/auth + auth_username: keycloak + auth_password: keycloak + auth_realm: master + +- name: Manage resource-based Keycloak authorization permission + community.general.keycloak_authz_permission: + name: ResourcePermission + state: present + description: Resource permission + permission_type: resource + resources: + - Default Resource + policies: + - Default Policy + client_id: myclient + realm: myrealm + auth_keycloak_url: http://localhost:8080/auth + auth_username: keycloak + auth_password: keycloak + auth_realm: master +""" + +RETURN = r""" +msg: + description: Message as to what action was taken. + returned: always + type: str + +end_state: + description: Representation of the authorization permission after module execution. + returned: on success + type: complex + contains: + id: + description: ID of the authorization permission. + type: str + returned: when O(state=present) + sample: 9da05cd2-b273-4354-bbd8-0c133918a454 + name: + description: Name of the authorization permission. + type: str + returned: when O(state=present) + sample: ResourcePermission + description: + description: Description of the authorization permission. + type: str + returned: when O(state=present) + sample: Resource Permission + type: + description: Type of the authorization permission. 
+ type: str + returned: when O(state=present) + sample: resource + decisionStrategy: + description: The decision strategy to use. + type: str + returned: when O(state=present) + sample: UNANIMOUS + logic: + description: The logic used for the permission (part of the payload, but has a fixed value). + type: str + returned: when O(state=present) + sample: POSITIVE + resources: + description: IDs of resources attached to this permission. + type: list + returned: when O(state=present) + sample: + - 49e052ff-100d-4b79-a9dd-52669ed3c11d + scopes: + description: IDs of scopes attached to this permission. + type: list + returned: when O(state=present) + sample: + - 9da05cd2-b273-4354-bbd8-0c133918a454 + policies: + description: IDs of policies attached to this permission. + type: list + returned: when O(state=present) + sample: + - 9da05cd2-b273-4354-bbd8-0c133918a454 +""" + +from ansible_collections.community.general.plugins.module_utils.identity.keycloak.keycloak import KeycloakAPI, \ + keycloak_argument_spec, get_token, KeycloakError +from ansible.module_utils.basic import AnsibleModule + + +def main(): + """ + Module execution + + :return: + """ + argument_spec = keycloak_argument_spec() + + meta_args = dict( + state=dict(type='str', default='present', + choices=['present', 'absent']), + name=dict(type='str', required=True), + description=dict(type='str'), + permission_type=dict(type='str', choices=['scope', 'resource'], required=True), + decision_strategy=dict(type='str', default='UNANIMOUS', + choices=['UNANIMOUS', 'AFFIRMATIVE', 'CONSENSUS']), + resources=dict(type='list', elements='str', default=[]), + scopes=dict(type='list', elements='str', default=[]), + policies=dict(type='list', elements='str', default=[]), + client_id=dict(type='str', required=True), + realm=dict(type='str', required=True) + ) + + argument_spec.update(meta_args) + + module = AnsibleModule(argument_spec=argument_spec, + supports_check_mode=True, + required_one_of=( + [['token', 'auth_realm', 
'auth_username', 'auth_password', 'auth_client_id', 'auth_client_secret']]), + required_together=([['auth_username', 'auth_password']]), + required_by={'refresh_token': 'auth_realm'}, + ) + + # Convenience variables + state = module.params.get('state') + name = module.params.get('name') + description = module.params.get('description') + permission_type = module.params.get('permission_type') + decision_strategy = module.params.get('decision_strategy') + realm = module.params.get('realm') + client_id = module.params.get('client_id') + realm = module.params.get('realm') + resources = module.params.get('resources') + scopes = module.params.get('scopes') + policies = module.params.get('policies') + + if permission_type == 'scope' and state == 'present': + if scopes == []: + module.fail_json(msg='Scopes need to be defined when permission type is set to scope!') + if len(resources) > 1: + module.fail_json(msg='Only one resource can be defined for a scope permission!') + + if permission_type == 'resource' and state == 'present': + if resources == []: + module.fail_json(msg='A resource needs to be defined when permission type is set to resource!') + if scopes != []: + module.fail_json(msg='Scopes cannot be defined when permission type is set to resource!') + + result = dict(changed=False, msg='', end_state={}) + + # Obtain access token, initialize API + try: + connection_header = get_token(module.params) + except KeycloakError as e: + module.fail_json(msg=str(e)) + + kc = KeycloakAPI(module, connection_header) + + # Get id of the client based on client_id + cid = kc.get_client_id(client_id, realm=realm) + if not cid: + module.fail_json(msg='Invalid client %s for realm %s' % + (client_id, realm)) + + # Get current state of the permission using its name as the search + # filter. This returns False if it is not found. + permission = kc.get_authz_permission_by_name( + name=name, client_id=cid, realm=realm) + + # Generate a JSON payload for Keycloak Admin API. 
This is needed for + # "create" and "update" operations. + payload = {} + payload['name'] = name + payload['description'] = description + payload['type'] = permission_type + payload['decisionStrategy'] = decision_strategy + payload['logic'] = 'POSITIVE' + payload['scopes'] = [] + payload['resources'] = [] + payload['policies'] = [] + + if permission_type == 'scope': + # Add the resource id, if any, to the payload. While the data type is a + # list, it is only possible to have one entry in it based on what Keycloak + # Admin Console does. + r = False + resource_scopes = [] + + if resources: + r = kc.get_authz_resource_by_name(resources[0], cid, realm) + if not r: + module.fail_json(msg='Unable to find authorization resource with name %s for client %s in realm %s' % (resources[0], cid, realm)) + else: + payload['resources'].append(r['_id']) + + for rs in r['scopes']: + resource_scopes.append(rs['id']) + + # Generate a list of scope ids based on scope names. Fail if the + # defined resource does not include all those scopes. + for scope in scopes: + s = kc.get_authz_authorization_scope_by_name(scope, cid, realm) + if r and not s['id'] in resource_scopes: + module.fail_json(msg='Resource %s does not include scope %s for client %s in realm %s' % (resources[0], scope, client_id, realm)) + else: + payload['scopes'].append(s['id']) + + elif permission_type == 'resource': + if resources: + for resource in resources: + r = kc.get_authz_resource_by_name(resource, cid, realm) + if not r: + module.fail_json(msg='Unable to find authorization resource with name %s for client %s in realm %s' % (resource, cid, realm)) + else: + payload['resources'].append(r['_id']) + + # Add policy ids, if any, to the payload. 
+ if policies: + for policy in policies: + p = kc.get_authz_policy_by_name(policy, cid, realm) + + if p: + payload['policies'].append(p['id']) + else: + module.fail_json(msg='Unable to find authorization policy with name %s for client %s in realm %s' % (policy, client_id, realm)) + + # Add "id" to payload for update operations + if permission: + payload['id'] = permission['id'] + + # Handle the special case where the user attempts to change an already + # existing permission's type - something that can't be done without a + # full delete -> (re)create cycle. + if permission['type'] != payload['type']: + module.fail_json(msg='Modifying the type of permission (scope/resource) is not supported: \ + permission %s of client %s in realm %s unchanged' % (permission['id'], cid, realm)) + + # Updating an authorization permission is tricky for several reasons. + # Firstly, the current permission is retrieved using a _policy_ endpoint, + # not from a permission endpoint. Also, the data that is returned is in a + # different format than what is expected by the payload. So, comparing the + # current state attribute by attribute to the payload is not possible. For + # example the data contains a JSON object "config" which may contain the + # authorization type, but which is no required in the payload. Moreover, + # information about resources, scopes and policies is _not_ present in the + # data. So, there is no way to determine if any of those fields have + # changed. Therefore the best options we have are + # + # a) Always apply the payload without checking the current state + # b) Refuse to make any changes to any settings (only support create and delete) + # + # The approach taken here is a). + # + if permission and state == 'present': + if module.check_mode: + result['msg'] = 'Notice: unable to check current resources, scopes and policies for permission. \ + Would apply desired state without checking the current state.' 
+ else: + kc.update_authz_permission(payload=payload, permission_type=permission_type, id=permission['id'], client_id=cid, realm=realm) + result['msg'] = 'Notice: unable to check current resources, scopes and policies for permission. \ + Applying desired state without checking the current state.' + + # Assume that something changed, although we don't know if that is the case. + result['changed'] = True + result['end_state'] = payload + elif not permission and state == 'present': + if module.check_mode: + result['msg'] = 'Would create permission' + else: + kc.create_authz_permission(payload=payload, permission_type=permission_type, client_id=cid, realm=realm) + result['msg'] = 'Permission created' + + result['changed'] = True + result['end_state'] = payload + elif permission and state == 'absent': + if module.check_mode: + result['msg'] = 'Would remove permission' + else: + kc.remove_authz_permission(id=permission['id'], client_id=cid, realm=realm) + result['msg'] = 'Permission removed' + + result['changed'] = True + + elif not permission and state == 'absent': + result['changed'] = False + else: + module.fail_json(msg='Unable to determine what to do with permission %s of client %s in realm %s' % ( + name, client_id, realm)) + + module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/keycloak_authz_permission_info.py b/plugins/modules/keycloak_authz_permission_info.py new file mode 100644 index 0000000000..c60da778ed --- /dev/null +++ b/plugins/modules/keycloak_authz_permission_info.py @@ -0,0 +1,172 @@ +#!/usr/bin/python + +# Copyright (c) 2017, Eike Frost +# Copyright (c) 2021, Christophe Gilles +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or +# https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later +from __future__ import annotations + +DOCUMENTATION = r""" +module: keycloak_authz_permission_info + +version_added: 7.2.0 + +short_description: Query Keycloak client 
authorization permissions information + +description: + - This module allows querying information about Keycloak client authorization permissions from the resources endpoint using + the Keycloak REST API. Authorization permissions are only available if a client has Authorization enabled. + - This module requires access to the REST API using OpenID Connect; the user connecting and the realm being used must have + the requisite access rights. In a default Keycloak installation, admin-cli and an admin user would work, as would a separate + realm definition with the scope tailored to your needs and a user having the expected roles. + - The names of module options are snake_cased versions of the camelCase options used by Keycloak. The Authorization Services + paths and payloads have not officially been documented by the Keycloak project. + U(https://www.puppeteers.net/blog/keycloak-authorization-services-rest-api-paths-and-payload/). +attributes: + action_group: + version_added: 10.2.0 + +options: + name: + description: + - Name of the authorization permission to query. + type: str + required: true + client_id: + description: + - The clientId of the keycloak client that should have the authorization scope. + - This is usually a human-readable name of the Keycloak client. + type: str + required: true + realm: + description: + - The name of the Keycloak realm the Keycloak client is in. 
+ type: str + required: true + +extends_documentation_fragment: + - community.general.keycloak + - community.general.keycloak.actiongroup_keycloak + - community.general.attributes + - community.general.attributes.info_module + +author: + - Samuli Seppänen (@mattock) +""" + +EXAMPLES = r""" +- name: Query Keycloak authorization permission + community.general.keycloak_authz_permission_info: + name: ScopePermission + client_id: myclient + realm: myrealm + auth_keycloak_url: http://localhost:8080/auth + auth_username: keycloak + auth_password: keycloak + auth_realm: master +""" + +RETURN = r""" +msg: + description: Message as to what action was taken. + returned: always + type: str + +queried_state: + description: State of the resource (a policy) as seen by Keycloak. + returned: on success + type: complex + contains: + id: + description: ID of the authorization permission. + type: str + sample: 9da05cd2-b273-4354-bbd8-0c133918a454 + name: + description: Name of the authorization permission. + type: str + sample: ResourcePermission + description: + description: Description of the authorization permission. + type: str + sample: Resource Permission + type: + description: Type of the authorization permission. + type: str + sample: resource + decisionStrategy: + description: The decision strategy. + type: str + sample: UNANIMOUS + logic: + description: The logic used for the permission (part of the payload, but has a fixed value). + type: str + sample: POSITIVE + config: + description: Configuration of the permission (empty in all observed cases). 
+ type: dict + sample: {} +""" + +from ansible_collections.community.general.plugins.module_utils.identity.keycloak.keycloak import KeycloakAPI, \ + keycloak_argument_spec, get_token, KeycloakError +from ansible.module_utils.basic import AnsibleModule + + +def main(): + """ + Module execution + + :return: + """ + argument_spec = keycloak_argument_spec() + + meta_args = dict( + name=dict(type='str', required=True), + client_id=dict(type='str', required=True), + realm=dict(type='str', required=True) + ) + + argument_spec.update(meta_args) + + module = AnsibleModule(argument_spec=argument_spec, + supports_check_mode=True, + required_one_of=( + [['token', 'auth_realm', 'auth_username', 'auth_password', 'auth_client_id', 'auth_client_secret']]), + required_together=([['auth_username', 'auth_password']]), + required_by={'refresh_token': 'auth_realm'}, + ) + + # Convenience variables + name = module.params.get('name') + client_id = module.params.get('client_id') + realm = module.params.get('realm') + + result = dict(changed=False, msg='', queried_state={}) + + # Obtain access token, initialize API + try: + connection_header = get_token(module.params) + except KeycloakError as e: + module.fail_json(msg=str(e)) + + kc = KeycloakAPI(module, connection_header) + + # Get id of the client based on client_id + cid = kc.get_client_id(client_id, realm=realm) + if not cid: + module.fail_json(msg='Invalid client %s for realm %s' % + (client_id, realm)) + + # Get current state of the permission using its name as the search + # filter. This returns False if it is not found. 
+ permission = kc.get_authz_permission_by_name( + name=name, client_id=cid, realm=realm) + + result['queried_state'] = permission + + module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/keycloak_client.py b/plugins/modules/keycloak_client.py new file mode 100644 index 0000000000..ed13b106e2 --- /dev/null +++ b/plugins/modules/keycloak_client.py @@ -0,0 +1,1425 @@ +#!/usr/bin/python + +# Copyright (c) 2017, Eike Frost +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + +DOCUMENTATION = r""" +module: keycloak_client + +short_description: Allows administration of Keycloak clients using Keycloak API + + +description: + - This module allows the administration of Keycloak clients using the Keycloak REST API. It requires access to the REST + API using OpenID Connect; the user connecting and the client being used must have the requisite access rights. In a default + Keycloak installation, admin-cli and an admin user would work, as would a separate client definition with the scope tailored + to your needs and a user having the expected roles. + - The names of module options are snake_cased versions of the camelCase ones found in the Keycloak API and its documentation + at U(https://www.keycloak.org/docs-api/8.0/rest-api/index.html). Aliases are provided so camelCased versions can be used + as well. + - The Keycloak API does not always sanity check inputs, for example you can set SAML-specific settings on an OpenID Connect + client for instance and the other way around. Be careful. If you do not specify a setting, usually a sensible default + is chosen. +attributes: + check_mode: + support: full + diff_mode: + support: full + action_group: + version_added: 10.2.0 + +options: + state: + description: + - State of the client. 
+ - On V(present), the client is created (or updated if it exists already). + - On V(absent), the client is removed if it exists. + choices: ['present', 'absent'] + default: 'present' + type: str + + realm: + description: + - The realm to create the client in. + type: str + default: master + + client_id: + description: + - Client ID of client to be worked on. This is usually an alphanumeric name chosen by you. Either this or O(id) is required. + If you specify both, O(id) takes precedence. This is C(clientId) in the Keycloak REST API. + aliases: + - clientId + type: str + + id: + description: + - ID of client to be worked on. This is usually a UUID. Either this or O(client_id) is required. If you specify both, + this takes precedence. + type: str + + name: + description: + - Name of the client (this is not the same as O(client_id)). + type: str + + description: + description: + - Description of the client in Keycloak. + type: str + + root_url: + description: + - Root URL appended to relative URLs for this client. This is C(rootUrl) in the Keycloak REST API. + aliases: + - rootUrl + type: str + + admin_url: + description: + - URL to the admin interface of the client. This is C(adminUrl) in the Keycloak REST API. + aliases: + - adminUrl + type: str + + base_url: + description: + - Default URL to use when the auth server needs to redirect or link back to the client. This is C(baseUrl) in the Keycloak + REST API. + aliases: + - baseUrl + type: str + + enabled: + description: + - Is this client enabled or not? + type: bool + + client_authenticator_type: + description: + - How do clients authenticate with the auth server? Either V(client-secret), V(client-jwt), or V(client-x509) can be + chosen. When using V(client-secret), the module parameter O(secret) can set it, for V(client-jwt), you can use the + keys C(use.jwks.url), C(jwks.url), and C(jwt.credential.certificate) in the O(attributes) module parameter to configure + its behavior. 
For V(client-x509) you can use the keys C(x509.allow.regex.pattern.comparison) and C(x509.subjectdn) + in the O(attributes) module parameter to configure which certificate(s) to accept. + - This is C(clientAuthenticatorType) in the Keycloak REST API. + choices: ['client-secret', 'client-jwt', 'client-x509'] + aliases: + - clientAuthenticatorType + type: str + + secret: + description: + - When using O(client_authenticator_type=client-secret) (the default), you can specify a secret here (otherwise one + is generated if it does not exist). If changing this secret, the module does not register a change currently (but the + changed secret is saved). + type: str + + registration_access_token: + description: + - The registration access token provides access for clients to the client registration service. This is C(registrationAccessToken) + in the Keycloak REST API. + aliases: + - registrationAccessToken + type: str + + default_roles: + description: + - List of default roles for this client. If the client roles referenced do not exist yet, they are created. This is + C(defaultRoles) in the Keycloak REST API. + aliases: + - defaultRoles + type: list + elements: str + + redirect_uris: + description: + - Acceptable redirect URIs for this client. This is C(redirectUris) in the Keycloak REST API. + aliases: + - redirectUris + type: list + elements: str + + web_origins: + description: + - List of allowed CORS origins. This is C(webOrigins) in the Keycloak REST API. + aliases: + - webOrigins + type: list + elements: str + + not_before: + description: + - Revoke any tokens issued before this date for this client (this is a UNIX timestamp). This is C(notBefore) in the + Keycloak REST API. + type: int + aliases: + - notBefore + + bearer_only: + description: + - The access type of this client is bearer-only. This is C(bearerOnly) in the Keycloak REST API. 
+ aliases: + - bearerOnly + type: bool + + consent_required: + description: + - If enabled, users have to consent to client access. This is C(consentRequired) in the Keycloak REST API. + aliases: + - consentRequired + type: bool + + standard_flow_enabled: + description: + - Enable standard flow for this client or not (OpenID connect). This is C(standardFlowEnabled) in the Keycloak REST + API. + aliases: + - standardFlowEnabled + type: bool + + implicit_flow_enabled: + description: + - Enable implicit flow for this client or not (OpenID connect). This is C(implicitFlowEnabled) in the Keycloak REST + API. + aliases: + - implicitFlowEnabled + type: bool + + direct_access_grants_enabled: + description: + - Are direct access grants enabled for this client or not (OpenID connect). This is C(directAccessGrantsEnabled) in + the Keycloak REST API. + aliases: + - directAccessGrantsEnabled + type: bool + + service_accounts_enabled: + description: + - Are service accounts enabled for this client or not (OpenID connect). This is C(serviceAccountsEnabled) in the Keycloak + REST API. + aliases: + - serviceAccountsEnabled + type: bool + + authorization_services_enabled: + description: + - Are authorization services enabled for this client or not (OpenID connect). This is C(authorizationServicesEnabled) + in the Keycloak REST API. + aliases: + - authorizationServicesEnabled + type: bool + + public_client: + description: + - Is the access type for this client public or not. This is C(publicClient) in the Keycloak REST API. + aliases: + - publicClient + type: bool + + frontchannel_logout: + description: + - Is frontchannel logout enabled for this client or not. This is C(frontchannelLogout) in the Keycloak REST API. + aliases: + - frontchannelLogout + type: bool + + protocol: + description: + - Type of client. + - At creation only, default value is V(openid-connect) if O(protocol) is omitted. + - The V(docker-v2) value was added in community.general 8.6.0. 
+ type: str + choices: ['openid-connect', 'saml', 'docker-v2'] + + full_scope_allowed: + description: + - Is the "Full Scope Allowed" feature set for this client or not. This is C(fullScopeAllowed) in the Keycloak REST API. + aliases: + - fullScopeAllowed + type: bool + + node_re_registration_timeout: + description: + - Cluster node re-registration timeout for this client. This is C(nodeReRegistrationTimeout) in the Keycloak REST API. + type: int + aliases: + - nodeReRegistrationTimeout + + registered_nodes: + description: + - Dict of registered cluster nodes (with C(nodename) as the key and last registration time as the value). This is C(registeredNodes) + in the Keycloak REST API. + type: dict + aliases: + - registeredNodes + + client_template: + description: + - Client template to use for this client. If it does not exist this field is silently dropped. This is C(clientTemplate) + in the Keycloak REST API. + type: str + aliases: + - clientTemplate + + use_template_config: + description: + - Whether or not to use configuration from the O(client_template). This is C(useTemplateConfig) in the Keycloak REST + API. + aliases: + - useTemplateConfig + type: bool + + use_template_scope: + description: + - Whether or not to use scope configuration from the O(client_template). This is C(useTemplateScope) in the Keycloak + REST API. + aliases: + - useTemplateScope + type: bool + + use_template_mappers: + description: + - Whether or not to use mapper configuration from the O(client_template). This is C(useTemplateMappers) in the Keycloak + REST API. + aliases: + - useTemplateMappers + type: bool + + always_display_in_console: + description: + - Whether or not to display this client in account console, even if the user does not have an active session. + aliases: + - alwaysDisplayInConsole + type: bool + version_added: 4.7.0 + + surrogate_auth_required: + description: + - Whether or not surrogate auth is required. This is C(surrogateAuthRequired) in the Keycloak REST API. 
+ aliases: + - surrogateAuthRequired + type: bool + + authorization_settings: + description: + - A data structure defining the authorization settings for this client. For reference, please see the Keycloak API docs + at U(https://www.keycloak.org/docs-api/8.0/rest-api/index.html#_resourceserverrepresentation). This is C(authorizationSettings) + in the Keycloak REST API. + type: dict + aliases: + - authorizationSettings + + authentication_flow_binding_overrides: + description: + - Override realm authentication flow bindings. + type: dict + suboptions: + browser: + description: + - Flow ID of the browser authentication flow. + - O(authentication_flow_binding_overrides.browser) and O(authentication_flow_binding_overrides.browser_name) are + mutually exclusive. + type: str + + browser_name: + description: + - Flow name of the browser authentication flow. + - O(authentication_flow_binding_overrides.browser) and O(authentication_flow_binding_overrides.browser_name) are + mutually exclusive. + aliases: + - browserName + type: str + version_added: 9.1.0 + + direct_grant: + description: + - Flow ID of the direct grant authentication flow. + - O(authentication_flow_binding_overrides.direct_grant) and O(authentication_flow_binding_overrides.direct_grant_name) + are mutually exclusive. + aliases: + - directGrant + type: str + + direct_grant_name: + description: + - Flow name of the direct grant authentication flow. + - O(authentication_flow_binding_overrides.direct_grant) and O(authentication_flow_binding_overrides.direct_grant_name) + are mutually exclusive. + aliases: + - directGrantName + type: str + version_added: 9.1.0 + aliases: + - authenticationFlowBindingOverrides + version_added: 3.4.0 + + client_scopes_behavior: + description: + - Determine how O(default_client_scopes) and O(optional_client_scopes) behave when updating an existing client. + - 'V(ignore): Do not change the client scopes of an existing client. This is the default for backward compatibility.' 
+ - 'V(patch): Add missing scopes, do not remove any missing scopes.' + - 'V(idempotent): Make the client scopes exactly as specified, adding and removing scopes as needed.' + aliases: + - clientScopesBehavior + type: str + choices: ['ignore', 'patch', 'idempotent'] + default: 'ignore' + version_added: 11.4.0 + + default_client_scopes: + description: + - List of default client scopes. + - See O(client_scopes_behavior) for how this behaves when updating an existing client. + aliases: + - defaultClientScopes + type: list + elements: str + version_added: 4.7.0 + + optional_client_scopes: + description: + - List of optional client scopes. + - See O(client_scopes_behavior) for how this behaves when updating an existing client. + aliases: + - optionalClientScopes + type: list + elements: str + version_added: 4.7.0 + + protocol_mappers: + description: + - A list of dicts defining protocol mappers for this client. This is C(protocolMappers) in the Keycloak REST API. + aliases: + - protocolMappers + type: list + elements: dict + suboptions: + consentRequired: + description: + - Specifies whether a user needs to provide consent to a client for this mapper to be active. + type: bool + + consentText: + description: + - The human-readable name of the consent the user is presented to accept. + type: str + + id: + description: + - Usually a UUID specifying the internal ID of this protocol mapper instance. + type: str + + name: + description: + - The name of this protocol mapper. + type: str + + protocol: + description: + - This specifies for which protocol this protocol mapper is active. + choices: ['openid-connect', 'saml', 'docker-v2'] + type: str + + protocolMapper: + description: + - 'The Keycloak-internal name of the type of this protocol-mapper. While an exhaustive list is impossible to provide + since this may be extended through SPIs by the user of Keycloak, by default Keycloak as of 3.4 ships with at least:' + - V(docker-v2-allow-all-mapper). + - V(oidc-address-mapper). 
+ - V(oidc-full-name-mapper). + - V(oidc-group-membership-mapper). + - V(oidc-hardcoded-claim-mapper). + - V(oidc-hardcoded-role-mapper). + - V(oidc-role-name-mapper). + - V(oidc-script-based-protocol-mapper). + - V(oidc-sha256-pairwise-sub-mapper). + - V(oidc-usermodel-attribute-mapper). + - V(oidc-usermodel-client-role-mapper). + - V(oidc-usermodel-property-mapper). + - V(oidc-usermodel-realm-role-mapper). + - V(oidc-usersessionmodel-note-mapper). + - V(saml-group-membership-mapper). + - V(saml-hardcode-attribute-mapper). + - V(saml-hardcode-role-mapper). + - V(saml-role-list-mapper). + - V(saml-role-name-mapper). + - V(saml-user-attribute-mapper). + - V(saml-user-property-mapper). + - V(saml-user-session-note-mapper). + - An exhaustive list of available mappers on your installation can be obtained on the admin console by going to + Server Info -> Providers and looking under 'protocol-mapper'. + type: str + + config: + description: + - Dict specifying the configuration options for the protocol mapper; the contents differ depending on the value + of O(protocol_mappers[].protocolMapper) and are not documented other than by the source of the mappers and its + parent class(es). An example is given below. It is easiest to obtain valid config values by dumping an already-existing + protocol mapper configuration through check-mode in the RV(existing) field. + type: dict + + attributes: + description: + - A dict of further attributes for this client. This can contain various configuration settings; an example is given + in the examples section. While an exhaustive list of permissible options is not available; possible options as of + Keycloak 3.4 are listed below. The Keycloak API does not validate whether a given option is appropriate for the protocol + used; if specified anyway, Keycloak does not use it. 
+ type: dict + suboptions: + saml.authnstatement: + description: + - For SAML clients, boolean specifying whether or not a statement containing method and timestamp should be included + in the login response. + saml.client.signature: + description: + - For SAML clients, boolean specifying whether a client signature is required and validated. + saml.encrypt: + description: + - Boolean specifying whether SAML assertions should be encrypted with the client's public key. + saml.force.post.binding: + description: + - For SAML clients, boolean specifying whether always to use POST binding for responses. + saml.onetimeuse.condition: + description: + - For SAML clients, boolean specifying whether a OneTimeUse condition should be included in login responses. + saml.server.signature: + description: + - Boolean specifying whether SAML documents should be signed by the realm. + saml.server.signature.keyinfo.ext: + description: + - For SAML clients, boolean specifying whether REDIRECT signing key lookup should be optimized through inclusion + of the signing key ID in the SAML Extensions element. + saml.signature.algorithm: + description: + - Signature algorithm used to sign SAML documents. One of V(RSA_SHA256), V(RSA_SHA1), V(RSA_SHA512), or V(DSA_SHA1). + saml.signing.certificate: + description: + - SAML signing key certificate, base64-encoded. + saml.signing.private.key: + description: + - SAML signing key private key, base64-encoded. + saml_assertion_consumer_url_post: + description: + - SAML POST Binding URL for the client's assertion consumer service (login responses). + saml_assertion_consumer_url_redirect: + description: + - SAML Redirect Binding URL for the client's assertion consumer service (login responses). + saml_force_name_id_format: + description: + - For SAML clients, Boolean specifying whether to ignore requested NameID subject format and using the configured + one instead. 
+ saml_name_id_format: + description: + - For SAML clients, the NameID format to use (one of V(username), V(email), V(transient), or V(persistent)). + saml_signature_canonicalization_method: + description: + - SAML signature canonicalization method. This is one of four values, namely V(http://www.w3.org/2001/10/xml-exc-c14n#) + for EXCLUSIVE, V(http://www.w3.org/2001/10/xml-exc-c14n#WithComments) for EXCLUSIVE_WITH_COMMENTS, + V(http://www.w3.org/TR/2001/REC-xml-c14n-20010315) + for INCLUSIVE, and V(http://www.w3.org/TR/2001/REC-xml-c14n-20010315#WithComments) for INCLUSIVE_WITH_COMMENTS. + saml_single_logout_service_url_post: + description: + - SAML POST binding URL for the client's single logout service. + saml_single_logout_service_url_redirect: + description: + - SAML redirect binding URL for the client's single logout service. + user.info.response.signature.alg: + description: + - For OpenID-Connect clients, JWA algorithm for signed UserInfo-endpoint responses. One of V(RS256) or V(unsigned). + request.object.signature.alg: + description: + - For OpenID-Connect clients, JWA algorithm which the client needs to use when sending OIDC request object. One + of V(any), V(none), V(RS256). + use.jwks.url: + description: + - For OpenID-Connect clients, boolean specifying whether to use a JWKS URL to obtain client public keys. + jwks.url: + description: + - For OpenID-Connect clients, URL where client keys in JWK are stored. + jwt.credential.certificate: + description: + - For OpenID-Connect clients, client certificate for validating JWT issued by client and signed by its key, base64-encoded. + x509.subjectdn: + description: + - For OpenID-Connect clients, subject which is used to authenticate the client. + type: str + version_added: 9.5.0 + + x509.allow.regex.pattern.comparison: + description: + - For OpenID-Connect clients, boolean specifying whether to allow C(x509.subjectdn) as regular expression. 
+ type: bool + version_added: 9.5.0 + +extends_documentation_fragment: + - community.general.keycloak + - community.general.keycloak.actiongroup_keycloak + - community.general.attributes + +author: + - Eike Frost (@eikef) +""" + +EXAMPLES = r""" +- name: Create or update Keycloak client (minimal example), authentication with credentials + community.general.keycloak_client: + auth_keycloak_url: https://auth.example.com/auth + auth_realm: master + auth_username: USERNAME + auth_password: PASSWORD + client_id: test + state: present + delegate_to: localhost + + +- name: Create or update Keycloak client (minimal example), authentication with token + community.general.keycloak_client: + auth_client_id: admin-cli + auth_keycloak_url: https://auth.example.com/auth + auth_realm: master + token: TOKEN + client_id: test + state: present + delegate_to: localhost + + +- name: Delete a Keycloak client + community.general.keycloak_client: + auth_client_id: admin-cli + auth_keycloak_url: https://auth.example.com/auth + auth_realm: master + auth_username: USERNAME + auth_password: PASSWORD + client_id: test + state: absent + delegate_to: localhost + + +- name: Create or update a Keycloak client (minimal example), with x509 authentication + community.general.keycloak_client: + auth_client_id: admin-cli + auth_keycloak_url: https://auth.example.com/auth + auth_realm: master + auth_username: USERNAME + auth_password: PASSWORD + realm: master + state: present + client_id: test + client_authenticator_type: client-x509 + attributes: + x509.subjectdn: "CN=client" + x509.allow.regex.pattern.comparison: false + + +- name: Create or update a Keycloak client (with all the bells and whistles) + community.general.keycloak_client: + auth_client_id: admin-cli + auth_keycloak_url: https://auth.example.com/auth + auth_realm: master + auth_username: USERNAME + auth_password: PASSWORD + state: present + realm: master + client_id: test + id: d8b127a3-31f6-44c8-a7e4-4ab9a3e78d95 + name: this_is_a_test 
+ description: Description of this wonderful client + root_url: https://www.example.com/ + admin_url: https://www.example.com/admin_url + base_url: basepath + enabled: true + client_authenticator_type: client-secret + secret: REALLYWELLKEPTSECRET + redirect_uris: + - https://www.example.com/* + - http://localhost:8888/ + web_origins: + - https://www.example.com/* + not_before: 1507825725 + bearer_only: false + consent_required: false + standard_flow_enabled: true + implicit_flow_enabled: false + direct_access_grants_enabled: false + service_accounts_enabled: false + authorization_services_enabled: false + public_client: false + frontchannel_logout: false + protocol: openid-connect + full_scope_allowed: false + node_re_registration_timeout: -1 + client_template: test + use_template_config: false + use_template_scope: false + use_template_mappers: false + always_display_in_console: true + registered_nodes: + node01.example.com: 1507828202 + registration_access_token: eyJWT_TOKEN + surrogate_auth_required: false + default_roles: + - test01 + - test02 + authentication_flow_binding_overrides: + browser: 4c90336b-bf1d-4b87-916d-3677ba4e5fbb + protocol_mappers: + - config: + access.token.claim: true + claim.name: "family_name" + id.token.claim: true + jsonType.label: String + user.attribute: lastName + userinfo.token.claim: true + consentRequired: true + consentText: "${familyName}" + name: family name + protocol: openid-connect + protocolMapper: oidc-usermodel-property-mapper + - config: + attribute.name: Role + attribute.nameformat: Basic + single: false + consentRequired: false + name: role list + protocol: saml + protocolMapper: saml-role-list-mapper + attributes: + saml.authnstatement: true + saml.client.signature: true + saml.force.post.binding: true + saml.server.signature: true + saml.signature.algorithm: RSA_SHA256 + saml.signing.certificate: CERTIFICATEHERE + saml.signing.private.key: PRIVATEKEYHERE + saml_force_name_id_format: false + saml_name_id_format: 
username + saml_signature_canonicalization_method: "http://www.w3.org/2001/10/xml-exc-c14n#" + user.info.response.signature.alg: RS256 + request.object.signature.alg: RS256 + use.jwks.url: true + jwks.url: JWKS_URL_FOR_CLIENT_AUTH_JWT + jwt.credential.certificate: JWT_CREDENTIAL_CERTIFICATE_FOR_CLIENT_AUTH + delegate_to: localhost +""" + +RETURN = r""" +msg: + description: Message as to what action was taken. + returned: always + type: str + sample: "Client testclient has been updated" + +proposed: + description: Representation of proposed client. + returned: always + type: dict + sample: {"clientId": "test"} + +existing: + description: Representation of existing client (sample is truncated). + returned: always + type: dict + sample: + { + "adminUrl": "http://www.example.com/admin_url", + "attributes": { + "request.object.signature.alg": "RS256" + } + } + +end_state: + description: Representation of client after module execution (sample is truncated). + returned: on success + type: dict + sample: + { + "adminUrl": "http://www.example.com/admin_url", + "attributes": { + "request.object.signature.alg": "RS256" + } + } +""" + +from ansible_collections.community.general.plugins.module_utils.identity.keycloak.keycloak import KeycloakAPI, camel, \ + keycloak_argument_spec, get_token, KeycloakError +from ansible.module_utils.basic import AnsibleModule +import copy + + +PROTOCOL_OPENID_CONNECT = 'openid-connect' +PROTOCOL_SAML = 'saml' +PROTOCOL_DOCKER_V2 = 'docker-v2' +CLIENT_META_DATA = ['authorizationServicesEnabled'] + + +def normalise_scopes_for_behavior(desired_client, before_client, clientScopesBehavior): + """ + Normalize the desired and existing client scopes according to the specified behavior. + + This function adjusts the lists of default and optional client scopes in the desired client + configuration based on the selected behavior: + - 'ignore': The desired scopes are set to match the existing scopes. 
+ - 'patch': Any scopes present in the existing configuration but missing from the desired configuration + are appended to the desired scopes. + - 'idempotent': No modification is made; the desired scopes are used as-is. + + :param desired_client: + type: dict + description: The desired client configuration, including default and optional client scopes. + + :param before_client: + type: dict + description: The current client configuration, including default and optional client scopes. + + :param clientScopesBehavior: + type: str + description: The behavior mode for handling client scopes. Must be one of 'ignore', 'patch', or 'idempotent'. + + :return: + type: tuple + description: Returns a tuple of (desired_client, before_client) after normalization. + """ + desired_client = copy.deepcopy(desired_client) + before_client = copy.deepcopy(before_client) + if clientScopesBehavior == 'ignore': + desired_client['defaultClientScopes'] = copy.deepcopy(before_client['defaultClientScopes']) + desired_client['optionalClientScopes'] = copy.deepcopy(before_client['optionalClientScopes']) + elif clientScopesBehavior == 'patch': + for scope in before_client['defaultClientScopes']: + if scope not in desired_client['defaultClientScopes']: + desired_client['defaultClientScopes'].append(scope) + for scope in before_client['optionalClientScopes']: + if scope not in desired_client['optionalClientScopes']: + desired_client['optionalClientScopes'].append(scope) + + return desired_client, before_client + + +def check_optional_scopes_not_default(desired_client, clientScopesBehavior, module): + """ + Ensure that no client scope is assigned as both default and optional. + + This function checks the desired client configuration to verify that no scope is present + in both the default and optional client scopes. If such a conflict is found, the module + execution fails with an appropriate error message. 
+ + :param desired_client: + type: dict + description: The desired client configuration, including default and optional client scopes. + + :param clientScopesBehavior: + type: str + description: The behavior mode for handling client scopes. Must be one of 'ignore', 'patch', or 'idempotent'. + + :param module: + type: AnsibleModule + description: The Ansible module instance, used to fail execution if a conflict is detected. + + :return: + type: None + description: Returns None. Fails the module if a scope is both default and optional. + """ + if clientScopesBehavior == 'ignore': + return + for scope in desired_client['optionalClientScopes']: + if scope in desired_client['defaultClientScopes']: + module.fail_json(msg='Client scope %s cannot be both default and optional' % scope) + + +def normalise_cr(clientrep, remove_ids=False): + """ Re-sorts any properties where the order so that diff's is minimised, and adds default values where appropriate so that the + the change detection is more effective. + + :param clientrep: the clientrep dict to be sanitized + :param remove_ids: If set to true, then the unique ID's of objects is removed to make the diff and checks for changed + not alert when the ID's of objects are not usually known, (e.g. 
for protocol_mappers) + :return: normalised clientrep dict + """ + # Avoid the dict passed in to be modified + clientrep = copy.deepcopy(clientrep) + + if remove_ids: + clientrep.pop('id', None) + + if 'defaultClientScopes' in clientrep: + clientrep['defaultClientScopes'] = list(sorted(clientrep['defaultClientScopes'])) + else: + clientrep['defaultClientScopes'] = [] + + if 'optionalClientScopes' in clientrep: + clientrep['optionalClientScopes'] = list(sorted(clientrep['optionalClientScopes'])) + else: + clientrep['optionalClientScopes'] = [] + + if 'redirectUris' in clientrep: + clientrep['redirectUris'] = list(sorted(clientrep['redirectUris'])) + else: + clientrep['redirectUris'] = [] + + if 'protocolMappers' in clientrep: + clientrep['protocolMappers'] = sorted(clientrep['protocolMappers'], key=lambda x: (x.get('name'), x.get('protocol'), x.get('protocolMapper'))) + for mapper in clientrep['protocolMappers']: + if remove_ids: + mapper.pop('id', None) + + # Convert bool to string + if 'config' in mapper: + for key, value in mapper['config'].items(): + if isinstance(value, bool): + mapper['config'][key] = str(value).lower() + + # Set to a default value. 
+ mapper['consentRequired'] = mapper.get('consentRequired', False) + else: + clientrep['protocolMappers'] = [] + + if 'attributes' in clientrep: + for key, value in clientrep['attributes'].items(): + if isinstance(value, bool): + clientrep['attributes'][key] = str(value).lower() + clientrep['attributes'].pop('client.secret.creation.time', None) + else: + clientrep['attributes'] = [] + + if 'webOrigins' in clientrep: + clientrep['webOrigins'] = sorted(clientrep['webOrigins']) + else: + clientrep['webOrigins'] = [] + + if 'redirectUris' in clientrep: + clientrep['redirectUris'] = sorted(clientrep['redirectUris']) + else: + clientrep['redirectUris'] = [] + + return clientrep + + +def normalize_kc_resp(clientrep): + # kc drops the variable 'authorizationServicesEnabled' if set to false + # to minimize diff/changes we set it to false if not set by kc + if clientrep and 'authorizationServicesEnabled' not in clientrep: + clientrep['authorizationServicesEnabled'] = False + + +def sanitize_cr(clientrep): + """ Removes probably sensitive details from a client representation. + + :param clientrep: the clientrep dict to be sanitized + :return: sanitized clientrep dict + """ + result = copy.deepcopy(clientrep) + if 'secret' in result: + result['secret'] = 'no_log' + if 'attributes' in result: + attributes = result['attributes'] + if isinstance(attributes, dict): + if 'saml.signing.private.key' in attributes: + attributes['saml.signing.private.key'] = 'no_log' + if 'saml.encryption.private.key' in attributes: + attributes['saml.encryption.private.key'] = 'no_log' + return normalise_cr(result) + + +def get_authentication_flow_id(flow_name, realm, kc): + """ Get the authentication flow ID based on the flow name, realm, and Keycloak client. + + Args: + flow_name (str): The name of the authentication flow. + realm (str): The name of the realm. + kc (KeycloakClient): The Keycloak client instance. + + Returns: + str: The ID of the authentication flow. 
+ + Raises: + KeycloakAPIException: If the authentication flow with the given name is not found in the realm. + """ + flow = kc.get_authentication_flow_by_alias(flow_name, realm) + if flow: + return flow["id"] + kc.module.fail_json(msg='Authentification flow %s not found in realm %s' % (flow_name, realm)) + + +def flow_binding_from_dict_to_model(newClientFlowBinding, realm, kc): + """ Convert a dictionary representing client flow bindings to a model representation. + + Args: + newClientFlowBinding (dict): A dictionary containing client flow bindings. + realm (str): The name of the realm. + kc (KeycloakClient): An instance of the KeycloakClient class. + + Returns: + dict: A dictionary representing the model flow bindings. The dictionary has two keys: + - "browser" (str or None): The ID of the browser authentication flow binding, or None if not provided. + - "direct_grant" (str or None): The ID of the direct grant authentication flow binding, or None if not provided. + + Raises: + KeycloakAPIException: If the authentication flow with the given name is not found in the realm. + + """ + + modelFlow = { + "browser": None, + "direct_grant": None + } + + for k, v in newClientFlowBinding.items(): + if not v: + continue + if k == "browser": + modelFlow["browser"] = v + elif k == "browser_name": + modelFlow["browser"] = get_authentication_flow_id(v, realm, kc) + elif k == "direct_grant": + modelFlow["direct_grant"] = v + elif k == "direct_grant_name": + modelFlow["direct_grant"] = get_authentication_flow_id(v, realm, kc) + + return modelFlow + + +def find_match(iterable, attribute, name): + """ + Search for an element in a list of dictionaries based on a given attribute and value. + + This function iterates over the elements of an iterable (typically a list of dictionaries) + and returns the first element whose value for the specified attribute matches `name`. 
+ + :param iterable: + type: iterable (commonly list[dict]) + description: The collection of elements to search within (usually a list of dictionaries). + + :param attribute: + type: str + description: The dictionary key/attribute used for comparison. + + :param name: + type: Any + description: The value to search for within the given attribute. + + :return: + type: dict | None + description: Returns the first dictionary where the attribute matches the given value case insensitive. + Returns `None` if no match is found. + """ + name_lower = str(name).lower() + return next( + ( + value + for value in iterable + if attribute in value and str(value[attribute]).lower() == name_lower + ), + None, + ) + + +def add_default_client_scopes(desired_client, before_client, realm, kc): + """ + Adds missing default client scopes to a Keycloak client. + + This function compares the desired default client scopes specified in `desired_client` + with the current default client scopes in `before_client`. For each scope that is present + in `desired_client["defaultClientScopes"]` but missing from `before_client['defaultClientScopes']`, + it retrieves the scope information from Keycloak and adds it to the client. + + :param desired_client: + type: dict + description: The desired client configuration, including the list of default client scopes. + + :param before_client: + type: dict + description: The current client configuration, including the list of default client scopes. + + :param realm + type: str + description: The name of the Keycloak realm. + + :param kc + type: KeycloakAPI + description: An instance of the Keycloak API client. 
+ + Returns: + None + """ + desired_default_scope = desired_client["defaultClientScopes"] + missing_scopes = [item for item in desired_default_scope if item not in before_client['defaultClientScopes']] + if not missing_scopes: + return + client_scopes = kc.get_clientscopes(realm) + for name in missing_scopes: + scope = find_match(client_scopes, "name", name) + if scope: + kc.add_default_clientscope(scope['id'], realm, desired_client['clientId']) + + +def add_optional_client_scopes(desired_client, before_client, realm, kc): + """ + Adds missing optional client scopes to a Keycloak client. + + This function compares the desired optional client scopes specified in `desired_client` + with the current optional client scopes in `before_client`. For each scope that is present + in `desired_client["optionalClientScopes"]` but missing from `before_client['optionalClientScopes']`, + it retrieves the scope information from Keycloak and adds it to the client. + + :param desired_client: + type: dict + description: The desired client configuration, including the list of optional client scopes. + + :param before_client: + type: dict + description: The current client configuration, including the list of optional client scopes. + + :param realm: + type: str + description: The name of the Keycloak realm. + + :param kc: + type: KeycloakAPI + description: An instance of the Keycloak API client. 
+ + Returns: + None + """ + desired_optional_scope = desired_client["optionalClientScopes"] + missing_scopes = [item for item in desired_optional_scope if item not in before_client['optionalClientScopes']] + if not missing_scopes: + return + client_scopes = kc.get_clientscopes(realm) + for name in missing_scopes: + scope = find_match(client_scopes, "name", name) + if scope: + kc.add_optional_clientscope(scope['id'], realm, desired_client['clientId']) + + +def remove_default_client_scopes(desired_client, before_client, realm, kc): + """ + Removes default client scopes from a Keycloak client that are no longer desired. + + This function compares the current default client scopes in `before_client` + with the desired default client scopes in `desired_client`. For each scope that is present + in `before_client["defaultClientScopes"]` but missing from `desired_client['defaultClientScopes']`, + it retrieves the scope information from Keycloak and removes it from the client. + + :param desired_client: + type: dict + description: The desired client configuration, including the list of default client scopes. + + :param before_client: + type: dict + description: The current client configuration, including the list of default client scopes. + + :param realm: + type: str + description: The name of the Keycloak realm. + + :param kc: + type: KeycloakAPI + description: An instance of the Keycloak API client. 
+ + Returns: + None + """ + before_default_scope = before_client["defaultClientScopes"] + missing_scopes = [item for item in before_default_scope if item not in desired_client['defaultClientScopes']] + if not missing_scopes: + return + client_scopes = kc.get_default_clientscopes(realm, desired_client['clientId']) + for name in missing_scopes: + scope = find_match(client_scopes, "name", name) + if scope: + kc.delete_default_clientscope(scope['id'], realm, desired_client['clientId']) + + +def remove_optional_client_scopes(desired_client, before_client, realm, kc): + """ + Removes optional client scopes from a Keycloak client that are no longer desired. + + This function compares the current optional client scopes in `before_client` + with the desired optional client scopes in `desired_client`. For each scope that is present + in `before_client["optionalClientScopes"]` but missing from `desired_client['optionalClientScopes']`, + it retrieves the scope information from Keycloak and removes it from the client. + + :param desired_client: + type: dict + description: The desired client configuration, including the list of optional client scopes. + + :param before_client: + type: dict + description: The current client configuration, including the list of optional client scopes. + + :param realm: + type: str + description: The name of the Keycloak realm. + + :param kc: + type: KeycloakAPI + description: An instance of the Keycloak API client. 
+ + Returns: + None + """ + before_optional_scope = before_client["optionalClientScopes"] + missing_scopes = [item for item in before_optional_scope if item not in desired_client['optionalClientScopes']] + if not missing_scopes: + return + client_scopes = kc.get_optional_clientscopes(realm, desired_client['clientId']) + for name in missing_scopes: + scope = find_match(client_scopes, "name", name) + if scope: + kc.delete_optional_clientscope(scope['id'], realm, desired_client['clientId']) + + +def main(): + """ + Module execution + + :return: + """ + argument_spec = keycloak_argument_spec() + + protmapper_spec = dict( + consentRequired=dict(type='bool'), + consentText=dict(type='str'), + id=dict(type='str'), + name=dict(type='str'), + protocol=dict(type='str', choices=[PROTOCOL_OPENID_CONNECT, PROTOCOL_SAML, PROTOCOL_DOCKER_V2]), + protocolMapper=dict(type='str'), + config=dict(type='dict'), + ) + + authentication_flow_spec = dict( + browser=dict(type='str'), + browser_name=dict(type='str', aliases=['browserName']), + direct_grant=dict(type='str', aliases=['directGrant']), + direct_grant_name=dict(type='str', aliases=['directGrantName']), + ) + + meta_args = dict( + state=dict(default='present', choices=['present', 'absent']), + realm=dict(type='str', default='master'), + + id=dict(type='str'), + client_id=dict(type='str', aliases=['clientId']), + name=dict(type='str'), + description=dict(type='str'), + root_url=dict(type='str', aliases=['rootUrl']), + admin_url=dict(type='str', aliases=['adminUrl']), + base_url=dict(type='str', aliases=['baseUrl']), + surrogate_auth_required=dict(type='bool', aliases=['surrogateAuthRequired']), + enabled=dict(type='bool'), + client_authenticator_type=dict(type='str', choices=['client-secret', 'client-jwt', 'client-x509'], aliases=['clientAuthenticatorType']), + secret=dict(type='str', no_log=True), + registration_access_token=dict(type='str', aliases=['registrationAccessToken'], no_log=True), + default_roles=dict(type='list', 
elements='str', aliases=['defaultRoles']), + redirect_uris=dict(type='list', elements='str', aliases=['redirectUris']), + web_origins=dict(type='list', elements='str', aliases=['webOrigins']), + not_before=dict(type='int', aliases=['notBefore']), + bearer_only=dict(type='bool', aliases=['bearerOnly']), + consent_required=dict(type='bool', aliases=['consentRequired']), + standard_flow_enabled=dict(type='bool', aliases=['standardFlowEnabled']), + implicit_flow_enabled=dict(type='bool', aliases=['implicitFlowEnabled']), + direct_access_grants_enabled=dict(type='bool', aliases=['directAccessGrantsEnabled']), + service_accounts_enabled=dict(type='bool', aliases=['serviceAccountsEnabled']), + authorization_services_enabled=dict(type='bool', aliases=['authorizationServicesEnabled']), + public_client=dict(type='bool', aliases=['publicClient']), + frontchannel_logout=dict(type='bool', aliases=['frontchannelLogout']), + protocol=dict(type='str', choices=[PROTOCOL_OPENID_CONNECT, PROTOCOL_SAML, PROTOCOL_DOCKER_V2]), + attributes=dict(type='dict'), + full_scope_allowed=dict(type='bool', aliases=['fullScopeAllowed']), + node_re_registration_timeout=dict(type='int', aliases=['nodeReRegistrationTimeout']), + registered_nodes=dict(type='dict', aliases=['registeredNodes']), + client_template=dict(type='str', aliases=['clientTemplate']), + use_template_config=dict(type='bool', aliases=['useTemplateConfig']), + use_template_scope=dict(type='bool', aliases=['useTemplateScope']), + use_template_mappers=dict(type='bool', aliases=['useTemplateMappers']), + always_display_in_console=dict(type='bool', aliases=['alwaysDisplayInConsole']), + authentication_flow_binding_overrides=dict( + type='dict', + aliases=['authenticationFlowBindingOverrides'], + options=authentication_flow_spec, + required_one_of=[['browser', 'direct_grant', 'browser_name', 'direct_grant_name']], + mutually_exclusive=[['browser', 'browser_name'], ['direct_grant', 'direct_grant_name']], + ), + 
protocol_mappers=dict(type='list', elements='dict', options=protmapper_spec, aliases=['protocolMappers']), + authorization_settings=dict(type='dict', aliases=['authorizationSettings']), + client_scopes_behavior=dict(type='str', aliases=['clientScopesBehavior'], choices=['ignore', 'patch', 'idempotent'], default='ignore'), + default_client_scopes=dict(type='list', elements='str', aliases=['defaultClientScopes']), + optional_client_scopes=dict(type='list', elements='str', aliases=['optionalClientScopes']), + ) + + argument_spec.update(meta_args) + + module = AnsibleModule(argument_spec=argument_spec, + supports_check_mode=True, + required_one_of=([['client_id', 'id'], + ['token', 'auth_realm', 'auth_username', 'auth_password', 'auth_client_id', 'auth_client_secret']]), + required_together=([['auth_username', 'auth_password']]), + required_by={'refresh_token': 'auth_realm'}, + ) + + result = dict(changed=False, msg='', diff={}, proposed={}, existing={}, end_state={}) + + # Obtain access token, initialize API + try: + connection_header = get_token(module.params) + except KeycloakError as e: + module.fail_json(msg=str(e)) + + kc = KeycloakAPI(module, connection_header) + + realm = module.params.get('realm') + cid = module.params.get('id') + clientScopesBehavior = module.params.get('client_scopes_behavior') + state = module.params.get('state') + + # Filter and map the parameters names that apply to the client + client_params = [x for x in module.params + if x not in list(keycloak_argument_spec().keys()) + ['state', 'realm'] and + module.params.get(x) is not None] + + # See if it already exists in Keycloak + if cid is None: + before_client = kc.get_client_by_clientid(module.params.get('client_id'), realm=realm) + if before_client is not None: + cid = before_client['id'] + else: + before_client = kc.get_client_by_id(cid, realm=realm) + + normalize_kc_resp(before_client) + + if before_client is None: + before_client = {} + + # Build a proposed changeset from parameters 
given to this module + changeset = {} + + for client_param in client_params: + new_param_value = module.params.get(client_param) + + # Unfortunately, the ansible argument spec checker introduces variables with null values when + # they are not specified + if client_param == 'protocol_mappers': + new_param_value = [{k: v for k, v in x.items() if v is not None} for x in new_param_value] + elif client_param == 'authentication_flow_binding_overrides': + new_param_value = flow_binding_from_dict_to_model(new_param_value, realm, kc) + elif client_param == 'attributes' and 'attributes' in before_client: + attributes_copy = copy.deepcopy(before_client['attributes']) + attributes_copy.update(new_param_value) + new_param_value = attributes_copy + elif client_param in ['clientScopesBehavior', 'client_scopes_behavior']: + continue + + changeset[camel(client_param)] = new_param_value + + # Prepare the desired values using the existing values (non-existence results in a dict that is safe to use as a basis) + desired_client = copy.deepcopy(before_client) + desired_client.update(changeset) + + result['proposed'] = sanitize_cr(changeset) + result['existing'] = sanitize_cr(before_client) + + # Cater for when it doesn't exist (an empty dict) + if not before_client: + if state == 'absent': + # Do nothing and exit + if module._diff: + result['diff'] = dict(before='', after='') + result['changed'] = False + result['end_state'] = {} + result['msg'] = 'Client does not exist; doing nothing.' 
+ module.exit_json(**result) + + # Process a creation + result['changed'] = True + + if 'clientId' not in desired_client: + module.fail_json(msg='client_id needs to be specified when creating a new client') + if 'protocol' not in desired_client: + desired_client['protocol'] = PROTOCOL_OPENID_CONNECT + + if module._diff: + result['diff'] = dict(before='', after=sanitize_cr(desired_client)) + + if module.check_mode: + module.exit_json(**result) + + # create it + kc.create_client(desired_client, realm=realm) + after_client = kc.get_client_by_clientid(desired_client['clientId'], realm=realm) + + result['end_state'] = sanitize_cr(after_client) + + result['msg'] = 'Client %s has been created.' % desired_client['clientId'] + module.exit_json(**result) + + else: + if state == 'present': + # We can only compare the current client with the proposed updates we have + desired_client_with_scopes, before_client_with_scopes = normalise_scopes_for_behavior(desired_client, before_client, clientScopesBehavior) + check_optional_scopes_not_default(desired_client, clientScopesBehavior, module) + before_norm = normalise_cr(before_client_with_scopes, remove_ids=True) + desired_norm = normalise_cr(desired_client_with_scopes, remove_ids=True) + # no changes + if before_norm == desired_norm: + result['changed'] = False + result['end_state'] = sanitize_cr(before_client) + result['msg'] = 'No changes required for Client %s.' 
% desired_client['clientId'] + module.exit_json(**result) + + # Process an update + result['changed'] = True + + if module.check_mode: + result['end_state'] = sanitize_cr(desired_client_with_scopes) + if module._diff: + result['diff'] = dict(before=sanitize_cr(before_client), + after=sanitize_cr(desired_client)) + module.exit_json(**result) + + # do the update + kc.update_client(cid, desired_client, realm=realm) + + remove_default_client_scopes(desired_client_with_scopes, before_client_with_scopes, realm, kc) + remove_optional_client_scopes(desired_client_with_scopes, before_client_with_scopes, realm, kc) + add_default_client_scopes(desired_client_with_scopes, before_client_with_scopes, realm, kc) + add_optional_client_scopes(desired_client_with_scopes, before_client_with_scopes, realm, kc) + + after_client = kc.get_client_by_id(cid, realm=realm) + normalize_kc_resp(after_client) + + if module._diff: + result['diff'] = dict(before=sanitize_cr(before_client), + after=sanitize_cr(after_client)) + + result['end_state'] = sanitize_cr(after_client) + + result['msg'] = 'Client %s has been updated.' % desired_client['clientId'] + module.exit_json(**result) + + else: + # Process a deletion (because state was not 'present') + result['changed'] = True + + if module._diff: + result['diff'] = dict(before=sanitize_cr(before_client), after='') + + if module.check_mode: + module.exit_json(**result) + + # delete it + kc.delete_client(cid, realm=realm) + result['proposed'] = {} + + result['end_state'] = {} + + result['msg'] = 'Client %s has been deleted.' 
% before_client['clientId'] + + module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/keycloak_client_rolemapping.py b/plugins/modules/keycloak_client_rolemapping.py new file mode 100644 index 0000000000..53ac32c2e9 --- /dev/null +++ b/plugins/modules/keycloak_client_rolemapping.py @@ -0,0 +1,403 @@ +#!/usr/bin/python + +# Copyright (c) Ansible project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + +DOCUMENTATION = r""" +module: keycloak_client_rolemapping + +short_description: Allows administration of Keycloak client_rolemapping with the Keycloak API + +version_added: 3.5.0 + +description: + - This module allows you to add, remove or modify Keycloak client_rolemapping with the Keycloak REST API. It requires access + to the REST API using OpenID Connect; the user connecting and the client being used must have the requisite access rights. + In a default Keycloak installation, admin-cli and an admin user would work, as would a separate client definition with + the scope tailored to your needs and a user having the expected roles. + - The names of module options are snake_cased versions of the camelCase ones found in the Keycloak API and its documentation + at U(https://www.keycloak.org/docs-api/8.0/rest-api/index.html). + - Attributes are multi-valued in the Keycloak API. All attributes are lists of individual values and are returned that way + by this module. You may pass single values for attributes when calling the module, and this is translated into a list + suitable for the API. + - When updating a client_rolemapping, where possible provide the role ID to the module. This removes a lookup to the API + to translate the name into the role ID. 
+attributes: + check_mode: + support: full + diff_mode: + support: full + action_group: + version_added: 10.2.0 + +options: + state: + description: + - State of the client_rolemapping. + - On V(present), the client_rolemapping is created if it does not yet exist, or updated with the parameters + you provide. + - On V(absent), the client_rolemapping is removed if it exists. + default: 'present' + type: str + choices: + - present + - absent + + realm: + type: str + description: + - The Keycloak realm under which this role_representation resides. + default: 'master' + + group_name: + type: str + description: + - Name of the group to be mapped. + - This parameter is required (can be replaced by gid for less API call). + parents: + version_added: "7.1.0" + type: list + description: + - List of parent groups for the group to handle sorted top to bottom. + - Set this if your group is a subgroup and you do not provide the GID in O(gid). + elements: dict + suboptions: + id: + type: str + description: + - Identify parent by ID. + - Needs fewer API calls than using O(parents[].name). + - A deep parent chain can be started at any point when first given parent is given as ID. + - Note that in principle both ID and name can be specified at the same time but the current implementation always + uses only one of them, with ID being preferred. + name: + type: str + description: + - Identify parent by name. + - Needs more internal API calls than using O(parents[].id) to map names to ID's under the hood. + - When giving a parent chain with only names it must be complete up to the top. + - Note that in principle both ID and name can be specified at the same time but the current implementation always + uses only one of them, with ID being preferred. + gid: + type: str + description: + - ID of the group to be mapped. + - This parameter is not required for updating or deleting the rolemapping but providing it reduces the number of API + calls required. 
+ client_id: + type: str + description: + - Name of the client to be mapped (different than O(cid)). + - This parameter is required (can be replaced by cid for less API call). + cid: + type: str + description: + - ID of the client to be mapped. + - This parameter is not required for updating or deleting the rolemapping but providing it reduces the number of API + calls required. + roles: + description: + - Roles to be mapped to the group. + type: list + elements: dict + suboptions: + name: + type: str + description: + - Name of the role_representation. + - This parameter is required only when creating or updating the role_representation. + id: + type: str + description: + - The unique identifier for this role_representation. + - This parameter is not required for updating or deleting a role_representation but providing it reduces the number + of API calls required. +extends_documentation_fragment: + - community.general.keycloak + - community.general.keycloak.actiongroup_keycloak + - community.general.attributes + +author: + - Gaëtan Daubresse (@Gaetan2907) +""" + +EXAMPLES = r""" +- name: Map a client role to a group, authentication with credentials + community.general.keycloak_client_rolemapping: + realm: MyCustomRealm + auth_client_id: admin-cli + auth_keycloak_url: https://auth.example.com/auth + auth_realm: master + auth_username: USERNAME + auth_password: PASSWORD + state: present + client_id: client1 + group_name: group1 + roles: + - name: role_name1 + id: role_id1 + - name: role_name2 + id: role_id2 + delegate_to: localhost + +- name: Map a client role to a group, authentication with token + community.general.keycloak_client_rolemapping: + realm: MyCustomRealm + auth_client_id: admin-cli + auth_keycloak_url: https://auth.example.com/auth + token: TOKEN + state: present + client_id: client1 + group_name: group1 + roles: + - name: role_name1 + id: role_id1 + - name: role_name2 + id: role_id2 + delegate_to: localhost + +- name: Map a client role to a subgroup, 
authentication with token + community.general.keycloak_client_rolemapping: + realm: MyCustomRealm + auth_client_id: admin-cli + auth_keycloak_url: https://auth.example.com/auth + token: TOKEN + state: present + client_id: client1 + group_name: subgroup1 + parents: + - name: parent-group + roles: + - name: role_name1 + id: role_id1 + - name: role_name2 + id: role_id2 + delegate_to: localhost + +- name: Unmap client role from a group + community.general.keycloak_client_rolemapping: + realm: MyCustomRealm + auth_client_id: admin-cli + auth_keycloak_url: https://auth.example.com/auth + auth_realm: master + auth_username: USERNAME + auth_password: PASSWORD + state: absent + client_id: client1 + group_name: group1 + roles: + - name: role_name1 + id: role_id1 + - name: role_name2 + id: role_id2 + delegate_to: localhost +""" + +RETURN = r""" +msg: + description: Message as to what action was taken. + returned: always + type: str + sample: "Role role1 assigned to group group1." + +proposed: + description: Representation of proposed client role mapping. + returned: always + type: dict + sample: {"clientId": "test"} + +existing: + description: + - Representation of existing client role mapping. + - The sample is truncated. + returned: always + type: dict + sample: + { + "adminUrl": "http://www.example.com/admin_url", + "attributes": { + "request.object.signature.alg": "RS256" + } + } + +end_state: + description: + - Representation of client role mapping after module execution. + - The sample is truncated. 
+ returned: on success + type: dict + sample: + { + "adminUrl": "http://www.example.com/admin_url", + "attributes": { + "request.object.signature.alg": "RS256" + } + } +""" + +from ansible_collections.community.general.plugins.module_utils.identity.keycloak.keycloak import ( + KeycloakAPI, keycloak_argument_spec, get_token, KeycloakError, +) +from ansible.module_utils.basic import AnsibleModule + + +def main(): + """ + Module execution + + :return: + """ + argument_spec = keycloak_argument_spec() + + roles_spec = dict( + name=dict(type='str'), + id=dict(type='str'), + ) + + meta_args = dict( + state=dict(default='present', choices=['present', 'absent']), + realm=dict(default='master'), + gid=dict(type='str'), + group_name=dict(type='str'), + parents=dict( + type='list', elements='dict', + options=dict( + id=dict(type='str'), + name=dict(type='str') + ), + ), + cid=dict(type='str'), + client_id=dict(type='str'), + roles=dict(type='list', elements='dict', options=roles_spec), + ) + + argument_spec.update(meta_args) + + module = AnsibleModule(argument_spec=argument_spec, + supports_check_mode=True, + required_one_of=([['token', 'auth_realm', 'auth_username', 'auth_password', 'auth_client_id', 'auth_client_secret']]), + required_together=([['auth_username', 'auth_password']]), + required_by={'refresh_token': 'auth_realm'}, + ) + + result = dict(changed=False, msg='', diff={}, proposed={}, existing={}, end_state={}) + + # Obtain access token, initialize API + try: + connection_header = get_token(module.params) + except KeycloakError as e: + module.fail_json(msg=str(e)) + + kc = KeycloakAPI(module, connection_header) + + realm = module.params.get('realm') + state = module.params.get('state') + cid = module.params.get('cid') + client_id = module.params.get('client_id') + gid = module.params.get('gid') + group_name = module.params.get('group_name') + roles = module.params.get('roles') + parents = module.params.get('parents') + + # Check the parameters + if cid is None and 
client_id is None: + module.fail_json(msg='Either the `client_id` or `cid` has to be specified.') + if gid is None and group_name is None: + module.fail_json(msg='Either the `group_name` or `gid` has to be specified.') + + # Get the potential missing parameters + if gid is None: + group_rep = kc.get_group_by_name(group_name, realm=realm, parents=parents) + if group_rep is not None: + gid = group_rep['id'] + else: + module.fail_json(msg='Could not fetch group %s:' % group_name) + if cid is None: + cid = kc.get_client_id(client_id, realm=realm) + if cid is None: + module.fail_json(msg='Could not fetch client %s:' % client_id) + if roles is None: + module.exit_json(msg="Nothing to do (no roles specified).") + else: + for role_index, role in enumerate(roles, start=0): + if role['name'] is None and role['id'] is None: + module.fail_json(msg='Either the `name` or `id` has to be specified on each role.') + # Fetch missing role_id + if role['id'] is None: + role_id = kc.get_client_role_id_by_name(cid, role['name'], realm=realm) + if role_id is not None: + role['id'] = role_id + else: + module.fail_json(msg='Could not fetch role %s:' % (role['name'])) + # Fetch missing role_name + else: + role['name'] = kc.get_client_group_rolemapping_by_id(gid, cid, role['id'], realm=realm)['name'] + if role['name'] is None: + module.fail_json(msg='Could not fetch role %s' % (role['id'])) + + # Get effective client-level role mappings + available_roles_before = kc.get_client_group_available_rolemappings(gid, cid, realm=realm) + assigned_roles_before = kc.get_client_group_composite_rolemappings(gid, cid, realm=realm) + + result['existing'] = assigned_roles_before + result['proposed'] = list(assigned_roles_before) if assigned_roles_before else [] + + update_roles = [] + for role_index, role in enumerate(roles, start=0): + # Fetch roles to assign if state present + if state == 'present': + for available_role in available_roles_before: + if role['name'] == available_role['name']: + 
update_roles.append({ + 'id': role['id'], + 'name': role['name'], + }) + result['proposed'].append(available_role) + # Fetch roles to remove if state absent + else: + for assigned_role in assigned_roles_before: + if role['name'] == assigned_role['name']: + update_roles.append({ + 'id': role['id'], + 'name': role['name'], + }) + if assigned_role in result['proposed']: # Handle double removal + result['proposed'].remove(assigned_role) + + if len(update_roles): + if state == 'present': + # Assign roles + result['changed'] = True + if module._diff: + result['diff'] = dict(before=assigned_roles_before, after=result['proposed']) + if module.check_mode: + module.exit_json(**result) + kc.add_group_rolemapping(gid, cid, update_roles, realm=realm) + result['msg'] = 'Roles %s assigned to group %s.' % (update_roles, group_name) + assigned_roles_after = kc.get_client_group_composite_rolemappings(gid, cid, realm=realm) + result['end_state'] = assigned_roles_after + module.exit_json(**result) + else: + # Remove mapping of role + result['changed'] = True + if module._diff: + result['diff'] = dict(before=assigned_roles_before, after=result['proposed']) + if module.check_mode: + module.exit_json(**result) + kc.delete_group_rolemapping(gid, cid, update_roles, realm=realm) + result['msg'] = 'Roles %s removed from group %s.' % (update_roles, group_name) + assigned_roles_after = kc.get_client_group_composite_rolemappings(gid, cid, realm=realm) + result['end_state'] = assigned_roles_after + module.exit_json(**result) + # Do nothing + else: + result['changed'] = False + result['msg'] = 'Nothing to do, roles %s are %s with group %s.' 
% (roles, 'mapped' if state == 'present' else 'not mapped', group_name) + module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/keycloak_client_rolescope.py b/plugins/modules/keycloak_client_rolescope.py new file mode 100644 index 0000000000..8f37172a18 --- /dev/null +++ b/plugins/modules/keycloak_client_rolescope.py @@ -0,0 +1,275 @@ +#!/usr/bin/python + +# Copyright (c) Ansible project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + +DOCUMENTATION = r""" +module: keycloak_client_rolescope + +short_description: Allows administration of Keycloak client roles scope to restrict the usage of certain roles to a other + specific client applications + +version_added: 8.6.0 + +description: + - This module allows you to add or remove Keycloak roles from clients scope using the Keycloak REST API. It requires access + to the REST API using OpenID Connect; the user connecting and the client being used must have the requisite access rights. + In a default Keycloak installation, admin-cli and an admin user would work, as would a separate client definition with + the scope tailored to your needs and a user having the expected roles. + - Client O(client_id) must have O(community.general.keycloak_client#module:full_scope_allowed) set to V(false). + - Attributes are multi-valued in the Keycloak API. All attributes are lists of individual values and are returned that way + by this module. You may pass single values for attributes when calling the module, and this is translated into a list + suitable for the API. +attributes: + check_mode: + support: full + diff_mode: + support: full + action_group: + version_added: 10.2.0 + +options: + state: + description: + - State of the role mapping. + - On V(present), all roles in O(role_names) are mapped if not exist yet. 
+ - On V(absent), all role mappings in O(role_names) are removed if they exist. + default: 'present' + type: str + choices: + - present + - absent + + realm: + type: str + description: + - The Keycloak realm under which clients reside. + default: 'master' + + client_id: + type: str + required: true + description: + - Roles provided in O(role_names) will be added to this client scope. + client_scope_id: + type: str + description: + - If the O(role_names) are client roles, the client ID under which they reside. + - If this parameter is absent, the roles are considered realm roles. + role_names: + required: true + type: list + elements: str + description: + - Names of roles to manipulate. + - If O(client_scope_id) is present, all roles must be under this client. + - If O(client_scope_id) is absent, all roles must be under the realm. +extends_documentation_fragment: + - community.general.keycloak + - community.general.keycloak.actiongroup_keycloak + - community.general.attributes + +author: + - Andre Desrosiers (@desand01) +""" + +EXAMPLES = r""" +- name: Add roles to public client scope + community.general.keycloak_client_rolescope: + auth_keycloak_url: https://auth.example.com/auth + auth_realm: master + auth_username: USERNAME + auth_password: PASSWORD + realm: MyCustomRealm + client_id: frontend-client-public + client_scope_id: backend-client-private + role_names: + - backend-role-admin + - backend-role-user + +- name: Remove roles from public client scope + community.general.keycloak_client_rolescope: + auth_keycloak_url: https://auth.example.com/auth + auth_realm: master + auth_username: USERNAME + auth_password: PASSWORD + realm: MyCustomRealm + client_id: frontend-client-public + client_scope_id: backend-client-private + role_names: + - backend-role-admin + state: absent + +- name: Add realm roles to public client scope + community.general.keycloak_client_rolescope: + auth_keycloak_url: https://auth.example.com/auth + auth_realm: master + auth_username: 
USERNAME + auth_password: PASSWORD + realm: MyCustomRealm + client_id: frontend-client-public + role_names: + - realm-role-admin + - realm-role-user +""" + +RETURN = r""" +msg: + description: Message as to what action was taken. + returned: always + type: str + sample: "Client role scope for frontend-client-public has been updated" + +end_state: + description: Representation of role role scope after module execution. + returned: on success + type: list + elements: dict + sample: + [ + { + "clientRole": false, + "composite": false, + "containerId": "MyCustomRealm", + "id": "47293104-59a6-46f0-b460-2e9e3c9c424c", + "name": "backend-role-admin" + }, + { + "clientRole": false, + "composite": false, + "containerId": "MyCustomRealm", + "id": "39c62a6d-542c-4715-92d2-41021eb33967", + "name": "backend-role-user" + } + ] +""" + +from ansible_collections.community.general.plugins.module_utils.identity.keycloak.keycloak import KeycloakAPI, \ + keycloak_argument_spec, get_token, KeycloakError +from ansible.module_utils.basic import AnsibleModule + + +def main(): + """ + Module execution + + :return: + """ + argument_spec = keycloak_argument_spec() + + meta_args = dict( + client_id=dict(type='str', required=True), + client_scope_id=dict(type='str'), + realm=dict(type='str', default='master'), + role_names=dict(type='list', elements='str', required=True), + state=dict(type='str', default='present', choices=['present', 'absent']), + ) + + argument_spec.update(meta_args) + + module = AnsibleModule(argument_spec=argument_spec, + supports_check_mode=True) + + result = dict(changed=False, msg='', diff={}, end_state={}) + + # Obtain access token, initialize API + try: + connection_header = get_token(module.params) + except KeycloakError as e: + module.fail_json(msg=str(e)) + + kc = KeycloakAPI(module, connection_header) + + realm = module.params.get('realm') + clientid = module.params.get('client_id') + client_scope_id = module.params.get('client_scope_id') + role_names = 
module.params.get('role_names') + state = module.params.get('state') + + objRealm = kc.get_realm_by_id(realm) + if not objRealm: + module.fail_json(msg="Failed to retrieve realm '{realm}'".format(realm=realm)) + + objClient = kc.get_client_by_clientid(clientid, realm) + if not objClient: + module.fail_json(msg="Failed to retrieve client '{realm}.{clientid}'".format(realm=realm, clientid=clientid)) + if objClient["fullScopeAllowed"] and state == "present": + module.fail_json(msg="FullScopeAllowed is active for Client '{realm}.{clientid}'".format(realm=realm, clientid=clientid)) + + if client_scope_id: + objClientScope = kc.get_client_by_clientid(client_scope_id, realm) + if not objClientScope: + module.fail_json(msg="Failed to retrieve client '{realm}.{client_scope_id}'".format(realm=realm, client_scope_id=client_scope_id)) + before_role_mapping = kc.get_client_role_scope_from_client(objClient["id"], objClientScope["id"], realm) + else: + before_role_mapping = kc.get_client_role_scope_from_realm(objClient["id"], realm) + + if client_scope_id: + # retrieve all roles from client_scope + client_scope_roles_by_name = kc.get_client_roles_by_id(objClientScope["id"], realm) + else: + # retrieve all roles from realm + client_scope_roles_by_name = kc.get_realm_roles(realm) + + # convert to indexed Dict by name + client_scope_roles_by_name = {role["name"]: role for role in client_scope_roles_by_name} + role_mapping_by_name = {role["name"]: role for role in before_role_mapping} + role_mapping_to_manipulate = [] + + if state == "present": + # update desired + for role_name in role_names: + if role_name not in client_scope_roles_by_name: + if client_scope_id: + module.fail_json(msg="Failed to retrieve role '{realm}.{client_scope_id}.{role_name}'" + .format(realm=realm, client_scope_id=client_scope_id, role_name=role_name)) + else: + module.fail_json(msg="Failed to retrieve role '{realm}.{role_name}'".format(realm=realm, role_name=role_name)) + if role_name not in role_mapping_by_name: + 
role_mapping_to_manipulate.append(client_scope_roles_by_name[role_name]) + role_mapping_by_name[role_name] = client_scope_roles_by_name[role_name] + else: + # remove role if present + for role_name in role_names: + if role_name in role_mapping_by_name: + role_mapping_to_manipulate.append(role_mapping_by_name[role_name]) + del role_mapping_by_name[role_name] + + before_role_mapping = sorted(before_role_mapping, key=lambda d: d['name']) + desired_role_mapping = sorted(role_mapping_by_name.values(), key=lambda d: d['name']) + + result['changed'] = len(role_mapping_to_manipulate) > 0 + + if result['changed']: + result['diff'] = dict(before=before_role_mapping, after=desired_role_mapping) + + if not result['changed']: + # no changes + result['end_state'] = before_role_mapping + result['msg'] = "No changes required for client role scope {name}.".format(name=clientid) + elif state == "present": + # doing update + if module.check_mode: + result['end_state'] = desired_role_mapping + elif client_scope_id: + result['end_state'] = kc.update_client_role_scope_from_client(role_mapping_to_manipulate, objClient["id"], objClientScope["id"], realm) + else: + result['end_state'] = kc.update_client_role_scope_from_realm(role_mapping_to_manipulate, objClient["id"], realm) + result['msg'] = "Client role scope for {name} has been updated".format(name=clientid) + else: + # doing delete + if module.check_mode: + result['end_state'] = desired_role_mapping + elif client_scope_id: + result['end_state'] = kc.delete_client_role_scope_from_client(role_mapping_to_manipulate, objClient["id"], objClientScope["id"], realm) + else: + result['end_state'] = kc.delete_client_role_scope_from_realm(role_mapping_to_manipulate, objClient["id"], realm) + result['msg'] = "Client role scope for {name} has been deleted".format(name=clientid) + module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/identity/keycloak/keycloak_clientscope.py 
b/plugins/modules/keycloak_clientscope.py similarity index 53% rename from plugins/modules/identity/keycloak/keycloak_clientscope.py rename to plugins/modules/keycloak_clientscope.py index 2deab5547d..cea4b4fab2 100644 --- a/plugins/modules/identity/keycloak/keycloak_clientscope.py +++ b/plugins/modules/keycloak_clientscope.py @@ -1,162 +1,158 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# Copyright (c) Ansible project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: keycloak_clientscope -short_description: Allows administration of Keycloak client_scopes via Keycloak API +short_description: Allows administration of Keycloak client_scopes using Keycloak API version_added: 3.4.0 description: - - This module allows you to add, remove or modify Keycloak client_scopes via the Keycloak REST API. - It requires access to the REST API via OpenID Connect; the user connecting and the client being - used must have the requisite access rights. In a default Keycloak installation, admin-cli - and an admin user would work, as would a separate client definition with the scope tailored - to your needs and a user having the expected roles. - - - The names of module options are snake_cased versions of the camelCase ones found in the - Keycloak API and its documentation at U(https://www.keycloak.org/docs-api/8.0/rest-api/index.html). - - - Attributes are multi-valued in the Keycloak API. All attributes are lists of individual values and will - be returned that way by this module. 
You may pass single values for attributes when calling the module, - and this will be translated into a list suitable for the API. - - - When updating a client_scope, where possible provide the client_scope ID to the module. This removes a lookup - to the API to translate the name into the client_scope ID. - + - This module allows you to add, remove or modify Keycloak client_scopes using the Keycloak REST API. It requires access + to the REST API using OpenID Connect; the user connecting and the client being used must have the requisite access rights. + In a default Keycloak installation, admin-cli and an admin user would work, as would a separate client definition with + the scope tailored to your needs and a user having the expected roles. + - The names of module options are snake_cased versions of the camelCase ones found in the Keycloak API and its documentation + at U(https://www.keycloak.org/docs-api/8.0/rest-api/index.html). + - Attributes are multi-valued in the Keycloak API. All attributes are lists of individual values and are returned that way + by this module. You may pass single values for attributes when calling the module, and this is translated into a list + suitable for the API. + - When updating a client_scope, where possible provide the client_scope ID to the module. This removes a lookup to the API + to translate the name into the client_scope ID. +attributes: + check_mode: + support: full + diff_mode: + support: full + action_group: + version_added: 10.2.0 options: - state: - description: - - State of the client_scope. - - On C(present), the client_scope will be created if it does not yet exist, or updated with the parameters you provide. - - On C(absent), the client_scope will be removed if it exists. - default: 'present' - type: str - choices: - - present - - absent - - name: - type: str - description: - - Name of the client_scope. - - This parameter is required only when creating or updating the client_scope. 
- - realm: - type: str - description: - - They Keycloak realm under which this client_scope resides. - default: 'master' - - id: - type: str - description: - - The unique identifier for this client_scope. - - This parameter is not required for updating or deleting a client_scope but - providing it will reduce the number of API calls required. - + state: description: - type: str - description: - - Description for this client_scope. - - This parameter is not required for updating or deleting a client_scope. + - State of the client_scope. + - On V(present), the client_scope is created if it does not yet exist, or updated with the parameters you provide. + - On V(absent), the client_scope is removed if it exists. + default: 'present' + type: str + choices: + - present + - absent - protocol: + name: + type: str + description: + - Name of the client_scope. + - This parameter is required only when creating or updating the client_scope. + realm: + type: str + description: + - They Keycloak realm under which this client_scope resides. + default: 'master' + + id: + type: str + description: + - The unique identifier for this client_scope. + - This parameter is not required for updating or deleting a client_scope but providing it reduces the number of API + calls required. + description: + type: str + description: + - Description for this client_scope. + - This parameter is not required for updating or deleting a client_scope. + protocol: + description: + - Type of client. + - The V(docker-v2) value was added in community.general 8.6.0. + choices: ['openid-connect', 'saml', 'wsfed', 'docker-v2'] + type: str + + protocol_mappers: + description: + - A list of dicts defining protocol mappers for this client. + - This is C(protocolMappers) in the Keycloak REST API. + aliases: + - protocolMappers + type: list + elements: dict + suboptions: + protocol: description: - - Type of client. 
- choices: ['openid-connect', 'saml', 'wsfed'] + - This specifies for which protocol this protocol mapper. + - Is active. + choices: ['openid-connect', 'saml', 'wsfed', 'docker-v2'] type: str - protocol_mappers: + protocolMapper: description: - - A list of dicts defining protocol mappers for this client. - - This is 'protocolMappers' in the Keycloak REST API. - aliases: - - protocolMappers - type: list - elements: dict - suboptions: - protocol: - description: - - This specifies for which protocol this protocol mapper. - - is active. - choices: ['openid-connect', 'saml', 'wsfed'] - type: str + - 'The Keycloak-internal name of the type of this protocol-mapper. While an exhaustive list is impossible to provide + since this may be extended through SPIs by the user of Keycloak, by default Keycloak as of 3.4 ships with at least:' + - V(docker-v2-allow-all-mapper). + - V(oidc-address-mapper). + - V(oidc-full-name-mapper). + - V(oidc-group-membership-mapper). + - V(oidc-hardcoded-claim-mapper). + - V(oidc-hardcoded-role-mapper). + - V(oidc-role-name-mapper). + - V(oidc-script-based-protocol-mapper). + - V(oidc-sha256-pairwise-sub-mapper). + - V(oidc-usermodel-attribute-mapper). + - V(oidc-usermodel-client-role-mapper). + - V(oidc-usermodel-property-mapper). + - V(oidc-usermodel-realm-role-mapper). + - V(oidc-usersessionmodel-note-mapper). + - V(saml-group-membership-mapper). + - V(saml-hardcode-attribute-mapper). + - V(saml-hardcode-role-mapper). + - V(saml-role-list-mapper). + - V(saml-role-name-mapper). + - V(saml-user-attribute-mapper). + - V(saml-user-property-mapper). + - V(saml-user-session-note-mapper). + - An exhaustive list of available mappers on your installation can be obtained on the admin console by going to + Server Info -> Providers and looking under 'protocol-mapper'. + type: str - protocolMapper: - description: - - "The Keycloak-internal name of the type of this protocol-mapper. 
While an exhaustive list is - impossible to provide since this may be extended through SPIs by the user of Keycloak, - by default Keycloak as of 3.4 ships with at least:" - - C(docker-v2-allow-all-mapper) - - C(oidc-address-mapper) - - C(oidc-full-name-mapper) - - C(oidc-group-membership-mapper) - - C(oidc-hardcoded-claim-mapper) - - C(oidc-hardcoded-role-mapper) - - C(oidc-role-name-mapper) - - C(oidc-script-based-protocol-mapper) - - C(oidc-sha256-pairwise-sub-mapper) - - C(oidc-usermodel-attribute-mapper) - - C(oidc-usermodel-client-role-mapper) - - C(oidc-usermodel-property-mapper) - - C(oidc-usermodel-realm-role-mapper) - - C(oidc-usersessionmodel-note-mapper) - - C(saml-group-membership-mapper) - - C(saml-hardcode-attribute-mapper) - - C(saml-hardcode-role-mapper) - - C(saml-role-list-mapper) - - C(saml-role-name-mapper) - - C(saml-user-attribute-mapper) - - C(saml-user-property-mapper) - - C(saml-user-session-note-mapper) - - An exhaustive list of available mappers on your installation can be obtained on - the admin console by going to Server Info -> Providers and looking under - 'protocol-mapper'. - type: str + name: + description: + - The name of this protocol mapper. + type: str - name: - description: - - The name of this protocol mapper. - type: str + id: + description: + - Usually a UUID specifying the internal ID of this protocol mapper instance. + type: str - id: - description: - - Usually a UUID specifying the internal ID of this protocol mapper instance. - type: str - - config: - description: - - Dict specifying the configuration options for the protocol mapper; the - contents differ depending on the value of I(protocolMapper) and are not documented - other than by the source of the mappers and its parent class(es). An example is given - below. It is easiest to obtain valid config values by dumping an already-existing - protocol mapper configuration through check-mode in the C(existing) return value. 
- type: dict - - attributes: + config: + description: + - Dict specifying the configuration options for the protocol mapper; the contents differ depending on the value + of O(protocol_mappers[].protocolMapper) and are not documented other than by the source of the mappers and its + parent class(es). An example is given below. It is easiest to obtain valid config values by dumping an already-existing + protocol mapper configuration through check-mode in the RV(existing) return value. type: dict - description: - - A dict of key/value pairs to set as custom attributes for the client_scope. - - Values may be single values (for example a string) or a list of strings. + attributes: + type: dict + description: + - A dict of key/value pairs to set as custom attributes for the client_scope. + - Values may be single values (for example a string) or a list of strings. extends_documentation_fragment: -- community.general.keycloak - + - community.general.keycloak + - community.general.keycloak.actiongroup_keycloak + - community.general.attributes author: - - Gaëtan Daubresse (@Gaetan2907) -''' + - Gaëtan Daubresse (@Gaetan2907) +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: Create a Keycloak client_scopes, authentication with credentials community.general.keycloak_clientscope: name: my-new-kc-clientscope @@ -226,12 +222,12 @@ EXAMPLES = ''' protocol: openid-connect protocol_mappers: - config: - access.token.claim: True + access.token.claim: true claim.name: "family_name" - id.token.claim: True + id.token.claim: true jsonType.label: String user.attribute: lastName - userinfo.token.claim: True + userinfo.token.claim: true name: family name protocol: openid-connect protocolMapper: oidc-usermodel-property-mapper @@ -243,60 +239,84 @@ EXAMPLES = ''' protocol: saml protocolMapper: saml-role-list-mapper attributes: - attrib1: value1 - attrib2: value2 - attrib3: - - with - - numerous - - individual - - list - - items + attrib1: value1 + attrib2: value2 + attrib3: + - with + - numerous + 
- individual + - list + - items delegate_to: localhost -''' +""" -RETURN = ''' +RETURN = r""" msg: - description: Message as to what action was taken. - returned: always - type: str - sample: "Client_scope testclientscope has been updated" + description: Message as to what action was taken. + returned: always + type: str + sample: "Client_scope testclientscope has been updated" proposed: - description: Representation of proposed client scope. - returned: always - type: dict - sample: { - clientId: "test" - } + description: Representation of proposed client scope. + returned: always + type: dict + sample: {"clientId": "test"} existing: - description: Representation of existing client scope (sample is truncated). - returned: always - type: dict - sample: { - "adminUrl": "http://www.example.com/admin_url", - "attributes": { - "request.object.signature.alg": "RS256", - } + description: Representation of existing client scope (sample is truncated). + returned: always + type: dict + sample: + { + "adminUrl": "http://www.example.com/admin_url", + "attributes": { + "request.object.signature.alg": "RS256" + } } end_state: - description: Representation of client scope after module execution (sample is truncated). - returned: on success - type: dict - sample: { - "adminUrl": "http://www.example.com/admin_url", - "attributes": { - "request.object.signature.alg": "RS256", - } + description: Representation of client scope after module execution (sample is truncated). 
+ returned: on success + type: dict + sample: + { + "adminUrl": "http://www.example.com/admin_url", + "attributes": { + "request.object.signature.alg": "RS256" + } } -''' +""" from ansible_collections.community.general.plugins.module_utils.identity.keycloak.keycloak import KeycloakAPI, camel, \ keycloak_argument_spec, get_token, KeycloakError, is_struct_included from ansible.module_utils.basic import AnsibleModule +def normalise_cr(clientscoperep, remove_ids=False): + """ Re-sorts any properties where the order so that diff's is minimised, and adds default values where appropriate so that the + the change detection is more effective. + + :param clientscoperep: the clientscoperep dict to be sanitized + :param remove_ids: If set to true, then the unique ID's of objects is removed to make the diff and checks for changed + not alert when the ID's of objects are not usually known, (e.g. for protocol_mappers) + :return: normalised clientscoperep dict + """ + # Avoid the dict passed in to be modified + clientscoperep = clientscoperep.copy() + + if 'protocolMappers' in clientscoperep: + clientscoperep['protocolMappers'] = sorted(clientscoperep['protocolMappers'], key=lambda x: (x.get('name'), x.get('protocol'), x.get('protocolMapper'))) + for mapper in clientscoperep['protocolMappers']: + if remove_ids: + mapper.pop('id', None) + + # Set to a default value. + mapper['consentRequired'] = mapper.get('consentRequired', False) + + return clientscoperep + + def sanitize_cr(clientscoperep): """ Removes probably sensitive details from a clientscoperep representation. 
@@ -309,7 +329,7 @@ def sanitize_cr(clientscoperep): if 'attributes' in result: if 'saml.signing.private.key' in result['attributes']: result['attributes']['saml.signing.private.key'] = 'no_log' - return result + return normalise_cr(result) def main(): @@ -323,7 +343,7 @@ def main(): protmapper_spec = dict( id=dict(type='str'), name=dict(type='str'), - protocol=dict(type='str', choices=['openid-connect', 'saml', 'wsfed']), + protocol=dict(type='str', choices=['openid-connect', 'saml', 'wsfed', 'docker-v2']), protocolMapper=dict(type='str'), config=dict(type='dict'), ) @@ -334,7 +354,7 @@ def main(): id=dict(type='str'), name=dict(type='str'), description=dict(type='str'), - protocol=dict(type='str', choices=['openid-connect', 'saml', 'wsfed']), + protocol=dict(type='str', choices=['openid-connect', 'saml', 'wsfed', 'docker-v2']), attributes=dict(type='dict'), protocol_mappers=dict(type='list', elements='dict', options=protmapper_spec, aliases=['protocolMappers']), ) @@ -344,8 +364,10 @@ def main(): module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True, required_one_of=([['id', 'name'], - ['token', 'auth_realm', 'auth_username', 'auth_password']]), - required_together=([['auth_realm', 'auth_username', 'auth_password']])) + ['token', 'auth_realm', 'auth_username', 'auth_password', 'auth_client_id', 'auth_client_secret']]), + required_together=([['auth_username', 'auth_password']]), + required_by={'refresh_token': 'auth_realm'}, + ) result = dict(changed=False, msg='', diff={}, proposed={}, existing={}, end_state={}) @@ -383,17 +405,10 @@ def main(): for clientscope_param in clientscope_params: new_param_value = module.params.get(clientscope_param) - # some lists in the Keycloak API are sorted, some are not. 
- if isinstance(new_param_value, list): - if clientscope_param in ['attributes']: - try: - new_param_value = sorted(new_param_value) - except TypeError: - pass # Unfortunately, the ansible argument spec checker introduces variables with null values when # they are not specified if clientscope_param == 'protocol_mappers': - new_param_value = [dict((k, v) for k, v in x.items() if x[k] is not None) for x in new_param_value] + new_param_value = [{k: v for k, v in x.items() if v is not None} for x in new_param_value] changeset[camel(clientscope_param)] = new_param_value # Prepare the desired values using the existing values (non-existence results in a dict that is save to use as a basis) @@ -437,7 +452,9 @@ def main(): # Process an update # no changes - if desired_clientscope == before_clientscope: + # remove ids for compare, problematic if desired has no ids set (not required), + # normalize for consentRequired in protocolMappers + if normalise_cr(desired_clientscope, remove_ids=True) == normalise_cr(before_clientscope, remove_ids=True): result['changed'] = False result['end_state'] = sanitize_cr(desired_clientscope) result['msg'] = "No changes required to clientscope {name}.".format(name=before_clientscope['name']) @@ -450,6 +467,13 @@ def main(): result['diff'] = dict(before=sanitize_cr(before_clientscope), after=sanitize_cr(desired_clientscope)) if module.check_mode: + # We can only compare the current clientscope with the proposed updates we have + before_norm = normalise_cr(before_clientscope, remove_ids=True) + desired_norm = normalise_cr(desired_clientscope, remove_ids=True) + if module._diff: + result['diff'] = dict(before=sanitize_cr(before_norm), + after=sanitize_cr(desired_norm)) + result['changed'] = not is_struct_included(desired_norm, before_norm) module.exit_json(**result) # do the update diff --git a/plugins/modules/keycloak_clientscope_type.py b/plugins/modules/keycloak_clientscope_type.py new file mode 100644 index 0000000000..e979d123ab --- /dev/null 
+++ b/plugins/modules/keycloak_clientscope_type.py @@ -0,0 +1,304 @@ +#!/usr/bin/python + +# Copyright (c) Ansible project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + + +DOCUMENTATION = r""" +module: keycloak_clientscope_type + +short_description: Set the type of aclientscope in realm or client using Keycloak API + +version_added: 6.6.0 + +description: + - This module allows you to set the type (optional, default) of clientscopes using the Keycloak REST API. It requires access + to the REST API using OpenID Connect; the user connecting and the client being used must have the requisite access rights. + In a default Keycloak installation, admin-cli and an admin user would work, as would a separate client definition with + the scope tailored to your needs and a user having the expected roles. +attributes: + check_mode: + support: full + diff_mode: + support: full + action_group: + version_added: 10.2.0 + +options: + realm: + type: str + description: + - The Keycloak realm. + default: 'master' + + client_id: + description: + - The O(client_id) of the client. If not set the clientscope types are set as a default for the realm. + aliases: + - clientId + type: str + + default_clientscopes: + description: + - Client scopes that should be of type default. + type: list + elements: str + + optional_clientscopes: + description: + - Client scopes that should be of type optional. 
+ type: list + elements: str + +extends_documentation_fragment: + - community.general.keycloak + - community.general.keycloak.actiongroup_keycloak + - community.general.attributes + +author: + - Simon Pahl (@simonpahl) +""" + +EXAMPLES = r""" +- name: Set default client scopes on realm level + community.general.keycloak_clientscope_type: + auth_client_id: admin-cli + auth_keycloak_url: https://auth.example.com/auth + auth_realm: master + auth_username: USERNAME + auth_password: PASSWORD + realm: "MyCustomRealm" + default_clientscopes: ['profile', 'roles'] + delegate_to: localhost + + +- name: Set default and optional client scopes on client level with token auth + community.general.keycloak_clientscope_type: + auth_client_id: admin-cli + auth_keycloak_url: https://auth.example.com/auth + token: TOKEN + realm: "MyCustomRealm" + client_id: "MyCustomClient" + default_clientscopes: ['profile', 'roles'] + optional_clientscopes: ['phone'] + delegate_to: localhost +""" + +RETURN = r""" +msg: + description: Message as to what action was taken. + returned: always + type: str + sample: "" +proposed: + description: Representation of proposed client-scope types mapping. + returned: always + type: dict + sample: + { + "default_clientscopes": [ + "profile", + "role" + ], + "optional_clientscopes": [] + } +existing: + description: + - Representation of client scopes before module execution. + returned: always + type: dict + sample: + { + "default_clientscopes": [ + "profile", + "role" + ], + "optional_clientscopes": [ + "phone" + ] + } +end_state: + description: + - Representation of client scopes after module execution. + - The sample is truncated. 
+ returned: on success + type: dict + sample: + { + "default_clientscopes": [ + "profile", + "role" + ], + "optional_clientscopes": [] + } +""" + +from ansible.module_utils.basic import AnsibleModule + +from ansible_collections.community.general.plugins.module_utils.identity.keycloak.keycloak import ( + KeycloakAPI, KeycloakError, get_token) + +from ansible_collections.community.general.plugins.module_utils.identity.keycloak.keycloak import \ + keycloak_argument_spec + + +def keycloak_clientscope_type_module(): + """ + Returns an AnsibleModule definition. + + :return: argument_spec dict + """ + argument_spec = keycloak_argument_spec() + + meta_args = dict( + realm=dict(default='master'), + client_id=dict(type='str', aliases=['clientId']), + default_clientscopes=dict(type='list', elements='str'), + optional_clientscopes=dict(type='list', elements='str'), + ) + + argument_spec.update(meta_args) + + module = AnsibleModule( + argument_spec=argument_spec, + supports_check_mode=True, + required_one_of=([ + ['token', 'auth_realm', 'auth_username', 'auth_password', 'auth_client_id', 'auth_client_secret'], + ['default_clientscopes', 'optional_clientscopes'] + ]), + required_together=([['auth_username', 'auth_password']]), + required_by={'refresh_token': 'auth_realm'}, + mutually_exclusive=[ + ['token', 'auth_realm'], + ['token', 'auth_username'], + ['token', 'auth_password'] + ], + ) + + return module + + +def clientscopes_to_add(existing, proposed): + to_add = [] + existing_clientscope_ids = extract_field(existing, 'id') + for clientscope in proposed: + if not clientscope['id'] in existing_clientscope_ids: + to_add.append(clientscope) + return to_add + + +def clientscopes_to_delete(existing, proposed): + to_delete = [] + proposed_clientscope_ids = extract_field(proposed, 'id') + for clientscope in existing: + if not clientscope['id'] in proposed_clientscope_ids: + to_delete.append(clientscope) + return to_delete + + +def extract_field(dictionary, field='name'): + return 
[cs[field] for cs in dictionary] + + +def normalize_scopes(scopes): + scopes_copy = scopes.copy() + if isinstance(scopes_copy.get('default_clientscopes'), list): + scopes_copy['default_clientscopes'] = sorted(scopes_copy['default_clientscopes']) + if isinstance(scopes_copy.get('optional_clientscopes'), list): + scopes_copy['optional_clientscopes'] = sorted(scopes_copy['optional_clientscopes']) + return scopes_copy + + +def main(): + """ + Module keycloak_clientscope_type + + :return: + """ + + module = keycloak_clientscope_type_module() + + # Obtain access token, initialize API + try: + connection_header = get_token(module.params) + except KeycloakError as e: + module.fail_json(msg=str(e)) + + kc = KeycloakAPI(module, connection_header) + + realm = module.params.get('realm') + client_id = module.params.get('client_id') + default_clientscopes = module.params.get('default_clientscopes') + optional_clientscopes = module.params.get('optional_clientscopes') + + result = dict(changed=False, msg='', proposed={}, existing={}, end_state={}) + + all_clientscopes = kc.get_clientscopes(realm) + default_clientscopes_real = [] + optional_clientscopes_real = [] + + for client_scope in all_clientscopes: + if default_clientscopes is not None and client_scope["name"] in default_clientscopes: + default_clientscopes_real.append(client_scope) + if optional_clientscopes is not None and client_scope["name"] in optional_clientscopes: + optional_clientscopes_real.append(client_scope) + + if default_clientscopes is not None and len(default_clientscopes_real) != len(default_clientscopes): + module.fail_json(msg='At least one of the default_clientscopes does not exist!') + + if optional_clientscopes is not None and len(optional_clientscopes_real) != len(optional_clientscopes): + module.fail_json(msg='At least one of the optional_clientscopes does not exist!') + + result['proposed'].update({ + 'default_clientscopes': 'no-change' if default_clientscopes is None else default_clientscopes, + 
'optional_clientscopes': 'no-change' if optional_clientscopes is None else optional_clientscopes + }) + + default_clientscopes_existing = kc.get_default_clientscopes(realm, client_id) + optional_clientscopes_existing = kc.get_optional_clientscopes(realm, client_id) + + result['existing'].update({ + 'default_clientscopes': extract_field(default_clientscopes_existing), + 'optional_clientscopes': extract_field(optional_clientscopes_existing) + }) + + if module._diff: + result['diff'] = dict(before=normalize_scopes(result['existing']), after=normalize_scopes(result['proposed'])) + + default_clientscopes_add = clientscopes_to_add(default_clientscopes_existing, default_clientscopes_real) + optional_clientscopes_add = clientscopes_to_add(optional_clientscopes_existing, optional_clientscopes_real) + + default_clientscopes_delete = clientscopes_to_delete(default_clientscopes_existing, default_clientscopes_real) + optional_clientscopes_delete = clientscopes_to_delete(optional_clientscopes_existing, optional_clientscopes_real) + + result["changed"] = any(len(x) > 0 for x in [ + default_clientscopes_add, optional_clientscopes_add, default_clientscopes_delete, optional_clientscopes_delete + ]) + + if module.check_mode: + module.exit_json(**result) + + # first delete so clientscopes can change type + for clientscope in default_clientscopes_delete: + kc.delete_default_clientscope(clientscope['id'], realm, client_id) + for clientscope in optional_clientscopes_delete: + kc.delete_optional_clientscope(clientscope['id'], realm, client_id) + + for clientscope in default_clientscopes_add: + kc.add_default_clientscope(clientscope['id'], realm, client_id) + for clientscope in optional_clientscopes_add: + kc.add_optional_clientscope(clientscope['id'], realm, client_id) + + result['end_state'].update({ + 'default_clientscopes': extract_field(kc.get_default_clientscopes(realm, client_id)), + 'optional_clientscopes': extract_field(kc.get_optional_clientscopes(realm, client_id)) + }) + + 
module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/keycloak_clientsecret_info.py b/plugins/modules/keycloak_clientsecret_info.py new file mode 100644 index 0000000000..8b92516eb9 --- /dev/null +++ b/plugins/modules/keycloak_clientsecret_info.py @@ -0,0 +1,166 @@ +#!/usr/bin/python + +# Copyright (c) 2022, Fynn Chen +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + + +DOCUMENTATION = r""" +module: keycloak_clientsecret_info + +short_description: Retrieve client secret using Keycloak API + +version_added: 6.1.0 + +description: + - This module allows you to get a Keycloak client secret using the Keycloak REST API. It requires access to the REST API + using OpenID Connect; the user connecting and the client being used must have the requisite access rights. In a default + Keycloak installation, admin-cli and an admin user would work, as would a separate client definition with the scope tailored + to your needs and a user having the expected roles. + - When retrieving a new client secret, where possible provide the client's O(id) (not O(client_id)) to the module. This + removes a lookup to the API to translate the O(client_id) into the client ID. + - 'Note that this module returns the client secret. To avoid this showing up in the logs, please add C(no_log: true) to + the task.' +attributes: + action_group: + version_added: 10.2.0 + +options: + realm: + type: str + description: + - They Keycloak realm under which this client resides. + default: 'master' + + id: + description: + - The unique identifier for this client. + - This parameter is not required for getting or generating a client secret but providing it reduces the number of API + calls required. + type: str + + client_id: + description: + - The O(client_id) of the client. Passing this instead of O(id) results in an extra API call. 
+ aliases: + - clientId + type: str + + +extends_documentation_fragment: + - community.general.keycloak + - community.general.keycloak.actiongroup_keycloak + - community.general.attributes + - community.general.attributes.info_module + +author: + - Fynn Chen (@fynncfchen) + - John Cant (@johncant) +""" + +EXAMPLES = r""" +- name: Get a Keycloak client secret, authentication with credentials + community.general.keycloak_clientsecret_info: + id: '9d59aa76-2755-48c6-b1af-beb70a82c3cd' + realm: MyCustomRealm + auth_client_id: admin-cli + auth_keycloak_url: https://auth.example.com/auth + auth_realm: master + auth_username: USERNAME + auth_password: PASSWORD + delegate_to: localhost + no_log: true + +- name: Get a new Keycloak client secret, authentication with token + community.general.keycloak_clientsecret_info: + id: '9d59aa76-2755-48c6-b1af-beb70a82c3cd' + realm: MyCustomRealm + auth_client_id: admin-cli + auth_keycloak_url: https://auth.example.com/auth + token: TOKEN + delegate_to: localhost + no_log: true + +- name: Get a new Keycloak client secret, passing client_id instead of id + community.general.keycloak_clientsecret_info: + client_id: 'myClientId' + realm: MyCustomRealm + auth_client_id: admin-cli + auth_keycloak_url: https://auth.example.com/auth + token: TOKEN + delegate_to: localhost + no_log: true + +- name: Get a new Keycloak client secret, authentication with auth_client_id and auth_client_secret + community.general.keycloak_clientsecret_info: + id: '9d59aa76-2755-48c6-b1af-beb70a82c3cd' + realm: MyCustomRealm + auth_client_id: admin-cli + auth_client_secret: SECRET + auth_keycloak_url: https://auth.example.com/auth + delegate_to: localhost + no_log: true +""" + +RETURN = r""" +msg: + description: Textual description of whether we succeeded or failed. + returned: always + type: str + +clientsecret_info: + description: Representation of the client secret. + returned: on success + type: complex + contains: + type: + description: Credential type. 
+ type: str + returned: always + sample: secret + value: + description: Client secret. + type: str + returned: always + sample: cUGnX1EIeTtPPAkcyGMv0ncyqDPu68P1 +""" + +from ansible_collections.community.general.plugins.module_utils.identity.keycloak.keycloak import ( + KeycloakAPI, KeycloakError, get_token) +from ansible_collections.community.general.plugins.module_utils.identity.keycloak.keycloak_clientsecret import ( + keycloak_clientsecret_module, keycloak_clientsecret_module_resolve_params) + + +def main(): + """ + Module keycloak_clientsecret_info + + :return: + """ + + module = keycloak_clientsecret_module() + + # Obtain access token, initialize API + try: + connection_header = get_token(module.params) + except KeycloakError as e: + module.fail_json(msg=str(e)) + + kc = KeycloakAPI(module, connection_header) + + id, realm = keycloak_clientsecret_module_resolve_params(module, kc) + + clientsecret = kc.get_clientsecret(id=id, realm=realm) + + result = { + 'clientsecret_info': clientsecret, + 'msg': 'Get client secret successful for ID {id}'.format(id=id) + } + + module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/keycloak_clientsecret_regenerate.py b/plugins/modules/keycloak_clientsecret_regenerate.py new file mode 100644 index 0000000000..823c011a96 --- /dev/null +++ b/plugins/modules/keycloak_clientsecret_regenerate.py @@ -0,0 +1,176 @@ +#!/usr/bin/python + +# Copyright (c) 2022, Fynn Chen +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + + +DOCUMENTATION = r""" +module: keycloak_clientsecret_regenerate + +short_description: Regenerate Keycloak client secret using Keycloak API + +version_added: 6.1.0 + +description: + - This module allows you to regenerate a Keycloak client secret using the Keycloak REST API. 
It requires access to the REST + API using OpenID Connect; the user connecting and the client being used must have the requisite access rights. In a default + Keycloak installation, admin-cli and an admin user would work, as would a separate client definition with the scope tailored + to your needs and a user having the expected roles. + - When regenerating a client secret, where possible provide the client's ID (not client_id) to the module. This removes + a lookup to the API to translate the client_id into the client ID. + - 'Note that this module returns the client secret. To avoid this showing up in the logs, please add C(no_log: true) to + the task.' +attributes: + check_mode: + support: full + diff_mode: + support: none + action_group: + version_added: 10.2.0 + +options: + realm: + type: str + description: + - They Keycloak realm under which this client resides. + default: 'master' + + id: + description: + - The unique identifier for this client. + - This parameter is not required for getting or generating a client secret but providing it reduces the number of API + calls required. + type: str + + client_id: + description: + - The client_id of the client. Passing this instead of ID results in an extra API call. 
+ aliases: + - clientId + type: str + + +extends_documentation_fragment: + - community.general.keycloak + - community.general.keycloak.actiongroup_keycloak + - community.general.attributes + +author: + - Fynn Chen (@fynncfchen) + - John Cant (@johncant) +""" + +EXAMPLES = r""" +- name: Regenerate a Keycloak client secret, authentication with credentials + community.general.keycloak_clientsecret_regenerate: + id: '9d59aa76-2755-48c6-b1af-beb70a82c3cd' + realm: MyCustomRealm + auth_client_id: admin-cli + auth_keycloak_url: https://auth.example.com/auth + auth_realm: master + auth_username: USERNAME + auth_password: PASSWORD + delegate_to: localhost + no_log: true + +- name: Regenerate a Keycloak client secret, authentication with token + community.general.keycloak_clientsecret_regenerate: + id: '9d59aa76-2755-48c6-b1af-beb70a82c3cd' + realm: MyCustomRealm + auth_client_id: admin-cli + auth_keycloak_url: https://auth.example.com/auth + token: TOKEN + delegate_to: localhost + no_log: true + +- name: Regenerate a Keycloak client secret, passing client_id instead of id + community.general.keycloak_clientsecret_info: + client_id: 'myClientId' + realm: MyCustomRealm + auth_client_id: admin-cli + auth_keycloak_url: https://auth.example.com/auth + token: TOKEN + delegate_to: localhost + no_log: true + +- name: Regenerate a new Keycloak client secret, authentication with auth_client_id and auth_client_secret + community.general.keycloak_clientsecret_regenerate: + id: '9d59aa76-2755-48c6-b1af-beb70a82c3cd' + realm: MyCustomRealm + auth_client_id: admin-cli + auth_client_secret: SECRET + auth_keycloak_url: https://auth.example.com/auth + delegate_to: localhost + no_log: true +""" + +RETURN = r""" +msg: + description: Message as to what action was taken. + returned: always + type: str + +end_state: + description: Representation of the client credential after module execution. + returned: on success + type: complex + contains: + type: + description: Credential type. 
+ type: str + returned: always + sample: secret + value: + description: Client secret. + type: str + returned: always + sample: cUGnX1EIeTtPPAkcyGMv0ncyqDPu68P1 +""" + +from ansible_collections.community.general.plugins.module_utils.identity.keycloak.keycloak import ( + KeycloakAPI, KeycloakError, get_token) +from ansible_collections.community.general.plugins.module_utils.identity.keycloak.keycloak_clientsecret import ( + keycloak_clientsecret_module, keycloak_clientsecret_module_resolve_params) + + +def main(): + """ + Module keycloak_clientsecret_regenerate + + :return: + """ + + module = keycloak_clientsecret_module() + + # Obtain access token, initialize API + try: + connection_header = get_token(module.params) + except KeycloakError as e: + module.fail_json(msg=str(e)) + + kc = KeycloakAPI(module, connection_header) + + id, realm = keycloak_clientsecret_module_resolve_params(module, kc) + + if module.check_mode: + dummy_result = { + "msg": 'No action taken while in check mode', + "end_state": {'type': 'secret', 'value': 'X' * 32} + } + module.exit_json(**dummy_result) + + # Create new secret + clientsecret = kc.create_clientsecret(id=id, realm=realm) + + result = { + "msg": 'New client secret has been generated for ID {id}'.format(id=id), + "end_state": clientsecret + } + module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/identity/keycloak/keycloak_clienttemplate.py b/plugins/modules/keycloak_clienttemplate.py similarity index 52% rename from plugins/modules/identity/keycloak/keycloak_clienttemplate.py rename to plugins/modules/keycloak_clienttemplate.py index cec7c93d8d..7eda821de6 100644 --- a/plugins/modules/identity/keycloak/keycloak_clienttemplate.py +++ b/plugins/modules/keycloak_clienttemplate.py @@ -1,173 +1,170 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- # Copyright (c) 2017, Eike Frost -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# GNU General Public 
License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: keycloak_clienttemplate -short_description: Allows administration of Keycloak client templates via Keycloak API - +short_description: Allows administration of Keycloak client templates using Keycloak API description: - - This module allows the administration of Keycloak client templates via the Keycloak REST API. It - requires access to the REST API via OpenID Connect; the user connecting and the client being - used must have the requisite access rights. In a default Keycloak installation, admin-cli - and an admin user would work, as would a separate client definition with the scope tailored - to your needs and a user having the expected roles. - - - The names of module options are snake_cased versions of the camelCase ones found in the - Keycloak API and its documentation at U(https://www.keycloak.org/docs-api/8.0/rest-api/index.html) - - - The Keycloak API does not always enforce for only sensible settings to be used -- you can set - SAML-specific settings on an OpenID Connect client for instance and vice versa. Be careful. - If you do not specify a setting, usually a sensible default is chosen. + - This module allows the administration of Keycloak client templates using the Keycloak REST API. It requires access to + the REST API using OpenID Connect; the user connecting and the client being used must have the requisite access rights. + In a default Keycloak installation, admin-cli and an admin user would work, as would a separate client definition with + the scope tailored to your needs and a user having the expected roles. 
+ - The names of module options are snake_cased versions of the camelCase ones found in the Keycloak API and its documentation + at U(https://www.keycloak.org/docs-api/8.0/rest-api/index.html). + - The Keycloak API does not always enforce for only sensible settings to be used -- you can set SAML-specific settings on + an OpenID Connect client for instance and the other way around. Be careful. If you do not specify a setting, usually a + sensible default is chosen. +attributes: + check_mode: + support: full + diff_mode: + support: full + action_group: + version_added: 10.2.0 options: - state: - description: - - State of the client template. - - On C(present), the client template will be created (or updated if it exists already). - - On C(absent), the client template will be removed if it exists - choices: ['present', 'absent'] - default: 'present' - type: str - - id: - description: - - Id of client template to be worked on. This is usually a UUID. - type: str - - realm: - description: - - Realm this client template is found in. - type: str - default: master - - name: - description: - - Name of the client template. - type: str - + state: description: - description: - - Description of the client template in Keycloak. - type: str + - State of the client template. + - On V(present), the client template is created (or updated if it exists already). + - On V(absent), the client template is removed if it exists. + choices: ['present', 'absent'] + default: 'present' + type: str - protocol: - description: - - Type of client template (either C(openid-connect) or C(saml). - choices: ['openid-connect', 'saml'] - type: str + id: + description: + - ID of client template to be worked on. This is usually a UUID. + type: str - full_scope_allowed: + realm: + description: + - Realm this client template is found in. + type: str + default: master + + name: + description: + - Name of the client template. 
+ type: str + + description: + description: + - Description of the client template in Keycloak. + type: str + + protocol: + description: + - Type of client template. + - The V(docker-v2) value was added in community.general 8.6.0. + choices: ['openid-connect', 'saml', 'docker-v2'] + type: str + + full_scope_allowed: + description: + - Is the "Full Scope Allowed" feature set for this client template or not. This is C(fullScopeAllowed) in the Keycloak + REST API. + type: bool + + protocol_mappers: + description: + - A list of dicts defining protocol mappers for this client template. This is C(protocolMappers) in the Keycloak REST + API. + type: list + elements: dict + suboptions: + consentRequired: description: - - Is the "Full Scope Allowed" feature set for this client template or not. - This is 'fullScopeAllowed' in the Keycloak REST API. + - Specifies whether a user needs to provide consent to a client for this mapper to be active. type: bool - protocol_mappers: + consentText: description: - - a list of dicts defining protocol mappers for this client template. - This is 'protocolMappers' in the Keycloak REST API. - type: list - elements: dict - suboptions: - consentRequired: - description: - - Specifies whether a user needs to provide consent to a client for this mapper to be active. - type: bool + - The human-readable name of the consent the user is presented to accept. + type: str - consentText: - description: - - The human-readable name of the consent the user is presented to accept. - type: str - - id: - description: - - Usually a UUID specifying the internal ID of this protocol mapper instance. - type: str - - name: - description: - - The name of this protocol mapper. - type: str - - protocol: - description: - - This is either C(openid-connect) or C(saml), this specifies for which protocol this protocol mapper. - is active. 
- choices: ['openid-connect', 'saml'] - type: str - - protocolMapper: - description: - - The Keycloak-internal name of the type of this protocol-mapper. While an exhaustive list is - impossible to provide since this may be extended through SPIs by the user of Keycloak, - by default Keycloak as of 3.4 ships with at least - - C(docker-v2-allow-all-mapper) - - C(oidc-address-mapper) - - C(oidc-full-name-mapper) - - C(oidc-group-membership-mapper) - - C(oidc-hardcoded-claim-mapper) - - C(oidc-hardcoded-role-mapper) - - C(oidc-role-name-mapper) - - C(oidc-script-based-protocol-mapper) - - C(oidc-sha256-pairwise-sub-mapper) - - C(oidc-usermodel-attribute-mapper) - - C(oidc-usermodel-client-role-mapper) - - C(oidc-usermodel-property-mapper) - - C(oidc-usermodel-realm-role-mapper) - - C(oidc-usersessionmodel-note-mapper) - - C(saml-group-membership-mapper) - - C(saml-hardcode-attribute-mapper) - - C(saml-hardcode-role-mapper) - - C(saml-role-list-mapper) - - C(saml-role-name-mapper) - - C(saml-user-attribute-mapper) - - C(saml-user-property-mapper) - - C(saml-user-session-note-mapper) - - An exhaustive list of available mappers on your installation can be obtained on - the admin console by going to Server Info -> Providers and looking under - 'protocol-mapper'. - type: str - - config: - description: - - Dict specifying the configuration options for the protocol mapper; the - contents differ depending on the value of I(protocolMapper) and are not documented - other than by the source of the mappers and its parent class(es). An example is given - below. It is easiest to obtain valid config values by dumping an already-existing - protocol mapper configuration through check-mode in the I(existing) field. - type: dict - - attributes: + id: description: - - A dict of further attributes for this client template. This can contain various - configuration settings, though in the default installation of Keycloak as of 3.4, none - are documented or known, so this is usually empty. 
+ - Usually a UUID specifying the internal ID of this protocol mapper instance. + type: str + + name: + description: + - The name of this protocol mapper. + type: str + + protocol: + description: + - This specifies for which protocol this protocol mapper is active. + choices: ['openid-connect', 'saml', 'docker-v2'] + type: str + + protocolMapper: + description: + - 'The Keycloak-internal name of the type of this protocol-mapper. While an exhaustive list is impossible to provide + since this may be extended through SPIs by the user of Keycloak, by default Keycloak as of 3.4 ships with at least:' + - V(docker-v2-allow-all-mapper). + - V(oidc-address-mapper). + - V(oidc-full-name-mapper). + - V(oidc-group-membership-mapper). + - V(oidc-hardcoded-claim-mapper). + - V(oidc-hardcoded-role-mapper). + - V(oidc-role-name-mapper). + - V(oidc-script-based-protocol-mapper). + - V(oidc-sha256-pairwise-sub-mapper). + - V(oidc-usermodel-attribute-mapper). + - V(oidc-usermodel-client-role-mapper). + - V(oidc-usermodel-property-mapper). + - V(oidc-usermodel-realm-role-mapper). + - V(oidc-usersessionmodel-note-mapper). + - V(saml-group-membership-mapper). + - V(saml-hardcode-attribute-mapper). + - V(saml-hardcode-role-mapper). + - V(saml-role-list-mapper). + - V(saml-role-name-mapper). + - V(saml-user-attribute-mapper). + - V(saml-user-property-mapper). + - V(saml-user-session-note-mapper). + - An exhaustive list of available mappers on your installation can be obtained on the admin console by going to + Server Info -> Providers and looking under 'protocol-mapper'. + type: str + + config: + description: + - Dict specifying the configuration options for the protocol mapper; the contents differ depending on the value + of O(protocol_mappers[].protocolMapper) and are not documented other than by the source of the mappers and its + parent class(es). An example is given below. 
It is easiest to obtain valid config values by dumping an already-existing + protocol mapper configuration through check-mode in the RV(existing) field. type: dict -notes: -- The Keycloak REST API defines further fields (namely I(bearerOnly), I(consentRequired), I(standardFlowEnabled), - I(implicitFlowEnabled), I(directAccessGrantsEnabled), I(serviceAccountsEnabled), I(publicClient), and - I(frontchannelLogout)) which, while available with keycloak_client, do not have any effect on - Keycloak client-templates and are discarded if supplied with an API request changing client-templates. As such, - they are not available through this module. + attributes: + description: + - A dict of further attributes for this client template. This can contain various configuration settings, though in + the default installation of Keycloak as of 3.4, none are documented or known, so this is usually empty. + type: dict +notes: + - The Keycloak REST API defines further fields (namely C(bearerOnly), C(consentRequired), C(standardFlowEnabled), C(implicitFlowEnabled), + C(directAccessGrantsEnabled), C(serviceAccountsEnabled), C(publicClient), and C(frontchannelLogout)) which, while available + with keycloak_client, do not have any effect on Keycloak client-templates and are discarded if supplied with an API request + changing client-templates. As such, they are not available through this module. 
extends_documentation_fragment: -- community.general.keycloak + - community.general.keycloak + - community.general.keycloak.actiongroup_keycloak + - community.general.attributes author: - - Eike Frost (@eikef) -''' + - Eike Frost (@eikef) +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: Create or update Keycloak client template (minimal), authentication with credentials community.general.keycloak_client: auth_client_id: admin-cli @@ -212,13 +209,13 @@ EXAMPLES = ''' name: this_is_a_test protocol_mappers: - config: - access.token.claim: True + access.token.claim: true claim.name: "family_name" - id.token.claim: True + id.token.claim: true jsonType.label: String user.attribute: lastName - userinfo.token.claim: True - consentRequired: True + userinfo.token.claim: true + consentRequired: true consentText: "${familyName}" name: family name protocol: openid-connect @@ -226,47 +223,47 @@ EXAMPLES = ''' full_scope_allowed: false id: bce6f5e9-d7d3-4955-817e-c5b7f8d65b3f delegate_to: localhost -''' +""" -RETURN = ''' +RETURN = r""" msg: - description: Message as to what action was taken. - returned: always - type: str - sample: "Client template testclient has been updated" + description: Message as to what action was taken. + returned: always + type: str + sample: "Client template testclient has been updated" proposed: - description: Representation of proposed client template. - returned: always - type: dict - sample: { - name: "test01" - } + description: Representation of proposed client template. + returned: always + type: dict + sample: {"name": "test01"} existing: - description: Representation of existing client template (sample is truncated). - returned: always - type: dict - sample: { - "description": "test01", - "fullScopeAllowed": false, - "id": "9c3712ab-decd-481e-954f-76da7b006e5f", - "name": "test01", - "protocol": "saml" + description: Representation of existing client template (sample is truncated). 
+ returned: always + type: dict + sample: + { + "description": "test01", + "fullScopeAllowed": false, + "id": "9c3712ab-decd-481e-954f-76da7b006e5f", + "name": "test01", + "protocol": "saml" } end_state: - description: Representation of client template after module execution (sample is truncated). - returned: on success - type: dict - sample: { - "description": "test01", - "fullScopeAllowed": false, - "id": "9c3712ab-decd-481e-954f-76da7b006e5f", - "name": "test01", - "protocol": "saml" + description: Representation of client template after module execution (sample is truncated). + returned: on success + type: dict + sample: + { + "description": "test01", + "fullScopeAllowed": false, + "id": "9c3712ab-decd-481e-954f-76da7b006e5f", + "name": "test01", + "protocol": "saml" } -''' +""" from ansible_collections.community.general.plugins.module_utils.identity.keycloak.keycloak import KeycloakAPI, camel, \ keycloak_argument_spec, get_token, KeycloakError @@ -286,7 +283,7 @@ def main(): consentText=dict(type='str'), id=dict(type='str'), name=dict(type='str'), - protocol=dict(type='str', choices=['openid-connect', 'saml']), + protocol=dict(type='str', choices=['openid-connect', 'saml', 'docker-v2']), protocolMapper=dict(type='str'), config=dict(type='dict'), ) @@ -298,7 +295,7 @@ def main(): id=dict(type='str'), name=dict(type='str'), description=dict(type='str'), - protocol=dict(type='str', choices=['openid-connect', 'saml']), + protocol=dict(type='str', choices=['openid-connect', 'saml', 'docker-v2']), attributes=dict(type='dict'), full_scope_allowed=dict(type='bool'), protocol_mappers=dict(type='list', elements='dict', options=protmapper_spec), @@ -309,8 +306,10 @@ def main(): module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True, required_one_of=([['id', 'name'], - ['token', 'auth_realm', 'auth_username', 'auth_password']]), - required_together=([['auth_realm', 'auth_username', 'auth_password']])) + ['token', 'auth_realm', 'auth_username', 
'auth_password', 'auth_client_id', 'auth_client_secret']]), + required_together=([['auth_username', 'auth_password']]), + required_by={'refresh_token': 'auth_realm'}, + ) result = dict(changed=False, msg='', diff={}, proposed={}, existing={}, end_state={}) diff --git a/plugins/modules/keycloak_component.py b/plugins/modules/keycloak_component.py new file mode 100644 index 0000000000..c33c9af136 --- /dev/null +++ b/plugins/modules/keycloak_component.py @@ -0,0 +1,322 @@ +#!/usr/bin/python + +# Copyright (c) 2024, Björn Bösel +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or +# https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later +from __future__ import annotations + +DOCUMENTATION = r""" +module: keycloak_component + +short_description: Allows administration of Keycloak components using Keycloak API + +version_added: 10.0.0 + +description: + - This module allows the administration of Keycloak components using the Keycloak REST API. It requires access to the REST + API using OpenID Connect; the user connecting and the realm being used must have the requisite access rights. In a default + Keycloak installation, C(admin-cli) and an C(admin) user would work, as would a separate realm definition with the scope + tailored to your needs and a user having the expected roles. + - The names of module options are snake_cased versions of the camelCase ones found in the Keycloak API and its documentation + at U(https://www.keycloak.org/docs-api/latest/rest-api/index.html). Aliases are provided so camelCased versions can be + used as well. +attributes: + check_mode: + support: full + diff_mode: + support: full + action_group: + version_added: 10.2.0 + +options: + state: + description: + - State of the Keycloak component. + - On V(present), the component is created (or updated if it exists already). + - On V(absent), the component is removed if it exists. 
+ choices: ['present', 'absent'] + default: 'present' + type: str + name: + description: + - Name of the component to create. + type: str + required: true + parent_id: + description: + - The parent_id of the component. In practice the ID (name) of the realm. + type: str + required: true + provider_id: + description: + - The name of the "provider ID" for the key. + type: str + required: true + provider_type: + description: + - The name of the "provider type" for the key. That is, V(org.keycloak.storage.UserStorageProvider), V(org.keycloak.userprofile.UserProfileProvider), + ... + - See U(https://www.keycloak.org/docs/latest/server_development/index.html#_providers). + type: str + required: true + config: + description: + - Configuration properties for the provider. + - Contents vary depending on the provider type. + type: dict + +extends_documentation_fragment: + - community.general.keycloak + - community.general.keycloak.actiongroup_keycloak + - community.general.attributes + +author: + - Björn Bösel (@fivetide) +""" + +EXAMPLES = r""" +- name: Manage Keycloak User Storage Provider + community.general.keycloak_component: + auth_keycloak_url: http://localhost:8080/auth + auth_username: keycloak + auth_password: keycloak + auth_realm: master + name: my storage provider + state: present + parent_id: some_realm + provider_id: my storage + provider_type: "org.keycloak.storage.UserStorageProvider" + config: + myCustomKey: "my_custom_key" + cachePolicy: "NO_CACHE" + enabled: true +""" + +RETURN = r""" +end_state: + description: Representation of the keycloak_component after module execution. + returned: on success + type: dict + contains: + id: + description: ID of the component. + type: str + returned: when O(state=present) + sample: 5b7ec13f-99da-46ad-8326-ab4c73cf4ce4 + name: + description: Name of the component. + type: str + returned: when O(state=present) + sample: mykey + parentId: + description: ID of the realm this key belongs to. 
+ type: str + returned: when O(state=present) + sample: myrealm + providerId: + description: The ID of the key provider. + type: str + returned: when O(state=present) + sample: rsa + providerType: + description: The type of provider. + type: str + returned: when O(state=present) + config: + description: Component configuration. + type: dict +""" + +from ansible_collections.community.general.plugins.module_utils.identity.keycloak.keycloak import KeycloakAPI, camel, \ + keycloak_argument_spec, get_token, KeycloakError +from ansible.module_utils.basic import AnsibleModule +from urllib.parse import urlencode +from copy import deepcopy + + +def main(): + argument_spec = keycloak_argument_spec() + + meta_args = dict( + state=dict(type='str', default='present', choices=['present', 'absent']), + name=dict(type='str', required=True), + parent_id=dict(type='str', required=True), + provider_id=dict(type='str', required=True), + provider_type=dict(type='str', required=True), + config=dict( + type='dict', + ) + ) + + argument_spec.update(meta_args) + + module = AnsibleModule(argument_spec=argument_spec, + supports_check_mode=True, + required_one_of=([['token', 'auth_realm', 'auth_username', 'auth_password', 'auth_client_id', 'auth_client_secret']]), + required_together=([['auth_username', 'auth_password']]), + required_by={'refresh_token': 'auth_realm'}, + ) + + result = dict(changed=False, msg='', end_state={}, diff=dict(before={}, after={})) + + # This will include the current state of the component if it is already + # present. This is only used for diff-mode. 
+ before_component = {} + before_component['config'] = {} + + # Obtain access token, initialize API + try: + connection_header = get_token(module.params) + except KeycloakError as e: + module.fail_json(msg=str(e)) + + kc = KeycloakAPI(module, connection_header) + + params_to_ignore = list(keycloak_argument_spec().keys()) + ["state", "parent_id"] + + # Filter and map the parameters names that apply to the role + component_params = [x for x in module.params + if x not in params_to_ignore and + module.params.get(x) is not None] + + provider_type = module.params.get("provider_type") + + # Build a proposed changeset from parameters given to this module + changeset = {} + changeset['config'] = {} + + # Generate a JSON payload for Keycloak Admin API from the module + # parameters. Parameters that do not belong to the JSON payload (e.g. + # "state" or "auth_keycloal_url") have been filtered away earlier (see + # above). + # + # This loop converts Ansible module parameters (snake-case) into + # Keycloak-compatible format (camel-case). For example private_key + # becomes privateKey. + # + # It also converts bool, str and int parameters into lists with a single + # entry of 'str' type. Bool values are also lowercased. This is required + # by Keycloak. + # + for component_param in component_params: + if component_param == 'config': + for config_param in module.params.get('config'): + changeset['config'][camel(config_param)] = [] + raw_value = module.params.get('config')[config_param] + if isinstance(raw_value, bool): + value = str(raw_value).lower() + else: + value = str(raw_value) + + changeset['config'][camel(config_param)].append(value) + else: + # No need for camelcase in here as these are one word parameters + new_param_value = module.params.get(component_param) + changeset[camel(component_param)] = new_param_value + + # Make a deep copy of the changeset. This is use when determining + # changes to the current state. 
+ changeset_copy = deepcopy(changeset) + + # Make it easier to refer to current module parameters + name = module.params.get('name') + force = module.params.get('force') + state = module.params.get('state') + enabled = module.params.get('enabled') + provider_id = module.params.get('provider_id') + provider_type = module.params.get('provider_type') + parent_id = module.params.get('parent_id') + + # Get a list of all Keycloak components that are of keyprovider type. + current_components = kc.get_components(urlencode(dict(type=provider_type)), parent_id) + + # If this component is present get its key ID. Confusingly the key ID is + # also known as the Provider ID. + component_id = None + + # Track individual parameter changes + changes = "" + + # This tells Ansible whether the key was changed (added, removed, modified) + result['changed'] = False + + # Loop through the list of components. If we encounter a component whose + # name matches the value of the name parameter then assume the key is + # already present. 
+ for component in current_components: + if component['name'] == name: + component_id = component['id'] + changeset['id'] = component_id + changeset_copy['id'] = component_id + + # Compare top-level parameters + for param, value in changeset.items(): + before_component[param] = component[param] + + if changeset_copy[param] != component[param] and param != 'config': + changes += "%s: %s -> %s, " % (param, component[param], changeset_copy[param]) + result['changed'] = True + # Compare parameters under the "config" key + for p, v in changeset_copy['config'].items(): + try: + before_component['config'][p] = component['config'][p] or [] + except KeyError: + before_component['config'][p] = [] + if changeset_copy['config'][p] != component['config'][p]: + changes += "config.%s: %s -> %s, " % (p, component['config'][p], changeset_copy['config'][p]) + result['changed'] = True + + # Check all the possible states of the resource and do what is needed to + # converge current state with desired state (create, update or delete + # the key). 
+ if component_id and state == 'present': + if result['changed']: + if module._diff: + result['diff'] = dict(before=before_component, after=changeset_copy) + + if module.check_mode: + result['msg'] = "Component %s would be changed: %s" % (name, changes.strip(", ")) + else: + kc.update_component(changeset, parent_id) + result['msg'] = "Component %s changed: %s" % (name, changes.strip(", ")) + else: + result['msg'] = "Component %s was in sync" % (name) + + result['end_state'] = changeset_copy + elif component_id and state == 'absent': + if module._diff: + result['diff'] = dict(before=before_component, after={}) + + if module.check_mode: + result['changed'] = True + result['msg'] = "Component %s would be deleted" % (name) + else: + kc.delete_component(component_id, parent_id) + result['changed'] = True + result['msg'] = "Component %s deleted" % (name) + + result['end_state'] = {} + elif not component_id and state == 'present': + if module._diff: + result['diff'] = dict(before={}, after=changeset_copy) + + if module.check_mode: + result['changed'] = True + result['msg'] = "Component %s would be created" % (name) + else: + kc.create_component(changeset, parent_id) + result['changed'] = True + result['msg'] = "Component %s created" % (name) + + result['end_state'] = changeset_copy + elif not component_id and state == 'absent': + result['changed'] = False + result['msg'] = "Component %s not present" % (name) + result['end_state'] = {} + + module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/keycloak_component_info.py b/plugins/modules/keycloak_component_info.py new file mode 100644 index 0000000000..92f86ea046 --- /dev/null +++ b/plugins/modules/keycloak_component_info.py @@ -0,0 +1,165 @@ +#!/usr/bin/python + +# Copyright (c) Ansible project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import 
annotations + +DOCUMENTATION = r""" +module: keycloak_component_info + +short_description: Retrieve component info in Keycloak + +version_added: 8.2.0 + +description: + - This module retrieve information on component from Keycloak. +attributes: + action_group: + version_added: 10.2.0 + +options: + realm: + description: + - The name of the realm. + required: true + type: str + name: + description: + - Name of the Component. + type: str + provider_type: + description: + - Provider type of components. + - 'Examples: V(org.keycloak.storage.UserStorageProvider), V(org.keycloak.services.clientregistration.policy.ClientRegistrationPolicy), + V(org.keycloak.keys.KeyProvider), V(org.keycloak.userprofile.UserProfileProvider), V(org.keycloak.storage.ldap.mappers.LDAPStorageMapper).' + type: str + parent_id: + description: + - Container ID of the components. + type: str + + +extends_documentation_fragment: + - community.general.keycloak + - community.general.keycloak.actiongroup_keycloak + - community.general.attributes + - community.general.attributes.info_module + +author: + - Andre Desrosiers (@desand01) +""" + +EXAMPLES = r""" +- name: Retrive info of a UserStorageProvider named myldap + community.general.keycloak_component_info: + auth_keycloak_url: http://localhost:8080/auth + auth_sername: admin + auth_password: password + auth_realm: master + realm: myrealm + name: myldap + provider_type: org.keycloak.storage.UserStorageProvider + +- name: Retrive key info component + community.general.keycloak_component_info: + auth_keycloak_url: http://localhost:8080/auth + auth_sername: admin + auth_password: password + auth_realm: master + realm: myrealm + name: rsa-enc-generated + provider_type: org.keycloak.keys.KeyProvider + +- name: Retrive all component from realm master + community.general.keycloak_component_info: + auth_keycloak_url: http://localhost:8080/auth + auth_sername: admin + auth_password: password + auth_realm: master + realm: myrealm + +- name: Retrive all sub 
components of parent component filter by type + community.general.keycloak_component_info: + auth_keycloak_url: http://localhost:8080/auth + auth_sername: admin + auth_password: password + auth_realm: master + realm: myrealm + parent_id: "075ef2fa-19fc-4a6d-bf4c-249f57365fd2" + provider_type: "org.keycloak.storage.ldap.mappers.LDAPStorageMapper" +""" + +RETURN = r""" +components: + description: JSON representation of components. + returned: always + type: list + elements: dict +""" + +from ansible_collections.community.general.plugins.module_utils.identity.keycloak.keycloak import KeycloakAPI, \ + keycloak_argument_spec, get_token, KeycloakError +from ansible.module_utils.basic import AnsibleModule +from urllib.parse import quote + + +def main(): + """ + Module execution + + :return: + """ + argument_spec = keycloak_argument_spec() + + meta_args = dict( + name=dict(type='str'), + realm=dict(type='str', required=True), + parent_id=dict(type='str'), + provider_type=dict(type='str'), + ) + + argument_spec.update(meta_args) + + module = AnsibleModule(argument_spec=argument_spec, + supports_check_mode=True) + + result = dict(changed=False, components=[]) + + # Obtain access token, initialize API + try: + connection_header = get_token(module.params) + except KeycloakError as e: + module.fail_json(msg=str(e)) + + kc = KeycloakAPI(module, connection_header) + + realm = module.params.get('realm') + parentId = module.params.get('parent_id') + name = module.params.get('name') + providerType = module.params.get('provider_type') + + objRealm = kc.get_realm_by_id(realm) + if not objRealm: + module.fail_json(msg="Failed to retrive realm '{realm}'".format(realm=realm)) + + filters = [] + + if parentId: + filters.append("parent=%s" % (quote(parentId, safe=''))) + else: + filters.append("parent=%s" % (quote(objRealm['id'], safe=''))) + + if name: + filters.append("name=%s" % (quote(name, safe=''))) + if providerType: + filters.append("type=%s" % (quote(providerType, safe=''))) + + 
result['components'] = kc.get_components(filter="&".join(filters), realm=realm) + + module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/identity/keycloak/keycloak_group.py b/plugins/modules/keycloak_group.py similarity index 53% rename from plugins/modules/identity/keycloak/keycloak_group.py rename to plugins/modules/keycloak_group.py index 3455f57818..a040e6e659 100644 --- a/plugins/modules/identity/keycloak/keycloak_group.py +++ b/plugins/modules/keycloak_group.py @@ -1,86 +1,110 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- # Copyright (c) 2019, Adam Goossens -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: keycloak_group -short_description: Allows administration of Keycloak groups via Keycloak API +short_description: Allows administration of Keycloak groups using Keycloak API description: - - This module allows you to add, remove or modify Keycloak groups via the Keycloak REST API. - It requires access to the REST API via OpenID Connect; the user connecting and the client being - used must have the requisite access rights. In a default Keycloak installation, admin-cli - and an admin user would work, as would a separate client definition with the scope tailored - to your needs and a user having the expected roles. - - - The names of module options are snake_cased versions of the camelCase ones found in the - Keycloak API and its documentation at U(https://www.keycloak.org/docs-api/8.0/rest-api/index.html). - - - Attributes are multi-valued in the Keycloak API. 
All attributes are lists of individual values and will - be returned that way by this module. You may pass single values for attributes when calling the module, - and this will be translated into a list suitable for the API. - - - When updating a group, where possible provide the group ID to the module. This removes a lookup - to the API to translate the name into the group ID. - + - This module allows you to add, remove or modify Keycloak groups using the Keycloak REST API. It requires access to the + REST API using OpenID Connect; the user connecting and the client being used must have the requisite access rights. In + a default Keycloak installation, admin-cli and an admin user would work, as would a separate client definition with the + scope tailored to your needs and a user having the expected roles. + - The names of module options are snake_cased versions of the camelCase ones found in the Keycloak API and its documentation + at U(https://www.keycloak.org/docs-api/20.0.2/rest-api/index.html). + - Attributes are multi-valued in the Keycloak API. All attributes are lists of individual values and are returned that way + by this module. You may pass single values for attributes when calling the module, and this is translated into a list + suitable for the API. + - When updating a group, where possible provide the group ID to the module. This removes a lookup to the API to translate + the name into the group ID. +attributes: + check_mode: + support: full + diff_mode: + support: full + action_group: + version_added: 10.2.0 options: - state: - description: - - State of the group. - - On C(present), the group will be created if it does not yet exist, or updated with the parameters you provide. - - On C(absent), the group will be removed if it exists. - default: 'present' - type: str - choices: - - present - - absent + state: + description: + - State of the group. 
+ - On V(present), the group is created if it does not yet exist, or updated with the parameters you provide. + - On V(absent), the group is removed if it exists. Be aware that absenting a group with subgroups automatically deletes + all its subgroups too. + default: 'present' + type: str + choices: + - present + - absent - name: + name: + type: str + description: + - Name of the group. + - This parameter is required only when creating or updating the group. + realm: + type: str + description: + - They Keycloak realm under which this group resides. + default: 'master' + + id: + type: str + description: + - The unique identifier for this group. + - This parameter is not required for updating or deleting a group but providing it reduces the number of API calls required. + attributes: + type: dict + description: + - A dict of key/value pairs to set as custom attributes for the group. + - Values may be single values (for example a string) or a list of strings. + parents: + version_added: "6.4.0" + type: list + description: + - List of parent groups for the group to handle sorted top to bottom. + - Set this to create a group as a subgroup of another group or groups (parents) or when accessing an existing subgroup + by name. + - Not necessary to set when accessing an existing subgroup by its C(ID) because in that case the group can be directly + queried without necessarily knowing its parent(s). + elements: dict + suboptions: + id: type: str description: - - Name of the group. - - This parameter is required only when creating or updating the group. - - realm: + - Identify parent by ID. + - Needs less API calls than using O(parents[].name). + - A deep parent chain can be started at any point when first given parent is given as ID. + - Note that in principle both ID and name can be specified at the same time but current implementation only always + use just one of them, with ID being preferred. 
+ name: type: str description: - - They Keycloak realm under which this group resides. - default: 'master' - - id: - type: str - description: - - The unique identifier for this group. - - This parameter is not required for updating or deleting a group but - providing it will reduce the number of API calls required. - - attributes: - type: dict - description: - - A dict of key/value pairs to set as custom attributes for the group. - - Values may be single values (e.g. a string) or a list of strings. - + - Identify parent by name. + - Needs more internal API calls than using O(parents[].id) to map names to ID's under the hood. + - When giving a parent chain with only names it must be complete up to the top. + - Note that in principle both ID and name can be specified at the same time but current implementation only always + use just one of them, with ID being preferred. notes: - - Presently, the I(realmRoles), I(clientRoles) and I(access) attributes returned by the Keycloak API - are read-only for groups. This limitation will be removed in a later version of this module. - + - Presently, the RV(end_state.realmRoles), RV(end_state.clientRoles), and RV(end_state.access) attributes returned by the + Keycloak API are read-only for groups. This limitation will be removed in a later version of this module. 
extends_documentation_fragment: -- community.general.keycloak - + - community.general.keycloak + - community.general.keycloak.actiongroup_keycloak + - community.general.attributes author: - - Adam Goossens (@adamgoossens) -''' + - Adam Goossens (@adamgoossens) +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: Create a Keycloak group, authentication with credentials community.general.keycloak_group: name: my-new-kc-group @@ -91,6 +115,7 @@ EXAMPLES = ''' auth_realm: master auth_username: USERNAME auth_password: PASSWORD + register: result_new_kcgrp delegate_to: localhost - name: Create a Keycloak group, authentication with token @@ -147,78 +172,84 @@ EXAMPLES = ''' auth_password: PASSWORD name: my-new_group attributes: - attrib1: value1 - attrib2: value2 - attrib3: - - with - - numerous - - individual - - list - - items + attrib1: value1 + attrib2: value2 + attrib3: + - with + - numerous + - individual + - list + - items delegate_to: localhost -''' -RETURN = ''' +- name: Create a Keycloak subgroup of a base group (using parent name) + community.general.keycloak_group: + name: my-new-kc-group-sub + realm: MyCustomRealm + state: present + auth_client_id: admin-cli + auth_keycloak_url: https://auth.example.com/auth + auth_realm: master + auth_username: USERNAME + auth_password: PASSWORD + parents: + - name: my-new-kc-group + register: result_new_kcgrp_sub + delegate_to: localhost + +- name: Create a Keycloak subgroup of a base group (using parent id) + community.general.keycloak_group: + name: my-new-kc-group-sub2 + realm: MyCustomRealm + state: present + auth_client_id: admin-cli + auth_keycloak_url: https://auth.example.com/auth + auth_realm: master + auth_username: USERNAME + auth_password: PASSWORD + parents: + - id: "{{ result_new_kcgrp.end_state.id }}" + delegate_to: localhost + +- name: Create a Keycloak subgroup of a subgroup (using parent names) + community.general.keycloak_group: + name: my-new-kc-group-sub-sub + realm: MyCustomRealm + state: present + 
auth_client_id: admin-cli + auth_keycloak_url: https://auth.example.com/auth + auth_realm: master + auth_username: USERNAME + auth_password: PASSWORD + parents: + - name: my-new-kc-group + - name: my-new-kc-group-sub + delegate_to: localhost + +- name: Create a Keycloak subgroup of a subgroup (using direct parent id) + community.general.keycloak_group: + name: my-new-kc-group-sub-sub + realm: MyCustomRealm + state: present + auth_client_id: admin-cli + auth_keycloak_url: https://auth.example.com/auth + auth_realm: master + auth_username: USERNAME + auth_password: PASSWORD + parents: + - id: "{{ result_new_kcgrp_sub.end_state.id }}" + delegate_to: localhost +""" + +RETURN = r""" msg: - description: Message as to what action was taken. - returned: always - type: str + description: Message as to what action was taken. + returned: always + type: str end_state: - description: Representation of the group after module execution (sample is truncated). - returned: on success - type: complex - contains: - id: - description: GUID that identifies the group. - type: str - returned: always - sample: 23f38145-3195-462c-97e7-97041ccea73e - name: - description: Name of the group. - type: str - returned: always - sample: grp-test-123 - attributes: - description: Attributes applied to this group. - type: dict - returned: always - sample: - attr1: ["val1", "val2", "val3"] - path: - description: URI path to the group. - type: str - returned: always - sample: /grp-test-123 - realmRoles: - description: An array of the realm-level roles granted to this group. - type: list - returned: always - sample: [] - subGroups: - description: A list of groups that are children of this group. These groups will have the same parameters as - documented here. - type: list - returned: always - clientRoles: - description: A list of client-level roles granted to this group. 
- type: list - returned: always - sample: [] - access: - description: A dict describing the accesses you have to this group based on the credentials used. - type: dict - returned: always - sample: - manage: true - manageMembership: true - view: true - -group: - description: - - Representation of the group after module execution. - - Deprecated return value, it will be removed in community.general 6.0.0. Please use the return value I(end_state) instead. - returned: always + description: Representation of the group after module execution (sample is truncated). + returned: on success type: complex contains: id: @@ -248,8 +279,7 @@ group: returned: always sample: [] subGroups: - description: A list of groups that are children of this group. These groups will have the same parameters as - documented here. + description: A list of groups that are children of this group. These groups have the same parameters as documented here. type: list returned: always clientRoles: @@ -265,8 +295,7 @@ group: manage: true manageMembership: true view: true - -''' +""" from ansible_collections.community.general.plugins.module_utils.identity.keycloak.keycloak import KeycloakAPI, camel, \ keycloak_argument_spec, get_token, KeycloakError @@ -287,6 +316,13 @@ def main(): id=dict(type='str'), name=dict(type='str'), attributes=dict(type='dict'), + parents=dict( + type='list', elements='dict', + options=dict( + id=dict(type='str'), + name=dict(type='str') + ), + ), ) argument_spec.update(meta_args) @@ -294,8 +330,10 @@ def main(): module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True, required_one_of=([['id', 'name'], - ['token', 'auth_realm', 'auth_username', 'auth_password']]), - required_together=([['auth_realm', 'auth_username', 'auth_password']])) + ['token', 'auth_realm', 'auth_username', 'auth_password', 'auth_client_id', 'auth_client_secret']]), + required_together=([['auth_username', 'auth_password']]), + required_by={'refresh_token': 'auth_realm'}, + ) result = 
dict(changed=False, msg='', diff={}, group='') @@ -313,8 +351,10 @@ def main(): name = module.params.get('name') attributes = module.params.get('attributes') + parents = module.params.get('parents') + # attributes in Keycloak have their values returned as lists - # via the API. attributes is a dict, so we'll transparently convert + # using the API. attributes is a dict, so we'll transparently convert # the values to lists. if attributes is not None: for key, val in module.params['attributes'].items(): @@ -322,12 +362,12 @@ def main(): # Filter and map the parameters names that apply to the group group_params = [x for x in module.params - if x not in list(keycloak_argument_spec().keys()) + ['state', 'realm'] and + if x not in list(keycloak_argument_spec().keys()) + ['state', 'realm', 'parents'] and module.params.get(x) is not None] # See if it already exists in Keycloak if gid is None: - before_group = kc.get_group_by_name(name, realm=realm) + before_group = kc.get_group_by_name(name, realm=realm, parents=parents) else: before_group = kc.get_group_by_groupid(gid, realm=realm) @@ -355,7 +395,6 @@ def main(): result['diff'] = dict(before='', after='') result['changed'] = False result['end_state'] = {} - result['group'] = result['end_state'] result['msg'] = 'Group does not exist; doing nothing.' module.exit_json(**result) @@ -371,12 +410,17 @@ def main(): if module.check_mode: module.exit_json(**result) - # create it - kc.create_group(desired_group, realm=realm) - after_group = kc.get_group_by_name(name, realm) + # create it ... + if parents: + # ... as subgroup of another parent group + kc.create_subgroup(parents, desired_group, realm=realm) + else: + # ... 
as toplvl base group + kc.create_group(desired_group, realm=realm) + + after_group = kc.get_group_by_name(name, realm, parents=parents) result['end_state'] = after_group - result['group'] = result['end_state'] result['msg'] = 'Group {name} has been created with ID {id}'.format(name=after_group['name'], id=after_group['id']) @@ -390,7 +434,6 @@ def main(): if desired_group == before_group: result['changed'] = False result['end_state'] = desired_group - result['group'] = result['end_state'] result['msg'] = "No changes required to group {name}.".format(name=before_group['name']) module.exit_json(**result) @@ -409,7 +452,6 @@ def main(): after_group = kc.get_group_by_groupid(desired_group['id'], realm=realm) result['end_state'] = after_group - result['group'] = result['end_state'] result['msg'] = "Group {id} has been updated".format(id=after_group['id']) module.exit_json(**result) @@ -429,7 +471,6 @@ def main(): kc.delete_group(groupid=gid, realm=realm) result['end_state'] = {} - result['group'] = result['end_state'] result['msg'] = "Group {name} has been deleted".format(name=before_group['name']) diff --git a/plugins/modules/keycloak_identity_provider.py b/plugins/modules/keycloak_identity_provider.py new file mode 100644 index 0000000000..12aa2cc4ad --- /dev/null +++ b/plugins/modules/keycloak_identity_provider.py @@ -0,0 +1,730 @@ +#!/usr/bin/python + +# Copyright (c) Ansible project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + +DOCUMENTATION = r""" +module: keycloak_identity_provider + +short_description: Allows administration of Keycloak identity providers using Keycloak API + +version_added: 3.6.0 + +description: + - This module allows you to add, remove or modify Keycloak identity providers using the Keycloak REST API. 
It requires access + to the REST API using OpenID Connect; the user connecting and the client being used must have the requisite access rights. + In a default Keycloak installation, admin-cli and an admin user would work, as would a separate client definition with + the scope tailored to your needs and a user having the expected roles. + - The names of module options are snake_cased versions of the camelCase ones found in the Keycloak API and its documentation + at U(https://www.keycloak.org/docs-api/15.0/rest-api/index.html). +attributes: + check_mode: + support: full + diff_mode: + support: full + action_group: + version_added: 10.2.0 + +options: + state: + description: + - State of the identity provider. + - On V(present), the identity provider is created if it does not yet exist, or updated with the parameters you provide. + - On V(absent), the identity provider is removed if it exists. + default: 'present' + type: str + choices: + - present + - absent + + realm: + description: + - The Keycloak realm under which this identity provider resides. + default: 'master' + type: str + + alias: + description: + - The alias uniquely identifies an identity provider and it is also used to build the redirect URI. + required: true + type: str + + display_name: + description: + - Friendly name for identity provider. + aliases: + - displayName + type: str + + enabled: + description: + - Enable/disable this identity provider. + type: bool + + store_token: + description: + - Enable/disable whether tokens must be stored after authenticating users. + aliases: + - storeToken + type: bool + + add_read_token_role_on_create: + description: + - Enable/disable whether new users can read any stored tokens. This assigns the C(broker.read-token) role. + aliases: + - addReadTokenRoleOnCreate + type: bool + + trust_email: + description: + - If enabled, email provided by this provider is not verified even if verification is enabled for the realm. 
+ aliases: + - trustEmail + type: bool + + link_only: + description: + - If true, users cannot log in through this provider. They can only link to this provider. This is useful if you do + not want to allow login from the provider, but want to integrate with a provider. + aliases: + - linkOnly + type: bool + + first_broker_login_flow_alias: + description: + - Alias of authentication flow, which is triggered after first login with this identity provider. + aliases: + - firstBrokerLoginFlowAlias + type: str + + post_broker_login_flow_alias: + description: + - Alias of authentication flow, which is triggered after each login with this identity provider. + aliases: + - postBrokerLoginFlowAlias + type: str + + authenticate_by_default: + description: + - Specifies if this identity provider should be used by default for authentication even before displaying login screen. + aliases: + - authenticateByDefault + type: bool + + provider_id: + description: + - Protocol used by this provider (supported values are V(oidc) or V(saml)). + aliases: + - providerId + type: str + + config: + description: + - Dict specifying the configuration options for the provider; the contents differ depending on the value of O(provider_id). + Examples are given below for V(oidc) and V(saml). It is easiest to obtain valid config values by dumping an already-existing + identity provider configuration through check-mode in the RV(existing) field. + type: dict + suboptions: + hide_on_login_page: + description: + - If hidden, login with this provider is possible only if requested explicitly, for example using the C(kc_idp_hint) + parameter. + aliases: + - hideOnLoginPage + type: bool + + gui_order: + description: + - Number defining order of the provider in GUI (for example, on Login page). + aliases: + - guiOrder + type: int + + sync_mode: + description: + - Default sync mode for all mappers. The sync mode determines when user data is synced using the mappers. 
+ aliases: + - syncMode + type: str + + issuer: + description: + - The issuer identifier for the issuer of the response. If not provided, no validation is performed. + type: str + + authorizationUrl: + description: + - The Authorization URL. + type: str + + tokenUrl: + description: + - The Token URL. + type: str + + logoutUrl: + description: + - End session endpoint to use to logout user from external IDP. + type: str + + userInfoUrl: + description: + - The User Info URL. + type: str + + clientAuthMethod: + description: + - The client authentication method. + type: str + + clientId: + description: + - The client or client identifier registered within the identity provider. + type: str + + clientSecret: + description: + - The client or client secret registered within the identity provider. + type: str + + defaultScope: + description: + - The scopes to be sent when asking for authorization. + type: str + + validateSignature: + description: + - Enable/disable signature validation of external IDP signatures. + type: bool + + useJwksUrl: + description: + - If V(true), identity provider public keys are downloaded from given JWKS URL. + type: bool + + jwksUrl: + description: + - URL where identity provider keys in JWK format are stored. See JWK specification for more details. + type: str + + entityId: + description: + - The Entity ID that is used to uniquely identify this SAML Service Provider. + type: str + + singleSignOnServiceUrl: + description: + - The URL that must be used to send authentication requests (SAML AuthnRequest). + type: str + + singleLogoutServiceUrl: + description: + - The URL that must be used to send logout requests. + type: str + + backchannelSupported: + description: + - Does the external IDP support backchannel logout? + type: str + + nameIDPolicyFormat: + description: + - Specifies the URI reference corresponding to a name identifier format. 
+ type: str + + principalType: + description: + - Way to identify and track external users from the assertion. + type: str + + fromUrl: + description: + - IDP well-known OpenID Connect configuration URL. + - Support only O(provider_id=oidc). + - O(config.fromUrl) is mutually exclusive with O(config.userInfoUrl), O(config.authorizationUrl), + O(config.tokenUrl), O(config.logoutUrl), O(config.issuer) and O(config.jwksUrl). + type: str + version_added: '11.2.0' + + mappers: + description: + - A list of dicts defining mappers associated with this Identity Provider. + type: list + elements: dict + suboptions: + id: + description: + - Unique ID of this mapper. + type: str + + name: + description: + - Name of the mapper. + type: str + + identityProviderAlias: + description: + - Alias of the identity provider for this mapper. + type: str + + identityProviderMapper: + description: + - Type of mapper. + type: str + + config: + description: + - Dict specifying the configuration options for the mapper; the contents differ depending on the value of O(mappers[].identityProviderMapper). 
+ type: dict + +extends_documentation_fragment: + - community.general.keycloak + - community.general.keycloak.actiongroup_keycloak + - community.general.attributes + +author: + - Laurent Paumier (@laurpaum) +""" + +EXAMPLES = r""" +- name: Create OIDC identity provider, authentication with credentials + community.general.keycloak_identity_provider: + state: present + auth_keycloak_url: https://auth.example.com/auth + auth_realm: master + auth_username: admin + auth_password: admin + realm: myrealm + alias: oidc-idp + display_name: OpenID Connect IdP + enabled: true + provider_id: oidc + config: + issuer: https://idp.example.com + authorizationUrl: https://idp.example.com/auth + tokenUrl: https://idp.example.com/token + userInfoUrl: https://idp.example.com/userinfo + clientAuthMethod: client_secret_post + clientId: my-client + clientSecret: secret + syncMode: FORCE + mappers: + - name: first_name + identityProviderMapper: oidc-user-attribute-idp-mapper + config: + claim: first_name + user.attribute: first_name + syncMode: INHERIT + - name: last_name + identityProviderMapper: oidc-user-attribute-idp-mapper + config: + claim: last_name + user.attribute: last_name + syncMode: INHERIT + +- name: Create OIDC identity provider, with well-known configuration URL + community.general.keycloak_identity_provider: + state: present + auth_keycloak_url: https://auth.example.com/auth + auth_realm: master + auth_username: admin + auth_password: admin + realm: myrealm + alias: oidc-idp + display_name: OpenID Connect IdP + enabled: true + provider_id: oidc + config: + fromUrl: https://the-idp.example.com/auth/realms/idprealm/.well-known/openid-configuration + clientAuthMethod: client_secret_post + clientId: my-client + clientSecret: secret + +- name: Create SAML identity provider, authentication with credentials + community.general.keycloak_identity_provider: + state: present + auth_keycloak_url: https://auth.example.com/auth + auth_realm: master + auth_username: admin + 
auth_password: admin + realm: myrealm + alias: saml-idp + display_name: SAML IdP + enabled: true + provider_id: saml + config: + entityId: https://auth.example.com/auth/realms/myrealm + singleSignOnServiceUrl: https://idp.example.com/login + wantAuthnRequestsSigned: true + wantAssertionsSigned: true + mappers: + - name: roles + identityProviderMapper: saml-user-attribute-idp-mapper + config: + user.attribute: roles + attribute.friendly.name: User Roles + attribute.name: roles + syncMode: INHERIT +""" + +RETURN = r""" +msg: + description: Message as to what action was taken. + returned: always + type: str + sample: "Identity provider my-idp has been created" + +proposed: + description: Representation of proposed identity provider. + returned: always + type: dict + sample: + { + "config": { + "authorizationUrl": "https://idp.example.com/auth", + "clientAuthMethod": "client_secret_post", + "clientId": "my-client", + "clientSecret": "secret", + "issuer": "https://idp.example.com", + "tokenUrl": "https://idp.example.com/token", + "userInfoUrl": "https://idp.example.com/userinfo" + }, + "displayName": "OpenID Connect IdP", + "providerId": "oidc" + } + +existing: + description: Representation of existing identity provider. 
+ returned: always + type: dict + sample: + { + "addReadTokenRoleOnCreate": false, + "alias": "my-idp", + "authenticateByDefault": false, + "config": { + "authorizationUrl": "https://old.example.com/auth", + "clientAuthMethod": "client_secret_post", + "clientId": "my-client", + "clientSecret": "**********", + "issuer": "https://old.example.com", + "syncMode": "FORCE", + "tokenUrl": "https://old.example.com/token", + "userInfoUrl": "https://old.example.com/userinfo" + }, + "displayName": "OpenID Connect IdP", + "enabled": true, + "firstBrokerLoginFlowAlias": "first broker login", + "internalId": "4d28d7e3-1b80-45bb-8a30-5822bf55aa1c", + "linkOnly": false, + "providerId": "oidc", + "storeToken": false, + "trustEmail": false + } + +end_state: + description: Representation of identity provider after module execution. + returned: on success + type: dict + sample: + { + "addReadTokenRoleOnCreate": false, + "alias": "my-idp", + "authenticateByDefault": false, + "config": { + "authorizationUrl": "https://idp.example.com/auth", + "clientAuthMethod": "client_secret_post", + "clientId": "my-client", + "clientSecret": "**********", + "issuer": "https://idp.example.com", + "tokenUrl": "https://idp.example.com/token", + "userInfoUrl": "https://idp.example.com/userinfo" + }, + "displayName": "OpenID Connect IdP", + "enabled": true, + "firstBrokerLoginFlowAlias": "first broker login", + "internalId": "4d28d7e3-1b80-45bb-8a30-5822bf55aa1c", + "linkOnly": false, + "providerId": "oidc", + "storeToken": false, + "trustEmail": false + } +""" + +from ansible_collections.community.general.plugins.module_utils.identity.keycloak.keycloak import KeycloakAPI, camel, \ + keycloak_argument_spec, get_token, KeycloakError +from ansible.module_utils.basic import AnsibleModule +from copy import deepcopy + + +def sanitize(idp): + idpcopy = deepcopy(idp) + if 'config' in idpcopy: + if 'clientSecret' in idpcopy['config']: + idpcopy['config']['clientSecret'] = '**********' + return idpcopy + + +def 
get_identity_provider_with_mappers(kc, alias, realm): + idp = kc.get_identity_provider(alias, realm) + if idp is not None: + idp['mappers'] = sorted(kc.get_identity_provider_mappers(alias, realm), key=lambda x: x.get('name')) + # clientSecret returned by API when using `get_identity_provider(alias, realm)` is always ********** + # to detect changes to the secret, we get the actual cleartext secret from the full realm info + if 'config' in idp: + if 'clientSecret' in idp['config']: + for idp_from_realm in kc.get_realm_by_id(realm).get('identityProviders', []): + if idp_from_realm['internalId'] == idp['internalId']: + cleartext_secret = idp_from_realm.get('config', {}).get('clientSecret') + if cleartext_secret: + idp['config']['clientSecret'] = cleartext_secret + if idp is None: + idp = {} + return idp + + +def fetch_identity_provider_wellknown_config(kc, config): + """ + Fetches OpenID Connect well-known configuration from a given URL and updates the config dict with discovered endpoints. + Support for oidc providers only. + :param kc: KeycloakAPI instance used to fetch endpoints and handle errors. + :param config: Dictionary containing identity provider configuration, must include 'fromUrl' key to trigger fetch. + :return: None. The config dict is updated in-place. 
+ """ + if config and 'fromUrl' in config : + if 'providerId' in config and config['providerId'] != 'oidc': + kc.module.fail_json(msg="Only 'oidc' provider_id is supported when using 'fromUrl'.") + endpoints = ['userInfoUrl', 'authorizationUrl', 'tokenUrl', 'logoutUrl', 'issuer', 'jwksUrl'] + if any(k in config for k in endpoints): + kc.module.fail_json(msg="Cannot specify both 'fromUrl' and 'userInfoUrl', 'authorizationUrl', 'tokenUrl', 'logoutUrl', 'issuer' or 'jwksUrl'.") + openIdConfig = kc.fetch_idp_endpoints_import_config_url( + fromUrl=config['fromUrl'], + realm=kc.module.params.get('realm', 'master')) + for k in endpoints: + if k in openIdConfig: + config[k] = openIdConfig[k] + del config['fromUrl'] + + +def main(): + """ + Module execution + + :return: + """ + argument_spec = keycloak_argument_spec() + + mapper_spec = dict( + id=dict(type='str'), + name=dict(type='str'), + identityProviderAlias=dict(type='str'), + identityProviderMapper=dict(type='str'), + config=dict(type='dict'), + ) + + meta_args = dict( + state=dict(type='str', default='present', choices=['present', 'absent']), + realm=dict(type='str', default='master'), + alias=dict(type='str', required=True), + add_read_token_role_on_create=dict(type='bool', aliases=['addReadTokenRoleOnCreate']), + authenticate_by_default=dict(type='bool', aliases=['authenticateByDefault']), + config=dict(type='dict'), + display_name=dict(type='str', aliases=['displayName']), + enabled=dict(type='bool'), + first_broker_login_flow_alias=dict(type='str', aliases=['firstBrokerLoginFlowAlias']), + link_only=dict(type='bool', aliases=['linkOnly']), + post_broker_login_flow_alias=dict(type='str', aliases=['postBrokerLoginFlowAlias']), + provider_id=dict(type='str', aliases=['providerId']), + store_token=dict(type='bool', aliases=['storeToken']), + trust_email=dict(type='bool', aliases=['trustEmail']), + mappers=dict(type='list', elements='dict', options=mapper_spec), + ) + + argument_spec.update(meta_args) + + module = 
AnsibleModule(argument_spec=argument_spec, + supports_check_mode=True, + required_one_of=([['token', 'auth_realm', 'auth_username', 'auth_password', 'auth_client_id', 'auth_client_secret']]), + required_together=([['auth_username', 'auth_password']]), + required_by={'refresh_token': 'auth_realm'}, + ) + + result = dict(changed=False, msg='', diff={}, proposed={}, existing={}, end_state={}) + + # Obtain access token, initialize API + try: + connection_header = get_token(module.params) + except KeycloakError as e: + module.fail_json(msg=str(e)) + + kc = KeycloakAPI(module, connection_header) + + realm = module.params.get('realm') + alias = module.params.get('alias') + state = module.params.get('state') + config = module.params.get('config') + + fetch_identity_provider_wellknown_config(kc, config) + + # Filter and map the parameters names that apply to the identity provider. + idp_params = [x for x in module.params + if x not in list(keycloak_argument_spec().keys()) + ['state', 'realm', 'mappers'] and + module.params.get(x) is not None] + + # See if it already exists in Keycloak + before_idp = get_identity_provider_with_mappers(kc, alias, realm) + + # Build a proposed changeset from parameters given to this module + changeset = {} + + for param in idp_params: + new_param_value = module.params.get(param) + old_value = before_idp[camel(param)] if camel(param) in before_idp else None + if new_param_value != old_value: + changeset[camel(param)] = new_param_value + + # special handling of mappers list to allow change detection + if module.params.get('mappers') is not None: + for change in module.params['mappers']: + change = {k: v for k, v in change.items() if v is not None} + if change.get('id') is None and change.get('name') is None: + module.fail_json(msg='Either `name` or `id` has to be specified on each mapper.') + if before_idp == dict(): + old_mapper = dict() + elif change.get('id') is not None: + old_mapper = kc.get_identity_provider_mapper(change['id'], alias, 
realm) + if old_mapper is None: + old_mapper = dict() + else: + found = [x for x in kc.get_identity_provider_mappers(alias, realm) if x['name'] == change['name']] + if len(found) == 1: + old_mapper = found[0] + else: + old_mapper = dict() + new_mapper = old_mapper.copy() + new_mapper.update(change) + + if changeset.get('mappers') is None: + changeset['mappers'] = list() + # eventually this holds all desired mappers, unchanged, modified and newly added + changeset['mappers'].append(new_mapper) + + # ensure idempotency in case module.params.mappers is not sorted by name + changeset['mappers'] = sorted(changeset['mappers'], key=lambda x: x.get('id') if x.get('name') is None else x['name']) + + # Prepare the desired values using the existing values (non-existence results in a dict that is save to use as a basis) + desired_idp = before_idp.copy() + desired_idp.update(changeset) + + result['proposed'] = sanitize(changeset) + result['existing'] = sanitize(before_idp) + + # Cater for when it doesn't exist (an empty dict) + if not before_idp: + if state == 'absent': + # Do nothing and exit + if module._diff: + result['diff'] = dict(before='', after='') + result['changed'] = False + result['end_state'] = {} + result['msg'] = 'Identity provider does not exist; doing nothing.' 
+ module.exit_json(**result) + + # Process a creation + result['changed'] = True + + if module._diff: + result['diff'] = dict(before='', after=sanitize(desired_idp)) + + if module.check_mode: + module.exit_json(**result) + + # create it + desired_idp = desired_idp.copy() + mappers = desired_idp.pop('mappers', []) + kc.create_identity_provider(desired_idp, realm) + for mapper in mappers: + if mapper.get('identityProviderAlias') is None: + mapper['identityProviderAlias'] = alias + kc.create_identity_provider_mapper(mapper, alias, realm) + after_idp = get_identity_provider_with_mappers(kc, alias, realm) + + result['end_state'] = sanitize(after_idp) + + result['msg'] = 'Identity provider {alias} has been created'.format(alias=alias) + module.exit_json(**result) + + else: + if state == 'present': + # Process an update + + # no changes + if desired_idp == before_idp: + result['changed'] = False + result['end_state'] = sanitize(desired_idp) + result['msg'] = "No changes required to identity provider {alias}.".format(alias=alias) + module.exit_json(**result) + + # doing an update + result['changed'] = True + + if module._diff: + result['diff'] = dict(before=sanitize(before_idp), after=sanitize(desired_idp)) + + if module.check_mode: + module.exit_json(**result) + + # do the update + desired_idp = desired_idp.copy() + updated_mappers = desired_idp.pop('mappers', []) + original_mappers = list(before_idp.get('mappers', [])) + + kc.update_identity_provider(desired_idp, realm) + for mapper in updated_mappers: + if mapper.get('id') is not None: + # only update existing if there is a change + for i, orig in enumerate(original_mappers): + if mapper['id'] == orig['id']: + del original_mappers[i] + if mapper != orig: + kc.update_identity_provider_mapper(mapper, alias, realm) + else: + if mapper.get('identityProviderAlias') is None: + mapper['identityProviderAlias'] = alias + kc.create_identity_provider_mapper(mapper, alias, realm) + for mapper in [x for x in before_idp['mappers'] + 
if [y for y in updated_mappers if y["name"] == x['name']] == []]: + kc.delete_identity_provider_mapper(mapper['id'], alias, realm) + + after_idp = get_identity_provider_with_mappers(kc, alias, realm) + + result['end_state'] = sanitize(after_idp) + + result['msg'] = "Identity provider {alias} has been updated".format(alias=alias) + module.exit_json(**result) + + elif state == 'absent': + # Process a deletion + result['changed'] = True + + if module._diff: + result['diff'] = dict(before=sanitize(before_idp), after='') + + if module.check_mode: + module.exit_json(**result) + + # delete it + kc.delete_identity_provider(alias, realm) + + result['end_state'] = {} + + result['msg'] = "Identity provider {alias} has been deleted".format(alias=alias) + + module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/keycloak_realm.py b/plugins/modules/keycloak_realm.py new file mode 100644 index 0000000000..47ad90ee4e --- /dev/null +++ b/plugins/modules/keycloak_realm.py @@ -0,0 +1,1111 @@ +#!/usr/bin/python + +# Copyright (c) 2017, Eike Frost +# Copyright (c) 2021, Christophe Gilles +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + +DOCUMENTATION = r""" +module: keycloak_realm + +short_description: Allows administration of Keycloak realm using Keycloak API + +version_added: 3.0.0 + +description: + - This module allows the administration of Keycloak realm using the Keycloak REST API. It requires access to the REST API + using OpenID Connect; the user connecting and the realm being used must have the requisite access rights. In a default + Keycloak installation, admin-cli and an admin user would work, as would a separate realm definition with the scope tailored + to your needs and a user having the expected roles. 
+ - The names of module options are snake_cased versions of the camelCase ones found in the Keycloak API and its documentation + at U(https://www.keycloak.org/docs-api/8.0/rest-api/index.html). Aliases are provided so camelCased versions can be used + as well. + - The Keycloak API does not always sanity check inputs, for example you can set SAML-specific settings on an OpenID Connect + client for instance and also the other way around. B(Be careful). If you do not specify a setting, usually a sensible + default is chosen. +attributes: + check_mode: + support: full + diff_mode: + support: full + action_group: + version_added: 10.2.0 + +options: + state: + description: + - State of the realm. + - On V(present), the realm is created (or updated if it exists already). + - On V(absent), the realm is removed if it exists. + choices: ['present', 'absent'] + default: 'present' + type: str + + id: + description: + - The realm to create. + type: str + realm: + description: + - The realm name. + type: str + access_code_lifespan: + description: + - The realm access code lifespan. + aliases: + - accessCodeLifespan + type: int + access_code_lifespan_login: + description: + - The realm access code lifespan login. + aliases: + - accessCodeLifespanLogin + type: int + access_code_lifespan_user_action: + description: + - The realm access code lifespan user action. + aliases: + - accessCodeLifespanUserAction + type: int + access_token_lifespan: + description: + - The realm access token lifespan. + aliases: + - accessTokenLifespan + type: int + access_token_lifespan_for_implicit_flow: + description: + - The realm access token lifespan for implicit flow. + aliases: + - accessTokenLifespanForImplicitFlow + type: int + account_theme: + description: + - The realm account theme. + aliases: + - accountTheme + type: str + action_token_generated_by_admin_lifespan: + description: + - The realm action token generated by admin lifespan. 
+ aliases: + - actionTokenGeneratedByAdminLifespan + type: int + action_token_generated_by_user_lifespan: + description: + - The realm action token generated by user lifespan. + aliases: + - actionTokenGeneratedByUserLifespan + type: int + admin_events_details_enabled: + description: + - The realm admin events details enabled. + aliases: + - adminEventsDetailsEnabled + type: bool + admin_events_enabled: + description: + - The realm admin events enabled. + aliases: + - adminEventsEnabled + type: bool + admin_theme: + description: + - The realm admin theme. + aliases: + - adminTheme + type: str + attributes: + description: + - The realm attributes. + type: dict + browser_flow: + description: + - The realm browser flow. + aliases: + - browserFlow + type: str + browser_security_headers: + description: + - The realm browser security headers. + aliases: + - browserSecurityHeaders + type: dict + brute_force_protected: + description: + - The realm brute force protected. + aliases: + - bruteForceProtected + type: bool + brute_force_strategy: + description: + - The realm brute force strategy. + aliases: + - bruteForceStrategy + choices: ['LINEAR', 'MULTIPLE'] + type: str + version_added: 11.2.0 + client_authentication_flow: + description: + - The realm client authentication flow. + aliases: + - clientAuthenticationFlow + type: str + client_scope_mappings: + description: + - The realm client scope mappings. + aliases: + - clientScopeMappings + type: dict + default_default_client_scopes: + description: + - The realm default default client scopes. + aliases: + - defaultDefaultClientScopes + type: list + elements: str + default_groups: + description: + - The realm default groups. + aliases: + - defaultGroups + type: list + elements: str + default_locale: + description: + - The realm default locale. + aliases: + - defaultLocale + type: str + default_optional_client_scopes: + description: + - The realm default optional client scopes. 
+ aliases: + - defaultOptionalClientScopes + type: list + elements: str + default_roles: + description: + - The realm default roles. + aliases: + - defaultRoles + type: list + elements: str + default_signature_algorithm: + description: + - The realm default signature algorithm. + aliases: + - defaultSignatureAlgorithm + type: str + direct_grant_flow: + description: + - The realm direct grant flow. + aliases: + - directGrantFlow + type: str + display_name: + description: + - The realm display name. + aliases: + - displayName + type: str + display_name_html: + description: + - The realm display name HTML. + aliases: + - displayNameHtml + type: str + docker_authentication_flow: + description: + - The realm docker authentication flow. + aliases: + - dockerAuthenticationFlow + type: str + duplicate_emails_allowed: + description: + - The realm duplicate emails allowed option. + aliases: + - duplicateEmailsAllowed + type: bool + edit_username_allowed: + description: + - The realm edit username allowed option. + aliases: + - editUsernameAllowed + type: bool + email_theme: + description: + - The realm email theme. + aliases: + - emailTheme + type: str + enabled: + description: + - The realm enabled option. + type: bool + enabled_event_types: + description: + - The realm enabled event types. + aliases: + - enabledEventTypes + type: list + elements: str + events_enabled: + description: + - Enables or disables login events for this realm. + aliases: + - eventsEnabled + type: bool + version_added: 3.6.0 + events_expiration: + description: + - The realm events expiration. + aliases: + - eventsExpiration + type: int + events_listeners: + description: + - The realm events listeners. + aliases: + - eventsListeners + type: list + elements: str + failure_factor: + description: + - The realm failure factor. + aliases: + - failureFactor + type: int + internationalization_enabled: + description: + - The realm internationalization enabled option. 
+ aliases: + - internationalizationEnabled + type: bool + login_theme: + description: + - The realm login theme. + aliases: + - loginTheme + type: str + login_with_email_allowed: + description: + - The realm login with email allowed option. + aliases: + - loginWithEmailAllowed + type: bool + max_delta_time_seconds: + description: + - The realm max delta time in seconds. + aliases: + - maxDeltaTimeSeconds + type: int + max_failure_wait_seconds: + description: + - The realm max failure wait in seconds. + aliases: + - maxFailureWaitSeconds + type: int + max_temporary_lockouts: + description: + - The realm max temporary lockouts. + aliases: + - maxTemporaryLockouts + type: int + version_added: 11.2.0 + minimum_quick_login_wait_seconds: + description: + - The realm minimum quick login wait in seconds. + aliases: + - minimumQuickLoginWaitSeconds + type: int + not_before: + description: + - The realm not before. + aliases: + - notBefore + type: int + offline_session_idle_timeout: + description: + - The realm offline session idle timeout. + aliases: + - offlineSessionIdleTimeout + type: int + offline_session_max_lifespan: + description: + - The realm offline session max lifespan. + aliases: + - offlineSessionMaxLifespan + type: int + offline_session_max_lifespan_enabled: + description: + - The realm offline session max lifespan enabled option. + aliases: + - offlineSessionMaxLifespanEnabled + type: bool + otp_policy_algorithm: + description: + - The realm otp policy algorithm. + aliases: + - otpPolicyAlgorithm + type: str + otp_policy_digits: + description: + - The realm otp policy digits. + aliases: + - otpPolicyDigits + type: int + otp_policy_initial_counter: + description: + - The realm otp policy initial counter. + aliases: + - otpPolicyInitialCounter + type: int + otp_policy_look_ahead_window: + description: + - The realm otp policy look ahead window. 
+ aliases: + - otpPolicyLookAheadWindow + type: int + otp_policy_period: + description: + - The realm otp policy period. + aliases: + - otpPolicyPeriod + type: int + otp_policy_type: + description: + - The realm otp policy type. + aliases: + - otpPolicyType + type: str + otp_supported_applications: + description: + - The realm otp supported applications. + aliases: + - otpSupportedApplications + type: list + elements: str + password_policy: + description: + - The realm password policy. + aliases: + - passwordPolicy + type: str + organizations_enabled: + description: + - Enables support for experimental organization feature. + aliases: + - organizationsEnabled + type: bool + version_added: 10.0.0 + permanent_lockout: + description: + - The realm permanent lockout. + aliases: + - permanentLockout + type: bool + quick_login_check_milli_seconds: + description: + - The realm quick login check in milliseconds. + aliases: + - quickLoginCheckMilliSeconds + type: int + refresh_token_max_reuse: + description: + - The realm refresh token max reuse. + aliases: + - refreshTokenMaxReuse + type: int + registration_allowed: + description: + - The realm registration allowed option. + aliases: + - registrationAllowed + type: bool + registration_email_as_username: + description: + - The realm registration email as username option. + aliases: + - registrationEmailAsUsername + type: bool + registration_flow: + description: + - The realm registration flow. + aliases: + - registrationFlow + type: str + remember_me: + description: + - The realm remember me option. + aliases: + - rememberMe + type: bool + reset_credentials_flow: + description: + - The realm reset credentials flow. + aliases: + - resetCredentialsFlow + type: str + reset_password_allowed: + description: + - The realm reset password allowed option. + aliases: + - resetPasswordAllowed + type: bool + revoke_refresh_token: + description: + - The realm revoke refresh token option. 
+ aliases: + - revokeRefreshToken + type: bool + smtp_server: + description: + - The realm smtp server. + aliases: + - smtpServer + type: dict + ssl_required: + description: + - The realm ssl required option. + choices: ['all', 'external', 'none'] + aliases: + - sslRequired + type: str + sso_session_idle_timeout: + description: + - The realm sso session idle timeout. + aliases: + - ssoSessionIdleTimeout + type: int + sso_session_idle_timeout_remember_me: + description: + - The realm sso session idle timeout remember me. + aliases: + - ssoSessionIdleTimeoutRememberMe + type: int + sso_session_max_lifespan: + description: + - The realm sso session max lifespan. + aliases: + - ssoSessionMaxLifespan + type: int + sso_session_max_lifespan_remember_me: + description: + - The realm sso session max lifespan remember me. + aliases: + - ssoSessionMaxLifespanRememberMe + type: int + supported_locales: + description: + - The realm supported locales. + aliases: + - supportedLocales + type: list + elements: str + user_managed_access_allowed: + description: + - The realm user managed access allowed option. + aliases: + - userManagedAccessAllowed + type: bool + verify_email: + description: + - The realm verify email option. + aliases: + - verifyEmail + type: bool + wait_increment_seconds: + description: + - The realm wait increment in seconds. + aliases: + - waitIncrementSeconds + type: int + client_session_idle_timeout: + description: + - All Clients will inherit from this setting, time a session is allowed to be idle before it expires. + aliases: + - clientSessionIdleTimeout + type: int + version_added: 11.2.0 + client_session_max_lifespan: + description: + - All Clients will inherit from this setting, max time before a session is expired. + aliases: + - clientSessionMaxLifespan + type: int + version_added: 11.2.0 + client_offline_session_idle_timeout: + description: + - All Clients will inherit from this setting, time an offline session is allowed to be idle before it expires. 
+ aliases: + - clientOfflineSessionIdleTimeout + type: int + version_added: 11.2.0 + client_offline_session_max_lifespan: + description: + - All Clients will inherit from this setting, max time before an offline session is expired regardless of activity. + aliases: + - clientOfflineSessionMaxLifespan + type: int + version_added: 11.2.0 + oauth2_device_code_lifespan: + description: + - Max time before the device code and user code are expired. + aliases: + - oauth2DeviceCodeLifespan + type: int + version_added: 11.2.0 + oauth2_device_polling_interval: + description: + - The minimum amount of time in seconds that the client should wait between polling requests to the token endpoint. + aliases: + - oauth2DevicePollingInterval + type: int + version_added: 11.2.0 + web_authn_policy_rp_entity_name: + description: + - WebAuthn Relying Party Entity Name. + aliases: + - webAuthnPolicyRpEntityName + type: str + version_added: 11.3.0 + web_authn_policy_signature_algorithms: + description: + - List of acceptable WebAuthn signature algorithms. + aliases: + - webAuthnPolicySignatureAlgorithms + type: list + version_added: 11.3.0 + elements: str + web_authn_policy_rp_id: + description: + - WebAuthn Relying Party ID (domain). Empty string means use request host. + aliases: + - webAuthnPolicyRpId + type: str + version_added: 11.3.0 + web_authn_policy_attestation_conveyance_preference: + description: + - Attestation conveyance preference for WebAuthn. + aliases: + - webAuthnPolicyAttestationConveyancePreference + type: str + version_added: 11.3.0 + web_authn_policy_authenticator_attachment: + description: + - Authenticator attachment preference for WebAuthn authenticators. + aliases: + - webAuthnPolicyAuthenticatorAttachment + type: str + version_added: 11.3.0 + web_authn_policy_require_resident_key: + description: + - Whether resident keys are required for WebAuthn (Yes/No/not specified). 
+ aliases: + - webAuthnPolicyRequireResidentKey + type: str + version_added: 11.3.0 + web_authn_policy_user_verification_requirement: + description: + - User verification requirement for WebAuthn. + aliases: + - webAuthnPolicyUserVerificationRequirement + type: str + version_added: 11.3.0 + web_authn_policy_create_timeout: + description: + - Timeout for WebAuthn credential creation (ms). + aliases: + - webAuthnPolicyCreateTimeout + type: int + version_added: 11.3.0 + web_authn_policy_avoid_same_authenticator_register: + description: + - Avoid registering the same authenticator multiple times. + aliases: + - webAuthnPolicyAvoidSameAuthenticatorRegister + type: bool + version_added: 11.3.0 + web_authn_policy_acceptable_aaguids: + description: + - List of acceptable AAGUIDs for WebAuthn authenticators. + aliases: + - webAuthnPolicyAcceptableAaguids + type: list + version_added: 11.3.0 + elements: str + web_authn_policy_extra_origins: + description: + - Additional acceptable origins for WebAuthn requests. + aliases: + - webAuthnPolicyExtraOrigins + type: list + version_added: 11.3.0 + elements: str + web_authn_policy_passwordless_rp_entity_name: + description: + - WebAuthn Passwordless Relying Party Entity Name. + aliases: + - webAuthnPolicyPasswordlessRpEntityName + type: str + version_added: 11.3.0 + web_authn_policy_passwordless_signature_algorithms: + description: + - List of acceptable WebAuthn signature algorithms for passwordless. + aliases: + - webAuthnPolicyPasswordlessSignatureAlgorithms + type: list + version_added: 11.3.0 + elements: str + web_authn_policy_passwordless_rp_id: + description: + - WebAuthn Passwordless Relying Party ID (domain). + aliases: + - webAuthnPolicyPasswordlessRpId + type: str + version_added: 11.3.0 + web_authn_policy_passwordless_attestation_conveyance_preference: + description: + - Attestation conveyance preference for WebAuthn passwordless. 
+ aliases: + - webAuthnPolicyPasswordlessAttestationConveyancePreference + type: str + version_added: 11.3.0 + web_authn_policy_passwordless_authenticator_attachment: + description: + - Authenticator attachment for WebAuthn passwordless. + aliases: + - webAuthnPolicyPasswordlessAuthenticatorAttachment + type: str + version_added: 11.3.0 + web_authn_policy_passwordless_require_resident_key: + description: + - Whether resident keys are required for WebAuthn passwordless (V(Yes)/V(No)/V(not specified)). + aliases: + - webAuthnPolicyPasswordlessRequireResidentKey + type: str + version_added: 11.3.0 + web_authn_policy_passwordless_user_verification_requirement: + description: + - User verification requirement for WebAuthn passwordless. + aliases: + - webAuthnPolicyPasswordlessUserVerificationRequirement + type: str + version_added: 11.3.0 + web_authn_policy_passwordless_create_timeout: + description: + - Timeout for WebAuthn passwordless credential creation (ms). + aliases: + - webAuthnPolicyPasswordlessCreateTimeout + type: int + version_added: 11.3.0 + web_authn_policy_passwordless_avoid_same_authenticator_register: + description: + - Avoid registering the same authenticator multiple times for passwordless. + aliases: + - webAuthnPolicyPasswordlessAvoidSameAuthenticatorRegister + type: bool + version_added: 11.3.0 + web_authn_policy_passwordless_acceptable_aaguids: + description: + - List of acceptable AAGUIDs for WebAuthn passwordless authenticators. + aliases: + - webAuthnPolicyPasswordlessAcceptableAaguids + type: list + version_added: 11.3.0 + elements: str + web_authn_policy_passwordless_extra_origins: + description: + - Additional acceptable origins for WebAuthn passwordless requests. 
+ aliases: + - webAuthnPolicyPasswordlessExtraOrigins + type: list + version_added: 11.3.0 + elements: str + +extends_documentation_fragment: + - community.general.keycloak + - community.general.keycloak.actiongroup_keycloak + - community.general.attributes + +author: + - Christophe Gilles (@kris2kris) +""" + +EXAMPLES = r""" +- name: Create or update Keycloak realm (minimal example) + community.general.keycloak_realm: + auth_client_id: admin-cli + auth_keycloak_url: https://auth.example.com/auth + auth_realm: master + auth_username: USERNAME + auth_password: PASSWORD + realm: unique_realm_name + state: present + +- name: Delete a Keycloak realm + community.general.keycloak_realm: + auth_client_id: admin-cli + auth_keycloak_url: https://auth.example.com/auth + auth_realm: master + auth_username: USERNAME + auth_password: PASSWORD + realm: unique_realm_name + state: absent +""" + +RETURN = r""" +msg: + description: Message as to what action was taken. + returned: always + type: str + sample: "Realm testrealm has been updated" + +proposed: + description: Representation of proposed realm. + returned: always + type: dict + sample: {"realm": "test"} + +existing: + description: Representation of existing realm (sample is truncated). + returned: always + type: dict + sample: + { + "adminUrl": "http://www.example.com/admin_url", + "attributes": { + "request.object.signature.alg": "RS256" + } + } + +end_state: + description: Representation of realm after module execution (sample is truncated). 
+ returned: on success + type: dict + sample: + { + "adminUrl": "http://www.example.com/admin_url", + "attributes": { + "request.object.signature.alg": "RS256" + } + } +""" + +from ansible_collections.community.general.plugins.module_utils.identity.keycloak.keycloak import KeycloakAPI, camel, \ + keycloak_argument_spec, get_token, KeycloakError +from ansible.module_utils.basic import AnsibleModule + + +def normalise_cr(realmrep): + """ Re-sorts any properties where the order is important so that diff's is minimised and the change detection is more effective. + + :param realmrep: the realmrep dict to be sanitized + :return: normalised realmrep dict + """ + # Avoid the dict passed in to be modified + realmrep = realmrep.copy() + + if 'enabledEventTypes' in realmrep: + realmrep['enabledEventTypes'] = list(sorted(realmrep['enabledEventTypes'])) + + if 'otpSupportedApplications' in realmrep: + realmrep['otpSupportedApplications'] = list(sorted(realmrep['otpSupportedApplications'])) + + if 'supportedLocales' in realmrep: + realmrep['supportedLocales'] = list(sorted(realmrep['supportedLocales'])) + + return realmrep + + +def sanitize_cr(realmrep): + """ Removes probably sensitive details from a realm representation. 
+ + :param realmrep: the realmrep dict to be sanitized + :return: sanitized realmrep dict + """ + result = realmrep.copy() + if 'secret' in result: + result['secret'] = '********' + if 'attributes' in result: + if 'saml.signing.private.key' in result['attributes']: + result['attributes'] = result['attributes'].copy() + result['attributes']['saml.signing.private.key'] = '********' + return normalise_cr(result) + + +def main(): + """ + Module execution + + :return: + """ + argument_spec = keycloak_argument_spec() + + meta_args = dict( + state=dict(default='present', choices=['present', 'absent']), + + id=dict(type='str'), + realm=dict(type='str'), + access_code_lifespan=dict(type='int', aliases=['accessCodeLifespan']), + access_code_lifespan_login=dict(type='int', aliases=['accessCodeLifespanLogin']), + access_code_lifespan_user_action=dict(type='int', aliases=['accessCodeLifespanUserAction']), + access_token_lifespan=dict(type='int', aliases=['accessTokenLifespan'], no_log=False), + access_token_lifespan_for_implicit_flow=dict(type='int', aliases=['accessTokenLifespanForImplicitFlow'], no_log=False), + account_theme=dict(type='str', aliases=['accountTheme']), + action_token_generated_by_admin_lifespan=dict(type='int', aliases=['actionTokenGeneratedByAdminLifespan'], no_log=False), + action_token_generated_by_user_lifespan=dict(type='int', aliases=['actionTokenGeneratedByUserLifespan'], no_log=False), + admin_events_details_enabled=dict(type='bool', aliases=['adminEventsDetailsEnabled']), + admin_events_enabled=dict(type='bool', aliases=['adminEventsEnabled']), + admin_theme=dict(type='str', aliases=['adminTheme']), + attributes=dict(type='dict'), + browser_flow=dict(type='str', aliases=['browserFlow']), + browser_security_headers=dict(type='dict', aliases=['browserSecurityHeaders']), + brute_force_protected=dict(type='bool', aliases=['bruteForceProtected']), + brute_force_strategy=dict(type='str', choices=['LINEAR', 'MULTIPLE'], aliases=['bruteForceStrategy']), + 
client_authentication_flow=dict(type='str', aliases=['clientAuthenticationFlow']), + client_scope_mappings=dict(type='dict', aliases=['clientScopeMappings']), + default_default_client_scopes=dict(type='list', elements='str', aliases=['defaultDefaultClientScopes']), + default_groups=dict(type='list', elements='str', aliases=['defaultGroups']), + default_locale=dict(type='str', aliases=['defaultLocale']), + default_optional_client_scopes=dict(type='list', elements='str', aliases=['defaultOptionalClientScopes']), + default_roles=dict(type='list', elements='str', aliases=['defaultRoles']), + default_signature_algorithm=dict(type='str', aliases=['defaultSignatureAlgorithm']), + direct_grant_flow=dict(type='str', aliases=['directGrantFlow']), + display_name=dict(type='str', aliases=['displayName']), + display_name_html=dict(type='str', aliases=['displayNameHtml']), + docker_authentication_flow=dict(type='str', aliases=['dockerAuthenticationFlow']), + duplicate_emails_allowed=dict(type='bool', aliases=['duplicateEmailsAllowed']), + edit_username_allowed=dict(type='bool', aliases=['editUsernameAllowed']), + email_theme=dict(type='str', aliases=['emailTheme']), + enabled=dict(type='bool'), + enabled_event_types=dict(type='list', elements='str', aliases=['enabledEventTypes']), + events_enabled=dict(type='bool', aliases=['eventsEnabled']), + events_expiration=dict(type='int', aliases=['eventsExpiration']), + events_listeners=dict(type='list', elements='str', aliases=['eventsListeners']), + failure_factor=dict(type='int', aliases=['failureFactor']), + internationalization_enabled=dict(type='bool', aliases=['internationalizationEnabled']), + login_theme=dict(type='str', aliases=['loginTheme']), + login_with_email_allowed=dict(type='bool', aliases=['loginWithEmailAllowed']), + max_delta_time_seconds=dict(type='int', aliases=['maxDeltaTimeSeconds']), + max_failure_wait_seconds=dict(type='int', aliases=['maxFailureWaitSeconds']), + max_temporary_lockouts=dict(type='int', 
aliases=['maxTemporaryLockouts']), + minimum_quick_login_wait_seconds=dict(type='int', aliases=['minimumQuickLoginWaitSeconds']), + not_before=dict(type='int', aliases=['notBefore']), + offline_session_idle_timeout=dict(type='int', aliases=['offlineSessionIdleTimeout']), + offline_session_max_lifespan=dict(type='int', aliases=['offlineSessionMaxLifespan']), + offline_session_max_lifespan_enabled=dict(type='bool', aliases=['offlineSessionMaxLifespanEnabled']), + otp_policy_algorithm=dict(type='str', aliases=['otpPolicyAlgorithm']), + otp_policy_digits=dict(type='int', aliases=['otpPolicyDigits']), + otp_policy_initial_counter=dict(type='int', aliases=['otpPolicyInitialCounter']), + otp_policy_look_ahead_window=dict(type='int', aliases=['otpPolicyLookAheadWindow']), + otp_policy_period=dict(type='int', aliases=['otpPolicyPeriod']), + otp_policy_type=dict(type='str', aliases=['otpPolicyType']), + otp_supported_applications=dict(type='list', elements='str', aliases=['otpSupportedApplications']), + password_policy=dict(type='str', aliases=['passwordPolicy'], no_log=False), + organizations_enabled=dict(type='bool', aliases=['organizationsEnabled']), + permanent_lockout=dict(type='bool', aliases=['permanentLockout']), + quick_login_check_milli_seconds=dict(type='int', aliases=['quickLoginCheckMilliSeconds']), + refresh_token_max_reuse=dict(type='int', aliases=['refreshTokenMaxReuse'], no_log=False), + registration_allowed=dict(type='bool', aliases=['registrationAllowed']), + registration_email_as_username=dict(type='bool', aliases=['registrationEmailAsUsername']), + registration_flow=dict(type='str', aliases=['registrationFlow']), + remember_me=dict(type='bool', aliases=['rememberMe']), + reset_credentials_flow=dict(type='str', aliases=['resetCredentialsFlow']), + reset_password_allowed=dict(type='bool', aliases=['resetPasswordAllowed'], no_log=False), + revoke_refresh_token=dict(type='bool', aliases=['revokeRefreshToken']), + smtp_server=dict(type='dict', 
aliases=['smtpServer']), + ssl_required=dict(choices=["external", "all", "none"], aliases=['sslRequired']), + sso_session_idle_timeout=dict(type='int', aliases=['ssoSessionIdleTimeout']), + sso_session_idle_timeout_remember_me=dict(type='int', aliases=['ssoSessionIdleTimeoutRememberMe']), + sso_session_max_lifespan=dict(type='int', aliases=['ssoSessionMaxLifespan']), + sso_session_max_lifespan_remember_me=dict(type='int', aliases=['ssoSessionMaxLifespanRememberMe']), + supported_locales=dict(type='list', elements='str', aliases=['supportedLocales']), + user_managed_access_allowed=dict(type='bool', aliases=['userManagedAccessAllowed']), + verify_email=dict(type='bool', aliases=['verifyEmail']), + wait_increment_seconds=dict(type='int', aliases=['waitIncrementSeconds']), + client_session_idle_timeout=dict(type='int', aliases=['clientSessionIdleTimeout']), + client_session_max_lifespan=dict(type='int', aliases=['clientSessionMaxLifespan']), + client_offline_session_idle_timeout=dict(type='int', aliases=['clientOfflineSessionIdleTimeout']), + client_offline_session_max_lifespan=dict(type='int', aliases=['clientOfflineSessionMaxLifespan']), + oauth2_device_code_lifespan=dict(type='int', aliases=['oauth2DeviceCodeLifespan']), + oauth2_device_polling_interval=dict(type='int', aliases=['oauth2DevicePollingInterval']), + web_authn_policy_rp_entity_name=dict(type='str', aliases=['webAuthnPolicyRpEntityName']), + web_authn_policy_signature_algorithms=dict(type='list', elements='str', aliases=['webAuthnPolicySignatureAlgorithms']), + web_authn_policy_rp_id=dict(type='str', aliases=['webAuthnPolicyRpId']), + web_authn_policy_attestation_conveyance_preference=dict(type='str', aliases=['webAuthnPolicyAttestationConveyancePreference']), + web_authn_policy_authenticator_attachment=dict(type='str', aliases=['webAuthnPolicyAuthenticatorAttachment']), + web_authn_policy_require_resident_key=dict(type='str', aliases=['webAuthnPolicyRequireResidentKey'], no_log=False), + 
web_authn_policy_user_verification_requirement=dict(type='str', aliases=['webAuthnPolicyUserVerificationRequirement']), + web_authn_policy_create_timeout=dict(type='int', aliases=['webAuthnPolicyCreateTimeout']), + web_authn_policy_avoid_same_authenticator_register=dict(type='bool', aliases=['webAuthnPolicyAvoidSameAuthenticatorRegister']), + web_authn_policy_acceptable_aaguids=dict(type='list', elements='str', aliases=['webAuthnPolicyAcceptableAaguids']), + web_authn_policy_extra_origins=dict(type='list', elements='str', aliases=['webAuthnPolicyExtraOrigins']), + web_authn_policy_passwordless_rp_entity_name=dict(type='str', aliases=['webAuthnPolicyPasswordlessRpEntityName']), + web_authn_policy_passwordless_signature_algorithms=dict( + type='list', elements='str', aliases=['webAuthnPolicyPasswordlessSignatureAlgorithms'], no_log=False + ), + web_authn_policy_passwordless_rp_id=dict(type='str', aliases=['webAuthnPolicyPasswordlessRpId']), + web_authn_policy_passwordless_attestation_conveyance_preference=dict( + type='str', aliases=['webAuthnPolicyPasswordlessAttestationConveyancePreference'], no_log=False + ), + web_authn_policy_passwordless_authenticator_attachment=dict( + type='str', aliases=['webAuthnPolicyPasswordlessAuthenticatorAttachment'], no_log=False + ), + web_authn_policy_passwordless_require_resident_key=dict( + type='str', aliases=['webAuthnPolicyPasswordlessRequireResidentKey'], no_log=False + ), + web_authn_policy_passwordless_user_verification_requirement=dict( + type='str', aliases=['webAuthnPolicyPasswordlessUserVerificationRequirement'], no_log=False + ), + web_authn_policy_passwordless_create_timeout=dict(type='int', aliases=['webAuthnPolicyPasswordlessCreateTimeout']), + web_authn_policy_passwordless_avoid_same_authenticator_register=dict(type='bool', aliases=['webAuthnPolicyPasswordlessAvoidSameAuthenticatorRegister']), + web_authn_policy_passwordless_acceptable_aaguids=dict( + type='list', elements='str', 
aliases=['webAuthnPolicyPasswordlessAcceptableAaguids'], no_log=False + ), + web_authn_policy_passwordless_extra_origins=dict( + type='list', elements='str', aliases=['webAuthnPolicyPasswordlessExtraOrigins'], no_log=False + ), + ) + + argument_spec.update(meta_args) + + module = AnsibleModule(argument_spec=argument_spec, + supports_check_mode=True, + required_one_of=([['id', 'realm', 'enabled'], + ['token', 'auth_realm', 'auth_username', 'auth_password', 'auth_client_id', 'auth_client_secret']]), + required_together=([['auth_username', 'auth_password']]), + required_by={'refresh_token': 'auth_realm'}, + ) + + result = dict(changed=False, msg='', diff={}, proposed={}, existing={}, end_state={}) + + # Obtain access token, initialize API + try: + connection_header = get_token(module.params) + except KeycloakError as e: + module.fail_json(msg=str(e)) + + kc = KeycloakAPI(module, connection_header) + + realm = module.params.get('realm') + state = module.params.get('state') + + # convert module parameters to realm representation parameters (if they belong in there) + params_to_ignore = list(keycloak_argument_spec().keys()) + ['state'] + + # Filter and map the parameters names that apply to the role + realm_params = [x for x in module.params + if x not in params_to_ignore and + module.params.get(x) is not None] + + # See whether the realm already exists in Keycloak + before_realm = kc.get_realm_by_id(realm=realm) + + if before_realm is None: + before_realm = {} + + # Build a proposed changeset from parameters given to this module + changeset = {} + + for realm_param in realm_params: + new_param_value = module.params.get(realm_param) + changeset[camel(realm_param)] = new_param_value + + # Prepare the desired values using the existing values (non-existence results in a dict that is save to use as a basis) + desired_realm = before_realm.copy() + desired_realm.update(changeset) + + result['proposed'] = sanitize_cr(changeset) + before_realm_sanitized = 
sanitize_cr(before_realm) + result['existing'] = before_realm_sanitized + + # Cater for when it doesn't exist (an empty dict) + if not before_realm: + if state == 'absent': + # Do nothing and exit + if module._diff: + result['diff'] = dict(before='', after='') + result['changed'] = False + result['end_state'] = {} + result['msg'] = 'Realm does not exist, doing nothing.' + module.exit_json(**result) + + # Process a creation + result['changed'] = True + + if module._diff: + result['diff'] = dict(before='', after=sanitize_cr(desired_realm)) + + if module.check_mode: + module.exit_json(**result) + + # create it + kc.create_realm(desired_realm) + after_realm = kc.get_realm_by_id(desired_realm['realm']) + + result['end_state'] = sanitize_cr(after_realm) + + result['msg'] = 'Realm %s has been created.' % desired_realm['realm'] + module.exit_json(**result) + + else: + if state == 'present': + # Process an update + + # doing an update + result['changed'] = True + if module.check_mode: + # We can only compare the current realm with the proposed updates we have + before_norm = normalise_cr(before_realm) + desired_norm = normalise_cr(desired_realm) + if module._diff: + result['diff'] = dict(before=sanitize_cr(before_norm), + after=sanitize_cr(desired_norm)) + result['changed'] = (before_norm != desired_norm) + + module.exit_json(**result) + + # do the update + kc.update_realm(desired_realm, realm=realm) + + after_realm = kc.get_realm_by_id(realm=realm) + + if before_realm == after_realm: + result['changed'] = False + + result['end_state'] = sanitize_cr(after_realm) + + if module._diff: + result['diff'] = dict(before=before_realm_sanitized, + after=sanitize_cr(after_realm)) + + result['msg'] = 'Realm %s has been updated.' 
% desired_realm['realm'] + module.exit_json(**result) + + else: + # Process a deletion (because state was not 'present') + result['changed'] = True + + if module._diff: + result['diff'] = dict(before=before_realm_sanitized, after='') + + if module.check_mode: + module.exit_json(**result) + + # delete it + kc.delete_realm(realm=realm) + + result['proposed'] = {} + result['end_state'] = {} + + result['msg'] = 'Realm %s has been deleted.' % before_realm['realm'] + + module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/keycloak_realm_info.py b/plugins/modules/keycloak_realm_info.py new file mode 100644 index 0000000000..db16970046 --- /dev/null +++ b/plugins/modules/keycloak_realm_info.py @@ -0,0 +1,132 @@ +#!/usr/bin/python + +# Copyright (c) Ansible project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + +DOCUMENTATION = r""" +module: keycloak_realm_info + +short_description: Allows obtaining Keycloak realm public information using Keycloak API + +version_added: 4.3.0 + +description: + - This module allows you to get Keycloak realm public information using the Keycloak REST API. + - The names of module options are snake_cased versions of the camelCase ones found in the Keycloak API and its documentation + at U(https://www.keycloak.org/docs-api/8.0/rest-api/index.html). + - Attributes are multi-valued in the Keycloak API. All attributes are lists of individual values and are returned that way + by this module. You may pass single values for attributes when calling the module, and this is translated into a list + suitable for the API. +extends_documentation_fragment: + - community.general.attributes + - community.general.attributes.info_module + +options: + auth_keycloak_url: + description: + - URL to the Keycloak instance. 
+ type: str + required: true + aliases: + - url + validate_certs: + description: + - Verify TLS certificates (do not disable this in production). + type: bool + default: true + + realm: + type: str + description: + - The Keycloak realm ID. + default: 'master' + +author: + - Fynn Chen (@fynncfchen) +""" + +EXAMPLES = r""" +- name: Get a Keycloak public key + community.general.keycloak_realm_info: + realm: MyCustomRealm + auth_keycloak_url: https://auth.example.com/auth + delegate_to: localhost +""" + +RETURN = r""" +msg: + description: Message as to what action was taken. + returned: always + type: str + +realm_info: + description: + - Representation of the realm public information. + returned: always + type: dict + contains: + realm: + description: Realm ID. + type: str + returned: always + sample: MyRealm + public_key: + description: Public key of the realm. + type: str + returned: always + sample: MIIBIjANBgkqhkiG9w0BAQEFAAO... + token-service: + description: Token endpoint URL. + type: str + returned: always + sample: https://auth.example.com/auth/realms/MyRealm/protocol/openid-connect + account-service: + description: Account console URL. + type: str + returned: always + sample: https://auth.example.com/auth/realms/MyRealm/account + tokens-not-before: + description: The token not before. 
+ type: int + returned: always + sample: 0 +""" + +from ansible_collections.community.general.plugins.module_utils.identity.keycloak.keycloak import KeycloakAPI +from ansible.module_utils.basic import AnsibleModule + + +def main(): + """ + Module execution + + :return: + """ + argument_spec = dict( + auth_keycloak_url=dict(type='str', aliases=['url'], required=True, no_log=False), + validate_certs=dict(type='bool', default=True), + + realm=dict(default='master'), + ) + + module = AnsibleModule(argument_spec=argument_spec, + supports_check_mode=True) + + result = dict(changed=False, msg='', realm_info='') + + kc = KeycloakAPI(module, {}) + + realm = module.params.get('realm') + + realm_info = kc.get_realm_info_by_id(realm=realm) + + result['realm_info'] = realm_info + result['msg'] = 'Get realm public info successful for ID {realm}'.format(realm=realm) + module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/keycloak_realm_key.py b/plugins/modules/keycloak_realm_key.py new file mode 100644 index 0000000000..df9200016c --- /dev/null +++ b/plugins/modules/keycloak_realm_key.py @@ -0,0 +1,470 @@ +#!/usr/bin/python + +# Copyright (c) 2017, Eike Frost +# Copyright (c) 2021, Christophe Gilles +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or +# https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later +from __future__ import annotations + +DOCUMENTATION = r""" +module: keycloak_realm_key + +short_description: Allows administration of Keycloak realm keys using Keycloak API + +version_added: 7.5.0 + +description: + - This module allows the administration of Keycloak realm keys using the Keycloak REST API. It requires access to the REST + API using OpenID Connect; the user connecting and the realm being used must have the requisite access rights. 
In a default + Keycloak installation, admin-cli and an admin user would work, as would a separate realm definition with the scope tailored + to your needs and a user having the expected roles. + - The names of module options are snake_cased versions of the camelCase ones found in the Keycloak API and its documentation + at U(https://www.keycloak.org/docs-api/8.0/rest-api/index.html). Aliases are provided so camelCased versions can be used + as well. + - This module is unable to detect changes to the actual cryptographic key after importing it. However, if some other property + is changed alongside the cryptographic key, then the key also changes as a side-effect, as the JSON payload needs to include + the private key. This can be considered either a bug or a feature, as the alternative would be to always update the realm + key whether it has changed or not. +attributes: + check_mode: + support: full + diff_mode: + support: partial + action_group: + version_added: 10.2.0 + +options: + state: + description: + - State of the keycloak realm key. + - On V(present), the realm key is created (or updated if it exists already). + - On V(absent), the realm key is removed if it exists. + choices: ['present', 'absent'] + default: 'present' + type: str + name: + description: + - Name of the realm key to create. + type: str + required: true + force: + description: + - Enforce the state of the private key and certificate. This is not automatically the case as this module is unable + to determine the current state of the private key and thus cannot trigger an update based on an actual divergence. + That said, a private key update may happen even if force is false as a side-effect of other changes. + default: false + type: bool + parent_id: + description: + - The parent_id of the realm key. In practice the name of the realm. + type: str + required: true + provider_id: + description: + - The name of the "provider ID" for the key. 
+ - The value V(rsa-enc) has been added in community.general 8.2.0. + choices: ['rsa', 'rsa-enc'] + default: 'rsa' + type: str + config: + description: + - Dict specifying the key and its properties. + type: dict + suboptions: + active: + description: + - Whether the key is active or inactive. Not to be confused with the state of the Ansible resource managed by the + O(state) parameter. + default: true + type: bool + enabled: + description: + - Whether the key is enabled or disabled. Not to be confused with the state of the Ansible resource managed by the + O(state) parameter. + default: true + type: bool + priority: + description: + - The priority of the key. + type: int + required: true + algorithm: + description: + - Key algorithm. + - The values V(RS384), V(RS512), V(PS256), V(PS384), V(PS512), V(RSA1_5), V(RSA-OAEP), V(RSA-OAEP-256) have been + added in community.general 8.2.0. + default: RS256 + choices: ['RS256', 'RS384', 'RS512', 'PS256', 'PS384', 'PS512', 'RSA1_5', 'RSA-OAEP', 'RSA-OAEP-256'] + type: str + private_key: + description: + - The private key as an ASCII string. Contents of the key must match O(config.algorithm) and O(provider_id). + - Please note that the module cannot detect whether the private key specified differs from the current state's private + key. Use O(force=true) to force the module to update the private key if you expect it to be updated. + required: true + type: str + certificate: + description: + - A certificate signed with the private key as an ASCII string. Contents of the key must match O(config.algorithm) + and O(provider_id). + - If you want Keycloak to automatically generate a certificate using your private key then set this to an empty + string. + required: true + type: str +notes: + - Current value of the private key cannot be fetched from Keycloak. Therefore comparing its desired state to the current + state is not possible. + - If O(config.certificate) is not explicitly provided it is dynamically created by Keycloak. 
Therefore comparing the current + state of the certificate to the desired state (which may be empty) is not possible. + - Due to the private key and certificate options the module is B(not fully idempotent). You can use O(force=true) to force + the module to ensure updating if you know that the private key might have changed. +extends_documentation_fragment: + - community.general.keycloak + - community.general.keycloak.actiongroup_keycloak + - community.general.attributes + +author: + - Samuli Seppänen (@mattock) +""" + +EXAMPLES = r""" +- name: Manage Keycloak realm key (certificate autogenerated by Keycloak) + community.general.keycloak_realm_key: + name: custom + state: present + parent_id: master + provider_id: rsa + auth_keycloak_url: http://localhost:8080/auth + auth_username: keycloak + auth_password: keycloak + auth_realm: master + config: + private_key: "{{ private_key }}" + certificate: "" + enabled: true + active: true + priority: 120 + algorithm: RS256 +- name: Manage Keycloak realm key and certificate + community.general.keycloak_realm_key: + name: custom + state: present + parent_id: master + provider_id: rsa + auth_keycloak_url: http://localhost:8080/auth + auth_username: keycloak + auth_password: keycloak + auth_realm: master + config: + private_key: "{{ private_key }}" + certificate: "{{ certificate }}" + enabled: true + active: true + priority: 120 + algorithm: RS256 +""" + +RETURN = r""" +msg: + description: Message as to what action was taken. + returned: always + type: str + +end_state: + description: Representation of the keycloak_realm_key after module execution. + returned: on success + type: dict + contains: + id: + description: ID of the realm key. + type: str + returned: when O(state=present) + sample: 5b7ec13f-99da-46ad-8326-ab4c73cf4ce4 + name: + description: Name of the realm key. + type: str + returned: when O(state=present) + sample: mykey + parentId: + description: ID of the realm this key belongs to. 
+ type: str + returned: when O(state=present) + sample: myrealm + providerId: + description: The ID of the key provider. + type: str + returned: when O(state=present) + sample: rsa + providerType: + description: The type of provider. + type: str + returned: when O(state=present) + config: + description: Realm key configuration. + type: dict + returned: when O(state=present) + sample: + { + "active": [ + "true" + ], + "algorithm": [ + "RS256" + ], + "enabled": [ + "true" + ], + "priority": [ + "140" + ] + } +""" + +from ansible_collections.community.general.plugins.module_utils.identity.keycloak.keycloak import KeycloakAPI, camel, \ + keycloak_argument_spec, get_token, KeycloakError +from ansible.module_utils.basic import AnsibleModule +from urllib.parse import urlencode +from copy import deepcopy + + +def main(): + """ + Module execution + + :return: + """ + argument_spec = keycloak_argument_spec() + + meta_args = dict( + state=dict(type='str', default='present', choices=['present', 'absent']), + name=dict(type='str', required=True), + force=dict(type='bool', default=False), + parent_id=dict(type='str', required=True), + provider_id=dict(type='str', default='rsa', choices=['rsa', 'rsa-enc']), + config=dict( + type='dict', + options=dict( + active=dict(type='bool', default=True), + enabled=dict(type='bool', default=True), + priority=dict(type='int', required=True), + algorithm=dict( + type="str", + default="RS256", + choices=[ + "RS256", + "RS384", + "RS512", + "PS256", + "PS384", + "PS512", + "RSA1_5", + "RSA-OAEP", + "RSA-OAEP-256", + ], + ), + private_key=dict(type='str', required=True, no_log=True), + certificate=dict(type='str', required=True) + ) + ) + ) + + argument_spec.update(meta_args) + + module = AnsibleModule(argument_spec=argument_spec, + supports_check_mode=True, + required_one_of=([['token', 'auth_realm', 'auth_username', 'auth_password', 'auth_client_id', 'auth_client_secret']]), + required_together=([['auth_username', 'auth_password']]), + 
required_by={'refresh_token': 'auth_realm'}, + ) + + # Initialize the result object. Only "changed" seems to have special + # meaning for Ansible. + result = dict(changed=False, msg='', end_state={}, diff=dict(before={}, after={})) + + # This will include the current state of the realm key if it is already + # present. This is only used for diff-mode. + before_realm_key = {} + before_realm_key['config'] = {} + + # Obtain access token, initialize API + try: + connection_header = get_token(module.params) + except KeycloakError as e: + module.fail_json(msg=str(e)) + + kc = KeycloakAPI(module, connection_header) + + params_to_ignore = list(keycloak_argument_spec().keys()) + ["state", "force", "parent_id"] + + # Filter and map the parameters names that apply to the role + component_params = [x for x in module.params + if x not in params_to_ignore and + module.params.get(x) is not None] + + # We only support one component provider type in this module + provider_type = 'org.keycloak.keys.KeyProvider' + + # Build a proposed changeset from parameters given to this module + changeset = {} + changeset['config'] = {} + + # Generate a JSON payload for Keycloak Admin API from the module + # parameters. Parameters that do not belong to the JSON payload (e.g. + # "state" or "auth_keycloal_url") have been filtered away earlier (see + # above). + # + # This loop converts Ansible module parameters (snake-case) into + # Keycloak-compatible format (camel-case). For example private_key + # becomes privateKey. + # + # It also converts bool, str and int parameters into lists with a single + # entry of 'str' type. Bool values are also lowercased. This is required + # by Keycloak. 
+ # + for component_param in component_params: + if component_param == 'config': + for config_param in module.params.get('config'): + changeset['config'][camel(config_param)] = [] + raw_value = module.params.get('config')[config_param] + if isinstance(raw_value, bool): + value = str(raw_value).lower() + else: + value = str(raw_value) + + changeset['config'][camel(config_param)].append(value) + else: + # No need for camelcase in here as these are one word parameters + new_param_value = module.params.get(component_param) + changeset[camel(component_param)] = new_param_value + + # As provider_type is not a module parameter we have to add it to the + # changeset explicitly. + changeset['providerType'] = provider_type + + # Make a deep copy of the changeset. This is use when determining + # changes to the current state. + changeset_copy = deepcopy(changeset) + + # It is not possible to compare current keys to desired keys, because the + # certificate parameter is a base64-encoded binary blob created on the fly + # when a key is added. Moreover, the Keycloak Admin API does not seem to + # return the value of the private key for comparison. So, in effect, it we + # just have to ignore changes to the keys. However, as the privateKey + # parameter needs be present in the JSON payload, any changes done to any + # other parameters (e.g. config.priority) will trigger update of the keys + # as a side-effect. + del changeset_copy['config']['privateKey'] + del changeset_copy['config']['certificate'] + + # Make it easier to refer to current module parameters + name = module.params.get('name') + force = module.params.get('force') + state = module.params.get('state') + enabled = module.params.get('enabled') + provider_id = module.params.get('provider_id') + parent_id = module.params.get('parent_id') + + # Get a list of all Keycloak components that are of keyprovider type. 
+ realm_keys = kc.get_components(urlencode(dict(type=provider_type)), parent_id) + + # If this component is present get its key ID. Confusingly the key ID is + # also known as the Provider ID. + key_id = None + + # Track individual parameter changes + changes = "" + + # This tells Ansible whether the key was changed (added, removed, modified) + result['changed'] = False + + # Loop through the list of components. If we encounter a component whose + # name matches the value of the name parameter then assume the key is + # already present. + for key in realm_keys: + if key['name'] == name: + key_id = key['id'] + changeset['id'] = key_id + changeset_copy['id'] = key_id + + # Compare top-level parameters + for param, value in changeset.items(): + before_realm_key[param] = key[param] + + if changeset_copy[param] != key[param] and param != 'config': + changes += "%s: %s -> %s, " % (param, key[param], changeset_copy[param]) + result['changed'] = True + + # Compare parameters under the "config" key + for p, v in changeset_copy['config'].items(): + before_realm_key['config'][p] = key['config'][p] + if changeset_copy['config'][p] != key['config'][p]: + changes += "config.%s: %s -> %s, " % (p, key['config'][p], changeset_copy['config'][p]) + result['changed'] = True + + # Sanitize linefeeds for the privateKey. Without this the JSON payload + # will be invalid. + changeset['config']['privateKey'][0] = changeset['config']['privateKey'][0].replace('\\n', '\n') + changeset['config']['certificate'][0] = changeset['config']['certificate'][0].replace('\\n', '\n') + + # Check all the possible states of the resource and do what is needed to + # converge current state with desired state (create, update or delete + # the key). 
+ if key_id and state == 'present': + if result['changed']: + if module._diff: + del before_realm_key['config']['privateKey'] + del before_realm_key['config']['certificate'] + result['diff'] = dict(before=before_realm_key, after=changeset_copy) + + if module.check_mode: + result['msg'] = "Realm key %s would be changed: %s" % (name, changes.strip(", ")) + else: + kc.update_component(changeset, parent_id) + result['msg'] = "Realm key %s changed: %s" % (name, changes.strip(", ")) + elif not result['changed'] and force: + kc.update_component(changeset, parent_id) + result['changed'] = True + result['msg'] = "Realm key %s was forcibly updated" % (name) + else: + result['msg'] = "Realm key %s was in sync" % (name) + + result['end_state'] = changeset_copy + elif key_id and state == 'absent': + if module._diff: + del before_realm_key['config']['privateKey'] + del before_realm_key['config']['certificate'] + result['diff'] = dict(before=before_realm_key, after={}) + + if module.check_mode: + result['changed'] = True + result['msg'] = "Realm key %s would be deleted" % (name) + else: + kc.delete_component(key_id, parent_id) + result['changed'] = True + result['msg'] = "Realm key %s deleted" % (name) + + result['end_state'] = {} + elif not key_id and state == 'present': + if module._diff: + result['diff'] = dict(before={}, after=changeset_copy) + + if module.check_mode: + result['changed'] = True + result['msg'] = "Realm key %s would be created" % (name) + else: + kc.create_component(changeset, parent_id) + result['changed'] = True + result['msg'] = "Realm key %s created" % (name) + + result['end_state'] = changeset_copy + elif not key_id and state == 'absent': + result['changed'] = False + result['msg'] = "Realm key %s not present" % (name) + result['end_state'] = {} + + module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/keycloak_realm_keys_metadata_info.py b/plugins/modules/keycloak_realm_keys_metadata_info.py new file mode 100644 
index 0000000000..71ce5acffa --- /dev/null +++ b/plugins/modules/keycloak_realm_keys_metadata_info.py @@ -0,0 +1,132 @@ +#!/usr/bin/python + +# Copyright (c) Ansible project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + + +DOCUMENTATION = r""" +module: keycloak_realm_keys_metadata_info + +short_description: Allows obtaining Keycloak realm keys metadata using Keycloak API + +version_added: 9.3.0 + +description: + - This module allows you to get Keycloak realm keys metadata using the Keycloak REST API. + - The names of module options are snake_cased versions of the camelCase ones found in the Keycloak API and its documentation + at U(https://www.keycloak.org/docs-api/latest/rest-api/index.html). +attributes: + action_group: + version_added: 10.2.0 + +options: + realm: + type: str + description: + - The Keycloak realm to fetch keys metadata. + default: 'master' + +extends_documentation_fragment: + - community.general.keycloak + - community.general.keycloak.actiongroup_keycloak + - community.general.attributes + - community.general.attributes.info_module + +author: + - Thomas Bach (@thomasbach-dev) +""" + +EXAMPLES = r""" +- name: Fetch Keys metadata + community.general.keycloak_realm_keys_metadata_info: + auth_keycloak_url: https://auth.example.com/auth + auth_realm: master + auth_username: USERNAME + auth_password: PASSWORD + realm: MyCustomRealm + delegate_to: localhost + register: keycloak_keys_metadata + +- name: Write the Keycloak keys certificate into a file + ansible.builtin.copy: + dest: /tmp/keycloak.cert + content: | + {{ keys_metadata['keycloak_keys_metadata']['keys'] + | selectattr('algorithm', 'equalto', 'RS256') + | map(attribute='certificate') + | first + }} + delegate_to: localhost +""" + +RETURN = r""" +msg: + description: Message as to what action was taken. 
+ returned: always + type: str + +keys_metadata: + description: + + - Representation of the realm keys metadata (see U(https://www.keycloak.org/docs-api/latest/rest-api/index.html#KeysMetadataRepresentation)). + returned: always + type: dict + contains: + active: + description: A mapping (that is, a dict) from key algorithms to UUIDs. + type: dict + returned: always + keys: + description: A list of dicts providing detailed information on the keys. + type: list + elements: dict + returned: always +""" + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.identity.keycloak.keycloak import ( + KeycloakAPI, KeycloakError, get_token, keycloak_argument_spec) + + +def main(): + argument_spec = keycloak_argument_spec() + + meta_args = dict( + realm=dict(default="master"), + ) + argument_spec.update(meta_args) + + module = AnsibleModule( + argument_spec=argument_spec, + supports_check_mode=True, + required_one_of=([['token', 'auth_realm', 'auth_username', 'auth_password', 'auth_client_id', 'auth_client_secret']]), + required_together=([['auth_username', 'auth_password']]), + required_by={'refresh_token': 'auth_realm'}, + ) + + result = dict(changed=False, msg="", keys_metadata="") + + # Obtain access token, initialize API + try: + connection_header = get_token(module.params) + except KeycloakError as e: + module.fail_json(msg=str(e)) + + kc = KeycloakAPI(module, connection_header) + + realm = module.params.get("realm") + + keys_metadata = kc.get_realm_keys_metadata_by_id(realm=realm) + + result["keys_metadata"] = keys_metadata + result["msg"] = "Get realm keys metadata successful for ID {realm}".format( + realm=realm + ) + module.exit_json(**result) + + +if __name__ == "__main__": + main() diff --git a/plugins/modules/keycloak_realm_rolemapping.py b/plugins/modules/keycloak_realm_rolemapping.py new file mode 100644 index 0000000000..b8034a260b --- /dev/null +++ b/plugins/modules/keycloak_realm_rolemapping.py 
@@ -0,0 +1,380 @@ +#!/usr/bin/python + +# Copyright (c) Ansible project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + +DOCUMENTATION = r""" +module: keycloak_realm_rolemapping + +short_description: Allows administration of Keycloak realm role mappings into groups with the Keycloak API + +version_added: 8.2.0 + +description: + - This module allows you to add, remove or modify Keycloak realm role mappings into groups with the Keycloak REST API. It + requires access to the REST API using OpenID Connect; the user connecting and the client being used must have the requisite + access rights. In a default Keycloak installation, admin-cli and an admin user would work, as would a separate client + definition with the scope tailored to your needs and a user having the expected roles. + - The names of module options are snake_cased versions of the camelCase ones found in the Keycloak API and its documentation + at U(https://www.keycloak.org/docs-api/18.0/rest-api/index.html). + - Attributes are multi-valued in the Keycloak API. All attributes are lists of individual values and are returned that way + by this module. You may pass single values for attributes when calling the module, and this is translated into a list + suitable for the API. + - When updating a group_rolemapping, where possible provide the role ID to the module. This removes a lookup to the API + to translate the name into the role ID. +attributes: + check_mode: + support: full + diff_mode: + support: full + action_group: + version_added: 10.2.0 + +options: + state: + description: + - State of the realm_rolemapping. + - On C(present), the realm_rolemapping is created if it does not yet exist, or updated with the parameters you provide. + - On C(absent), the realm_rolemapping is removed if it exists. 
+ default: 'present' + type: str + choices: + - present + - absent + + realm: + type: str + description: + - The Keycloak realm under which this role_representation resides. + default: 'master' + + group_name: + type: str + description: + - Name of the group to be mapped. + - This parameter is required (can be replaced by gid for less API call). + parents: + type: list + description: + - List of parent groups for the group to handle sorted top to bottom. + - Set this if your group is a subgroup and you do not provide the GID in O(gid). + elements: dict + suboptions: + id: + type: str + description: + - Identify parent by ID. + - Needs less API calls than using O(parents[].name). + - A deep parent chain can be started at any point when first given parent is given as ID. + - Note that in principle both ID and name can be specified at the same time but the current implementation always + uses just one of them, with ID being preferred. + name: + type: str + description: + - Identify parent by name. + - Needs more internal API calls than using O(parents[].id) to map names to ID's under the hood. + - When giving a parent chain with only names it must be complete up to the top. + - Note that in principle both ID and name can be specified at the same time but the current implementation always + uses just one of them, with ID being preferred. + gid: + type: str + description: + - ID of the group to be mapped. + - This parameter is not required for updating or deleting the rolemapping but providing it reduces the number of API + calls required. + roles: + description: + - Roles to be mapped to the group. + type: list + elements: dict + suboptions: + name: + type: str + description: + - Name of the role_representation. + - This parameter is required only when creating or updating the role_representation. + id: + type: str + description: + - The unique identifier for this role_representation.
+ - This parameter is not required for updating or deleting a role_representation but providing it reduces the number + of API calls required. +extends_documentation_fragment: + - community.general.keycloak + - community.general.keycloak.actiongroup_keycloak + - community.general.attributes + +author: + - Gaëtan Daubresse (@Gaetan2907) + - Marius Huysamen (@mhuysamen) + - Alexander Groß (@agross) +""" + +EXAMPLES = r""" +- name: Map a realm role to a group, authentication with credentials + community.general.keycloak_realm_rolemapping: + realm: MyCustomRealm + auth_client_id: admin-cli + auth_keycloak_url: https://auth.example.com/auth + auth_realm: master + auth_username: USERNAME + auth_password: PASSWORD + state: present + group_name: group1 + roles: + - name: role_name1 + id: role_id1 + - name: role_name2 + id: role_id2 + delegate_to: localhost + +- name: Map a realm role to a group, authentication with token + community.general.keycloak_realm_rolemapping: + realm: MyCustomRealm + auth_client_id: admin-cli + auth_keycloak_url: https://auth.example.com/auth + token: TOKEN + state: present + group_name: group1 + roles: + - name: role_name1 + id: role_id1 + - name: role_name2 + id: role_id2 + delegate_to: localhost + +- name: Map a realm role to a subgroup, authentication with token + community.general.keycloak_realm_rolemapping: + realm: MyCustomRealm + auth_client_id: admin-cli + auth_keycloak_url: https://auth.example.com/auth + token: TOKEN + state: present + group_name: subgroup1 + parents: + - name: parent-group + roles: + - name: role_name1 + id: role_id1 + - name: role_name2 + id: role_id2 + delegate_to: localhost + +- name: Unmap realm role from a group + community.general.keycloak_realm_rolemapping: + realm: MyCustomRealm + auth_client_id: admin-cli + auth_keycloak_url: https://auth.example.com/auth + auth_realm: master + auth_username: USERNAME + auth_password: PASSWORD + state: absent + group_name: group1 + roles: + - name: role_name1 + id: role_id1
+ - name: role_name2 + id: role_id2 + delegate_to: localhost +""" + +RETURN = r""" +msg: + description: Message as to what action was taken. + returned: always + type: str + sample: "Role role1 assigned to group group1." + +proposed: + description: Representation of proposed client role mapping. + returned: always + type: dict + sample: {"clientId": "test"} + +existing: + description: + - Representation of existing client role mapping. + - The sample is truncated. + returned: always + type: dict + sample: + { + "adminUrl": "http://www.example.com/admin_url", + "attributes": { + "request.object.signature.alg": "RS256" + } + } + +end_state: + description: + - Representation of client role mapping after module execution. + - The sample is truncated. + returned: on success + type: dict + sample: + { + "adminUrl": "http://www.example.com/admin_url", + "attributes": { + "request.object.signature.alg": "RS256" + } + } +""" + +from ansible_collections.community.general.plugins.module_utils.identity.keycloak.keycloak import ( + KeycloakAPI, keycloak_argument_spec, get_token, KeycloakError, +) +from ansible.module_utils.basic import AnsibleModule + + +def main(): + """ + Module execution + + :return: + """ + argument_spec = keycloak_argument_spec() + + roles_spec = dict( + name=dict(type='str'), + id=dict(type='str'), + ) + + meta_args = dict( + state=dict(default='present', choices=['present', 'absent']), + realm=dict(default='master'), + gid=dict(type='str'), + group_name=dict(type='str'), + parents=dict( + type='list', elements='dict', + options=dict( + id=dict(type='str'), + name=dict(type='str') + ), + ), + roles=dict(type='list', elements='dict', options=roles_spec), + ) + + argument_spec.update(meta_args) + + module = AnsibleModule(argument_spec=argument_spec, + supports_check_mode=True, + required_one_of=([['token', 'auth_realm', 'auth_username', 'auth_password', 'auth_client_id', 'auth_client_secret']]), + required_together=([['auth_username', 'auth_password']]), + 
required_by={'refresh_token': 'auth_realm'}, + ) + + result = dict(changed=False, msg='', diff={}, proposed={}, existing={}, end_state={}) + + # Obtain access token, initialize API + try: + connection_header = get_token(module.params) + except KeycloakError as e: + module.fail_json(msg=str(e)) + + kc = KeycloakAPI(module, connection_header) + + realm = module.params.get('realm') + state = module.params.get('state') + gid = module.params.get('gid') + group_name = module.params.get('group_name') + roles = module.params.get('roles') + parents = module.params.get('parents') + + # Check the parameters + if gid is None and group_name is None: + module.fail_json(msg='Either the `group_name` or `gid` has to be specified.') + + # Get the potential missing parameters + if gid is None: + group_rep = kc.get_group_by_name(group_name, realm=realm, parents=parents) + if group_rep is not None: + gid = group_rep['id'] + else: + module.fail_json(msg='Could not fetch group %s:' % group_name) + else: + group_rep = kc.get_group_by_groupid(gid, realm=realm) + + if roles is None: + module.exit_json(msg="Nothing to do (no roles specified).") + else: + for role_index, role in enumerate(roles, start=0): + if role['name'] is None and role['id'] is None: + module.fail_json(msg='Either the `name` or `id` has to be specified on each role.') + # Fetch missing role_id + if role['id'] is None: + role_rep = kc.get_realm_role(role['name'], realm=realm) + if role_rep is not None: + role['id'] = role_rep['id'] + else: + module.fail_json(msg='Could not fetch realm role %s by name:' % (role['name'])) + # Fetch missing role_name + else: + for realm_role in kc.get_realm_roles(realm=realm): + if realm_role['id'] == role['id']: + role['name'] = realm_role['name'] + break + + if role['name'] is None: + module.fail_json(msg='Could not fetch realm role %s by ID' % (role['id'])) + + assigned_roles_before = group_rep.get('realmRoles', []) + + result['existing'] = assigned_roles_before + result['proposed'] = 
list(assigned_roles_before) if assigned_roles_before else [] + + update_roles = [] + for role_index, role in enumerate(roles, start=0): + # Fetch roles to assign if state present + if state == 'present': + if any(assigned == role['name'] for assigned in assigned_roles_before): + pass + else: + update_roles.append({ + 'id': role['id'], + 'name': role['name'], + }) + result['proposed'].append(role['name']) + # Fetch roles to remove if state absent + else: + if any(assigned == role['name'] for assigned in assigned_roles_before): + update_roles.append({ + 'id': role['id'], + 'name': role['name'], + }) + if role['name'] in result['proposed']: # Handle double removal + result['proposed'].remove(role['name']) + + if len(update_roles): + result['changed'] = True + if module._diff: + result['diff'] = dict(before=assigned_roles_before, after=result['proposed']) + if module.check_mode: + module.exit_json(**result) + + if state == 'present': + # Assign roles + kc.add_group_realm_rolemapping(gid=gid, role_rep=update_roles, realm=realm) + result['msg'] = 'Realm roles %s assigned to groupId %s.' % (update_roles, gid) + else: + # Remove mapping of role + kc.delete_group_realm_rolemapping(gid=gid, role_rep=update_roles, realm=realm) + result['msg'] = 'Realm roles %s removed from groupId %s.' % (update_roles, gid) + + if gid is None: + assigned_roles_after = kc.get_group_by_name(group_name, realm=realm, parents=parents).get('realmRoles', []) + else: + assigned_roles_after = kc.get_group_by_groupid(gid, realm=realm).get('realmRoles', []) + result['end_state'] = assigned_roles_after + module.exit_json(**result) + # Do nothing + else: + result['changed'] = False + result['msg'] = 'Nothing to do, roles %s are %s with group %s.' 
% (roles, 'mapped' if state == 'present' else 'not mapped', group_name) + module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/identity/keycloak/keycloak_role.py b/plugins/modules/keycloak_role.py similarity index 52% rename from plugins/modules/identity/keycloak/keycloak_role.py rename to plugins/modules/keycloak_role.py index 2dd2438e42..1480965ab6 100644 --- a/plugins/modules/identity/keycloak/keycloak_role.py +++ b/plugins/modules/keycloak_role.py @@ -1,86 +1,121 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- # Copyright (c) 2019, Adam Goossens -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: keycloak_role -short_description: Allows administration of Keycloak roles via Keycloak API +short_description: Allows administration of Keycloak roles using Keycloak API version_added: 3.4.0 description: - - This module allows you to add, remove or modify Keycloak roles via the Keycloak REST API. - It requires access to the REST API via OpenID Connect; the user connecting and the client being - used must have the requisite access rights. In a default Keycloak installation, admin-cli - and an admin user would work, as would a separate client definition with the scope tailored - to your needs and a user having the expected roles. - - - The names of module options are snake_cased versions of the camelCase ones found in the - Keycloak API and its documentation at U(https://www.keycloak.org/docs-api/8.0/rest-api/index.html). - - - Attributes are multi-valued in the Keycloak API. 
All attributes are lists of individual values and will - be returned that way by this module. You may pass single values for attributes when calling the module, - and this will be translated into a list suitable for the API. - + - This module allows you to add, remove or modify Keycloak roles using the Keycloak REST API. It requires access to the + REST API using OpenID Connect; the user connecting and the client being used must have the requisite access rights. In + a default Keycloak installation, admin-cli and an admin user would work, as would a separate client definition with the + scope tailored to your needs and a user having the expected roles. + - The names of module options are snake_cased versions of the camelCase ones found in the Keycloak API and its documentation + at U(https://www.keycloak.org/docs-api/8.0/rest-api/index.html). + - Attributes are multi-valued in the Keycloak API. All attributes are lists of individual values and are returned that way + by this module. You may pass single values for attributes when calling the module, and this is translated into a list + suitable for the API. +attributes: + check_mode: + support: full + diff_mode: + support: full + action_group: + version_added: 10.2.0 options: - state: - description: - - State of the role. - - On C(present), the role will be created if it does not yet exist, or updated with the parameters you provide. - - On C(absent), the role will be removed if it exists. - default: 'present' - type: str - choices: - - present - - absent + state: + description: + - State of the role. + - On V(present), the role is created if it does not yet exist, or updated with the parameters you provide. + - On V(absent), the role is removed if it exists. + default: 'present' + type: str + choices: + - present + - absent - name: + name: + type: str + required: true + description: + - Name of the role. + - This parameter is required. + description: + type: str + description: + - The role description. 
+ realm: + type: str + description: + - The Keycloak realm under which this role resides. + default: 'master' + + client_id: + type: str + description: + - If the role is a client role, the client ID under which it resides. + - If this parameter is absent, the role is considered a realm role. + attributes: + type: dict + description: + - A dict of key/value pairs to set as custom attributes for the role. + - Values may be single values (for example a string) or a list of strings. + composite: + description: + - If V(true), the role is a composition of other realm and/or client role. + default: false + type: bool + version_added: 7.1.0 + composites: + description: + - List of roles to include to the composite realm role. + - If the composite role is a client role, the C(clientId) (not ID of the client) must be specified. + default: [] + type: list + elements: dict + version_added: 7.1.0 + suboptions: + name: + description: + - Name of the role. This can be the name of a REALM role or a client role. type: str required: true + client_id: description: - - Name of the role. - - This parameter is required. - - description: + - Client ID if the role is a client role. Do not include this option for a REALM role. + - Use the client ID you can see in the Keycloak console, not the technical ID of the client. type: str + required: false + aliases: + - clientId + state: description: - - The role description. - - realm: + - Create the composite if present, remove it if absent. type: str - description: - - The Keycloak realm under which this role resides. - default: 'master' - - client_id: - type: str - description: - - If the role is a client role, the client id under which it resides. - - If this parameter is absent, the role is considered a realm role. - - attributes: - type: dict - description: - - A dict of key/value pairs to set as custom attributes for the role. - - Values may be single values (e.g. a string) or a list of strings. 
+ choices: + - present + - absent + default: present extends_documentation_fragment: -- community.general.keycloak - + - community.general.keycloak + - community.general.keycloak.actiongroup_keycloak + - community.general.attributes author: - - Laurent Paumier (@laurpaum) -''' + - Laurent Paumier (@laurpaum) +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: Create a Keycloak realm role, authentication with credentials community.general.keycloak_role: name: my-new-kc-role @@ -136,64 +171,65 @@ EXAMPLES = ''' auth_password: PASSWORD name: my-new-role attributes: - attrib1: value1 - attrib2: value2 - attrib3: - - with - - numerous - - individual - - list - - items + attrib1: value1 + attrib2: value2 + attrib3: + - with + - numerous + - individual + - list + - items delegate_to: localhost -''' +""" -RETURN = ''' +RETURN = r""" msg: - description: Message as to what action was taken. - returned: always - type: str - sample: "Role myrole has been updated" + description: Message as to what action was taken. + returned: always + type: str + sample: "Role myrole has been updated" proposed: - description: Representation of proposed role. - returned: always - type: dict - sample: { - "description": "My updated test description" - } + description: Representation of proposed role. + returned: always + type: dict + sample: {"description": "My updated test description"} existing: - description: Representation of existing role. - returned: always - type: dict - sample: { - "attributes": {}, - "clientRole": true, - "composite": false, - "containerId": "9f03eb61-a826-4771-a9fd-930e06d2d36a", - "description": "My client test role", - "id": "561703dd-0f38-45ff-9a5a-0c978f794547", - "name": "myrole" + description: Representation of existing role. 
+ returned: always + type: dict + sample: + { + "attributes": {}, + "clientRole": true, + "composite": false, + "containerId": "9f03eb61-a826-4771-a9fd-930e06d2d36a", + "description": "My client test role", + "id": "561703dd-0f38-45ff-9a5a-0c978f794547", + "name": "myrole" } end_state: - description: Representation of role after module execution (sample is truncated). - returned: on success - type: dict - sample: { - "attributes": {}, - "clientRole": true, - "composite": false, - "containerId": "9f03eb61-a826-4771-a9fd-930e06d2d36a", - "description": "My updated client test role", - "id": "561703dd-0f38-45ff-9a5a-0c978f794547", - "name": "myrole" + description: Representation of role after module execution (sample is truncated). + returned: on success + type: dict + sample: + { + "attributes": {}, + "clientRole": true, + "composite": false, + "containerId": "9f03eb61-a826-4771-a9fd-930e06d2d36a", + "description": "My updated client test role", + "id": "561703dd-0f38-45ff-9a5a-0c978f794547", + "name": "myrole" } -''' +""" from ansible_collections.community.general.plugins.module_utils.identity.keycloak.keycloak import KeycloakAPI, camel, \ - keycloak_argument_spec, get_token, KeycloakError + keycloak_argument_spec, get_token, KeycloakError, is_struct_included from ansible.module_utils.basic import AnsibleModule +import copy def main(): @@ -204,6 +240,12 @@ def main(): """ argument_spec = keycloak_argument_spec() + composites_spec = dict( + name=dict(type='str', required=True), + client_id=dict(type='str', aliases=['clientId']), + state=dict(type='str', default='present', choices=['present', 'absent']) + ) + meta_args = dict( state=dict(type='str', default='present', choices=['present', 'absent']), name=dict(type='str', required=True), @@ -211,14 +253,18 @@ def main(): realm=dict(type='str', default='master'), client_id=dict(type='str'), attributes=dict(type='dict'), + composites=dict(type='list', default=[], options=composites_spec, elements='dict'), + 
composite=dict(type='bool', default=False), ) argument_spec.update(meta_args) module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True, - required_one_of=([['token', 'auth_realm', 'auth_username', 'auth_password']]), - required_together=([['auth_realm', 'auth_username', 'auth_password']])) + required_one_of=([['token', 'auth_realm', 'auth_username', 'auth_password', 'auth_client_id', 'auth_client_secret']]), + required_together=([['auth_username', 'auth_password']]), + required_by={'refresh_token': 'auth_realm'}, + ) result = dict(changed=False, msg='', diff={}, proposed={}, existing={}, end_state={}) @@ -236,7 +282,7 @@ def main(): state = module.params.get('state') # attributes in Keycloak have their values returned as lists - # via the API. attributes is a dict, so we'll transparently convert + # using the API. attributes is a dict, so we'll transparently convert # the values to lists. if module.params.get('attributes') is not None: for key, val in module.params['attributes'].items(): @@ -244,7 +290,7 @@ def main(): # Filter and map the parameters names that apply to the role role_params = [x for x in module.params - if x not in list(keycloak_argument_spec().keys()) + ['state', 'realm', 'client_id', 'composites'] and + if x not in list(keycloak_argument_spec().keys()) + ['state', 'realm', 'client_id'] and module.params.get(x) is not None] # See if it already exists in Keycloak @@ -263,10 +309,10 @@ def main(): new_param_value = module.params.get(param) old_value = before_role[param] if param in before_role else None if new_param_value != old_value: - changeset[camel(param)] = new_param_value + changeset[camel(param)] = copy.deepcopy(new_param_value) # Prepare the desired values using the existing values (non-existence results in a dict that is save to use as a basis) - desired_role = before_role.copy() + desired_role = copy.deepcopy(before_role) desired_role.update(changeset) result['proposed'] = changeset @@ -303,6 +349,9 @@ def main(): 
kc.create_client_role(desired_role, clientid, realm) after_role = kc.get_client_role(name, clientid, realm) + if after_role['composite']: + after_role['composites'] = kc.get_role_composites(rolerep=after_role, clientid=clientid, realm=realm) + result['end_state'] = after_role result['msg'] = 'Role {name} has been created'.format(name=name) @@ -310,10 +359,25 @@ def main(): else: if state == 'present': + compare_exclude = ['clientId'] + if 'composites' in desired_role and isinstance(desired_role['composites'], list) and len(desired_role['composites']) > 0: + composites = kc.get_role_composites(rolerep=before_role, clientid=clientid, realm=realm) + before_role['composites'] = [] + for composite in composites: + before_composite = {} + if composite['clientRole']: + composite_client = kc.get_client_by_id(id=composite['containerId'], realm=realm) + before_composite['client_id'] = composite_client['clientId'] + else: + before_composite['client_id'] = None + before_composite['name'] = composite['name'] + before_composite['state'] = 'present' + before_role['composites'].append(before_composite) + else: + compare_exclude.append('composites') # Process an update - # no changes - if desired_role == before_role: + if is_struct_included(desired_role, before_role, exclude=compare_exclude): result['changed'] = False result['end_state'] = desired_role result['msg'] = "No changes required to role {name}.".format(name=name) @@ -335,6 +399,8 @@ def main(): else: kc.update_client_role(desired_role, clientid, realm) after_role = kc.get_client_role(name, clientid, realm) + if after_role['composite']: + after_role['composites'] = kc.get_role_composites(rolerep=after_role, clientid=clientid, realm=realm) result['end_state'] = after_role diff --git a/plugins/modules/keycloak_user.py b/plugins/modules/keycloak_user.py new file mode 100644 index 0000000000..8ff657e322 --- /dev/null +++ b/plugins/modules/keycloak_user.py @@ -0,0 +1,539 @@ +#!/usr/bin/python + +# Copyright (c) 2019, INSPQ 
(@elfelip) +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + + +DOCUMENTATION = r""" +module: keycloak_user +short_description: Create and configure a user in Keycloak +description: + - This module creates, removes, or updates Keycloak users. +version_added: 7.1.0 +options: + auth_username: + aliases: [] + realm: + description: + - The name of the realm in which is the client. + default: master + type: str + username: + description: + - Username for the user. + required: true + type: str + id: + description: + - ID of the user on the Keycloak server if known. + type: str + enabled: + description: + - Enabled user. + type: bool + email_verified: + description: + - Check the validity of user email. + default: false + type: bool + aliases: + - emailVerified + first_name: + description: + - The user's first name. + required: false + type: str + aliases: + - firstName + last_name: + description: + - The user's last name. + required: false + type: str + aliases: + - lastName + email: + description: + - User email. + required: false + type: str + federation_link: + description: + - Federation Link. + required: false + type: str + aliases: + - federationLink + service_account_client_id: + description: + - Description of the client Application. + required: false + type: str + aliases: + - serviceAccountClientId + client_consents: + description: + - Client Authenticator Type. + type: list + elements: dict + default: [] + aliases: + - clientConsents + suboptions: + client_id: + description: + - Client ID of the client role. Not the technical ID of the client. + type: str + required: true + aliases: + - clientId + roles: + description: + - List of client roles to assign to the user. + type: list + required: true + elements: str + groups: + description: + - List of groups for the user. 
+ - Groups can be referenced by their name, like V(staff), or their path, like V(/staff/engineering). The path syntax + allows you to reference subgroups, which is not possible otherwise. + - Using the path is possible since community.general 10.6.0. + type: list + elements: dict + default: [] + suboptions: + name: + description: + - Name of the group. + type: str + state: + description: + - Control whether the user must be member of this group or not. + choices: ["present", "absent"] + default: present + type: str + credentials: + description: + - User credentials. + default: [] + type: list + elements: dict + suboptions: + type: + description: + - Credential type. + type: str + required: true + value: + description: + - Value of the credential. + type: str + required: true + temporary: + description: + - If V(true), the users are required to reset their credentials at next login. + type: bool + default: false + required_actions: + description: + - RequiredActions user Auth. + default: [] + type: list + elements: str + aliases: + - requiredActions + federated_identities: + description: + - List of IDPs of user. + default: [] + type: list + elements: str + aliases: + - federatedIdentities + attributes: + description: + - List of user attributes. + required: false + type: list + elements: dict + suboptions: + name: + description: + - Name of the attribute. + type: str + values: + description: + - Values for the attribute as list. + type: list + elements: str + state: + description: + - Control whether the attribute must exists or not. + choices: ["present", "absent"] + default: present + type: str + access: + description: + - List user access. + required: false + type: dict + disableable_credential_types: + description: + - List user Credential Type. + default: [] + type: list + elements: str + aliases: + - disableableCredentialTypes + origin: + description: + - User origin. + required: false + type: str + self: + description: + - User self administration. 
+ required: false + type: str + state: + description: + - Control whether the user should exists or not. + choices: ["present", "absent"] + default: present + type: str + force: + description: + - If V(true), allows to remove user and recreate it. + type: bool + default: false +extends_documentation_fragment: + - community.general.keycloak + - community.general.keycloak.actiongroup_keycloak + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: full + action_group: + version_added: 10.2.0 +notes: + - The module does not modify the user ID of an existing user. +author: + - Philippe Gauthier (@elfelip) +""" + +EXAMPLES = r""" +- name: Create a user user1 + community.general.keycloak_user: + auth_keycloak_url: http://localhost:8080/auth + auth_username: admin + auth_password: password + realm: master + username: user1 + firstName: user1 + lastName: user1 + email: user1 + enabled: true + emailVerified: false + credentials: + - type: password + value: password + temporary: false + attributes: + - name: attr1 + values: + - value1 + state: present + - name: attr2 + values: + - value2 + state: absent + groups: + - name: group1 + state: present + state: present + +- name: Re-create a User + community.general.keycloak_user: + auth_keycloak_url: http://localhost:8080/auth + auth_username: admin + auth_password: password + realm: master + username: user1 + firstName: user1 + lastName: user1 + email: user1 + enabled: true + emailVerified: false + credentials: + - type: password + value: password + temporary: false + attributes: + - name: attr1 + values: + - value1 + state: present + - name: attr2 + values: + - value2 + state: absent + groups: + - name: group1 + state: present + state: present + +- name: Re-create a User + community.general.keycloak_user: + auth_keycloak_url: http://localhost:8080/auth + auth_username: admin + auth_password: password + realm: master + username: user1 + firstName: user1 + lastName: user1 + email: user1 
+ enabled: true + emailVerified: false + credentials: + - type: password + value: password + temporary: false + attributes: + - name: attr1 + values: + - value1 + state: present + - name: attr2 + values: + - value2 + state: absent + groups: + - name: group1 + state: present + state: present + force: true + +- name: Remove User + community.general.keycloak_user: + auth_keycloak_url: http://localhost:8080/auth + auth_username: admin + auth_password: password + realm: master + username: user1 + state: absent +""" + +RETURN = r""" +proposed: + description: Representation of the proposed user. + returned: on success + type: dict +existing: + description: Representation of the existing user. + returned: on success + type: dict +end_state: + description: Representation of the user after module execution. + returned: on success + type: dict +""" + +from ansible_collections.community.general.plugins.module_utils.identity.keycloak.keycloak import KeycloakAPI, camel, \ + keycloak_argument_spec, get_token, KeycloakError, is_struct_included +from ansible.module_utils.basic import AnsibleModule +import copy + + +def main(): + argument_spec = keycloak_argument_spec() + argument_spec['auth_username']['aliases'] = [] + credential_spec = dict( + type=dict(type='str', required=True), + value=dict(type='str', required=True), + temporary=dict(type='bool', default=False) + ) + client_consents_spec = dict( + client_id=dict(type='str', required=True, aliases=['clientId']), + roles=dict(type='list', elements='str', required=True) + ) + attributes_spec = dict( + name=dict(type='str'), + values=dict(type='list', elements='str'), + state=dict(type='str', choices=['present', 'absent'], default='present') + ) + groups_spec = dict( + name=dict(type='str'), + state=dict(type='str', choices=['present', 'absent'], default='present') + ) + meta_args = dict( + realm=dict(type='str', default='master'), + self=dict(type='str'), + id=dict(type='str'), + username=dict(type='str', required=True), + 
first_name=dict(type='str', aliases=['firstName']), + last_name=dict(type='str', aliases=['lastName']), + email=dict(type='str'), + enabled=dict(type='bool'), + email_verified=dict(type='bool', default=False, aliases=['emailVerified']), + federation_link=dict(type='str', aliases=['federationLink']), + service_account_client_id=dict(type='str', aliases=['serviceAccountClientId']), + attributes=dict(type='list', elements='dict', options=attributes_spec), + access=dict(type='dict'), + groups=dict(type='list', default=[], elements='dict', options=groups_spec), + disableable_credential_types=dict(type='list', default=[], aliases=['disableableCredentialTypes'], elements='str'), + required_actions=dict(type='list', default=[], aliases=['requiredActions'], elements='str'), + credentials=dict(type='list', default=[], elements='dict', options=credential_spec), + federated_identities=dict(type='list', default=[], aliases=['federatedIdentities'], elements='str'), + client_consents=dict(type='list', default=[], aliases=['clientConsents'], elements='dict', options=client_consents_spec), + origin=dict(type='str'), + state=dict(choices=["absent", "present"], default='present'), + force=dict(type='bool', default=False), + ) + argument_spec.update(meta_args) + + module = AnsibleModule(argument_spec=argument_spec, + supports_check_mode=True, + required_one_of=([['token', 'auth_realm', 'auth_username', 'auth_password', 'auth_client_id', 'auth_client_secret']]), + required_together=([['auth_username', 'auth_password']]), + required_by={'refresh_token': 'auth_realm'}, + ) + + result = dict(changed=False, msg='', diff={}, proposed={}, existing={}, end_state={}) + + # Obtain access token, initialize API + try: + connection_header = get_token(module.params) + except KeycloakError as e: + module.fail_json(msg=str(e)) + + kc = KeycloakAPI(module, connection_header) + + realm = module.params.get('realm') + state = module.params.get('state') + force = module.params.get('force') + username = 
module.params.get('username') + groups = module.params.get('groups') + + # Filter and map the parameters names that apply to the user + user_params = [x for x in module.params + if x not in list(keycloak_argument_spec().keys()) + ['state', 'realm', 'force', 'groups'] and + module.params.get(x) is not None] + + before_user = kc.get_user_by_username(username=username, realm=realm) + + if before_user is None: + before_user = {} + + changeset = {} + + for param in user_params: + new_param_value = module.params.get(param) + if param == 'attributes' and param in before_user: + old_value = kc.convert_keycloak_user_attributes_dict_to_module_list(attributes=before_user['attributes']) + else: + old_value = before_user[param] if param in before_user else None + if new_param_value != old_value: + if old_value is not None and param == 'attributes': + for old_attribute in old_value: + old_attribute_found = False + for new_attribute in new_param_value: + if new_attribute['name'] == old_attribute['name']: + old_attribute_found = True + if not old_attribute_found: + new_param_value.append(copy.deepcopy(old_attribute)) + if isinstance(new_param_value, dict): + changeset[camel(param)] = copy.deepcopy(new_param_value) + else: + changeset[camel(param)] = new_param_value + # Prepare the desired values using the existing values (non-existence results in a dict that is save to use as a basis) + desired_user = copy.deepcopy(before_user) + desired_user.update(changeset) + + result['proposed'] = changeset + result['existing'] = before_user + + changed = False + + # Cater for when it doesn't exist (an empty dict) + if state == 'absent': + if not before_user: + # Do nothing and exit + if module._diff: + result['diff'] = dict(before='', after='') + result['changed'] = False + result['end_state'] = {} + result['msg'] = 'Role does not exist, doing nothing.' 
+ module.exit_json(**result) + else: + # Delete user + kc.delete_user(user_id=before_user['id'], realm=realm) + result["msg"] = 'User %s deleted' % (before_user['username']) + changed = True + + else: + after_user = {} + if force and before_user: # If the force option is set to true + # Delete the existing user + kc.delete_user(user_id=before_user["id"], realm=realm) + + if not before_user or force: + # Process a creation + changed = True + + if username is None: + module.fail_json(msg='username must be specified when creating a new user') + + if module._diff: + result['diff'] = dict(before='', after=desired_user) + + if module.check_mode: + module.exit_json(**result) + # Create the user + after_user = kc.create_user(userrep=desired_user, realm=realm) + result["msg"] = 'User %s created' % (desired_user['username']) + # Add user ID to new representation + desired_user['id'] = after_user["id"] + else: + excludes = [ + "access", + "notBefore", + "createdTimestamp", + "totp", + "credentials", + "disableableCredentialTypes", + "groups", + "clientConsents", + "federatedIdentities", + "requiredActions"] + # Add user ID to new representation + desired_user['id'] = before_user["id"] + + # Compare users + if not (is_struct_included(desired_user, before_user, excludes)): # If the new user does not introduce a change to the existing user + # Update the user + after_user = kc.update_user(userrep=desired_user, realm=realm) + changed = True + + # set user groups + if kc.update_user_groups_membership(userrep=desired_user, groups=groups, realm=realm): + changed = True + # Get the user groups + after_user["groups"] = kc.get_user_groups(user_id=desired_user["id"], realm=realm) + result["end_state"] = after_user + if changed: + result["msg"] = 'User %s updated' % (desired_user['username']) + else: + result["msg"] = 'No changes made for user %s' % (desired_user['username']) + + result['changed'] = changed + module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git 
a/plugins/modules/keycloak_user_federation.py b/plugins/modules/keycloak_user_federation.py new file mode 100644 index 0000000000..c856e31d29 --- /dev/null +++ b/plugins/modules/keycloak_user_federation.py @@ -0,0 +1,1102 @@ +#!/usr/bin/python + +# Copyright (c) Ansible project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + +DOCUMENTATION = r""" +module: keycloak_user_federation + +short_description: Allows administration of Keycloak user federations using Keycloak API + +version_added: 3.7.0 + +description: + - This module allows you to add, remove or modify Keycloak user federations using the Keycloak REST API. It requires access + to the REST API using OpenID Connect; the user connecting and the client being used must have the requisite access rights. + In a default Keycloak installation, admin-cli and an admin user would work, as would a separate client definition with + the scope tailored to your needs and a user having the expected roles. + - The names of module options are snake_cased versions of the camelCase ones found in the Keycloak API and its documentation + at U(https://www.keycloak.org/docs-api/20.0.2/rest-api/index.html). +attributes: + check_mode: + support: full + diff_mode: + support: full + action_group: + version_added: 10.2.0 + +options: + state: + description: + - State of the user federation. + - On V(present), the user federation is created if it does not yet exist, or updated with the parameters you provide. + - On V(absent), the user federation is removed if it exists. + default: 'present' + type: str + choices: + - present + - absent + + realm: + description: + - The Keycloak realm under which this user federation resides. + default: 'master' + type: str + + id: + description: + - The unique ID for this user federation. If left empty, the user federation is searched by its O(name). 
+ type: str + + name: + description: + - Display name of provider when linked in admin console. + type: str + + provider_id: + description: + - Provider for this user federation. Built-in providers are V(ldap), V(kerberos), and V(sssd). Custom user storage providers + can also be used. + aliases: + - providerId + type: str + + provider_type: + description: + - Component type for user federation (only supported value is V(org.keycloak.storage.UserStorageProvider)). + aliases: + - providerType + default: org.keycloak.storage.UserStorageProvider + type: str + + parent_id: + description: + - Unique ID for the parent of this user federation. Realm ID is automatically used if left blank. + aliases: + - parentId + type: str + + remove_unspecified_mappers: + description: + - Remove mappers that are not specified in the configuration for this federation. + - Set to V(false) to keep mappers that are not listed in O(mappers). + type: bool + default: true + version_added: 9.4.0 + + bind_credential_update_mode: + description: + - The value of the config parameter O(config.bindCredential) is redacted in the Keycloak responses. Comparing the redacted + value with the desired value always evaluates to not equal. This means the before and desired states are never equal + if the parameter is set. + - Set to V(always) to include O(config.bindCredential) in the comparison of before and desired state. Because of the + redacted value returned by Keycloak the module always detects a change and make an update if a O(config.bindCredential) + value is set. + - Set to V(only_indirect) to exclude O(config.bindCredential) when comparing the before state with the desired state. + The value of O(config.bindCredential) is only updated if there are other changes to the user federation that require + an update. 
+ type: str + default: always + choices: + - always + - only_indirect + version_added: 9.5.0 + + config: + description: + - Dict specifying the configuration options for the provider; the contents differ depending on the value of O(provider_id). + Examples are given below for V(ldap), V(kerberos) and V(sssd). It is easiest to obtain valid config values by dumping + an already-existing user federation configuration through check-mode in the RV(existing) field. + - The value V(sssd) has been supported since community.general 4.2.0. + type: dict + suboptions: + enabled: + description: + - Enable/disable this user federation. + default: true + type: bool + + priority: + description: + - Priority of provider when doing a user lookup. Lowest first. + default: 0 + type: int + + importEnabled: + description: + - If V(true), LDAP users are imported into Keycloak DB and synced by the configured sync policies. + default: true + type: bool + + editMode: + description: + - V(READ_ONLY) is a read-only LDAP store. V(WRITABLE) means data is synced back to LDAP on demand. V(UNSYNCED) means + user data is imported, but not synced back to LDAP. + type: str + choices: + - READ_ONLY + - WRITABLE + - UNSYNCED + + syncRegistrations: + description: + - Should newly created users be created within LDAP store? Priority effects which provider is chosen to sync the + new user. + default: false + type: bool + + vendor: + description: + - LDAP vendor (provider). + - Use short name. For instance, write V(rhds) for "Red Hat Directory Server". + type: str + + usernameLDAPAttribute: + description: + - Name of LDAP attribute, which is mapped as Keycloak username. For many LDAP server vendors it can be V(uid). For + Active directory it can be V(sAMAccountName) or V(cn). The attribute should be filled for all LDAP user records + you want to import from LDAP to Keycloak. + type: str + + rdnLDAPAttribute: + description: + - Name of LDAP attribute, which is used as RDN (top attribute) of typical user DN. 
Usually it is the same as Username + LDAP attribute, however it is not required. For example for Active directory, it is common to use V(cn) as RDN + attribute when username attribute might be V(sAMAccountName). + type: str + + uuidLDAPAttribute: + description: + - Name of LDAP attribute, which is used as unique object identifier (UUID) for objects in LDAP. For many LDAP server + vendors, it is V(entryUUID); however some are different. For example for Active directory it should be V(objectGUID). + If your LDAP server does not support the notion of UUID, you can use any other attribute that is supposed to be + unique among LDAP users in tree. + type: str + + userObjectClasses: + description: + - All values of LDAP objectClass attribute for users in LDAP divided by comma. For example V(inetOrgPerson, organizationalPerson). + Newly created Keycloak users are written to LDAP with all those object classes and existing LDAP user records + are found just if they contain all those object classes. + type: str + + connectionUrl: + description: + - Connection URL to your LDAP server. + type: str + + usersDn: + description: + - Full DN of LDAP tree where your users are. This DN is the parent of LDAP users. + type: str + + customUserSearchFilter: + description: + - Additional LDAP Filter for filtering searched users. Leave this empty if you do not need additional filter. + type: str + + searchScope: + description: + - For one level, the search applies only for users in the DNs specified by User DNs. For subtree, the search applies + to the whole subtree. See LDAP documentation for more details. + default: '1' + type: str + choices: + - '1' + - '2' + + authType: + description: + - Type of the Authentication method used during LDAP Bind operation. It is used in most of the requests sent to + the LDAP server. + default: 'none' + type: str + choices: + - none + - simple + + bindDn: + description: + - DN of LDAP user which is used by Keycloak to access LDAP server. 
+ type: str + + bindCredential: + description: + - Password of LDAP admin. + type: str + + startTls: + description: + - Encrypts the connection to LDAP using STARTTLS, which disables connection pooling. + default: false + type: bool + + usePasswordModifyExtendedOp: + description: + - Use the LDAPv3 Password Modify Extended Operation (RFC-3062). The password modify extended operation usually requires + that LDAP user already has password in the LDAP server. So when this is used with 'Sync Registrations', it can + be good to add also 'Hardcoded LDAP attribute mapper' with randomly generated initial password. + default: false + type: bool + + validatePasswordPolicy: + description: + - Determines if Keycloak should validate the password with the realm password policy before updating it. + default: false + type: bool + + trustEmail: + description: + - If enabled, email provided by this provider is not verified even if verification is enabled for the realm. + default: false + type: bool + + useTruststoreSpi: + description: + - Specifies whether LDAP connection uses the truststore SPI with the truststore configured in standalone.xml/domain.xml. + V(always) means that it always uses it. V(never) means that it does not use it. V(ldapsOnly) means that it uses + if your connection URL use ldaps. + - Note even if standalone.xml/domain.xml is not configured, the default Java cacerts or certificate specified by + C(javax.net.ssl.trustStore) property is used. + default: ldapsOnly + type: str + choices: + - always + - ldapsOnly + - never + + connectionTimeout: + description: + - LDAP Connection Timeout in milliseconds. + type: int + + readTimeout: + description: + - LDAP Read Timeout in milliseconds. This timeout applies for LDAP read operations. + type: int + + pagination: + description: + - Does the LDAP server support pagination. + default: true + type: bool + + connectionPooling: + description: + - Determines if Keycloak should use connection pooling for accessing LDAP server. 
+ default: true + type: bool + + connectionPoolingAuthentication: + description: + - A list of space-separated authentication types of connections that may be pooled. + type: str + choices: + - none + - simple + - DIGEST-MD5 + + connectionPoolingDebug: + description: + - A string that indicates the level of debug output to produce. Example valid values are V(fine) (trace connection + creation and removal) and V(all) (all debugging information). + type: str + + connectionPoolingInitSize: + description: + - The number of connections per connection identity to create when initially creating a connection for the identity. + type: int + + connectionPoolingMaxSize: + description: + - The maximum number of connections per connection identity that can be maintained concurrently. + type: int + + connectionPoolingPrefSize: + description: + - The preferred number of connections per connection identity that should be maintained concurrently. + type: int + + connectionPoolingProtocol: + description: + - A list of space-separated protocol types of connections that may be pooled. Valid types are V(plain) and V(ssl). + type: str + + connectionPoolingTimeout: + description: + - The number of milliseconds that an idle connection may remain in the pool without being closed and removed from + the pool. + type: int + + allowKerberosAuthentication: + description: + - Enable/disable HTTP authentication of users with SPNEGO/Kerberos tokens. The data about authenticated users is + provisioned from this LDAP server. + default: false + type: bool + + kerberosRealm: + description: + - Name of kerberos realm. + type: str + + krbPrincipalAttribute: + description: + - Name of the LDAP attribute, which refers to Kerberos principal. This is used to lookup appropriate LDAP user after + successful Kerberos/SPNEGO authentication in Keycloak. When this is empty, the LDAP user is looked up based on + LDAP username corresponding to the first part of his Kerberos principal. 
For instance, for principal C(john@KEYCLOAK.ORG), + it assumes that LDAP username is V(john). + type: str + version_added: 8.1.0 + + serverPrincipal: + description: + - Full name of server principal for HTTP service including server and domain name. For example V(HTTP/host.foo.org@FOO.ORG). + Use V(*) to accept any service principal in the KeyTab file. + type: str + + keyTab: + description: + - Location of Kerberos KeyTab file containing the credentials of server principal. For example V(/etc/krb5.keytab). + type: str + + debug: + description: + - Enable/disable debug logging to standard output for Krb5LoginModule. + type: bool + + useKerberosForPasswordAuthentication: + description: + - Use Kerberos login module for authenticate username/password against Kerberos server instead of authenticating + against LDAP server with Directory Service API. + default: false + type: bool + + allowPasswordAuthentication: + description: + - Enable/disable possibility of username/password authentication against Kerberos database. + type: bool + + batchSizeForSync: + description: + - Count of LDAP users to be imported from LDAP to Keycloak within a single transaction. + default: 1000 + type: int + + fullSyncPeriod: + description: + - Period for full synchronization in seconds. + default: -1 + type: int + + changedSyncPeriod: + description: + - Period for synchronization of changed or newly created LDAP users in seconds. + default: -1 + type: int + + updateProfileFirstLogin: + description: + - Update profile on first login. + type: bool + + cachePolicy: + description: + - Cache Policy for this storage provider. + type: str + default: 'DEFAULT' + choices: + - DEFAULT + - EVICT_DAILY + - EVICT_WEEKLY + - MAX_LIFESPAN + - NO_CACHE + + evictionDay: + description: + - Day of the week the entry is set to become invalid on. + type: str + + evictionHour: + description: + - Hour of day the entry is set to become invalid on. 
+ type: str + + evictionMinute: + description: + - Minute of day the entry is set to become invalid on. + type: str + + maxLifespan: + description: + - Max lifespan of cache entry in milliseconds. + type: int + + referral: + description: + - Specifies if LDAP referrals should be followed or ignored. Please note that enabling referrals can slow down authentication + as it allows the LDAP server to decide which other LDAP servers to use. This could potentially include untrusted + servers. + type: str + choices: + - ignore + - follow + version_added: 9.5.0 + + mappers: + description: + - A list of dicts defining mappers associated with this Identity Provider. + type: list + elements: dict + suboptions: + id: + description: + - Unique ID of this mapper. + type: str + + name: + description: + - Name of the mapper. If no ID is given, the mapper is searched by name. + type: str + + parentId: + description: + - Unique ID for the parent of this mapper. ID of the user federation is automatically used if left blank. + type: str + + providerId: + description: + - The mapper type for this mapper (for instance V(user-attribute-ldap-mapper)). + type: str + + providerType: + description: + - Component type for this mapper. + type: str + default: org.keycloak.storage.ldap.mappers.LDAPStorageMapper + + config: + description: + - Dict specifying the configuration options for the mapper; the contents differ depending on the value of I(identityProviderMapper). 
+ type: dict + +extends_documentation_fragment: + - community.general.keycloak + - community.general.keycloak.actiongroup_keycloak + - community.general.attributes + +author: + - Laurent Paumier (@laurpaum) +""" + +EXAMPLES = r""" +- name: Create LDAP user federation + community.general.keycloak_user_federation: + auth_keycloak_url: https://keycloak.example.com/auth + auth_realm: master + auth_username: admin + auth_password: password + realm: my-realm + name: my-ldap + state: present + provider_id: ldap + provider_type: org.keycloak.storage.UserStorageProvider + config: + priority: 0 + enabled: true + cachePolicy: DEFAULT + batchSizeForSync: 1000 + editMode: READ_ONLY + importEnabled: true + syncRegistrations: false + vendor: other + usernameLDAPAttribute: uid + rdnLDAPAttribute: uid + uuidLDAPAttribute: entryUUID + userObjectClasses: inetOrgPerson, organizationalPerson + connectionUrl: ldaps://ldap.example.com:636 + usersDn: ou=Users,dc=example,dc=com + authType: simple + bindDn: cn=directory reader + bindCredential: password + searchScope: 1 + validatePasswordPolicy: false + trustEmail: false + useTruststoreSpi: ldapsOnly + connectionPooling: true + pagination: true + allowKerberosAuthentication: false + debug: false + useKerberosForPasswordAuthentication: false + mappers: + - name: "full name" + providerId: "full-name-ldap-mapper" + providerType: "org.keycloak.storage.ldap.mappers.LDAPStorageMapper" + config: + ldap.full.name.attribute: cn + read.only: true + write.only: false + +- name: Create Kerberos user federation + community.general.keycloak_user_federation: + auth_keycloak_url: https://keycloak.example.com/auth + auth_realm: master + auth_username: admin + auth_password: password + realm: my-realm + name: my-kerberos + state: present + provider_id: kerberos + provider_type: org.keycloak.storage.UserStorageProvider + config: + priority: 0 + enabled: true + cachePolicy: DEFAULT + kerberosRealm: EXAMPLE.COM + serverPrincipal: 
HTTP/host.example.com@EXAMPLE.COM + keyTab: keytab + allowPasswordAuthentication: false + updateProfileFirstLogin: false + +- name: Create sssd user federation + community.general.keycloak_user_federation: + auth_keycloak_url: https://keycloak.example.com/auth + auth_realm: master + auth_username: admin + auth_password: password + realm: my-realm + name: my-sssd + state: present + provider_id: sssd + provider_type: org.keycloak.storage.UserStorageProvider + config: + priority: 0 + enabled: true + cachePolicy: DEFAULT + +- name: Delete user federation + community.general.keycloak_user_federation: + auth_keycloak_url: https://keycloak.example.com/auth + auth_realm: master + auth_username: admin + auth_password: password + realm: my-realm + name: my-federation + state: absent +""" + +RETURN = r""" +msg: + description: Message as to what action was taken. + returned: always + type: str + sample: "No changes required to user federation 164bb483-c613-482e-80fe-7f1431308799." + +proposed: + description: Representation of proposed user federation. 
+ returned: always + type: dict + sample: + { + "config": { + "allowKerberosAuthentication": "false", + "authType": "simple", + "batchSizeForSync": "1000", + "bindCredential": "**********", + "bindDn": "cn=directory reader", + "cachePolicy": "DEFAULT", + "connectionPooling": "true", + "connectionUrl": "ldaps://ldap.example.com:636", + "debug": "false", + "editMode": "READ_ONLY", + "enabled": "true", + "importEnabled": "true", + "pagination": "true", + "priority": "0", + "rdnLDAPAttribute": "uid", + "searchScope": "1", + "syncRegistrations": "false", + "trustEmail": "false", + "useKerberosForPasswordAuthentication": "false", + "useTruststoreSpi": "ldapsOnly", + "userObjectClasses": "inetOrgPerson, organizationalPerson", + "usernameLDAPAttribute": "uid", + "usersDn": "ou=Users,dc=example,dc=com", + "uuidLDAPAttribute": "entryUUID", + "validatePasswordPolicy": "false", + "vendor": "other" + }, + "name": "ldap", + "providerId": "ldap", + "providerType": "org.keycloak.storage.UserStorageProvider" + } + +existing: + description: Representation of existing user federation. 
+ returned: always + type: dict + sample: + { + "config": { + "allowKerberosAuthentication": "false", + "authType": "simple", + "batchSizeForSync": "1000", + "bindCredential": "**********", + "bindDn": "cn=directory reader", + "cachePolicy": "DEFAULT", + "changedSyncPeriod": "-1", + "connectionPooling": "true", + "connectionUrl": "ldaps://ldap.example.com:636", + "debug": "false", + "editMode": "READ_ONLY", + "enabled": "true", + "fullSyncPeriod": "-1", + "importEnabled": "true", + "pagination": "true", + "priority": "0", + "rdnLDAPAttribute": "uid", + "searchScope": "1", + "syncRegistrations": "false", + "trustEmail": "false", + "useKerberosForPasswordAuthentication": "false", + "useTruststoreSpi": "ldapsOnly", + "userObjectClasses": "inetOrgPerson, organizationalPerson", + "usernameLDAPAttribute": "uid", + "usersDn": "ou=Users,dc=example,dc=com", + "uuidLDAPAttribute": "entryUUID", + "validatePasswordPolicy": "false", + "vendor": "other" + }, + "id": "01122837-9047-4ae4-8ca0-6e2e891a765f", + "mappers": [ + { + "config": { + "always.read.value.from.ldap": "false", + "is.mandatory.in.ldap": "false", + "ldap.attribute": "mail", + "read.only": "true", + "user.model.attribute": "email" + }, + "id": "17d60ce2-2d44-4c2c-8b1f-1fba601b9a9f", + "name": "email", + "parentId": "01122837-9047-4ae4-8ca0-6e2e891a765f", + "providerId": "user-attribute-ldap-mapper", + "providerType": "org.keycloak.storage.ldap.mappers.LDAPStorageMapper" + } + ], + "name": "myfed", + "parentId": "myrealm", + "providerId": "ldap", + "providerType": "org.keycloak.storage.UserStorageProvider" + } + +end_state: + description: Representation of user federation after module execution. 
+ returned: on success + type: dict + sample: + { + "config": { + "allowPasswordAuthentication": "false", + "cachePolicy": "DEFAULT", + "enabled": "true", + "kerberosRealm": "EXAMPLE.COM", + "keyTab": "/etc/krb5.keytab", + "priority": "0", + "serverPrincipal": "HTTP/host.example.com@EXAMPLE.COM", + "updateProfileFirstLogin": "false" + }, + "id": "cf52ae4f-4471-4435-a0cf-bb620cadc122", + "mappers": [], + "name": "kerberos", + "parentId": "myrealm", + "providerId": "kerberos", + "providerType": "org.keycloak.storage.UserStorageProvider" + } +""" + +from ansible_collections.community.general.plugins.module_utils.identity.keycloak.keycloak import KeycloakAPI, camel, \ + keycloak_argument_spec, get_token, KeycloakError +from ansible.module_utils.basic import AnsibleModule +from urllib.parse import urlencode +from copy import deepcopy + + +def normalize_kc_comp(comp): + if 'config' in comp: + # kc completely removes the parameter `krbPrincipalAttribute` if it is set to `''`; the unset kc parameter is equivalent to `''`; + # to make change detection and diff more accurate we set it again in the kc responses + if 'krbPrincipalAttribute' not in comp['config']: + comp['config']['krbPrincipalAttribute'] = [''] + + # kc stores a timestamp of the last sync in `lastSync` to time the periodic sync, it is removed to minimize diff/changes + comp['config'].pop('lastSync', None) + + +def sanitize(comp): + compcopy = deepcopy(comp) + if 'config' in compcopy: + compcopy['config'] = {k: v[0] for k, v in compcopy['config'].items()} + if 'bindCredential' in compcopy['config']: + compcopy['config']['bindCredential'] = '**********' + if 'mappers' in compcopy: + for mapper in compcopy['mappers']: + if 'config' in mapper: + mapper['config'] = {k: v[0] for k, v in mapper['config'].items()} + return compcopy + + +def main(): + """ + Module execution + + :return: + """ + argument_spec = keycloak_argument_spec() + + config_spec = dict( + allowKerberosAuthentication=dict(type='bool', 
default=False), + allowPasswordAuthentication=dict(type='bool'), + authType=dict(type='str', choices=['none', 'simple'], default='none'), + batchSizeForSync=dict(type='int', default=1000), + bindCredential=dict(type='str', no_log=True), + bindDn=dict(type='str'), + cachePolicy=dict(type='str', choices=['DEFAULT', 'EVICT_DAILY', 'EVICT_WEEKLY', 'MAX_LIFESPAN', 'NO_CACHE'], default='DEFAULT'), + changedSyncPeriod=dict(type='int', default=-1), + connectionPooling=dict(type='bool', default=True), + connectionPoolingAuthentication=dict(type='str', choices=['none', 'simple', 'DIGEST-MD5']), + connectionPoolingDebug=dict(type='str'), + connectionPoolingInitSize=dict(type='int'), + connectionPoolingMaxSize=dict(type='int'), + connectionPoolingPrefSize=dict(type='int'), + connectionPoolingProtocol=dict(type='str'), + connectionPoolingTimeout=dict(type='int'), + connectionTimeout=dict(type='int'), + connectionUrl=dict(type='str'), + customUserSearchFilter=dict(type='str'), + debug=dict(type='bool'), + editMode=dict(type='str', choices=['READ_ONLY', 'WRITABLE', 'UNSYNCED']), + enabled=dict(type='bool', default=True), + evictionDay=dict(type='str'), + evictionHour=dict(type='str'), + evictionMinute=dict(type='str'), + fullSyncPeriod=dict(type='int', default=-1), + importEnabled=dict(type='bool', default=True), + kerberosRealm=dict(type='str'), + keyTab=dict(type='str', no_log=False), + maxLifespan=dict(type='int'), + pagination=dict(type='bool', default=True), + priority=dict(type='int', default=0), + rdnLDAPAttribute=dict(type='str'), + readTimeout=dict(type='int'), + referral=dict(type='str', choices=['ignore', 'follow']), + searchScope=dict(type='str', choices=['1', '2'], default='1'), + serverPrincipal=dict(type='str'), + krbPrincipalAttribute=dict(type='str'), + startTls=dict(type='bool', default=False), + syncRegistrations=dict(type='bool', default=False), + trustEmail=dict(type='bool', default=False), + updateProfileFirstLogin=dict(type='bool'), + 
useKerberosForPasswordAuthentication=dict(type='bool', default=False), + usePasswordModifyExtendedOp=dict(type='bool', default=False, no_log=False), + useTruststoreSpi=dict(type='str', choices=['always', 'ldapsOnly', 'never'], default='ldapsOnly'), + userObjectClasses=dict(type='str'), + usernameLDAPAttribute=dict(type='str'), + usersDn=dict(type='str'), + uuidLDAPAttribute=dict(type='str'), + validatePasswordPolicy=dict(type='bool', default=False), + vendor=dict(type='str'), + ) + + mapper_spec = dict( + id=dict(type='str'), + name=dict(type='str'), + parentId=dict(type='str'), + providerId=dict(type='str'), + providerType=dict(type='str', default='org.keycloak.storage.ldap.mappers.LDAPStorageMapper'), + config=dict(type='dict'), + ) + + meta_args = dict( + config=dict(type='dict', options=config_spec), + state=dict(type='str', default='present', choices=['present', 'absent']), + realm=dict(type='str', default='master'), + id=dict(type='str'), + name=dict(type='str'), + provider_id=dict(type='str', aliases=['providerId']), + provider_type=dict(type='str', aliases=['providerType'], default='org.keycloak.storage.UserStorageProvider'), + parent_id=dict(type='str', aliases=['parentId']), + remove_unspecified_mappers=dict(type='bool', default=True), + bind_credential_update_mode=dict(type='str', default='always', choices=['always', 'only_indirect']), + mappers=dict(type='list', elements='dict', options=mapper_spec), + ) + + argument_spec.update(meta_args) + + module = AnsibleModule(argument_spec=argument_spec, + supports_check_mode=True, + required_one_of=([['id', 'name'], + ['token', 'auth_realm', 'auth_username', 'auth_password', 'auth_client_id', 'auth_client_secret']]), + required_together=([['auth_username', 'auth_password']]), + required_by={'refresh_token': 'auth_realm'}, + ) + + result = dict(changed=False, msg='', diff={}, proposed={}, existing={}, end_state={}) + + # Obtain access token, initialize API + try: + connection_header = get_token(module.params) + 
except KeycloakError as e: + module.fail_json(msg=str(e)) + + kc = KeycloakAPI(module, connection_header) + + realm = module.params.get('realm') + state = module.params.get('state') + config = module.params.get('config') + mappers = module.params.get('mappers') + cid = module.params.get('id') + name = module.params.get('name') + + # Keycloak API expects config parameters to be arrays containing a single string element + if config is not None: + module.params['config'] = { + k: [str(v).lower() if not isinstance(v, str) else v] + for k, v in config.items() + if config[k] is not None + } + + if mappers is not None: + for mapper in mappers: + if mapper.get('config') is not None: + mapper['config'] = { + k: [str(v).lower() if not isinstance(v, str) else v] + for k, v in mapper['config'].items() + if mapper['config'][k] is not None + } + + # Filter and map the parameters names that apply + comp_params = [x for x in module.params + if x not in list(keycloak_argument_spec().keys()) + + ['state', 'realm', 'mappers', 'remove_unspecified_mappers', 'bind_credential_update_mode'] + and module.params.get(x) is not None] + + # See if it already exists in Keycloak + if cid is None: + found = kc.get_components(urlencode(dict(type='org.keycloak.storage.UserStorageProvider', name=name)), realm) + if len(found) > 1: + module.fail_json(msg='No ID given and found multiple user federations with name `{name}`. 
Cannot continue.'.format(name=name)) + before_comp = next(iter(found), None) + if before_comp is not None: + cid = before_comp['id'] + else: + before_comp = kc.get_component(cid, realm) + + if before_comp is None: + before_comp = {} + + # if user federation exists, get associated mappers + if cid is not None and before_comp: + before_comp['mappers'] = sorted(kc.get_components(urlencode(dict(parent=cid)), realm), key=lambda x: x.get('name') or '') + + normalize_kc_comp(before_comp) + + # Build a proposed changeset from parameters given to this module + changeset = {} + + for param in comp_params: + new_param_value = module.params.get(param) + old_value = before_comp[camel(param)] if camel(param) in before_comp else None + if param == 'mappers': + new_param_value = [{k: v for k, v in x.items() if v is not None} for x in new_param_value] + if new_param_value != old_value: + changeset[camel(param)] = new_param_value + + # special handling of mappers list to allow change detection + if module.params.get('mappers') is not None: + if module.params['provider_id'] in ['kerberos', 'sssd']: + module.fail_json(msg='Cannot configure mappers for {type} provider.'.format(type=module.params['provider_id'])) + for change in module.params['mappers']: + change = {k: v for k, v in change.items() if v is not None} + if change.get('id') is None and change.get('name') is None: + module.fail_json(msg='Either `name` or `id` has to be specified on each mapper.') + if cid is None: + old_mapper = {} + elif change.get('id') is not None: + old_mapper = next((before_mapper for before_mapper in before_comp.get('mappers', []) if before_mapper["id"] == change['id']), None) + if old_mapper is None: + old_mapper = {} + else: + found = [before_mapper for before_mapper in before_comp.get('mappers', []) if before_mapper['name'] == change['name']] + if len(found) > 1: + module.fail_json(msg='Found multiple mappers with name `{name}`. 
Cannot continue.'.format(name=change['name'])) + if len(found) == 1: + old_mapper = found[0] + else: + old_mapper = {} + new_mapper = old_mapper.copy() + new_mapper.update(change) + # changeset contains all desired mappers: those existing, to update or to create + if changeset.get('mappers') is None: + changeset['mappers'] = list() + changeset['mappers'].append(new_mapper) + changeset['mappers'] = sorted(changeset['mappers'], key=lambda x: x.get('name') or '') + + # to keep unspecified existing mappers we add them to the desired mappers list, unless they're already present + if not module.params['remove_unspecified_mappers'] and 'mappers' in before_comp: + changeset_mapper_ids = [mapper['id'] for mapper in changeset['mappers'] if 'id' in mapper] + changeset['mappers'].extend([mapper for mapper in before_comp['mappers'] if mapper['id'] not in changeset_mapper_ids]) + + # Prepare the desired values using the existing values (non-existence results in a dict that is save to use as a basis) + desired_comp = before_comp.copy() + desired_comp.update(changeset) + + result['proposed'] = sanitize(changeset) + result['existing'] = sanitize(before_comp) + + # Cater for when it doesn't exist (an empty dict) + if not before_comp: + if state == 'absent': + # Do nothing and exit + if module._diff: + result['diff'] = dict(before='', after='') + result['changed'] = False + result['end_state'] = {} + result['msg'] = 'User federation does not exist; doing nothing.' 
+ module.exit_json(**result) + + # Process a creation + result['changed'] = True + + if module.check_mode: + if module._diff: + result['diff'] = dict(before='', after=sanitize(desired_comp)) + module.exit_json(**result) + + # create it + desired_mappers = desired_comp.pop('mappers', []) + after_comp = kc.create_component(desired_comp, realm) + cid = after_comp['id'] + updated_mappers = [] + # when creating a user federation, keycloak automatically creates default mappers + default_mappers = kc.get_components(urlencode(dict(parent=cid)), realm) + + # create new mappers or update existing default mappers + for desired_mapper in desired_mappers: + found = [default_mapper for default_mapper in default_mappers if default_mapper['name'] == desired_mapper['name']] + if len(found) > 1: + module.fail_json(msg='Found multiple mappers with name `{name}`. Cannot continue.'.format(name=desired_mapper['name'])) + if len(found) == 1: + old_mapper = found[0] + else: + old_mapper = {} + + new_mapper = old_mapper.copy() + new_mapper.update(desired_mapper) + + if new_mapper.get('id') is not None: + kc.update_component(new_mapper, realm) + updated_mappers.append(new_mapper) + else: + if new_mapper.get('parentId') is None: + new_mapper['parentId'] = cid + updated_mappers.append(kc.create_component(new_mapper, realm)) + + if module.params['remove_unspecified_mappers']: + # we remove all unwanted default mappers + # we use ids so we dont accidently remove one of the previously updated default mapper + for default_mapper in default_mappers: + if not default_mapper['id'] in [x['id'] for x in updated_mappers]: + kc.delete_component(default_mapper['id'], realm) + + after_comp['mappers'] = kc.get_components(urlencode(dict(parent=cid)), realm) + normalize_kc_comp(after_comp) + if module._diff: + result['diff'] = dict(before='', after=sanitize(after_comp)) + result['end_state'] = sanitize(after_comp) + result['msg'] = "User federation {id} has been created".format(id=cid) + 
module.exit_json(**result) + + else: + if state == 'present': + # Process an update + + desired_copy = deepcopy(desired_comp) + before_copy = deepcopy(before_comp) + # exclude bindCredential when checking wether an update is required, therefore + # updating it only if there are other changes + if module.params['bind_credential_update_mode'] == 'only_indirect': + desired_copy.get('config', []).pop('bindCredential', None) + before_copy.get('config', []).pop('bindCredential', None) + # no changes + if desired_copy == before_copy: + result['changed'] = False + result['end_state'] = sanitize(desired_comp) + result['msg'] = "No changes required to user federation {id}.".format(id=cid) + module.exit_json(**result) + + # doing an update + result['changed'] = True + + if module._diff: + result['diff'] = dict(before=sanitize(before_comp), after=sanitize(desired_comp)) + + if module.check_mode: + module.exit_json(**result) + + # do the update + desired_mappers = desired_comp.pop('mappers', []) + kc.update_component(desired_comp, realm) + + for before_mapper in before_comp.get('mappers', []): + # remove unwanted existing mappers that will not be updated + if not before_mapper['id'] in [x['id'] for x in desired_mappers if 'id' in x]: + kc.delete_component(before_mapper['id'], realm) + + for mapper in desired_mappers: + if mapper in before_comp.get('mappers', []): + continue + if mapper.get('id') is not None: + kc.update_component(mapper, realm) + else: + if mapper.get('parentId') is None: + mapper['parentId'] = desired_comp['id'] + kc.create_component(mapper, realm) + + after_comp = kc.get_component(cid, realm) + after_comp['mappers'] = sorted(kc.get_components(urlencode(dict(parent=cid)), realm), key=lambda x: x.get('name') or '') + normalize_kc_comp(after_comp) + after_comp_sanitized = sanitize(after_comp) + before_comp_sanitized = sanitize(before_comp) + result['end_state'] = after_comp_sanitized + if module._diff: + result['diff'] = dict(before=before_comp_sanitized, 
after=after_comp_sanitized) + result['changed'] = before_comp_sanitized != after_comp_sanitized + result['msg'] = "User federation {id} has been updated".format(id=cid) + module.exit_json(**result) + + elif state == 'absent': + # Process a deletion + result['changed'] = True + + if module._diff: + result['diff'] = dict(before=sanitize(before_comp), after='') + + if module.check_mode: + module.exit_json(**result) + + # delete it + kc.delete_component(cid, realm) + + result['end_state'] = {} + + result['msg'] = "User federation {id} has been deleted".format(id=cid) + + module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/keycloak_user_rolemapping.py b/plugins/modules/keycloak_user_rolemapping.py new file mode 100644 index 0000000000..2d7024fd5f --- /dev/null +++ b/plugins/modules/keycloak_user_rolemapping.py @@ -0,0 +1,397 @@ +#!/usr/bin/python + +# Copyright (c) 2022, Dušan Marković (@bratwurzt) +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later +from __future__ import annotations + +DOCUMENTATION = r""" +module: keycloak_user_rolemapping + +short_description: Allows administration of Keycloak user_rolemapping with the Keycloak API + +version_added: 5.7.0 + +description: + - This module allows you to add, remove or modify Keycloak user_rolemapping with the Keycloak REST API. It requires access + to the REST API using OpenID Connect; the user connecting and the client being used must have the requisite access rights. + In a default Keycloak installation, admin-cli and an admin user would work, as would a separate client definition with + the scope tailored to your needs and a user having the expected roles. + - The names of module options are snake_cased versions of the camelCase ones found in the Keycloak API and its documentation + at U(https://www.keycloak.org/docs-api/8.0/rest-api/index.html). 
+ - Attributes are multi-valued in the Keycloak API. All attributes are lists of individual values and are returned that way + by this module. You may pass single values for attributes when calling the module, and this is translated into a list + suitable for the API. + - When updating a user_rolemapping, where possible provide the role ID to the module. This removes a lookup to the API to + translate the name into the role ID. +attributes: + check_mode: + support: full + diff_mode: + support: full + action_group: + version_added: 10.2.0 + +options: + state: + description: + - State of the user_rolemapping. + - On V(present), the user_rolemapping is created if it does not yet exist, or updated with the parameters you provide. + - On V(absent), the user_rolemapping is removed if it exists. + default: 'present' + type: str + choices: + - present + - absent + + realm: + type: str + description: + - They Keycloak realm under which this role_representation resides. + default: 'master' + + target_username: + type: str + description: + - Username of the user roles are mapped to. + - This parameter is not required (can be replaced by uid for less API call). + uid: + type: str + description: + - ID of the user to be mapped. + - This parameter is not required for updating or deleting the rolemapping but providing it reduces the number of API + calls required. + service_account_user_client_id: + type: str + description: + - Client ID of the service-account-user to be mapped. + - This parameter is not required for updating or deleting the rolemapping but providing it reduces the number of API + calls required. + client_id: + type: str + description: + - Name of the client to be mapped (different than O(cid)). + - This parameter is required if O(cid) is not provided (can be replaced by O(cid) to reduce the number of API calls + that must be made). + cid: + type: str + description: + - ID of the client to be mapped. 
+ - This parameter is not required for updating or deleting the rolemapping but providing it reduces the number of API + calls required. + roles: + description: + - Roles to be mapped to the user. + type: list + elements: dict + suboptions: + name: + type: str + description: + - Name of the role representation. + - This parameter is required only when creating or updating the role_representation. + id: + type: str + description: + - The unique identifier for this role_representation. + - This parameter is not required for updating or deleting a role_representation but providing it reduces the number + of API calls required. +extends_documentation_fragment: + - community.general.keycloak + - community.general.keycloak.actiongroup_keycloak + - community.general.attributes + +author: + - Dušan Marković (@bratwurzt) +""" + +EXAMPLES = r""" +- name: Map a client role to a user, authentication with credentials + community.general.keycloak_user_rolemapping: + realm: MyCustomRealm + auth_client_id: admin-cli + auth_keycloak_url: https://auth.example.com/auth + auth_realm: master + auth_username: USERNAME + auth_password: PASSWORD + state: present + client_id: client1 + user_id: user1Id + roles: + - name: role_name1 + id: role_id1 + - name: role_name2 + id: role_id2 + delegate_to: localhost + +- name: Map a client role to a service account user for a client, authentication with credentials + community.general.keycloak_user_rolemapping: + realm: MyCustomRealm + auth_client_id: admin-cli + auth_keycloak_url: https://auth.example.com/auth + auth_realm: master + auth_username: USERNAME + auth_password: PASSWORD + state: present + client_id: client1 + service_account_user_client_id: clientIdOfServiceAccount + roles: + - name: role_name1 + id: role_id1 + - name: role_name2 + id: role_id2 + delegate_to: localhost + +- name: Map a client role to a user, authentication with token + community.general.keycloak_user_rolemapping: + realm: MyCustomRealm + auth_client_id: admin-cli + 
auth_keycloak_url: https://auth.example.com/auth + token: TOKEN + state: present + client_id: client1 + target_username: user1 + roles: + - name: role_name1 + id: role_id1 + - name: role_name2 + id: role_id2 + delegate_to: localhost + +- name: Unmap client role from a user + community.general.keycloak_user_rolemapping: + realm: MyCustomRealm + auth_client_id: admin-cli + auth_keycloak_url: https://auth.example.com/auth + auth_realm: master + auth_username: USERNAME + auth_password: PASSWORD + state: absent + client_id: client1 + uid: 70e3ae72-96b6-11e6-9056-9737fd4d0764 + roles: + - name: role_name1 + id: role_id1 + - name: role_name2 + id: role_id2 + delegate_to: localhost +""" + +RETURN = r""" +msg: + description: Message as to what action was taken. + returned: always + type: str + sample: "Role role1 assigned to user user1." + +proposed: + description: Representation of proposed client role mapping. + returned: always + type: dict + sample: {"clientId": "test"} + +existing: + description: + - Representation of existing client role mapping. + - The sample is truncated. + returned: always + type: dict + sample: + { + "adminUrl": "http://www.example.com/admin_url", + "attributes": { + "request.object.signature.alg": "RS256" + } + } + +end_state: + description: + - Representation of client role mapping after module execution. + - The sample is truncated. 
+ returned: on success + type: dict + sample: + { + "adminUrl": "http://www.example.com/admin_url", + "attributes": { + "request.object.signature.alg": "RS256" + } + } +""" + +from ansible_collections.community.general.plugins.module_utils.identity.keycloak.keycloak import KeycloakAPI, \ + keycloak_argument_spec, get_token, KeycloakError +from ansible.module_utils.basic import AnsibleModule + + +def main(): + """ + Module execution + + :return: + """ + argument_spec = keycloak_argument_spec() + + roles_spec = dict( + name=dict(type='str'), + id=dict(type='str'), + ) + + meta_args = dict( + state=dict(default='present', choices=['present', 'absent']), + realm=dict(default='master'), + uid=dict(type='str'), + target_username=dict(type='str'), + service_account_user_client_id=dict(type='str'), + cid=dict(type='str'), + client_id=dict(type='str'), + roles=dict(type='list', elements='dict', options=roles_spec), + ) + + argument_spec.update(meta_args) + + module = AnsibleModule(argument_spec=argument_spec, + supports_check_mode=True, + required_one_of=([['token', 'auth_realm', 'auth_username', 'auth_password', 'auth_client_id', 'auth_client_secret'], + ['uid', 'target_username', 'service_account_user_client_id']]), + required_together=([['auth_username', 'auth_password']]), + required_by={'refresh_token': 'auth_realm'}, + ) + + result = dict(changed=False, msg='', diff={}, proposed={}, existing={}, end_state={}) + + # Obtain access token, initialize API + try: + connection_header = get_token(module.params) + except KeycloakError as e: + module.fail_json(msg=str(e)) + + kc = KeycloakAPI(module, connection_header) + + realm = module.params.get('realm') + state = module.params.get('state') + cid = module.params.get('cid') + client_id = module.params.get('client_id') + uid = module.params.get('uid') + target_username = module.params.get('target_username') + service_account_user_client_id = module.params.get('service_account_user_client_id') + roles = 
module.params.get('roles') + + # Check the parameters + if uid is None and target_username is None and service_account_user_client_id is None: + module.fail_json(msg='Either the `target_username`, `uid` or `service_account_user_client_id` has to be specified.') + + # Get the potential missing parameters + if uid is None and service_account_user_client_id is None: + user_rep = kc.get_user_by_username(username=target_username, realm=realm) + if user_rep is not None: + uid = user_rep.get('id') + else: + module.fail_json(msg='Could not fetch user for username %s:' % target_username) + else: + if uid is None and target_username is None: + user_rep = kc.get_service_account_user_by_client_id(client_id=service_account_user_client_id, realm=realm) + if user_rep is not None: + uid = user_rep['id'] + else: + module.fail_json(msg='Could not fetch service-account-user for client_id %s:' % target_username) + + if cid is None and client_id is not None: + cid = kc.get_client_id(client_id=client_id, realm=realm) + if cid is None: + module.fail_json(msg='Could not fetch client %s:' % client_id) + if roles is None: + module.exit_json(msg="Nothing to do (no roles specified).") + else: + for role_index, role in enumerate(roles, start=0): + if role.get('name') is None and role.get('id') is None: + module.fail_json(msg='Either the `name` or `id` has to be specified on each role.') + # Fetch missing role_id + if role.get('id') is None: + if cid is None: + role_id = kc.get_realm_role(name=role.get('name'), realm=realm)['id'] + else: + role_id = kc.get_client_role_id_by_name(cid=cid, name=role.get('name'), realm=realm) + if role_id is not None: + role['id'] = role_id + else: + module.fail_json(msg='Could not fetch role %s for client_id %s or realm %s' % (role.get('name'), client_id, realm)) + # Fetch missing role_name + else: + if cid is None: + role['name'] = kc.get_realm_user_rolemapping_by_id(uid=uid, rid=role.get('id'), realm=realm)['name'] + else: + role['name'] = 
kc.get_client_user_rolemapping_by_id(uid=uid, cid=cid, rid=role.get('id'), realm=realm)['name'] + if role.get('name') is None: + module.fail_json(msg='Could not fetch role %s for client_id %s or realm %s' % (role.get('id'), client_id, realm)) + + # Get effective role mappings + if cid is None: + available_roles_before = kc.get_realm_user_available_rolemappings(uid=uid, realm=realm) + assigned_roles_before = kc.get_realm_user_composite_rolemappings(uid=uid, realm=realm) + else: + available_roles_before = kc.get_client_user_available_rolemappings(uid=uid, cid=cid, realm=realm) + assigned_roles_before = kc.get_client_user_composite_rolemappings(uid=uid, cid=cid, realm=realm) + + result['existing'] = assigned_roles_before + result['proposed'] = roles + + update_roles = [] + for role_index, role in enumerate(roles, start=0): + # Fetch roles to assign if state present + if state == 'present': + for available_role in available_roles_before: + if role.get('name') == available_role.get('name'): + update_roles.append({ + 'id': role.get('id'), + 'name': role.get('name'), + }) + # Fetch roles to remove if state absent + else: + for assigned_role in assigned_roles_before: + if role.get('name') == assigned_role.get('name'): + update_roles.append({ + 'id': role.get('id'), + 'name': role.get('name'), + }) + + if len(update_roles): + if state == 'present': + # Assign roles + result['changed'] = True + if module._diff: + result['diff'] = dict(before={"roles": assigned_roles_before}, after={"roles": update_roles}) + if module.check_mode: + module.exit_json(**result) + kc.add_user_rolemapping(uid=uid, cid=cid, role_rep=update_roles, realm=realm) + result['msg'] = 'Roles %s assigned to userId %s.' 
% (update_roles, uid) + if cid is None: + assigned_roles_after = kc.get_realm_user_composite_rolemappings(uid=uid, realm=realm) + else: + assigned_roles_after = kc.get_client_user_composite_rolemappings(uid=uid, cid=cid, realm=realm) + result['end_state'] = assigned_roles_after + module.exit_json(**result) + else: + # Remove mapping of role + result['changed'] = True + if module._diff: + result['diff'] = dict(before={"roles": assigned_roles_before}, after={"roles": update_roles}) + if module.check_mode: + module.exit_json(**result) + kc.delete_user_rolemapping(uid=uid, cid=cid, role_rep=update_roles, realm=realm) + result['msg'] = 'Roles %s removed from userId %s.' % (update_roles, uid) + if cid is None: + assigned_roles_after = kc.get_realm_user_composite_rolemappings(uid=uid, realm=realm) + else: + assigned_roles_after = kc.get_client_user_composite_rolemappings(uid=uid, cid=cid, realm=realm) + result['end_state'] = assigned_roles_after + module.exit_json(**result) + # Do nothing + else: + result['changed'] = False + result['msg'] = 'Nothing to do, roles %s are correctly mapped to user for username %s.' % (roles, target_username) + module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/keycloak_userprofile.py b/plugins/modules/keycloak_userprofile.py new file mode 100644 index 0000000000..a09ab8818b --- /dev/null +++ b/plugins/modules/keycloak_userprofile.py @@ -0,0 +1,734 @@ +#!/usr/bin/python + +# Copyright (c) Ansible project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + +DOCUMENTATION = r""" +module: keycloak_userprofile + +short_description: Allows managing Keycloak User Profiles + +description: + - This module allows you to create, update, or delete Keycloak User Profiles using the Keycloak API. You can also customize + the "Unmanaged Attributes" with it. 
+ - The names of module options are snake_cased versions of the camelCase ones found in the Keycloak API and its documentation + at U(https://www.keycloak.org/docs-api/24.0.5/rest-api/index.html). For compatibility reasons, the module also accepts + the camelCase versions of the options. +version_added: "9.4.0" + +attributes: + check_mode: + support: full + diff_mode: + support: full + action_group: + version_added: 10.2.0 + +options: + state: + description: + - State of the User Profile provider. + - On V(present), the User Profile provider is created if it does not yet exist, or updated with the parameters you provide. + - On V(absent), the User Profile provider is removed if it exists. + default: 'present' + type: str + choices: + - present + - absent + + parent_id: + description: + - The parent ID of the realm key. In practice the ID (name) of the realm. + aliases: + - parentId + - realm + type: str + required: true + + provider_id: + description: + - The name of the provider ID for the key (supported value is V(declarative-user-profile)). + aliases: + - providerId + choices: ['declarative-user-profile'] + default: 'declarative-user-profile' + type: str + + provider_type: + description: + - Component type for User Profile (only supported value is V(org.keycloak.userprofile.UserProfileProvider)). + aliases: + - providerType + choices: ['org.keycloak.userprofile.UserProfileProvider'] + default: org.keycloak.userprofile.UserProfileProvider + type: str + + config: + description: + - The configuration of the User Profile Provider. + type: dict + required: false + suboptions: + kc_user_profile_config: + description: + - Define a declarative User Profile. See EXAMPLES for more context. + aliases: + - kcUserProfileConfig + type: list + elements: dict + suboptions: + attributes: + description: + - A list of attributes to be included in the User Profile. + type: list + elements: dict + suboptions: + name: + description: + - The name of the attribute. 
+ type: str + required: true + + display_name: + description: + - The display name of the attribute. + aliases: + - displayName + type: str + required: true + + validations: + description: + - The validations to be applied to the attribute. + type: dict + suboptions: + length: + description: + - The length validation for the attribute. + type: dict + suboptions: + min: + description: + - The minimum length of the attribute. + type: int + max: + description: + - The maximum length of the attribute. + type: int + required: true + + email: + description: + - The email validation for the attribute. + type: dict + + username_prohibited_characters: + description: + - The prohibited characters validation for the username attribute. + type: dict + aliases: + - usernameProhibitedCharacters + + up_username_not_idn_homograph: + description: + - The validation to prevent IDN homograph attacks in usernames. + type: dict + aliases: + - upUsernameNotIdnHomograph + + person_name_prohibited_characters: + description: + - The prohibited characters validation for person name attributes. + type: dict + aliases: + - personNameProhibitedCharacters + + uri: + description: + - The URI validation for the attribute. + type: dict + + pattern: + description: + - The pattern validation for the attribute using regular expressions. + type: dict + + options: + description: + - Validation to ensure the attribute matches one of the provided options. + type: dict + + annotations: + description: + - Annotations for the attribute. + type: dict + + group: + description: + - Specifies the User Profile group where this attribute is added. + type: str + + permissions: + description: + - The permissions for viewing and editing the attribute. + type: dict + suboptions: + view: + description: + - The roles that can view the attribute. + - Supported values are V(admin) and V(user). + type: list + elements: str + default: + - admin + - user + + edit: + description: + - The roles that can edit the attribute. 
+ - Supported values are V(admin) and V(user). + type: list + elements: str + default: + - admin + - user + + multivalued: + description: + - Whether the attribute can have multiple values. + type: bool + default: false + + required: + description: + - The roles that require this attribute. + type: dict + suboptions: + roles: + description: + - The roles for which this attribute is required. + - Supported values are V(admin) and V(user). + type: list + elements: str + default: + - user + + groups: + description: + - A list of attribute groups to be included in the User Profile. + type: list + elements: dict + suboptions: + name: + description: + - The name of the group. + type: str + required: true + + display_header: + description: + - The display header for the group. + aliases: + - displayHeader + type: str + required: true + + display_description: + description: + - The display description for the group. + aliases: + - displayDescription + type: str + required: false + + annotations: + description: + - The annotations included in the group. + type: dict + required: false + + unmanaged_attribute_policy: + description: + - Policy for unmanaged attributes. + aliases: + - unmanagedAttributePolicy + type: str + choices: + - ENABLED + - ADMIN_EDIT + - ADMIN_VIEW + +notes: + - Currently, only a single V(declarative-user-profile) entry is supported for O(provider_id) (design of the Keyckoak API). + However, there can be multiple O(config.kc_user_profile_config[].attributes[]) entries. 
+extends_documentation_fragment: + - community.general.keycloak + - community.general.keycloak.actiongroup_keycloak + - community.general.attributes + +author: + - Eike Waldt (@yeoldegrove) +""" + +EXAMPLES = r""" +- name: Create a Declarative User Profile with default settings + community.general.keycloak_userprofile: + state: present + parent_id: master + config: + kc_user_profile_config: + - attributes: + - name: username + displayName: ${username} + validations: + length: + min: 3 + max: 255 + username_prohibited_characters: {} + up_username_not_idn_homograph: {} + annotations: {} + permissions: + view: + - admin + - user + edit: [] + multivalued: false + - name: email + displayName: ${email} + validations: + email: {} + length: + max: 255 + annotations: {} + required: + roles: + - user + permissions: + view: + - admin + - user + edit: [] + multivalued: false + - name: firstName + displayName: ${firstName} + validations: + length: + max: 255 + person_name_prohibited_characters: {} + annotations: {} + required: + roles: + - user + permissions: + view: + - admin + - user + edit: [] + multivalued: false + - name: lastName + displayName: ${lastName} + validations: + length: + max: 255 + person_name_prohibited_characters: {} + annotations: {} + required: + roles: + - user + permissions: + view: + - admin + - user + edit: [] + multivalued: false + groups: + - name: user-metadata + displayHeader: User metadata + displayDescription: Attributes, which refer to user metadata + annotations: {} + +- name: Delete a Keycloak User Profile Provider + keycloak_userprofile: + state: absent + parent_id: master + +# Unmanaged attributes are user attributes not explicitly defined in the User Profile +# configuration. By default, unmanaged attributes are "Disabled" and are not +# available from any context such as registration, account, and the +# administration console. 
By setting "Enabled", unmanaged attributes are fully +# recognized by the server and accessible through all contexts, useful if you are +# starting migrating an existing realm to the declarative User Profile +# and you don't have yet all user attributes defined in the User Profile configuration. +- name: Enable Unmanaged Attributes + community.general.keycloak_userprofile: + state: present + parent_id: master + config: + kc_user_profile_config: + - unmanagedAttributePolicy: ENABLED + +# By setting "Only administrators can write", unmanaged attributes can be managed +# only through the administration console and API, useful if you have already +# defined any custom attribute that can be managed by users but you are unsure +# about adding other attributes that should only be managed by administrators. +- name: Enable ADMIN_EDIT on Unmanaged Attributes + community.general.keycloak_userprofile: + state: present + parent_id: master + config: + kc_user_profile_config: + - unmanagedAttributePolicy: ADMIN_EDIT + +# By setting `Only administrators can view`, unmanaged attributes are read-only +# and only available through the administration console and API. +- name: Enable ADMIN_VIEW on Unmanaged Attributes + community.general.keycloak_userprofile: + state: present + parent_id: master + config: + kc_user_profile_config: + - unmanagedAttributePolicy: ADMIN_VIEW +""" + +RETURN = r""" +msg: + description: The output message generated by the module. + returned: always + type: str + sample: UserProfileProvider created successfully +data: + description: The data returned by the Keycloak API. 
+ returned: when state is present + type: dict +""" + +from ansible_collections.community.general.plugins.module_utils.identity.keycloak.keycloak import KeycloakAPI, camel, \ + keycloak_argument_spec, get_token, KeycloakError +from ansible.module_utils.basic import AnsibleModule +from copy import deepcopy +from urllib.parse import urlencode +import json + + +def remove_null_values(data): + if isinstance(data, dict): + # Recursively remove null values from dictionaries + return {k: remove_null_values(v) for k, v in data.items() if v is not None} + elif isinstance(data, list): + # Recursively remove null values from lists + return [remove_null_values(item) for item in data if item is not None] + else: + # Return the data if it is neither a dictionary nor a list + return data + + +def camel_recursive(data): + if isinstance(data, dict): + # Convert keys to camelCase and apply recursively + return {camel(k): camel_recursive(v) for k, v in data.items()} + elif isinstance(data, list): + # Apply camelCase conversion to each item in the list + return [camel_recursive(item) for item in data] + else: + # Return the data as-is if it is not a dict or list + return data + + +def main(): + argument_spec = keycloak_argument_spec() + + meta_args = dict( + state=dict(type='str', choices=['present', 'absent'], default='present'), + parent_id=dict(type='str', aliases=['parentId', 'realm'], required=True), + provider_id=dict(type='str', aliases=['providerId'], default='declarative-user-profile', choices=['declarative-user-profile']), + provider_type=dict( + type='str', + aliases=['providerType'], + default='org.keycloak.userprofile.UserProfileProvider', + choices=['org.keycloak.userprofile.UserProfileProvider'] + ), + config=dict( + type='dict', + options={ + 'kc_user_profile_config': dict( + type='list', + aliases=['kcUserProfileConfig'], + elements='dict', + options={ + 'attributes': dict( + type='list', + elements='dict', + options={ + 'name': dict(type='str', required=True), + 
'display_name': dict(type='str', aliases=['displayName'], required=True), + 'validations': dict( + type='dict', + options={ + 'length': dict( + type='dict', + options={ + 'min': dict(type='int'), + 'max': dict(type='int', required=True) + } + ), + 'email': dict(type='dict'), + 'username_prohibited_characters': dict(type='dict', aliases=['usernameProhibitedCharacters']), + 'up_username_not_idn_homograph': dict(type='dict', aliases=['upUsernameNotIdnHomograph']), + 'person_name_prohibited_characters': dict(type='dict', aliases=['personNameProhibitedCharacters']), + 'uri': dict(type='dict'), + 'pattern': dict(type='dict'), + 'options': dict(type='dict') + } + ), + 'annotations': dict(type='dict'), + 'group': dict(type='str'), + 'permissions': dict( + type='dict', + options={ + 'view': dict(type='list', elements='str', default=['admin', 'user']), + 'edit': dict(type='list', elements='str', default=['admin', 'user']) + } + ), + 'multivalued': dict(type='bool', default=False), + 'required': dict( + type='dict', + options={ + 'roles': dict(type='list', elements='str', default=['user']) + } + ) + } + ), + 'groups': dict( + type='list', + elements='dict', + options={ + 'name': dict(type='str', required=True), + 'display_header': dict(type='str', aliases=['displayHeader'], required=True), + 'display_description': dict(type='str', aliases=['displayDescription']), + 'annotations': dict(type='dict') + } + ), + 'unmanaged_attribute_policy': dict( + type='str', + aliases=['unmanagedAttributePolicy'], + choices=['ENABLED', 'ADMIN_EDIT', 'ADMIN_VIEW'], + + ) + } + ) + } + ) + ) + + argument_spec.update(meta_args) + + module = AnsibleModule(argument_spec=argument_spec, + supports_check_mode=True, + required_one_of=([['token', 'auth_realm', 'auth_username', 'auth_password', 'auth_client_id', 'auth_client_secret']]), + required_together=([['auth_username', 'auth_password']]), + required_by={'refresh_token': 'auth_realm'}, + ) + + # Initialize the result object. 
Only "changed" seems to have special + # meaning for Ansible. + result = dict(changed=False, msg='', end_state={}, diff=dict(before={}, after={})) + + # This will include the current state of the realm userprofile if it is already + # present. This is only used for diff-mode. + before_realm_userprofile = {} + before_realm_userprofile['config'] = {} + + # Obtain access token, initialize API + try: + connection_header = get_token(module.params) + except KeycloakError as e: + module.fail_json(msg=str(e)) + + kc = KeycloakAPI(module, connection_header) + + params_to_ignore = list(keycloak_argument_spec().keys()) + ["state"] + + # Filter and map the parameters names that apply to the role + component_params = [ + x + for x in module.params + if x not in params_to_ignore and module.params.get(x) is not None + ] + + # Build a proposed changeset from parameters given to this module + changeset = {} + + # Build the changeset with proper JSON serialization for kc_user_profile_config + config = module.params.get('config') + changeset['config'] = {} + + # Generate a JSON payload for Keycloak Admin API from the module + # parameters. Parameters that do not belong to the JSON payload (e.g. + # "state" or "auth_keycloal_url") have been filtered away earlier (see + # above). + # + # This loop converts Ansible module parameters (snake-case) into + # Keycloak-compatible format (camel-case). For example proider_id + # becomes providerId. It also handles some special cases, e.g. aliases. 
+ for component_param in component_params: + # realm/parent_id parameter + if component_param == 'realm' or component_param == 'parent_id': + changeset['parent_id'] = module.params.get(component_param) + changeset.pop(component_param, None) + # complex parameters in config suboptions + elif component_param == 'config': + for config_param in config: + # special parameter kc_user_profile_config + if config_param in ('kcUserProfileConfig', 'kc_user_profile_config'): + config_param_org = config_param + # rename parameter to be accepted by Keycloak API + config_param = 'kc.user.profile.config' + # make sure no null values are passed to Keycloak API + kc_user_profile_config = remove_null_values(config[config_param_org]) + changeset[camel(component_param)][config_param] = [] + if len(kc_user_profile_config) > 0: + # convert aliases to camelCase + kc_user_profile_config = camel_recursive(kc_user_profile_config) + # rename validations to be accepted by Keycloak API + if 'attributes' in kc_user_profile_config[0]: + for attribute in kc_user_profile_config[0]['attributes']: + if 'validations' in attribute: + if 'usernameProhibitedCharacters' in attribute['validations']: + attribute['validations']['username-prohibited-characters'] = ( + attribute['validations'].pop('usernameProhibitedCharacters') + ) + if 'upUsernameNotIdnHomograph' in attribute['validations']: + attribute['validations']['up-username-not-idn-homograph'] = ( + attribute['validations'].pop('upUsernameNotIdnHomograph') + ) + if 'personNameProhibitedCharacters' in attribute['validations']: + attribute['validations']['person-name-prohibited-characters'] = ( + attribute['validations'].pop('personNameProhibitedCharacters') + ) + changeset[camel(component_param)][config_param].append(kc_user_profile_config[0]) + # usual camelCase parameters + else: + changeset[camel(component_param)][camel(config_param)] = [] + raw_value = module.params.get(component_param)[config_param] + if isinstance(raw_value, bool): + value = 
str(raw_value).lower() + else: + value = raw_value # Directly use the raw value + changeset[camel(component_param)][camel(config_param)].append(value) + # usual parameters + else: + new_param_value = module.params.get(component_param) + changeset[camel(component_param)] = new_param_value + + # Make it easier to refer to current module parameters + state = module.params.get('state') + enabled = module.params.get('enabled') + parent_id = module.params.get('parent_id') + provider_type = module.params.get('provider_type') + provider_id = module.params.get('provider_id') + + # Make a deep copy of the changeset. This is used when determining + # changes to the current state. + changeset_copy = deepcopy(changeset) + + # Get a list of all Keycloak components that are of userprofile provider type. + realm_userprofiles = kc.get_components(urlencode(dict(type=provider_type)), parent_id) + + # If this component is present get its userprofile ID. Confusingly the userprofile ID is + # also known as the Provider ID. + userprofile_id = None + + # Track individual parameter changes + changes = "" + + # This tells Ansible whether the userprofile was changed (added, removed, modified) + result['changed'] = False + + # Loop through the list of components. If we encounter a component whose + # name matches the value of the name parameter then assume the userprofile is + # already present.
+ for userprofile in realm_userprofiles: + if provider_id == "declarative-user-profile": + userprofile_id = userprofile['id'] + changeset['id'] = userprofile_id + changeset_copy['id'] = userprofile_id + + # keycloak returns kc.user.profile.config as a single JSON formatted string, so we have to deserialize it + if 'config' in userprofile and 'kc.user.profile.config' in userprofile['config']: + userprofile['config']['kc.user.profile.config'][0] = json.loads(userprofile['config']['kc.user.profile.config'][0]) + + # Compare top-level parameters + for param, value in changeset.items(): + before_realm_userprofile[param] = userprofile[param] + + if changeset_copy[param] != userprofile[param] and param != 'config': + changes += "%s: %s -> %s, " % (param, userprofile[param], changeset_copy[param]) + result['changed'] = True + + # Compare parameters under the "config" userprofile + for p, v in changeset_copy['config'].items(): + before_realm_userprofile['config'][p] = userprofile['config'][p] + if changeset_copy['config'][p] != userprofile['config'][p]: + changes += "config.%s: %s -> %s, " % (p, userprofile['config'][p], changeset_copy['config'][p]) + result['changed'] = True + + # Check all the possible states of the resource and do what is needed to + # converge current state with desired state (create, update or delete + # the userprofile). 
+ + # keycloak expects kc.user.profile.config as a single JSON formatted string, so we have to serialize it + if 'config' in changeset and 'kc.user.profile.config' in changeset['config']: + changeset['config']['kc.user.profile.config'][0] = json.dumps(changeset['config']['kc.user.profile.config'][0]) + if userprofile_id and state == 'present': + if result['changed']: + if module._diff: + result['diff'] = dict(before=before_realm_userprofile, after=changeset_copy) + + if module.check_mode: + result['msg'] = "Userprofile %s would be changed: %s" % (provider_id, changes.strip(", ")) + else: + kc.update_component(changeset, parent_id) + result['msg'] = "Userprofile %s changed: %s" % (provider_id, changes.strip(", ")) + else: + result['msg'] = "Userprofile %s was in sync" % (provider_id) + + result['end_state'] = changeset_copy + elif userprofile_id and state == 'absent': + if module._diff: + result['diff'] = dict(before=before_realm_userprofile, after={}) + + if module.check_mode: + result['changed'] = True + result['msg'] = "Userprofile %s would be deleted" % (provider_id) + else: + kc.delete_component(userprofile_id, parent_id) + result['changed'] = True + result['msg'] = "Userprofile %s deleted" % (provider_id) + + result['end_state'] = {} + elif not userprofile_id and state == 'present': + if module._diff: + result['diff'] = dict(before={}, after=changeset_copy) + + if module.check_mode: + result['changed'] = True + result['msg'] = "Userprofile %s would be created" % (provider_id) + else: + kc.create_component(changeset, parent_id) + result['changed'] = True + result['msg'] = "Userprofile %s created" % (provider_id) + + result['end_state'] = changeset_copy + elif not userprofile_id and state == 'absent': + result['changed'] = False + result['msg'] = "Userprofile %s not present" % (provider_id) + result['end_state'] = {} + + module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/keyring.py b/plugins/modules/keyring.py new 
file mode 100644 index 0000000000..a201d214c2 --- /dev/null +++ b/plugins/modules/keyring.py @@ -0,0 +1,276 @@ +#!/usr/bin/python + +# Copyright (c) 2022, Alexander Hussey +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later +""" +Ansible Module - community.general.keyring +""" + +from __future__ import annotations + + +DOCUMENTATION = r""" +module: keyring +version_added: 5.2.0 +author: + - Alexander Hussey (@ahussey-redhat) +short_description: Set or delete a passphrase using the Operating System's native keyring +description: >- + This module uses the L(keyring Python library, https://pypi.org/project/keyring/) to set or delete passphrases for a given + service and username from the OS' native keyring. +requirements: + - keyring (Python library) + - gnome-keyring (application - required for headless Gnome keyring access) + - dbus-run-session (application - required for headless Gnome keyring access) +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: none +options: + service: + description: The name of the service. + required: true + type: str + username: + description: The user belonging to the service. + required: true + type: str + user_password: + description: The password to set. + required: false + type: str + aliases: + - password + keyring_password: + description: Password to unlock keyring. + required: true + type: str + state: + description: Whether the password should exist. 
+ required: false + default: present + type: str + choices: + - present + - absent +""" + +EXAMPLES = r""" +- name: Set a password for test/test1 + community.general.keyring: + service: test + username: test1 + user_password: "{{ user_password }}" + keyring_password: "{{ keyring_password }}" + +- name: Delete the password for test/test1 + community.general.keyring: + service: test + username: test1 + user_password: "{{ user_password }}" + keyring_password: "{{ keyring_password }}" + state: absent +""" + +try: + from shlex import quote +except ImportError: + from pipes import quote +import traceback + +from ansible.module_utils.basic import AnsibleModule, missing_required_lib + +try: + import keyring + + HAS_KEYRING = True + KEYRING_IMP_ERR = None +except ImportError: + HAS_KEYRING = False + KEYRING_IMP_ERR = traceback.format_exc() + + +def del_passphrase(module): + """ + Attempt to delete a passphrase in the keyring using the Python API and fallback to using a shell. + """ + if module.check_mode: + return None + try: + keyring.delete_password(module.params["service"], module.params["username"]) + return None + except keyring.errors.KeyringLocked: + delete_argument = ( + 'echo "%s" | gnome-keyring-daemon --unlock\nkeyring del %s %s\n' + % ( + quote(module.params["keyring_password"]), + quote(module.params["service"]), + quote(module.params["username"]), + ) + ) + dummy, dummy, stderr = module.run_command( + "dbus-run-session -- /bin/bash", + use_unsafe_shell=True, + data=delete_argument, + encoding=None, + ) + + if not stderr.decode("UTF-8"): + return None + return stderr.decode("UTF-8") + + +def set_passphrase(module): + """ + Attempt to set passphrase in the keyring using the Python API and fallback to using a shell. 
+ """ + if module.check_mode: + return None + try: + keyring.set_password( + module.params["service"], + module.params["username"], + module.params["user_password"], + ) + return None + except keyring.errors.KeyringLocked: + set_argument = ( + 'echo "%s" | gnome-keyring-daemon --unlock\nkeyring set %s %s\n%s\n' + % ( + quote(module.params["keyring_password"]), + quote(module.params["service"]), + quote(module.params["username"]), + quote(module.params["user_password"]), + ) + ) + dummy, dummy, stderr = module.run_command( + "dbus-run-session -- /bin/bash", + use_unsafe_shell=True, + data=set_argument, + encoding=None, + ) + if not stderr.decode("UTF-8"): + return None + return stderr.decode("UTF-8") + + +def get_passphrase(module): + """ + Attempt to retrieve passphrase from keyring using the Python API and fallback to using a shell. + """ + try: + passphrase = keyring.get_password( + module.params["service"], module.params["username"] + ) + return passphrase + except keyring.errors.KeyringLocked: + pass + except keyring.errors.InitError: + pass + except AttributeError: + pass + get_argument = 'echo "%s" | gnome-keyring-daemon --unlock\nkeyring get %s %s\n' % ( + quote(module.params["keyring_password"]), + quote(module.params["service"]), + quote(module.params["username"]), + ) + dummy, stdout, dummy = module.run_command( + "dbus-run-session -- /bin/bash", + use_unsafe_shell=True, + data=get_argument, + encoding=None, + ) + try: + return stdout.decode("UTF-8").splitlines()[1] # Only return the line containing the password + except IndexError: + return None + + +def run_module(): + """ + Attempts to retrieve a passphrase from a keyring. 
+ """ + result = dict( + changed=False, + msg="", + ) + + module_args = dict( + service=dict(type="str", required=True), + username=dict(type="str", required=True), + keyring_password=dict(type="str", required=True, no_log=True), + user_password=dict( + type="str", no_log=True, aliases=["password"] + ), + state=dict( + type="str", default="present", choices=["absent", "present"] + ), + ) + + module = AnsibleModule(argument_spec=module_args, supports_check_mode=True) + + if not HAS_KEYRING: + module.fail_json(msg=missing_required_lib("keyring"), exception=KEYRING_IMP_ERR) + + passphrase = get_passphrase(module) + if module.params["state"] == "present": + if passphrase is not None: + if passphrase == module.params["user_password"]: + result["msg"] = "Passphrase already set for %s@%s" % ( + module.params["service"], + module.params["username"], + ) + if passphrase != module.params["user_password"]: + set_result = set_passphrase(module) + if set_result is None: + result["changed"] = True + result["msg"] = "Passphrase has been updated for %s@%s" % ( + module.params["service"], + module.params["username"], + ) + if set_result is not None: + module.fail_json(msg=set_result) + if passphrase is None: + set_result = set_passphrase(module) + if set_result is None: + result["changed"] = True + result["msg"] = "Passphrase has been updated for %s@%s" % ( + module.params["service"], + module.params["username"], + ) + if set_result is not None: + module.fail_json(msg=set_result) + + if module.params["state"] == "absent": + if not passphrase: + result["result"] = "Passphrase already absent for %s@%s" % ( + module.params["service"], + module.params["username"], + ) + if passphrase: + del_result = del_passphrase(module) + if del_result is None: + result["changed"] = True + result["msg"] = "Passphrase has been removed for %s@%s" % ( + module.params["service"], + module.params["username"], + ) + if del_result is not None: + module.fail_json(msg=del_result) + + 
module.exit_json(**result) + + +def main(): + """ + main module loop + """ + run_module() + + +if __name__ == "__main__": + main() diff --git a/plugins/modules/keyring_info.py b/plugins/modules/keyring_info.py new file mode 100644 index 0000000000..fb186c8e44 --- /dev/null +++ b/plugins/modules/keyring_info.py @@ -0,0 +1,153 @@ +#!/usr/bin/python + +# Copyright (c) 2022, Alexander Hussey +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later +""" +Ansible Module - community.general.keyring_info +""" + +from __future__ import annotations + + +DOCUMENTATION = r""" +module: keyring_info +version_added: 5.2.0 +author: + - Alexander Hussey (@ahussey-redhat) +short_description: Get a passphrase using the Operating System's native keyring +description: >- + This module uses the L(keyring Python library, https://pypi.org/project/keyring/) to retrieve passphrases for a given service + and username from the OS' native keyring. +requirements: + - keyring (Python library) + - gnome-keyring (application - required for headless Linux keyring access) + - dbus-run-session (application - required for headless Linux keyring access) +extends_documentation_fragment: + - community.general.attributes + - community.general.attributes.info_module +options: + service: + description: The name of the service. + required: true + type: str + username: + description: The user belonging to the service. + required: true + type: str + keyring_password: + description: Password to unlock keyring. 
+ required: true + type: str +""" + +EXAMPLES = r""" +- name: Retrieve password for service_name/user_name + community.general.keyring_info: + service: test + username: test1 + keyring_password: "{{ keyring_password }}" + register: test_password + +- name: Display password + ansible.builtin.debug: + msg: "{{ test_password.passphrase }}" +""" + +RETURN = r""" +passphrase: + description: A string containing the password. + returned: success and the password exists + type: str + sample: Password123 +""" + +try: + from shlex import quote +except ImportError: + from pipes import quote +import traceback + +from ansible.module_utils.basic import AnsibleModule, missing_required_lib + +try: + import keyring + + HAS_KEYRING = True + KEYRING_IMP_ERR = None +except ImportError: + HAS_KEYRING = False + KEYRING_IMP_ERR = traceback.format_exc() + + +def _alternate_retrieval_method(module): + get_argument = 'echo "%s" | gnome-keyring-daemon --unlock\nkeyring get %s %s\n' % ( + quote(module.params["keyring_password"]), + quote(module.params["service"]), + quote(module.params["username"]), + ) + dummy, stdout, dummy = module.run_command( + "dbus-run-session -- /bin/bash", + use_unsafe_shell=True, + data=get_argument, + encoding=None, + ) + try: + return stdout.decode("UTF-8").splitlines()[1] + except IndexError: + return None + + +def run_module(): + """ + Attempts to retrieve a passphrase from a keyring. 
+ """ + result = dict(changed=False, msg="") + + module_args = dict( + service=dict(type="str", required=True), + username=dict(type="str", required=True), + keyring_password=dict(type="str", required=True, no_log=True), + ) + + module = AnsibleModule(argument_spec=module_args, supports_check_mode=True) + + if not HAS_KEYRING: + module.fail_json(msg=missing_required_lib("keyring"), exception=KEYRING_IMP_ERR) + try: + passphrase = keyring.get_password( + module.params["service"], module.params["username"] + ) + except keyring.errors.KeyringLocked: + pass + except keyring.errors.InitError: + pass + except AttributeError: + pass + + if passphrase is None: + passphrase = _alternate_retrieval_method(module) + + if passphrase is not None: + result["msg"] = "Successfully retrieved password for %s@%s" % ( + module.params["service"], + module.params["username"], + ) + result["passphrase"] = passphrase + if passphrase is None: + result["msg"] = "Password for %s@%s does not exist." % ( + module.params["service"], + module.params["username"], + ) + module.exit_json(**result) + + +def main(): + """ + main module loop + """ + run_module() + + +if __name__ == "__main__": + main() diff --git a/plugins/modules/database/misc/kibana_plugin.py b/plugins/modules/kibana_plugin.py similarity index 75% rename from plugins/modules/database/misc/kibana_plugin.py rename to plugins/modules/kibana_plugin.py index db5091e400..b464d363bb 100644 --- a/plugins/modules/database/misc/kibana_plugin.py +++ b/plugins/modules/kibana_plugin.py @@ -1,73 +1,78 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- # Copyright (c) 2016, Thierno IB. BARRY @barryib # Sponsored by Polyconseil http://polyconseil.fr. 
# -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: kibana_plugin short_description: Manage Kibana plugins description: - - This module can be used to manage Kibana plugins. + - This module can be used to manage Kibana plugins. author: Thierno IB. BARRY (@barryib) +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: none options: - name: - description: + name: + description: - Name of the plugin to install. - required: True - type: str - state: - description: + required: true + type: str + state: + description: - Desired state of a plugin. - choices: ["present", "absent"] - default: present - type: str - url: - description: + choices: ["present", "absent"] + default: present + type: str + url: + description: - Set exact URL to download the plugin from. - - For local file, prefix its absolute path with file:// - type: str - timeout: - description: - - "Timeout setting: 30s, 1m, 1h etc." - default: 1m - type: str - plugin_bin: - description: + - For local file, prefix its absolute path with C(file://). + type: str + timeout: + description: + - 'Timeout setting: V(30s), V(1m), V(1h) and so on.' + default: 1m + type: str + plugin_bin: + description: - Location of the Kibana binary. - default: /opt/kibana/bin/kibana - type: path - plugin_dir: - description: + default: /opt/kibana/bin/kibana + type: path + plugin_dir: + description: - Your configured plugin directory specified in Kibana. 
- default: /opt/kibana/installedPlugins/ - type: path - version: - description: + default: /opt/kibana/installedPlugins/ + type: path + version: + description: - Version of the plugin to be installed. - - If plugin exists with previous version, plugin will NOT be updated unless C(force) is set to yes. - type: str - force: - description: - - Delete and re-install the plugin. Can be useful for plugins update. - type: bool - default: false - allow_root: - description: + - If the plugin is installed with in a previous version, it is B(not) updated unless O(force=true). + type: str + force: + description: + - Delete and re-install the plugin. It can be useful for plugins update. + type: bool + default: false + allow_root: + description: - Whether to allow C(kibana) and C(kibana-plugin) to be run as root. Passes the C(--allow-root) flag to these commands. - type: bool - default: false - version_added: 2.3.0 -''' + type: bool + default: false + version_added: 2.3.0 +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: Install Elasticsearch head plugin community.general.kibana_plugin: state: present @@ -83,38 +88,30 @@ EXAMPLES = ''' community.general.kibana_plugin: state: absent name: elasticsearch/marvel -''' +""" -RETURN = ''' +RETURN = r""" cmd: - description: the launched command during plugin management (install / remove) - returned: success - type: str + description: The launched command during plugin management (install / remove). + returned: success + type: str name: - description: the plugin name to install or remove - returned: success - type: str + description: The plugin name to install or remove. + returned: success + type: str url: - description: the url from where the plugin is installed from - returned: success - type: str + description: The URL from where the plugin is installed from. 
+ returned: success + type: str timeout: - description: the timeout for plugin download - returned: success - type: str -stdout: - description: the command stdout - returned: success - type: str -stderr: - description: the command stderr - returned: success - type: str + description: The timeout for plugin download. + returned: success + type: str state: - description: the state for the managed plugin - returned: success - type: str -''' + description: The state for the managed plugin. + returned: success + type: str +""" import os from ansible.module_utils.basic import AnsibleModule @@ -229,11 +226,11 @@ def main(): argument_spec=dict( name=dict(required=True), state=dict(default="present", choices=list(PACKAGE_STATE_MAP.keys())), - url=dict(default=None), + url=dict(), timeout=dict(default="1m"), plugin_bin=dict(default="/opt/kibana/bin/kibana", type="path"), plugin_dir=dict(default="/opt/kibana/installedPlugins/", type="path"), - version=dict(default=None), + version=dict(), force=dict(default=False, type="bool"), allow_root=dict(default=False, type="bool"), ), diff --git a/plugins/modules/krb_ticket.py b/plugins/modules/krb_ticket.py new file mode 100644 index 0000000000..995319e715 --- /dev/null +++ b/plugins/modules/krb_ticket.py @@ -0,0 +1,381 @@ +#!/usr/bin/python +# Copyright (c) 2024 Alexander Bakanovskii +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + + +DOCUMENTATION = r""" +module: krb_ticket +short_description: Kerberos utils for managing tickets +version_added: 10.0.0 +description: + - Manage Kerberos tickets with C(kinit), C(klist) and C(kdestroy) base utilities. + - See U(https://web.mit.edu/kerberos/krb5-1.12/doc/user/user_commands/index.html) for reference. 
+author: "Alexander Bakanovskii (@abakanovskii)" +attributes: + check_mode: + support: full + diff_mode: + support: none +options: + password: + description: + - Principal password. + - It is required to specify O(password) or O(keytab_path). + type: str + principal: + description: + - The principal name. + - If not set, the user running this module is used. + type: str + state: + description: + - The state of the Kerberos ticket. + - V(present) is equivalent of C(kinit) command. + - V(absent) is equivalent of C(kdestroy) command. + type: str + default: present + choices: ["present", "absent"] + kdestroy_all: + description: + - When O(state=absent) destroys all credential caches in collection. + - Equivalent of running C(kdestroy -A). + type: bool + cache_name: + description: + - Use O(cache_name) as the ticket cache name and location. + - If this option is not used, the default cache name and location are used. + - The default credentials cache may vary between systems. + - If not set the value of E(KRB5CCNAME) environment variable is used instead, its value is used to name the default + ticket cache. + type: str + lifetime: + description: + - Requests a ticket with the lifetime, if the O(lifetime) is not specified, the default ticket lifetime is used. + - Specifying a ticket lifetime longer than the maximum ticket lifetime (configured by each site) does not override the + configured maximum ticket lifetime. + - 'The value for O(lifetime) must be followed by one of the following suffixes: V(s) - seconds, V(m) - minutes, V(h) + - hours, V(d) - days.' + - You cannot mix units; a value of V(3h30m) results in an error. + - See U(https://web.mit.edu/kerberos/krb5-1.12/doc/basic/date_format.html) for reference. + type: str + start_time: + description: + - Requests a postdated ticket. + - Postdated tickets are issued with the invalid flag set, and need to be resubmitted to the KDC for validation before + use. 
+ - O(start_time) specifies the duration of the delay before the ticket can become valid. + - You can use absolute time formats, for example V(July 27, 2012 at 20:30) you would need to set O(start_time=20120727203000). + - You can also use time duration format similar to O(lifetime) or O(renewable). + - See U(https://web.mit.edu/kerberos/krb5-1.12/doc/basic/date_format.html) for reference. + type: str + renewable: + description: + - Requests renewable tickets, with a total lifetime equal to O(renewable). + - 'The value for O(renewable) must be followed by one of the following delimiters: V(s) - seconds, V(m) - minutes, V(h) + - hours, V(d) - days.' + - You cannot mix units; a value of V(3h30m) results in an error. + - See U(https://web.mit.edu/kerberos/krb5-1.12/doc/basic/date_format.html) for reference. + type: str + forwardable: + description: + - Request forwardable or non-forwardable tickets. + type: bool + proxiable: + description: + - Request proxiable or non-proxiable tickets. + type: bool + address_restricted: + description: + - Request tickets restricted to the host's local address or non-restricted. + type: bool + anonymous: + description: + - Requests anonymous processing. + type: bool + canonicalization: + description: + - Requests canonicalization of the principal name, and allows the KDC to reply with a different client principal from + the one requested. + type: bool + enterprise: + description: + - Treats the principal name as an enterprise name (implies the O(canonicalization) option). + type: bool + renewal: + description: + - Requests renewal of the ticket-granting ticket. + - Note that an expired ticket cannot be renewed, even if the ticket is still within its renewable life. + type: bool + validate: + description: + - Requests that the ticket-granting ticket in the cache (with the invalid flag set) be passed to the KDC for validation. + - If the ticket is within its requested time range, the cache is replaced with the validated ticket.
+ type: bool + keytab: + description: + - Requests a ticket, obtained from a key in the local host's keytab. + - If O(keytab_path) is not specified it tries to use default client keytab path (C(-i) option). + type: bool + keytab_path: + description: + - Use when O(keytab=true) to specify path to a keytab file. + - It is required to specify O(password) or O(keytab_path). + type: path +requirements: + - krb5-user and krb5-config packages +extends_documentation_fragment: + - community.general.attributes +""" + +EXAMPLES = r""" +- name: Get Kerberos ticket using default principal + community.general.krb_ticket: + password: some_password + +- name: Get Kerberos ticket using keytab + community.general.krb_ticket: + keytab: true + keytab_path: /etc/ipa/file.keytab + +- name: Get Kerberos ticket with a lifetime of 7 days + community.general.krb_ticket: + password: some_password + lifetime: 7d + +- name: Get Kerberos ticket with a starting time of July 2, 2024, 1:35:30 p.m. + community.general.krb_ticket: + password: some_password + start_time: "240702133530" + +- name: Get Kerberos ticket using principal name + community.general.krb_ticket: + password: some_password + principal: admin + +- name: Get Kerberos ticket using principal with realm + community.general.krb_ticket: + password: some_password + principal: admin@IPA.TEST + +- name: Check for existence by ticket cache + community.general.krb_ticket: + cache_name: KEYRING:persistent:0:0 + +- name: Make sure default ticket is destroyed + community.general.krb_ticket: + state: absent + +- name: Make sure specific ticket destroyed by principal + community.general.krb_ticket: + state: absent + principal: admin@IPA.TEST + +- name: Make sure specific ticket destroyed by cache_name + community.general.krb_ticket: + state: absent + cache_name: KEYRING:persistent:0:0 + +- name: Make sure all tickets are destroyed + community.general.krb_ticket: + state: absent + kdestroy_all: true +""" + +from ansible.module_utils.basic import 
AnsibleModule, env_fallback +from ansible_collections.community.general.plugins.module_utils.cmd_runner import CmdRunner, cmd_runner_fmt + + +class IPAKeytab(object): + def __init__(self, module, **kwargs): + self.module = module + self.password = kwargs['password'] + self.principal = kwargs['principal'] + self.state = kwargs['state'] + self.kdestroy_all = kwargs['kdestroy_all'] + self.cache_name = kwargs['cache_name'] + self.start_time = kwargs['start_time'] + self.renewable = kwargs['renewable'] + self.forwardable = kwargs['forwardable'] + self.proxiable = kwargs['proxiable'] + self.address_restricted = kwargs['address_restricted'] + self.canonicalization = kwargs['canonicalization'] + self.enterprise = kwargs['enterprise'] + self.renewal = kwargs['renewal'] + self.validate = kwargs['validate'] + self.keytab = kwargs['keytab'] + self.keytab_path = kwargs['keytab_path'] + + self.kinit = CmdRunner( + module, + command='kinit', + arg_formats=dict( + lifetime=cmd_runner_fmt.as_opt_val('-l'), + start_time=cmd_runner_fmt.as_opt_val('-s'), + renewable=cmd_runner_fmt.as_opt_val('-r'), + forwardable=cmd_runner_fmt.as_bool('-f', '-F', ignore_none=True), + proxiable=cmd_runner_fmt.as_bool('-p', '-P', ignore_none=True), + address_restricted=cmd_runner_fmt.as_bool('-a', '-A', ignore_none=True), + anonymous=cmd_runner_fmt.as_bool('-n'), + canonicalization=cmd_runner_fmt.as_bool('-C'), + enterprise=cmd_runner_fmt.as_bool('-E'), + renewal=cmd_runner_fmt.as_bool('-R'), + validate=cmd_runner_fmt.as_bool('-v'), + keytab=cmd_runner_fmt.as_bool('-k'), + keytab_path=cmd_runner_fmt.as_func(lambda v: ['-t', v] if v else ['-i']), + cache_name=cmd_runner_fmt.as_opt_val('-c'), + principal=cmd_runner_fmt.as_list(), + ) + ) + + self.kdestroy = CmdRunner( + module, + command='kdestroy', + arg_formats=dict( + kdestroy_all=cmd_runner_fmt.as_bool('-A'), + cache_name=cmd_runner_fmt.as_opt_val('-c'), + principal=cmd_runner_fmt.as_opt_val('-p'), + ) + ) + + self.klist = CmdRunner( + module, + 
command='klist', + arg_formats=dict( + show_list=cmd_runner_fmt.as_bool('-l'), + ) + ) + + def exec_kinit(self): + params = dict(self.module.params) + with self.kinit( + "lifetime start_time renewable forwardable proxiable address_restricted anonymous " + "canonicalization enterprise renewal validate keytab keytab_path cache_name principal", + check_rc=True, + data=self.password, + ) as ctx: + rc, out, err = ctx.run(**params) + return out + + def exec_kdestroy(self): + params = dict(self.module.params) + with self.kdestroy( + "kdestroy_all cache_name principal", + check_rc=True + ) as ctx: + rc, out, err = ctx.run(**params) + return out + + def exec_klist(self, show_list): + # Use chech_rc = False because + # If no tickets present, klist command will always return rc = 1 + params = dict(show_list=show_list) + with self.klist( + "show_list", + check_rc=False + ) as ctx: + rc, out, err = ctx.run(**params) + return rc, out, err + + def check_ticket_present(self): + ticket_present = True + show_list = False + + if not self.principal and not self.cache_name: + rc, out, err = self.exec_klist(show_list) + if rc != 0: + ticket_present = False + else: + show_list = True + rc, out, err = self.exec_klist(show_list) + if self.principal and self.principal not in str(out): + ticket_present = False + if self.cache_name and self.cache_name not in str(out): + ticket_present = False + + return ticket_present + + +def main(): + arg_spec = dict( + principal=dict(type='str'), + password=dict(type='str', no_log=True), + state=dict(default='present', choices=['present', 'absent']), + kdestroy_all=dict(type='bool'), + cache_name=dict(type='str', fallback=(env_fallback, ['KRB5CCNAME'])), + lifetime=dict(type='str'), + start_time=dict(type='str'), + renewable=dict(type='str'), + forwardable=dict(type='bool'), + proxiable=dict(type='bool'), + address_restricted=dict(type='bool'), + anonymous=dict(type='bool'), + canonicalization=dict(type='bool'), + enterprise=dict(type='bool'), + 
renewal=dict(type='bool'), + validate=dict(type='bool'), + keytab=dict(type='bool'), + keytab_path=dict(type='path'), + ) + module = AnsibleModule( + argument_spec=arg_spec, + supports_check_mode=True, + required_by={ + 'keytab_path': 'keytab' + }, + required_if=[ + ('state', 'present', ('password', 'keytab_path'), True), + ], + ) + + state = module.params['state'] + kdestroy_all = module.params['kdestroy_all'] + + keytab = IPAKeytab(module, + state=state, + kdestroy_all=kdestroy_all, + principal=module.params['principal'], + password=module.params['password'], + cache_name=module.params['cache_name'], + lifetime=module.params['lifetime'], + start_time=module.params['start_time'], + renewable=module.params['renewable'], + forwardable=module.params['forwardable'], + proxiable=module.params['proxiable'], + address_restricted=module.params['address_restricted'], + anonymous=module.params['anonymous'], + canonicalization=module.params['canonicalization'], + enterprise=module.params['enterprise'], + renewal=module.params['renewal'], + validate=module.params['validate'], + keytab=module.params['keytab'], + keytab_path=module.params['keytab_path'], + ) + + if module.params['keytab_path'] is not None and module.params['keytab'] is not True: + module.fail_json(msg="If keytab_path is specified then keytab parameter must be True") + + changed = False + if state == 'present': + if not keytab.check_ticket_present(): + changed = True + if not module.check_mode: + keytab.exec_kinit() + + if state == 'absent': + if kdestroy_all: + changed = True + if not module.check_mode: + keytab.exec_kdestroy() + elif keytab.check_ticket_present(): + changed = True + if not module.check_mode: + keytab.exec_kdestroy() + + module.exit_json(changed=changed) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/system/launchd.py b/plugins/modules/launchd.py similarity index 81% rename from plugins/modules/system/launchd.py rename to plugins/modules/launchd.py index 
8c09a44f6e..c7e98f2bc0 100644 --- a/plugins/modules/system/launchd.py +++ b/plugins/modules/launchd.py @@ -1,62 +1,69 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- -# Copyright: (c) 2018, Martin Migasiewicz -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# Copyright (c) 2018, Martin Migasiewicz +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = r''' ---- +DOCUMENTATION = r""" module: launchd author: -- Martin Migasiewicz (@martinm82) -short_description: Manage macOS services + - Martin Migasiewicz (@martinm82) +short_description: Manage macOS services version_added: 1.0.0 description: -- Manage launchd services on target macOS hosts. + - Manage launchd services on target macOS hosts. +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: none options: - name: - description: + name: + description: - Name of the service. - type: str - required: true - state: - description: - - C(started)/C(stopped) are idempotent actions that will not run - commands unless necessary. - - Launchd does not support C(restarted) nor C(reloaded) natively. - These will trigger a stop/start (restarted) or an unload/load - (reloaded). - - C(restarted) unloads and loads the service before start to ensure - that the latest job definition (plist) is used. - - C(reloaded) unloads and loads the service to ensure that the latest - job definition (plist) is used. Whether a service is started or - stopped depends on the content of the definition file. 
- type: str - choices: [ reloaded, restarted, started, stopped, unloaded ] - enabled: - description: + type: str + required: true + plist: + description: + - Name of the V(.plist) file for the service. + - Defaults to V({name}.plist). + type: str + version_added: 10.1.0 + state: + description: + - V(started)/V(stopped) are idempotent actions that do not run commands unless necessary. + - C(launchd) does not support V(restarted) nor V(reloaded) natively. These states trigger a stop/start (restarted) or + an unload/load (reloaded). + - V(restarted) unloads and loads the service before start to ensure that the latest job definition (plist) is used. + - V(reloaded) unloads and loads the service to ensure that the latest job definition (plist) is used. Whether a service + is started or stopped depends on the content of the definition file. + type: str + choices: [reloaded, restarted, started, stopped, unloaded] + enabled: + description: - Whether the service should start on boot. - - B(At least one of state and enabled are required.) - type: bool - force_stop: - description: + - B(At least one of state and enabled are required). + type: bool + force_stop: + description: - Whether the service should not be restarted automatically by launchd. - - Services might have the 'KeepAlive' attribute set to true in a launchd configuration. - In case this is set to true, stopping a service will cause that launchd starts the service again. - - Set this option to C(yes) to let this module change the 'KeepAlive' attribute to false. - type: bool - default: no + - Services might have the C(KeepAlive) attribute set to V(true) in a launchd configuration. In case this is set to V(true), + stopping a service causes that C(launchd) starts the service again. + - Set this option to V(true) to let this module change the C(KeepAlive) attribute to V(false). + type: bool + default: false notes: -- A user must privileged to manage services using this module. 
+ - A user must be privileged to manage services using this module. requirements: -- A system managed by launchd -- The plistlib python library -''' + - A system managed by launchd + - The plistlib Python library +""" -EXAMPLES = r''' +EXAMPLES = r""" - name: Make sure spotify webhelper is started community.general.launchd: name: com.spotify.webhelper @@ -81,7 +88,7 @@ EXAMPLES = r''' community.general.launchd: name: org.memcached state: stopped - force_stop: yes + force_stop: true - name: Restart memcached community.general.launchd: @@ -92,21 +99,27 @@ EXAMPLES = r''' community.general.launchd: name: org.memcached state: unloaded -''' -RETURN = r''' +- name: restart sshd + community.general.launchd: + name: com.openssh.sshd + plist: ssh.plist + state: restarted +""" + +RETURN = r""" status: - description: Metadata about service status - returned: always - type: dict - sample: - { - "current_pid": "-", - "current_state": "stopped", - "previous_pid": "82636", - "previous_state": "running" - } -''' + description: Metadata about service status. 
+ returned: always + type: dict + sample: + { + "current_pid": "-", + "current_state": "stopped", + "previous_pid": "82636", + "previous_state": "running" + } +""" import os import plistlib @@ -137,25 +150,28 @@ class ServiceState: class Plist: - def __init__(self, module, service): + def __init__(self, module, service, filename=None): self.__changed = False self.__service = service + if filename is not None: + self.__filename = filename + else: + self.__filename = '%s.plist' % service state, pid, dummy, dummy = LaunchCtlList(module, self.__service).run() - # Check if readPlist is available or not - self.old_plistlib = hasattr(plistlib, 'readPlist') - - self.__file = self.__find_service_plist(self.__service) + self.__file = self.__find_service_plist(self.__filename) if self.__file is None: - msg = 'Unable to infer the path of %s service plist file' % self.__service + msg = 'Unable to find the plist file %s for service %s' % ( + self.__filename, self.__service, + ) if pid is None and state == ServiceState.UNLOADED: msg += ' and it was not found among active services' module.fail_json(msg=msg) self.__update(module) @staticmethod - def __find_service_plist(service_name): + def __find_service_plist(filename): """Finds the plist file associated with a service""" launchd_paths = [ @@ -172,7 +188,6 @@ class Plist: except OSError: continue - filename = '%s.plist' % service_name if filename in files: return os.path.join(path, filename) return None @@ -183,10 +198,6 @@ class Plist: def __read_plist_file(self, module): service_plist = {} - if self.old_plistlib: - return plistlib.readPlist(self.__file) - - # readPlist is deprecated in Python 3 and onwards try: with open(self.__file, 'rb') as plist_fp: service_plist = plistlib.load(plist_fp) @@ -199,10 +210,6 @@ class Plist: if not service_plist: service_plist = {} - if self.old_plistlib: - plistlib.writePlist(service_plist, self.__file) - return - # writePlist is deprecated in Python 3 and onwards try: with open(self.__file, 
'wb') as plist_fp: plistlib.dump(service_plist, plist_fp) @@ -251,8 +258,7 @@ class Plist: return self.__file -class LaunchCtlTask(object): - __metaclass__ = ABCMeta +class LaunchCtlTask(metaclass=ABCMeta): WAITING_TIME = 5 # seconds def __init__(self, module, service, plist): @@ -453,6 +459,7 @@ def main(): module = AnsibleModule( argument_spec=dict( name=dict(type='str', required=True), + plist=dict(type='str'), state=dict(type='str', choices=['reloaded', 'restarted', 'started', 'stopped', 'unloaded']), enabled=dict(type='bool'), force_stop=dict(type='bool', default=False), @@ -464,6 +471,7 @@ def main(): ) service = module.params['name'] + plist_filename = module.params['plist'] action = module.params['state'] rc = 0 out = err = '' @@ -475,7 +483,7 @@ def main(): # We will tailor the plist file in case one of the options # (enabled, force_stop) was specified. - plist = Plist(module, service) + plist = Plist(module, service, plist_filename) result['changed'] = plist.is_changed() # Gather information about the service to be controlled. 
@@ -506,7 +514,8 @@ def main(): result['status']['current_pid'] != result['status']['previous_pid']): result['changed'] = True if module.check_mode: - result['changed'] = True + if result['status']['current_state'] != action: + result['changed'] = True module.exit_json(**result) diff --git a/plugins/modules/packaging/os/layman.py b/plugins/modules/layman.py similarity index 84% rename from plugins/modules/packaging/os/layman.py rename to plugins/modules/layman.py index 3c990205d9..af2191654f 100644 --- a/plugins/modules/packaging/os/layman.py +++ b/plugins/modules/layman.py @@ -1,55 +1,56 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- -# (c) 2014, Jakub Jirutka +# Copyright (c) 2014, Jakub Jirutka # -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: layman author: "Jakub Jirutka (@jirutka)" short_description: Manage Gentoo overlays description: - - Uses Layman to manage an additional repositories for the Portage package manager on Gentoo Linux. - Please note that Layman must be installed on a managed node prior using this module. + - Uses Layman to manage an additional repositories for the Portage package manager on Gentoo Linux. Please note that Layman + must be installed on a managed node prior using this module. requirements: - - "python >= 2.6" - layman python module +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: none options: name: description: - - The overlay id to install, synchronize, or uninstall. - Use 'ALL' to sync all of the installed overlays (can be used only when C(state=updated)). 
+ - The overlay ID to install, synchronize, or uninstall. Use V(ALL) to sync all of the installed overlays (can be used + only when O(state=updated)). required: true type: str list_url: description: - - An URL of the alternative overlays list that defines the overlay to install. - This list will be fetched and saved under C(${overlay_defs})/${name}.xml), where - C(overlay_defs) is readed from the Layman's configuration. + - An URL of the alternative overlays list that defines the overlay to install. This list is fetched and saved under + C(${overlay_defs}/${name}.xml), where C(overlay_defs) is read from the Layman's configuration. aliases: [url] type: str state: description: - - Whether to install (C(present)), sync (C(updated)), or uninstall (C(absent)) the overlay. + - Whether to install (V(present)), sync (V(updated)), or uninstall (V(absent)) the overlay. default: present choices: [present, absent, updated] type: str validate_certs: description: - - If C(no), SSL certificates will not be validated. This should only be - set to C(no) when no other option exists. Prior to 1.9.3 the code - defaulted to C(no). + - If V(false), SSL certificates are not validated. This should only be set to V(false) when no other option exists. 
type: bool - default: yes -''' + default: true +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: Install the overlay mozilla which is on the central overlays list community.general.layman: name: mozilla @@ -74,7 +75,7 @@ EXAMPLES = ''' community.general.layman: name: cvut state: absent -''' +""" import shutil import traceback @@ -233,7 +234,7 @@ def main(): name=dict(required=True), list_url=dict(aliases=['url']), state=dict(default="present", choices=['present', 'absent', 'updated']), - validate_certs=dict(required=False, default=True, type='bool'), + validate_certs=dict(default=True, type='bool'), ), supports_check_mode=True ) diff --git a/plugins/modules/system/lbu.py b/plugins/modules/lbu.py similarity index 72% rename from plugins/modules/system/lbu.py rename to plugins/modules/lbu.py index fcc3a0d940..7957d0392a 100644 --- a/plugins/modules/system/lbu.py +++ b/plugins/modules/lbu.py @@ -1,14 +1,12 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- -# Copyright: (c) 2019, Kaarle Ritvanen -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# Copyright (c) 2019, Kaarle Ritvanen +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: lbu short_description: Local Backup Utility for Alpine Linux @@ -16,29 +14,37 @@ short_description: Local Backup Utility for Alpine Linux version_added: '0.2.0' description: -- Manage Local Backup Utility of Alpine Linux in run-from-RAM mode + - Manage Local Backup Utility of Alpine Linux in run-from-RAM mode. +extends_documentation_fragment: + - community.general.attributes + +attributes: + check_mode: + support: full + diff_mode: + support: none options: commit: description: - - Control whether to commit changed files. 
+ - Control whether to commit changed files. type: bool exclude: description: - - List of paths to exclude. + - List of paths to exclude. type: list elements: str include: description: - - List of paths to include. + - List of paths to include. type: list elements: str author: -- Kaarle Ritvanen (@kunkku) -''' + - Kaarle Ritvanen (@kunkku) +""" -EXAMPLES = ''' +EXAMPLES = r""" # Commit changed files (if any) - name: Commit community.general.lbu: @@ -49,22 +55,22 @@ EXAMPLES = ''' community.general.lbu: commit: true exclude: - - /etc/opt + - /etc/opt # Include paths without committing - name: Include file and directory community.general.lbu: include: - - /root/.ssh/authorized_keys - - /var/lib/misc -''' + - /root/.ssh/authorized_keys + - /var/lib/misc +""" -RETURN = ''' +RETURN = r""" msg: - description: Error message + description: Error message. type: str returned: on failure -''' +""" from ansible.module_utils.basic import AnsibleModule diff --git a/plugins/modules/net_tools/ldap/ldap_attrs.py b/plugins/modules/ldap_attrs.py similarity index 55% rename from plugins/modules/net_tools/ldap/ldap_attrs.py rename to plugins/modules/ldap_attrs.py index c357a83087..cb8c676536 100644 --- a/plugins/modules/net_tools/ldap/ldap_attrs.py +++ b/plugins/modules/ldap_attrs.py @@ -1,37 +1,26 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- -# Copyright: (c) 2019, Maciej Delmanowski -# Copyright: (c) 2017, Alexander Korinek -# Copyright: (c) 2016, Peter Sagerson -# Copyright: (c) 2016, Jiri Tyr -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# Copyright (c) 2019, Maciej Delmanowski +# Copyright (c) 2017, Alexander Korinek +# Copyright (c) 2016, Peter Sagerson +# Copyright (c) 2016, Jiri Tyr +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import absolute_import, division, print_function -__metaclass__ = type 
+from __future__ import annotations -DOCUMENTATION = r''' ---- +DOCUMENTATION = r""" module: ldap_attrs short_description: Add or remove multiple LDAP attribute values description: - Add or remove multiple LDAP attribute values. notes: - - This only deals with attributes on existing entries. To add or remove - whole entries, see M(community.general.ldap_entry). - - The default authentication settings will attempt to use a SASL EXTERNAL - bind over a UNIX domain socket. This works well with the default Ubuntu - install for example, which includes a cn=peercred,cn=external,cn=auth ACL - rule allowing root to modify the server configuration. If you need to use - a simple bind to access your server, pass the credentials in I(bind_dn) - and I(bind_pw). - - For I(state=present) and I(state=absent), all value comparisons are - performed on the server for maximum accuracy. For I(state=exact), values - have to be compared in Python, which obviously ignores LDAP matching - rules. This should work out in most cases, but it is theoretically - possible to see spurious changes when target and actual values are - semantically identical but lexically distinct. + - This only deals with attributes on existing entries. To add or remove whole entries, see M(community.general.ldap_entry). + - For O(state=present) and O(state=absent), all value comparisons are performed on the server for maximum accuracy. For + O(state=exact), values have to be compared in Python, which obviously ignores LDAP matching rules. This should work out + in most cases, but it is theoretically possible to see spurious changes when target and actual values are semantically + identical but lexically distinct. 
version_added: '0.2.0' author: - Jiri Tyr (@jtyr) @@ -39,6 +28,12 @@ author: - Maciej Delmanowski (@drybjed) requirements: - python-ldap +attributes: + check_mode: + support: full + diff_mode: + support: full + version_added: 8.5.0 options: state: required: false @@ -46,38 +41,38 @@ options: choices: [present, absent, exact] default: present description: - - The state of the attribute values. If C(present), all given attribute - values will be added if they're missing. If C(absent), all given - attribute values will be removed if present. If C(exact), the set of - attribute values will be forced to exactly those provided and no others. - If I(state=exact) and the attribute I(value) is empty, all values for - this attribute will be removed. + - The state of the attribute values. If V(present), all given attribute values are added if they are missing. If V(absent), + all given attribute values are removed if present. If V(exact), the set of attribute values is forced to exactly those + provided and no others. If O(state=exact) and the attribute value is empty, all values for this attribute are removed. attributes: required: true type: dict description: - - The attribute(s) and value(s) to add or remove. The complex argument format is required in order to pass - a list of strings (see examples). + - The attribute(s) and value(s) to add or remove. + - Each attribute value can be a string for single-valued attributes or a list of strings for multi-valued attributes. + - If you specify values for this option in YAML, please note that you can improve readability for long string values + by using YAML block modifiers as seen in the examples for this module. + - Note that when using values that YAML/ansible-core interprets as other types, like V(yes), V(no) (booleans), or V(2.10) + (float), make sure to quote them if these are meant to be strings. Otherwise the wrong values may be sent to LDAP. 
ordered: required: false type: bool - default: 'no' + default: false description: - - If C(yes), prepend list values with X-ORDERED index numbers in all - attributes specified in the current task. This is useful mostly with - I(olcAccess) attribute to easily manage LDAP Access Control Lists. + - If V(true), prepend list values with X-ORDERED index numbers in all attributes specified in the current task. This + is useful mostly with C(olcAccess) attribute to easily manage LDAP Access Control Lists. extends_documentation_fragment: -- community.general.ldap.documentation - -''' + - community.general.ldap.documentation + - community.general.attributes +""" -EXAMPLES = r''' +EXAMPLES = r""" - name: Configure directory number 1 for example.com community.general.ldap_attrs: dn: olcDatabase={1}hdb,cn=config attributes: - olcSuffix: dc=example,dc=com + olcSuffix: dc=example,dc=com state: exact # The complex argument format is required here to pass a list of ACL strings. @@ -85,17 +80,17 @@ EXAMPLES = r''' community.general.ldap_attrs: dn: olcDatabase={1}hdb,cn=config attributes: - olcAccess: - - >- - {0}to attrs=userPassword,shadowLastChange - by self write - by anonymous auth - by dn="cn=admin,dc=example,dc=com" write - by * none' - - >- - {1}to dn.base="dc=example,dc=com" - by dn="cn=admin,dc=example,dc=com" write - by * read + olcAccess: + - >- + {0}to attrs=userPassword,shadowLastChange + by self write + by anonymous auth + by dn="cn=admin,dc=example,dc=com" write + by * none' + - >- + {1}to dn.base="dc=example,dc=com" + by dn="cn=admin,dc=example,dc=com" write + by * read state: exact # An alternative approach with automatic X-ORDERED numbering @@ -103,41 +98,41 @@ EXAMPLES = r''' community.general.ldap_attrs: dn: olcDatabase={1}hdb,cn=config attributes: - olcAccess: - - >- - to attrs=userPassword,shadowLastChange - by self write - by anonymous auth - by dn="cn=admin,dc=example,dc=com" write - by * none' - - >- - to dn.base="dc=example,dc=com" - by 
dn="cn=admin,dc=example,dc=com" write - by * read - ordered: yes + olcAccess: + - >- + to attrs=userPassword,shadowLastChange + by self write + by anonymous auth + by dn="cn=admin,dc=example,dc=com" write + by * none' + - >- + to dn.base="dc=example,dc=com" + by dn="cn=admin,dc=example,dc=com" write + by * read + ordered: true state: exact - name: Declare some indexes community.general.ldap_attrs: dn: olcDatabase={1}hdb,cn=config attributes: - olcDbIndex: - - objectClass eq - - uid eq + olcDbIndex: + - objectClass eq + - uid eq - name: Set up a root user, which we can use later to bootstrap the directory community.general.ldap_attrs: dn: olcDatabase={1}hdb,cn=config attributes: - olcRootDN: cn=root,dc=example,dc=com - olcRootPW: "{SSHA}tabyipcHzhwESzRaGA7oQ/SDoBZQOGND" + olcRootDN: cn=root,dc=example,dc=com + olcRootPW: "{SSHA}tabyipcHzhwESzRaGA7oQ/SDoBZQOGND" state: exact - name: Remove an attribute with a specific value community.general.ldap_attrs: dn: uid=jdoe,ou=people,dc=example,dc=com attributes: - description: "An example user account" + description: "An example user account" state: absent server_uri: ldap://localhost/ bind_dn: cn=admin,dc=example,dc=com @@ -147,32 +142,35 @@ EXAMPLES = r''' community.general.ldap_attrs: dn: uid=jdoe,ou=people,dc=example,dc=com attributes: - description: [] + description: [] state: exact server_uri: ldap://localhost/ bind_dn: cn=admin,dc=example,dc=com bind_pw: password -''' +""" -RETURN = r''' +RETURN = r""" modlist: - description: list of modified parameters + description: List of modified parameters. 
returned: success type: list - sample: '[[2, "olcRootDN", ["cn=root,dc=example,dc=com"]]]' -''' + sample: + - [2, "olcRootDN", ["cn=root,dc=example,dc=com"]] +""" import traceback from ansible.module_utils.basic import AnsibleModule, missing_required_lib -from ansible.module_utils.common.text.converters import to_native, to_bytes -from ansible_collections.community.general.plugins.module_utils.ldap import LdapGeneric, gen_specs +from ansible.module_utils.common.text.converters import to_native, to_bytes, to_text +from ansible_collections.community.general.plugins.module_utils.ldap import LdapGeneric, gen_specs, ldap_required_together + import re LDAP_IMP_ERR = None try: import ldap + import ldap.filter HAS_LDAP = True except ImportError: @@ -190,7 +188,7 @@ class LdapAttrs(LdapGeneric): self.ordered = self.module.params['ordered'] def _order_values(self, values): - """ Preprend X-ORDERED index numbers to attribute's values. """ + """ Prepend X-ORDERED index numbers to attribute's values. 
""" ordered_values = [] if isinstance(values, list): @@ -218,26 +216,38 @@ class LdapAttrs(LdapGeneric): def add(self): modlist = [] + new_attrs = {} for name, values in self.module.params['attributes'].items(): norm_values = self._normalize_values(values) + added_values = [] for value in norm_values: if self._is_value_absent(name, value): modlist.append((ldap.MOD_ADD, name, value)) - - return modlist + added_values.append(value) + if added_values: + new_attrs[name] = norm_values + return modlist, {}, new_attrs def delete(self): modlist = [] + old_attrs = {} + new_attrs = {} for name, values in self.module.params['attributes'].items(): norm_values = self._normalize_values(values) + removed_values = [] for value in norm_values: if self._is_value_present(name, value): + removed_values.append(value) modlist.append((ldap.MOD_DELETE, name, value)) - - return modlist + if removed_values: + old_attrs[name] = norm_values + new_attrs[name] = [value for value in norm_values if value not in removed_values] + return modlist, old_attrs, new_attrs def exact(self): modlist = [] + old_attrs = {} + new_attrs = {} for name, values in self.module.params['attributes'].items(): norm_values = self._normalize_values(values) try: @@ -255,15 +265,22 @@ class LdapAttrs(LdapGeneric): modlist.append((ldap.MOD_DELETE, name, None)) else: modlist.append((ldap.MOD_REPLACE, name, norm_values)) + old_attrs[name] = current + new_attrs[name] = norm_values + if len(current) == 1 and len(norm_values) == 1: + old_attrs[name] = current[0] + new_attrs[name] = norm_values[0] - return modlist + return modlist, old_attrs, new_attrs def _is_value_present(self, name, value): """ True if the target attribute has the given value. 
""" try: - is_present = bool( - self.connection.compare_s(self.dn, name, value)) - except ldap.NO_SUCH_ATTRIBUTE: + escaped_value = ldap.filter.escape_filter_chars(to_text(value)) + filterstr = "(%s=%s)" % (name, escaped_value) + dns = self.connection.search_s(self.dn, ldap.SCOPE_BASE, filterstr) + is_present = len(dns) == 1 + except ldap.NO_SUCH_OBJECT: is_present = False return is_present @@ -277,10 +294,11 @@ def main(): module = AnsibleModule( argument_spec=gen_specs( attributes=dict(type='dict', required=True), - ordered=dict(type='bool', default=False, required=False), + ordered=dict(type='bool', default=False), state=dict(type='str', default='present', choices=['absent', 'exact', 'present']), ), supports_check_mode=True, + required_together=ldap_required_together(), ) if not HAS_LDAP: @@ -289,16 +307,18 @@ def main(): # Instantiate the LdapAttr object ldap = LdapAttrs(module) + old_attrs = None + new_attrs = None state = module.params['state'] # Perform action if state == 'present': - modlist = ldap.add() + modlist, old_attrs, new_attrs = ldap.add() elif state == 'absent': - modlist = ldap.delete() + modlist, old_attrs, new_attrs = ldap.delete() elif state == 'exact': - modlist = ldap.exact() + modlist, old_attrs, new_attrs = ldap.exact() changed = False @@ -311,7 +331,7 @@ def main(): except Exception as e: module.fail_json(msg="Attribute action failed.", details=to_native(e)) - module.exit_json(changed=changed, modlist=modlist) + module.exit_json(changed=changed, modlist=modlist, diff={"before": old_attrs, "after": new_attrs}) if __name__ == '__main__': diff --git a/plugins/modules/net_tools/ldap/ldap_entry.py b/plugins/modules/ldap_entry.py similarity index 68% rename from plugins/modules/net_tools/ldap/ldap_entry.py rename to plugins/modules/ldap_entry.py index 24e121e521..05242304bd 100644 --- a/plugins/modules/net_tools/ldap/ldap_entry.py +++ b/plugins/modules/ldap_entry.py @@ -1,46 +1,45 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- -# Copyright: (c) 
2016, Peter Sagerson -# Copyright: (c) 2016, Jiri Tyr +# Copyright (c) 2016, Peter Sagerson +# Copyright (c) 2016, Jiri Tyr # -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: ldap_entry -short_description: Add or remove LDAP entries. +short_description: Add or remove LDAP entries description: - - Add or remove LDAP entries. This module only asserts the existence or - non-existence of an LDAP entry, not its attributes. To assert the - attribute values of an entry, see M(community.general.ldap_attrs). -notes: - - The default authentication settings will attempt to use a SASL EXTERNAL - bind over a UNIX domain socket. This works well with the default Ubuntu - install for example, which includes a cn=peercred,cn=external,cn=auth ACL - rule allowing root to modify the server configuration. If you need to use - a simple bind to access your server, pass the credentials in I(bind_dn) - and I(bind_pw). + - Add or remove LDAP entries. This module only asserts the existence or non-existence of an LDAP entry, not its attributes. + To assert the attribute values of an entry, see M(community.general.ldap_attrs). author: - Jiri Tyr (@jtyr) requirements: - python-ldap +attributes: + check_mode: + support: full + diff_mode: + support: none options: attributes: description: - - If I(state=present), attributes necessary to create an entry. Existing - entries are never modified. To assert specific attribute values on an - existing entry, use M(community.general.ldap_attrs) module instead. + - If O(state=present), attributes necessary to create an entry. Existing entries are never modified. 
To assert specific + attribute values on an existing entry, use M(community.general.ldap_attrs) module instead. + - Each attribute value can be a string for single-valued attributes or a list of strings for multi-valued attributes. + - If you specify values for this option in YAML, please note that you can improve readability for long string values + by using YAML block modifiers as seen in the examples for this module. + - Note that when using values that YAML/ansible-core interprets as other types, like V(yes), V(no) (booleans), or V(2.10) + (float), make sure to quote them if these are meant to be strings. Otherwise the wrong values may be sent to LDAP. type: dict + default: {} objectClass: description: - - If I(state=present), value or list of values to use when creating - the entry. It can either be a string or an actual list of - strings. + - If O(state=present), value or list of values to use when creating the entry. It can either be a string or an actual + list of strings. type: list elements: str state: @@ -51,18 +50,17 @@ options: type: str recursive: description: - - If I(state=delete), a flag indicating whether a single entry or the - whole branch must be deleted. + - If O(state=delete), a flag indicating whether a single entry or the whole branch must be deleted. 
type: bool default: false version_added: 4.6.0 extends_documentation_fragment: -- community.general.ldap.documentation - -''' + - community.general.ldap.documentation + - community.general.attributes +""" -EXAMPLES = """ +EXAMPLES = r""" - name: Make sure we have a parent entry for users community.general.ldap_entry: dn: ou=users,dc=example,dc=com @@ -78,6 +76,29 @@ EXAMPLES = """ description: An LDAP administrator userPassword: "{SSHA}tabyipcHzhwESzRaGA7oQ/SDoBZQOGND" +- name: Set possible values for attributes elements + community.general.ldap_entry: + dn: cn=admin,dc=example,dc=com + objectClass: + - simpleSecurityObject + - organizationalRole + attributes: + description: An LDAP Administrator + roleOccupant: + - cn=Chocs Puddington,ou=Information Technology,dc=example,dc=com + - cn=Alice Stronginthebrain,ou=Information Technology,dc=example,dc=com + olcAccess: + - >- + {0}to attrs=userPassword,shadowLastChange + by self write + by anonymous auth + by dn="cn=admin,dc=example,dc=com" write + by * none' + - >- + {1}to dn.base="dc=example,dc=com" + by dn="cn=admin,dc=example,dc=com" write + by * read + - name: Get rid of an old entry community.general.ldap_entry: dn: ou=stuff,dc=example,dc=com @@ -104,7 +125,7 @@ EXAMPLES = """ """ -RETURN = """ +RETURN = r""" # Default return values """ @@ -112,7 +133,7 @@ import traceback from ansible.module_utils.basic import AnsibleModule, missing_required_lib from ansible.module_utils.common.text.converters import to_native, to_bytes -from ansible_collections.community.general.plugins.module_utils.ldap import LdapGeneric, gen_specs +from ansible_collections.community.general.plugins.module_utils.ldap import LdapGeneric, gen_specs, ldap_required_together LDAP_IMP_ERR = None try: @@ -174,7 +195,7 @@ class LdapEntry(LdapGeneric): self.connection.delete_s(self.dn) def _delete_recursive(): - """ Attempt recurive deletion using the subtree-delete control. + """ Attempt recursive deletion using the subtree-delete control. 
If that fails, do it manually. """ try: subtree_delete = ldap.controls.ValueLessRequestControl('1.2.840.113556.1.4.805') @@ -216,6 +237,7 @@ def main(): ), required_if=[('state', 'present', ['objectClass'])], supports_check_mode=True, + required_together=ldap_required_together(), ) if not HAS_LDAP: diff --git a/plugins/modules/ldap_inc.py b/plugins/modules/ldap_inc.py new file mode 100644 index 0000000000..41d58dfb3f --- /dev/null +++ b/plugins/modules/ldap_inc.py @@ -0,0 +1,241 @@ +#!/usr/bin/python + +# Copyright (c) 2024, Philippe Duveau +# Copyright (c) 2019, Maciej Delmanowski (ldap_attrs.py) +# Copyright (c) 2017, Alexander Korinek (ldap_attrs.py) +# Copyright (c) 2016, Peter Sagerson (ldap_attrs.py) +# Copyright (c) 2016, Jiri Tyr (ldap_attrs.py) +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +# The code of this module is derived from that of ldap_attrs.py + +from __future__ import annotations + + +DOCUMENTATION = r""" +module: ldap_inc +short_description: Use the Modify-Increment LDAP V3 feature to increment an attribute value +version_added: 10.2.0 +description: + - Atomically increments the value of an attribute and return its new value. +notes: + - When implemented by the directory server, the module uses the ModifyIncrement extension defined in L(RFC4525, https://www.rfc-editor.org/rfc/rfc4525.html) + and the control PostRead. This extension and the control are implemented in OpenLdap but not all directory servers implement + them. In this case, the module automatically uses a more classic method based on two phases, first the current value is + read then the modify operation remove the old value and add the new one in a single request. If the value has changed + by a concurrent call then the remove action fails. Then the sequence is retried 3 times before raising an error to the + playbook. 
In a heavy modification environment, the module does not guarantee to be systematically successful.
+  - This only deals with integer attribute of an existing entry. To modify attributes of an entry, see M(community.general.ldap_attrs)
+    or to add or remove whole entries, see M(community.general.ldap_entry).
+author:
+  - Philippe Duveau (@pduveau)
+requirements:
+  - python-ldap
+attributes:
+  check_mode:
+    support: full
+  diff_mode:
+    support: none
+options:
+  dn:
+    required: true
+    type: str
+    description:
+      - The DN entry containing the attribute to increment.
+  attribute:
+    required: true
+    type: str
+    description:
+      - The attribute to increment.
+  increment:
+    required: false
+    type: int
+    default: 1
+    description:
+      - The value of the increment to apply.
+  method:
+    required: false
+    type: str
+    default: auto
+    choices: [auto, rfc4525, legacy]
+    description:
+      - If V(auto), the module determines automatically the method to use.
+      - If V(rfc4525) or V(legacy) force to use the corresponding method.
+extends_documentation_fragment:
+  - community.general.ldap.documentation
+  - community.general.attributes
+"""
+
+
+EXAMPLES = r"""
+- name: Increments uidNumber 1 Number for example.com
+  community.general.ldap_inc:
+    dn: "cn=uidNext,ou=unix-management,dc=example,dc=com"
+    attribute: "uidNumber"
+    increment: "1"
+  register: ldap_uidNumber_sequence
+
+- name: Modifies the user to define its identification number (uidNumber) when incrementation is successful
+  community.general.ldap_attrs:
+    dn: "cn=john,ou=posix-users,dc=example,dc=com"
+    state: present
+    attributes:
+      uidNumber: "{{ ldap_uidNumber_sequence.value }}"
+  when: ldap_uidNumber_sequence.incremented
+"""
+
+
+RETURN = r"""
+incremented:
+  description:
+    - It is set to V(true) if the attribute value has changed.
+  returned: success
+  type: bool
+  sample: true
+
+attribute:
+  description:
+    - The name of the attribute that was incremented.
+ returned: success + type: str + sample: uidNumber + +value: + description: + - The new value after incrementing. + returned: success + type: str + sample: "2" + +rfc4525: + description: + - Is V(true) if the method used to increment is based on RFC4525, V(false) if legacy. + returned: success + type: bool + sample: true +""" + +from ansible.module_utils.basic import AnsibleModule, missing_required_lib +from ansible.module_utils.common.text.converters import to_native, to_bytes +from ansible_collections.community.general.plugins.module_utils import deps +from ansible_collections.community.general.plugins.module_utils.ldap import LdapGeneric, gen_specs, ldap_required_together + +with deps.declare("ldap", reason=missing_required_lib('python-ldap')): + import ldap + import ldap.controls.readentry + + +class LdapInc(LdapGeneric): + def __init__(self, module): + LdapGeneric.__init__(self, module) + # Shortcuts + self.attr = self.module.params['attribute'] + self.increment = self.module.params['increment'] + self.method = self.module.params['method'] + + def inc_rfc4525(self): + return [(ldap.MOD_INCREMENT, self.attr, [to_bytes(str(self.increment))])] + + def inc_legacy(self, curr_val, new_val): + return [(ldap.MOD_DELETE, self.attr, [to_bytes(curr_val)]), + (ldap.MOD_ADD, self.attr, [to_bytes(new_val)])] + + def serverControls(self): + return [ldap.controls.readentry.PostReadControl(attrList=[self.attr])] + + LDAP_MOD_INCREMENT = to_bytes("1.3.6.1.1.14") + + +def main(): + module = AnsibleModule( + argument_spec=gen_specs( + attribute=dict(type='str', required=True), + increment=dict(type='int', default=1), + method=dict(type='str', default='auto', choices=['auto', 'rfc4525', 'legacy']), + ), + supports_check_mode=True, + required_together=ldap_required_together(), + ) + + deps.validate(module) + + # Instantiate the LdapAttr object + mod = LdapInc(module) + + changed = False + ret = "" + rfc4525 = False + + try: + if mod.increment != 0 and not module.check_mode: + 
changed = True
+
+            if mod.method != "auto":
+                rfc4525 = mod.method == "rfc4525"
+            else:
+                rootDSE = mod.connection.search_ext_s(
+                    base="",
+                    scope=ldap.SCOPE_BASE,
+                    attrlist=["*", "+"])
+                if len(rootDSE) == 1:
+                    if to_bytes(ldap.CONTROL_POST_READ) in rootDSE[0][1]["supportedControl"] and (
+                        mod.LDAP_MOD_INCREMENT in rootDSE[0][1]["supportedFeatures"] or
+                        mod.LDAP_MOD_INCREMENT in rootDSE[0][1]["supportedExtension"]
+                    ):
+                        rfc4525 = True
+
+            if rfc4525:
+                dummy, dummy, dummy, resp_ctrls = mod.connection.modify_ext_s(
+                    dn=mod.dn,
+                    modlist=mod.inc_rfc4525(),
+                    serverctrls=mod.serverControls(),
+                    clientctrls=None)
+                if len(resp_ctrls) == 1:
+                    ret = resp_ctrls[0].entry[mod.attr][0]
+
+            else:
+                tries = 0
+                max_tries = 3
+                while tries < max_tries:
+                    tries = tries + 1
+                    result = mod.connection.search_ext_s(
+                        base=mod.dn,
+                        scope=ldap.SCOPE_BASE,
+                        filterstr="(%s=*)" % mod.attr,
+                        attrlist=[mod.attr])
+                    if len(result) != 1:
+                        module.fail_json(msg="The entry does not exist or does not contain the specified attribute.")
+                        return
+                    try:
+                        ret = str(int(result[0][1][mod.attr][0]) + mod.increment)
+                        # if the current value first arg in inc_legacy has changed then the modify will fail
+                        mod.connection.modify_s(
+                            dn=mod.dn,
+                            modlist=mod.inc_legacy(result[0][1][mod.attr][0], ret))
+                        break
+                    except ldap.NO_SUCH_ATTRIBUTE:
+                        if tries == max_tries:
+                            module.fail_json(msg="The increment could not be applied after " + str(max_tries) + " tries.")
+                            return
+
+        else:
+            result = mod.connection.search_ext_s(
+                base=mod.dn,
+                scope=ldap.SCOPE_BASE,
+                filterstr="(%s=*)" % mod.attr,
+                attrlist=[mod.attr])
+            if len(result) == 1:
+                ret = str(int(result[0][1][mod.attr][0]) + mod.increment)
+                changed = mod.increment != 0
+            else:
+                module.fail_json(msg="The entry does not exist or does not contain the specified attribute.")
+
+    except Exception as e:
+        module.fail_json(msg="Attribute action failed.", details=to_native(e))
+
+    module.exit_json(changed=changed, incremented=changed,
attribute=mod.attr, value=ret, rfc4525=rfc4525) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/net_tools/ldap/ldap_passwd.py b/plugins/modules/ldap_passwd.py similarity index 68% rename from plugins/modules/net_tools/ldap/ldap_passwd.py rename to plugins/modules/ldap_passwd.py index 8d86ee93fc..86cd923c95 100644 --- a/plugins/modules/net_tools/ldap/ldap_passwd.py +++ b/plugins/modules/ldap_passwd.py @@ -1,43 +1,38 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- -# Copyright: (c) 2017-2018, Keller Fuchs -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# Copyright (c) 2017-2018, Keller Fuchs +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: ldap_passwd -short_description: Set passwords in LDAP. +short_description: Set passwords in LDAP description: - - Set a password for an LDAP entry. This module only asserts that - a given password is valid for a given entry. To assert the - existence of an entry, see M(community.general.ldap_entry). -notes: - - The default authentication settings will attempt to use a SASL EXTERNAL - bind over a UNIX domain socket. This works well with the default Ubuntu - install for example, which includes a cn=peercred,cn=external,cn=auth ACL - rule allowing root to modify the server configuration. If you need to use - a simple bind to access your server, pass the credentials in I(bind_dn) - and I(bind_pw). + - Set a password for an LDAP entry. This module only asserts that a given password is valid for a given entry. To assert + the existence of an entry, see M(community.general.ldap_entry). 
author: - Keller Fuchs (@KellerFuchs) requirements: - python-ldap +attributes: + check_mode: + support: full + diff_mode: + support: none options: passwd: description: - - The (plaintext) password to be set for I(dn). + - The (plaintext) password to be set for O(dn). type: str extends_documentation_fragment: -- community.general.ldap.documentation + - community.general.ldap.documentation + - community.general.attributes +""" -''' - -EXAMPLES = """ +EXAMPLES = r""" - name: Set a password for the admin user community.general.ldap_passwd: dn: cn=admin,dc=example,dc=com @@ -49,22 +44,23 @@ EXAMPLES = """ passwd: "{{ item.value }}" with_dict: alice: alice123123 - bob: "|30b!" + bob: "|30b!" admin: "{{ vault_secret }}" """ -RETURN = """ +RETURN = r""" modlist: - description: list of modified parameters + description: List of modified parameters. returned: success type: list - sample: '[[2, "olcRootDN", ["cn=root,dc=example,dc=com"]]]' + sample: + - [2, "olcRootDN", ["cn=root,dc=example,dc=com"]] """ import traceback from ansible.module_utils.basic import AnsibleModule, missing_required_lib -from ansible_collections.community.general.plugins.module_utils.ldap import LdapGeneric, gen_specs +from ansible_collections.community.general.plugins.module_utils.ldap import LdapGeneric, gen_specs, ldap_required_together LDAP_IMP_ERR = None try: @@ -125,6 +121,7 @@ def main(): module = AnsibleModule( argument_spec=gen_specs(passwd=dict(no_log=True)), supports_check_mode=True, + required_together=ldap_required_together(), ) if not HAS_LDAP: diff --git a/plugins/modules/ldap_search.py b/plugins/modules/ldap_search.py new file mode 100644 index 0000000000..d7d1a9bbcf --- /dev/null +++ b/plugins/modules/ldap_search.py @@ -0,0 +1,241 @@ +#!/usr/bin/python + +# Copyright (c) 2016, Peter Sagerson +# Copyright (c) 2020, Sebastian Pfahl +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: 
GPL-3.0-or-later + +from __future__ import annotations + +DOCUMENTATION = r""" +module: ldap_search +version_added: '0.2.0' +short_description: Search for entries in a LDAP server +description: + - Return the results of an LDAP search. +author: + - Sebastian Pfahl (@eryx12o45) +requirements: + - python-ldap +attributes: + check_mode: + support: full + diff_mode: + support: none +options: + dn: + required: true + type: str + description: + - The LDAP DN to search in. + scope: + choices: [base, onelevel, subordinate, children] + default: base + type: str + description: + - The LDAP scope to use. + - V(subordinate) requires the LDAPv3 subordinate feature extension. + - V(children) is equivalent to a "subtree" scope. + filter: + default: '(objectClass=*)' + type: str + description: + - Used for filtering the LDAP search result. + attrs: + type: list + elements: str + description: + - A list of attributes for limiting the result. Use an actual list or a comma-separated string. + schema: + default: false + type: bool + description: + - Set to V(true) to return the full attribute schema of entries, not their attribute values. Overrides O(attrs) when + provided. + page_size: + default: 0 + type: int + description: + - The page size when performing a simple paged result search (RFC 2696). This setting can be tuned to reduce issues + with timeouts and server limits. + - Setting the page size to V(0) (default) disables paged searching. + version_added: 7.1.0 + base64_attributes: + description: + - If provided, all attribute values returned that are listed in this option are Base64 encoded. + - If the special value V(*) appears in this list, all attributes are Base64 encoded. + - All other attribute values are converted to UTF-8 strings. If they contain binary data, please note that invalid UTF-8 + bytes are omitted. 
+ type: list + elements: str + version_added: 7.0.0 +extends_documentation_fragment: + - community.general.ldap.documentation + - community.general.attributes +""" + +EXAMPLES = r""" +- name: Return all entries within the 'groups' organizational unit. + community.general.ldap_search: + dn: "ou=groups,dc=example,dc=com" + register: ldap_groups + +- name: Return GIDs for all groups + community.general.ldap_search: + dn: "ou=groups,dc=example,dc=com" + scope: "onelevel" + attrs: + - "gidNumber" + register: ldap_group_gids +""" + +# @FIXME RV 'results' is meant to be used when 'loop:' was used with the module. +RESULTS = r""" +results: + description: + - For every entry found, one dictionary is returned. + - Every dictionary contains a key C(dn) with the entry's DN as a value. + - Every attribute of the entry found is added to the dictionary. If the key has precisely one value, that value is taken + directly, otherwise the key's value is a list. + - Note that all values (for single-element lists) and list elements (for multi-valued lists) are UTF-8 strings. Some might + contain Base64-encoded binary data; which ones is determined by the O(base64_attributes) option. 
+ type: list + elements: dict +""" + +import base64 +import traceback + +from ansible.module_utils.basic import AnsibleModule, missing_required_lib +from ansible.module_utils.common.text.converters import to_bytes, to_native, to_text +from ansible_collections.community.general.plugins.module_utils.ldap import LdapGeneric, gen_specs, ldap_required_together + +LDAP_IMP_ERR = None +try: + import ldap + + HAS_LDAP = True +except ImportError: + LDAP_IMP_ERR = traceback.format_exc() + HAS_LDAP = False + + +def main(): + module = AnsibleModule( + argument_spec=gen_specs( + dn=dict(type='str', required=True), + scope=dict(type='str', default='base', choices=['base', 'onelevel', 'subordinate', 'children']), + filter=dict(type='str', default='(objectClass=*)'), + attrs=dict(type='list', elements='str'), + schema=dict(type='bool', default=False), + page_size=dict(type='int', default=0), + base64_attributes=dict(type='list', elements='str'), + ), + supports_check_mode=True, + required_together=ldap_required_together(), + ) + + if not HAS_LDAP: + module.fail_json(msg=missing_required_lib('python-ldap'), + exception=LDAP_IMP_ERR) + + try: + LdapSearch(module).main() + except Exception as exception: + module.fail_json(msg="Attribute action failed.", details=to_native(exception)) + + +def _normalize_string(val, convert_to_base64): + if isinstance(val, (str, bytes)): + if isinstance(val, str): + val = to_bytes(val, encoding='utf-8') + if convert_to_base64: + val = to_text(base64.b64encode(val)) + else: + # See https://github.com/ansible/ansible/issues/80258#issuecomment-1477038952 for details. + # We want to make sure that all strings are properly UTF-8 encoded, even if they were not, + # or happened to be byte strings. + val = to_text(val, 'utf-8', errors='replace') + # See also https://github.com/ansible-collections/community.general/issues/5704. 
+ return val + + +def _extract_entry(dn, attrs, base64_attributes): + extracted = {'dn': dn} + for attr, val in list(attrs.items()): + convert_to_base64 = '*' in base64_attributes or attr in base64_attributes + if len(val) == 1: + extracted[attr] = _normalize_string(val[0], convert_to_base64) + else: + extracted[attr] = [_normalize_string(v, convert_to_base64) for v in val] + return extracted + + +class LdapSearch(LdapGeneric): + def __init__(self, module): + LdapGeneric.__init__(self, module) + + self.filterstr = self.module.params['filter'] + self.attrlist = [] + self.page_size = self.module.params['page_size'] + self._load_scope() + self._load_attrs() + self._load_schema() + self._base64_attributes = set(self.module.params['base64_attributes'] or []) + + def _load_schema(self): + self.schema = self.module.params['schema'] + if self.schema: + self.attrsonly = 1 + else: + self.attrsonly = 0 + + def _load_scope(self): + spec = dict( + base=ldap.SCOPE_BASE, + onelevel=ldap.SCOPE_ONELEVEL, + subordinate=ldap.SCOPE_SUBORDINATE, + children=ldap.SCOPE_SUBTREE, + ) + self.scope = spec[self.module.params['scope']] + + def _load_attrs(self): + self.attrlist = self.module.params['attrs'] or None + + def main(self): + results = self.perform_search() + self.module.exit_json(changed=False, results=results) + + def perform_search(self): + ldap_entries = [] + controls = [] + if self.page_size > 0: + controls.append(ldap.controls.libldap.SimplePagedResultsControl(True, size=self.page_size, cookie='')) + try: + while True: + response = self.connection.search_ext( + self.dn, + self.scope, + filterstr=self.filterstr, + attrlist=self.attrlist, + attrsonly=self.attrsonly, + serverctrls=controls, + ) + rtype, results, rmsgid, serverctrls = self.connection.result3(response) + for result in results: + if isinstance(result[1], dict): + if self.schema: + ldap_entries.append(dict(dn=result[0], attrs=list(result[1].keys()))) + else: + ldap_entries.append(_extract_entry(result[0], result[1], 
self._base64_attributes)) + cookies = [c.cookie for c in serverctrls if c.controlType == ldap.controls.libldap.SimplePagedResultsControl.controlType] + if self.page_size > 0 and cookies and cookies[0]: + controls[0].cookie = cookies[0] + else: + return ldap_entries + except ldap.NO_SUCH_OBJECT: + self.module.fail_json(msg="Base not found: {0}".format(self.dn)) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/monitoring/librato_annotation.py b/plugins/modules/librato_annotation.py similarity index 55% rename from plugins/modules/monitoring/librato_annotation.py rename to plugins/modules/librato_annotation.py index 6fcabcf34e..2118d95051 100644 --- a/plugins/modules/monitoring/librato_annotation.py +++ b/plugins/modules/librato_annotation.py @@ -1,74 +1,82 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- # -# (C) Seth Edwards, 2014 -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# Copyright (c) Seth Edwards, 2014 +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: librato_annotation -short_description: create an annotation in librato +short_description: Create an annotation in Librato description: - - Create an annotation event on the given annotation stream :name. If the annotation stream does not exist, it will be created automatically + - Create an annotation event on the given annotation stream O(name). If the annotation stream does not exist, it creates + one automatically. 
author: "Seth Edwards (@Sedward)" requirements: [] +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: none + diff_mode: + support: none options: - user: - type: str - description: - - Librato account username - required: true - api_key: - type: str - description: - - Librato account api key - required: true - name: - type: str - description: - - The annotation stream name - - If the annotation stream does not exist, it will be created automatically - required: false - title: - type: str - description: - - The title of an annotation is a string and may contain spaces - - The title should be a short, high-level summary of the annotation e.g. v45 Deployment - required: true - source: - type: str - description: - - A string which describes the originating source of an annotation when that annotation is tracked across multiple members of a population - required: false + user: + type: str description: - type: str - description: - - The description contains extra metadata about a particular annotation - - The description should contain specifics on the individual annotation e.g. Deployed 9b562b2 shipped new feature foo! - required: false - start_time: - type: int - description: - - The unix timestamp indicating the time at which the event referenced by this annotation started - required: false - end_time: - type: int - description: - - The unix timestamp indicating the time at which the event referenced by this annotation ended - - For events that have a duration, this is a useful way to annotate the duration of the event - required: false - links: - type: list - elements: dict - description: - - See examples -''' + - Librato account username. + required: true + api_key: + type: str + description: + - Librato account API key. + required: true + name: + type: str + description: + - The annotation stream name. + - If the annotation stream does not exist, it creates one automatically. 
+ required: false + title: + type: str + description: + - The title of an annotation is a string and may contain spaces. + - The title should be a short, high-level summary of the annotation for example V(v45 Deployment). + required: true + source: + type: str + description: + - A string which describes the originating source of an annotation when that annotation is tracked across multiple members + of a population. + required: false + description: + type: str + description: + - The description contains extra metadata about a particular annotation. + - The description should contain specifics on the individual annotation for example V(Deployed 9b562b2 shipped new feature + foo!). + required: false + start_time: + type: int + description: + - The unix timestamp indicating the time at which the event referenced by this annotation started. + required: false + end_time: + type: int + description: + - The unix timestamp indicating the time at which the event referenced by this annotation ended. + - For events that have a duration, this is a useful way to annotate the duration of the event. + required: false + links: + type: list + elements: dict + description: + - See examples. 
+""" -EXAMPLES = ''' +EXAMPLES = r""" - name: Create a simple annotation event with a source community.general.librato_annotation: user: user@example.com @@ -97,7 +105,7 @@ EXAMPLES = ''' description: This is a detailed description of maintenance start_time: 1395940006 end_time: 1395954406 -''' +""" from ansible.module_utils.basic import AnsibleModule from ansible.module_utils.urls import fetch_url @@ -150,12 +158,12 @@ def main(): argument_spec=dict( user=dict(required=True), api_key=dict(required=True, no_log=True), - name=dict(required=False), + name=dict(), title=dict(required=True), - source=dict(required=False), - description=dict(required=False), - start_time=dict(required=False, default=None, type='int'), - end_time=dict(required=False, default=None, type='int'), + source=dict(), + description=dict(), + start_time=dict(type='int'), + end_time=dict(type='int'), links=dict(type='list', elements='dict') ) ) diff --git a/plugins/modules/cloud/linode/linode.py b/plugins/modules/linode.py similarity index 82% rename from plugins/modules/cloud/linode/linode.py rename to plugins/modules/linode.py index 8c29e52a21..e4e27bf0d4 100644 --- a/plugins/modules/cloud/linode/linode.py +++ b/plugins/modules/linode.py @@ -1,48 +1,53 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- -# Copyright: Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# Copyright Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: linode short_description: Manage instances on the Linode Public Cloud description: - - Manage Linode Public Cloud instances and optionally wait for it to be 'running'. 
+ - Manage Linode Public Cloud instances and optionally wait for it to be 'running'. +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: none + diff_mode: + support: none options: state: description: - - Indicate desired state of the resource - choices: [ absent, active, deleted, present, restarted, started, stopped ] + - Indicate desired state of the resource. + choices: [absent, active, deleted, present, restarted, started, stopped] default: present type: str api_key: description: - - Linode API key. - - C(LINODE_API_KEY) env variable can be used instead. + - Linode API key. + - E(LINODE_API_KEY) environment variable can be used instead. type: str - required: yes + required: true name: description: - - Name to give the instance (alphanumeric, dashes, underscore). - - To keep sanity on the Linode Web Console, name is prepended with C(LinodeID-). + - Name to give the instance (alphanumeric, dashes, underscore). + - To keep sanity on the Linode Web Console, name is prepended with C(LinodeID-). required: true type: str displaygroup: description: - - Add the instance to a Display Group in Linode Manager. + - Add the instance to a Display Group in Linode Manager. type: str + default: '' linode_id: description: - - Unique ID of a linode server. This value is read-only in the sense that - if you specify it on creation of a Linode it will not be used. The - Linode API generates these IDs and we can those generated value here to - reference a Linode more specifically. This is useful for idempotence. - aliases: [ lid ] + - Unique ID of a Linode server. This value is read-only in the sense that if you specify it on creation of a Linode + it is not used. The Linode API generates these IDs and we can those generated value here to reference a Linode more + specifically. This is useful for idempotency. 
+ aliases: [lid] type: int additional_disks: description: @@ -52,120 +57,118 @@ options: elements: dict alert_bwin_enabled: description: - - Set status of bandwidth in alerts. + - Set status of bandwidth in alerts. type: bool alert_bwin_threshold: description: - - Set threshold in MB of bandwidth in alerts. + - Set threshold in MB of bandwidth in alerts. type: int alert_bwout_enabled: description: - - Set status of bandwidth out alerts. + - Set status of bandwidth out alerts. type: bool alert_bwout_threshold: description: - - Set threshold in MB of bandwidth out alerts. + - Set threshold in MB of bandwidth out alerts. type: int alert_bwquota_enabled: description: - - Set status of bandwidth quota alerts as percentage of network transfer quota. + - Set status of bandwidth quota alerts as percentage of network transfer quota. type: bool alert_bwquota_threshold: description: - - Set threshold in MB of bandwidth quota alerts. + - Set threshold in MB of bandwidth quota alerts. type: int alert_cpu_enabled: description: - - Set status of receiving CPU usage alerts. + - Set status of receiving CPU usage alerts. type: bool alert_cpu_threshold: description: - - Set percentage threshold for receiving CPU usage alerts. Each CPU core adds 100% to total. + - Set percentage threshold for receiving CPU usage alerts. Each CPU core adds 100% to total. type: int alert_diskio_enabled: description: - - Set status of receiving disk IO alerts. + - Set status of receiving disk IO alerts. type: bool alert_diskio_threshold: description: - - Set threshold for average IO ops/sec over 2 hour period. + - Set threshold for average IO ops/sec over 2 hour period. type: int backupweeklyday: description: - - Day of the week to take backups. + - Day of the week to take backups. type: int backupwindow: description: - - The time window in which backups will be taken. + - The time window in which backups are taken. 
type: int plan: description: - - plan to use for the instance (Linode plan) + - Plan to use for the instance (Linode plan). type: int payment_term: description: - - payment term to use for the instance (payment term in months) + - Payment term to use for the instance (payment term in months). default: 1 - choices: [ 1, 12, 24 ] + choices: [1, 12, 24] type: int password: description: - - root password to apply to a new server (auto generated if missing) + - Root password to apply to a new server (auto generated if missing). type: str private_ip: description: - - Add private IPv4 address when Linode is created. - - Default is C(false). + - Add private IPv4 address when Linode is created. + - Default is V(false). type: bool ssh_pub_key: description: - - SSH public key applied to root user + - SSH public key applied to root user. type: str swap: description: - - swap size in MB + - Swap size in MB. default: 512 type: int distribution: description: - - distribution to use for the instance (Linode Distribution) + - Distribution to use for the instance (Linode Distribution). type: int datacenter: description: - - datacenter to create an instance in (Linode Datacenter) + - Datacenter to create an instance in (Linode Datacenter). type: int kernel_id: description: - - kernel to use for the instance (Linode Kernel) + - Kernel to use for the instance (Linode Kernel). type: int wait: description: - - wait for the instance to be in state C(running) before returning + - Wait for the instance to be in state V(running) before returning. type: bool default: true wait_timeout: description: - - how long before wait gives up, in seconds + - How long before wait gives up, in seconds. default: 300 type: int watchdog: description: - - Set status of Lassie watchdog. + - Set status of Lassie watchdog. 
type: bool - default: "True" + default: true requirements: - - python >= 2.6 - - linode-python + - linode-python author: -- Vincent Viallet (@zbal) + - Vincent Viallet (@zbal) notes: - Please note, linode-python does not have python 3 support. - This module uses the now deprecated v3 of the Linode API. - Please review U(https://www.linode.com/api/linode) for determining the required parameters. -''' - -EXAMPLES = ''' +""" +EXAMPLES = r""" - name: Create a new Linode community.general.linode: name: linode-test1 @@ -177,97 +180,97 @@ EXAMPLES = ''' - name: Create a server with a private IP Address community.general.linode: - module: linode - api_key: 'longStringFromLinodeApi' - name: linode-test1 - plan: 1 - datacenter: 2 - distribution: 99 - password: 'superSecureRootPassword' - private_ip: yes - ssh_pub_key: 'ssh-rsa qwerty' - swap: 768 - wait: yes - wait_timeout: 600 - state: present + module: linode + api_key: 'longStringFromLinodeApi' + name: linode-test1 + plan: 1 + datacenter: 2 + distribution: 99 + password: 'superSecureRootPassword' + private_ip: true + ssh_pub_key: 'ssh-rsa qwerty' + swap: 768 + wait: true + wait_timeout: 600 + state: present delegate_to: localhost register: linode_creation - name: Fully configure new server community.general.linode: - api_key: 'longStringFromLinodeApi' - name: linode-test1 - plan: 4 - datacenter: 2 - distribution: 99 - kernel_id: 138 - password: 'superSecureRootPassword' - private_ip: yes - ssh_pub_key: 'ssh-rsa qwerty' - swap: 768 - wait: yes - wait_timeout: 600 - state: present - alert_bwquota_enabled: True - alert_bwquota_threshold: 80 - alert_bwin_enabled: True - alert_bwin_threshold: 10 - alert_cpu_enabled: True - alert_cpu_threshold: 210 - alert_bwout_enabled: True - alert_bwout_threshold: 10 - alert_diskio_enabled: True - alert_diskio_threshold: 10000 - backupweeklyday: 1 - backupwindow: 2 - displaygroup: 'test' - additional_disks: + api_key: 'longStringFromLinodeApi' + name: linode-test1 + plan: 4 + datacenter: 2 + 
distribution: 99 + kernel_id: 138 + password: 'superSecureRootPassword' + private_ip: true + ssh_pub_key: 'ssh-rsa qwerty' + swap: 768 + wait: true + wait_timeout: 600 + state: present + alert_bwquota_enabled: true + alert_bwquota_threshold: 80 + alert_bwin_enabled: true + alert_bwin_threshold: 10 + alert_cpu_enabled: true + alert_cpu_threshold: 210 + alert_bwout_enabled: true + alert_bwout_threshold: 10 + alert_diskio_enabled: true + alert_diskio_threshold: 10000 + backupweeklyday: 1 + backupwindow: 2 + displaygroup: 'test' + additional_disks: - {Label: 'disk1', Size: 2500, Type: 'raw'} - {Label: 'newdisk', Size: 2000} - watchdog: True + watchdog: true delegate_to: localhost register: linode_creation - name: Ensure a running server (create if missing) community.general.linode: - api_key: 'longStringFromLinodeApi' - name: linode-test1 - plan: 1 - datacenter: 2 - distribution: 99 - password: 'superSecureRootPassword' - ssh_pub_key: 'ssh-rsa qwerty' - swap: 768 - wait: yes - wait_timeout: 600 - state: present + api_key: 'longStringFromLinodeApi' + name: linode-test1 + plan: 1 + datacenter: 2 + distribution: 99 + password: 'superSecureRootPassword' + ssh_pub_key: 'ssh-rsa qwerty' + swap: 768 + wait: true + wait_timeout: 600 + state: present delegate_to: localhost register: linode_creation - name: Delete a server community.general.linode: - api_key: 'longStringFromLinodeApi' - name: linode-test1 - linode_id: "{{ linode_creation.instance.id }}" - state: absent + api_key: 'longStringFromLinodeApi' + name: linode-test1 + linode_id: "{{ linode_creation.instance.id }}" + state: absent delegate_to: localhost - name: Stop a server community.general.linode: - api_key: 'longStringFromLinodeApi' - name: linode-test1 - linode_id: "{{ linode_creation.instance.id }}" - state: stopped + api_key: 'longStringFromLinodeApi' + name: linode-test1 + linode_id: "{{ linode_creation.instance.id }}" + state: stopped delegate_to: localhost - name: Reboot a server community.general.linode: - 
api_key: 'longStringFromLinodeApi' - name: linode-test1 - linode_id: "{{ linode_creation.instance.id }}" - state: restarted + api_key: 'longStringFromLinodeApi' + name: linode-test1 + linode_id: "{{ linode_creation.instance.id }}" + state: restarted delegate_to: localhost -''' +""" import time import traceback @@ -662,7 +665,7 @@ def main(): backupwindow=backupwindow, ) - kwargs = dict((k, v) for k, v in check_items.items() if v is not None) + kwargs = {k: v for k, v in check_items.items() if v is not None} # setup the auth try: diff --git a/plugins/modules/cloud/linode/linode_v4.py b/plugins/modules/linode_v4.py similarity index 70% rename from plugins/modules/cloud/linode/linode_v4.py rename to plugins/modules/linode_v4.py index fcf3725bfc..6f0cac84d6 100644 --- a/plugins/modules/cloud/linode/linode_v4.py +++ b/plugins/modules/linode_v4.py @@ -1,76 +1,70 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- # Copyright (c) 2017 Ansible Project -# GNU General Public License v3.0+ -# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: linode_v4 -short_description: Manage instances on the Linode cloud. +short_description: Manage instances on the Linode cloud description: Manage instances on the Linode cloud. requirements: - - python >= 2.7 - linode_api4 >= 2.0.0 author: - Luke Murphy (@decentral1se) notes: - - No Linode resizing is currently implemented. This module will, in time, - replace the current Linode module which uses deprecated API bindings on the - Linode side. + - No Linode resizing is currently implemented. This module aims to replace the current Linode module which uses deprecated + API bindings on the Linode side. 
+extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: none + diff_mode: + support: none options: region: description: - - The region of the instance. This is a required parameter only when - creating Linode instances. See - U(https://www.linode.com/docs/api/regions/). + - The region of the instance. This is a required parameter only when creating Linode instances. See U(https://www.linode.com/docs/api/regions/). type: str image: description: - - The image of the instance. This is a required parameter only when - creating Linode instances. See - U(https://www.linode.com/docs/api/images/). + - The image of the instance. This is a required parameter only when creating Linode instances. + - See U(https://www.linode.com/docs/api/images/). type: str type: description: - - The type of the instance. This is a required parameter only when - creating Linode instances. See - U(https://www.linode.com/docs/api/linode-types/). + - The type of the instance. This is a required parameter only when creating Linode instances. + - See U(https://www.linode.com/docs/api/linode-types/). type: str label: description: - - The instance label. This label is used as the main determiner for - idempotence for the module and is therefore mandatory. + - The instance label. This label is used as the main determiner for idempotency for the module and is therefore mandatory. type: str required: true group: description: - - The group that the instance should be marked under. Please note, that - group labelling is deprecated but still supported. The encouraged - method for marking instances is to use tags. + - The group that the instance should be marked under. Please note, that group labelling is deprecated but still supported. + The encouraged method for marking instances is to use tags. type: str private_ip: description: - - If C(true), the created Linode will have private networking enabled and - assigned a private IPv4 address. 
+ - If V(true), the created Linode instance has private networking enabled and assigned a private IPv4 address. type: bool default: false version_added: 3.0.0 tags: description: - - The tags that the instance should be marked under. See - U(https://www.linode.com/docs/api/tags/). + - The tags that the instance should be marked under. + - See U(https://www.linode.com/docs/api/tags/). type: list elements: str root_pass: description: - - The password for the root user. If not specified, one will be - generated. This generated password will be available in the task - success JSON. + - The password for the root user. If not specified, it generates a new one. This generated password is available in + the task success JSON. type: str authorized_keys: description: @@ -82,33 +76,31 @@ options: - The desired instance state. type: str choices: - - present - - absent + - present + - absent required: true access_token: description: - - The Linode API v4 access token. It may also be specified by exposing - the C(LINODE_ACCESS_TOKEN) environment variable. See - U(https://www.linode.com/docs/api#access-and-authentication). + - The Linode API v4 access token. It may also be specified by exposing the E(LINODE_ACCESS_TOKEN) environment variable. + - See U(https://www.linode.com/docs/api#access-and-authentication). required: true type: str stackscript_id: description: - The numeric ID of the StackScript to use when creating the instance. - See U(https://www.linode.com/docs/api/stackscripts/). + - See U(https://www.linode.com/docs/api/stackscripts/). type: int version_added: 1.3.0 stackscript_data: description: - - An object containing arguments to any User Defined Fields present in - the StackScript used when creating the instance. - Only valid when a stackscript_id is provided. - See U(https://www.linode.com/docs/api/stackscripts/). + - An object containing arguments to any User Defined Fields present in the StackScript used when creating the instance. 
+ Only valid when a O(stackscript_id) is provided. + - See U(https://www.linode.com/docs/api/stackscripts/). type: dict version_added: 1.3.0 -''' +""" -EXAMPLES = """ +EXAMPLES = r""" - name: Create a new Linode. community.general.linode_v4: label: new-linode @@ -129,50 +121,51 @@ EXAMPLES = """ state: absent """ -RETURN = """ +RETURN = r""" instance: description: The instance description in JSON serialized form. returned: Always. type: dict - sample: { - "root_pass": "foobar", # if auto-generated - "alerts": { - "cpu": 90, - "io": 10000, - "network_in": 10, - "network_out": 10, - "transfer_quota": 80 - }, - "backups": { - "enabled": false, - "schedule": { - "day": null, - "window": null - } - }, - "created": "2018-09-26T08:12:33", - "group": "Foobar Group", - "hypervisor": "kvm", - "id": 10480444, - "image": "linode/centos7", - "ipv4": [ - "130.132.285.233" - ], - "ipv6": "2a82:7e00::h03c:46ff:fe04:5cd2/64", - "label": "lin-foo", - "region": "eu-west", - "specs": { - "disk": 25600, - "memory": 1024, - "transfer": 1000, - "vcpus": 1 - }, - "status": "running", - "tags": [], - "type": "g6-nanode-1", - "updated": "2018-09-26T10:10:14", - "watchdog_enabled": true - } + sample: + { + "root_pass": "foobar", # if auto-generated + "alerts": { + "cpu": 90, + "io": 10000, + "network_in": 10, + "network_out": 10, + "transfer_quota": 80 + }, + "backups": { + "enabled": false, + "schedule": { + "day": null, + "window": null + } + }, + "created": "2018-09-26T08:12:33", + "group": "Foobar Group", + "hypervisor": "kvm", + "id": 10480444, + "image": "linode/centos7", + "ipv4": [ + "130.132.285.233" + ], + "ipv6": "2a82:7e00::h03c:46ff:fe04:5cd2/64", + "label": "lin-foo", + "region": "eu-west", + "specs": { + "disk": 25600, + "memory": 1024, + "transfer": 1000, + "vcpus": 1 + }, + "status": "running", + "tags": [], + "type": "g6-nanode-1", + "updated": "2018-09-26T10:10:14", + "watchdog_enabled": true + } """ import traceback diff --git 
a/plugins/modules/system/listen_ports_facts.py b/plugins/modules/listen_ports_facts.py similarity index 54% rename from plugins/modules/system/listen_ports_facts.py rename to plugins/modules/listen_ports_facts.py index 40adeb9e16..11b364ad4b 100644 --- a/plugins/modules/system/listen_ports_facts.py +++ b/plugins/modules/listen_ports_facts.py @@ -1,40 +1,49 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- # -# Copyright: (c) 2017, Nathan Davison -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# Copyright (c) 2017, Nathan Davison +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = r''' ---- +DOCUMENTATION = r""" module: listen_ports_facts author: - - Nathan Davison (@ndavison) + - Nathan Davison (@ndavison) description: - - Gather facts on processes listening on TCP and UDP ports using the C(netstat) or C(ss) commands. - - This module currently supports Linux only. + - Gather facts on processes listening on TCP and UDP ports using the C(netstat) or C(ss) commands. + - This module currently supports Linux only. requirements: - netstat or ss -short_description: Gather facts on processes listening on TCP and UDP ports. +short_description: Gather facts on processes listening on TCP and UDP ports notes: - - | - C(ss) returns all processes for each listen address and port. - This plugin will return each of them, so multiple entries for the same listen address and port are likely in results. + - C(ss) returns all processes for each listen address and port. + - This plugin returns each of them, so multiple entries for the same listen address and port are likely in results. 
+extends_documentation_fragment: + - community.general.attributes + - community.general.attributes.facts + - community.general.attributes.facts_module options: command: description: - Override which command to use for fetching listen ports. - - 'By default module will use first found supported command on the system (in alphanumerical order).' + - By default module uses first found supported command on the system (in alphanumerical order). type: str choices: - netstat - ss version_added: 4.1.0 -''' + include_non_listening: + description: + - Show both listening and non-listening sockets (for TCP this means established connections). + - Adds the return values RV(ansible_facts.tcp_listen[].state), RV(ansible_facts.udp_listen[].state), RV(ansible_facts.tcp_listen[].foreign_address), + and RV(ansible_facts.udp_listen[].foreign_address) to the returned facts. + type: bool + default: false + version_added: 5.4.0 +""" -EXAMPLES = r''' +EXAMPLES = r""" - name: Gather facts on listening ports community.general.listen_ports_facts: @@ -59,11 +68,16 @@ EXAMPLES = r''' - name: List all ports ansible.builtin.debug: msg: "{{ (ansible_facts.tcp_listen + ansible_facts.udp_listen) | map(attribute='port') | unique | sort | list }}" -''' -RETURN = r''' +- name: Gather facts on all ports and override which command to use + community.general.listen_ports_facts: + command: 'netstat' + include_non_listening: true +""" + +RETURN = r""" ansible_facts: - description: Dictionary containing details of TCP and UDP ports with listening servers + description: Dictionary containing details of TCP and UDP ports with listening servers. returned: always type: complex contains: @@ -77,6 +91,18 @@ ansible_facts: returned: always type: str sample: "0.0.0.0" + foreign_address: + description: The address of the remote end of the socket. + returned: if O(include_non_listening=true) + type: str + sample: "10.80.0.1" + version_added: 5.4.0 + state: + description: The state of the socket. 
+ returned: if O(include_non_listening=true) + type: str + sample: "ESTABLISHED" + version_added: 5.4.0 name: description: The name of the listening process. returned: if user permissions allow @@ -117,6 +143,18 @@ ansible_facts: returned: always type: str sample: "0.0.0.0" + foreign_address: + description: The address of the remote end of the socket. + returned: if O(include_non_listening=true) + type: str + sample: "10.80.0.1" + version_added: 5.4.0 + state: + description: The state of the socket. UDP is a connectionless protocol. Shows UCONN or ESTAB. + returned: if O(include_non_listening=true) + type: str + sample: "UCONN" + version_added: 5.4.0 name: description: The name of the listening process. returned: if user permissions allow @@ -147,7 +185,7 @@ ansible_facts: returned: always type: str sample: "root" -''' +""" import re import platform @@ -155,47 +193,82 @@ from ansible.module_utils.common.text.converters import to_native from ansible.module_utils.basic import AnsibleModule +def split_pid_name(pid_name): + """ + Split the entry PID/Program name into the PID (int) and the name (str) + :param pid_name: PID/Program String separated with a dash. E.g 51/sshd: returns pid = 51 and name = sshd + :return: PID (int) and the program name (str) + """ + try: + pid, name = pid_name.split("/", 1) + except ValueError: + # likely unprivileged user, so add empty name & pid + return 0, "" + else: + name = name.rstrip(":") + return int(pid), name + + def netStatParse(raw): + """ + The netstat result can be either split in 6,7 or 8 elements depending on the values of state, process and name. + For UDP the state is always empty. For UDP and TCP the process can be empty. + So these cases have to be checked. + :param raw: Netstat raw output String. First line explains the format, each following line contains a connection. + :return: List of dicts, each dict contains protocol, state, local address, foreign address, port, name, pid for one + connection. 
+ """ results = list() for line in raw.splitlines(): - listening_search = re.search('[^ ]+:[0-9]+', line) - if listening_search: - splitted = line.split() - conns = re.search('([^ ]+):([0-9]+)', splitted[3]) - pidstr = '' - if 'tcp' in splitted[0]: - protocol = 'tcp' - pidstr = splitted[6] - elif 'udp' in splitted[0]: - protocol = 'udp' - pidstr = splitted[5] - pids = re.search(r'(([0-9]+)/(.*)|-)', pidstr) - if conns and pids: - address = conns.group(1) - port = conns.group(2) - if (pids.group(2)): - pid = pids.group(2) - else: - pid = 0 - if (pids.group(3)): - name = pids.group(3) - else: - name = '' - result = { - 'pid': int(pid), - 'address': address, - 'port': int(port), - 'protocol': protocol, - 'name': name, - } - if result not in results: - results.append(result) - else: - raise EnvironmentError('Could not get process information for the listening ports.') + if line.startswith(("tcp", "udp")): + # set variables to default state, in case they are not specified + state = "" + pid_and_name = "" + process = "" + formatted_line = line.split() + protocol, recv_q, send_q, address, foreign_address, rest = \ + formatted_line[0], formatted_line[1], formatted_line[2], formatted_line[3], formatted_line[4], formatted_line[5:] + address, port = address.rsplit(":", 1) + + if protocol.startswith("tcp"): + # nestat distinguishes between tcp6 and tcp + protocol = "tcp" + if len(rest) == 3: + state, pid_and_name, process = rest + if len(rest) == 2: + state, pid_and_name = rest + + if protocol.startswith("udp"): + # safety measure, similar to tcp6 + protocol = "udp" + if len(rest) == 2: + pid_and_name, process = rest + if len(rest) == 1: + pid_and_name = rest[0] + + pid, name = split_pid_name(pid_name=pid_and_name) + result = { + 'protocol': protocol, + 'state': state, + 'address': address, + 'foreign_address': foreign_address, + 'port': int(port), + 'name': name, + 'pid': int(pid), + } + if result not in results: + results.append(result) return results def ss_parse(raw): + 
""" + The ss_parse result can be either split in 6 or 7 elements depending on the process column, + e.g. due to unprivileged user. + :param raw: ss raw output String. First line explains the format, each following line contains a connection. + :return: List of dicts, each dict contains protocol, state, local address, foreign address, port, name, pid for one + connection. + """ results = list() regex_conns = re.compile(pattern=r'\[?(.+?)\]?:([0-9]+)$') regex_pid = re.compile(pattern=r'"(.*?)",pid=(\d+)') @@ -221,8 +294,8 @@ def ss_parse(raw): except ValueError: # unexpected stdout from ss raise EnvironmentError( - 'Expected `ss` table layout "Netid, State, Recv-Q, Send-Q, Local Address:Port, Peer Address:Port" and optionally "Process", \ - but got something else: {0}'.format(line) + 'Expected `ss` table layout "Netid, State, Recv-Q, Send-Q, Local Address:Port, Peer Address:Port" and \ + optionally "Process", but got something else: {0}'.format(line) ) conns = regex_conns.search(local_addr_port) @@ -239,46 +312,44 @@ def ss_parse(raw): port = conns.group(2) for name, pid in pids: result = { - 'pid': int(pid), - 'address': address, - 'port': int(port), 'protocol': protocol, - 'name': name + 'state': state, + 'address': address, + 'foreign_address': peer_addr_port, + 'port': int(port), + 'name': name, + 'pid': int(pid), } results.append(result) return results def main(): + command_args = ['-p', '-l', '-u', '-n', '-t'] commands_map = { 'netstat': { - 'args': [ - '-p', - '-l', - '-u', - '-n', - '-t', - ], + 'args': [], 'parse_func': netStatParse }, 'ss': { - 'args': [ - '-p', - '-l', - '-u', - '-n', - '-t', - ], + 'args': [], 'parse_func': ss_parse }, } module = AnsibleModule( argument_spec=dict( - command=dict(type='str', choices=list(sorted(commands_map))) + command=dict(type='str', choices=list(sorted(commands_map))), + include_non_listening=dict(default=False, type='bool'), ), supports_check_mode=True, ) + if module.params['include_non_listening']: + command_args = 
['-p', '-u', '-n', '-t', '-a'] + + commands_map['netstat']['args'] = command_args + commands_map['ss']['args'] = command_args + if platform.system() != 'Linux': module.fail_json(msg='This module requires Linux.') @@ -324,7 +395,7 @@ def main(): break if bin_path is None: - raise EnvironmentError(msg='Unable to find any of the supported commands in PATH: {0}'.format(", ".join(sorted(commands_map)))) + raise EnvironmentError('Unable to find any of the supported commands in PATH: {0}'.format(", ".join(sorted(commands_map)))) # which ports are listening for connections? args = commands_map[command]['args'] @@ -333,13 +404,17 @@ def main(): parse_func = commands_map[command]['parse_func'] results = parse_func(stdout) - for p in results: - p['stime'] = getPidSTime(p['pid']) - p['user'] = getPidUser(p['pid']) - if p['protocol'].startswith('tcp'): - result['ansible_facts']['tcp_listen'].append(p) - elif p['protocol'].startswith('udp'): - result['ansible_facts']['udp_listen'].append(p) + for connection in results: + # only display state and foreign_address for include_non_listening. 
+ if not module.params['include_non_listening']: + connection.pop('state', None) + connection.pop('foreign_address', None) + connection['stime'] = getPidSTime(connection['pid']) + connection['user'] = getPidUser(connection['pid']) + if connection['protocol'].startswith('tcp'): + result['ansible_facts']['tcp_listen'].append(connection) + elif connection['protocol'].startswith('udp'): + result['ansible_facts']['udp_listen'].append(connection) except (KeyError, EnvironmentError) as e: module.fail_json(msg=to_native(e)) diff --git a/plugins/modules/lldp.py b/plugins/modules/lldp.py new file mode 100644 index 0000000000..a142d9a2ab --- /dev/null +++ b/plugins/modules/lldp.py @@ -0,0 +1,112 @@ +#!/usr/bin/python +# +# Copyright Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + + +DOCUMENTATION = r""" +module: lldp +requirements: [lldpctl] +short_description: Get details reported by LLDP +description: + - Reads data out of C(lldpctl). +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: none + diff_mode: + support: none +options: + multivalues: + description: If lldpctl outputs an attribute multiple time represent all values as a list. + required: false + type: bool + default: false +author: "Andy Hill (@andyhky)" +notes: + - Requires C(lldpd) running and LLDP enabled on switches. 
+""" + +EXAMPLES = r""" +# Retrieve switch/port information +- name: Gather information from LLDP + community.general.lldp: + +- name: Print each switch/port + ansible.builtin.debug: + msg: "{{ lldp[item]['chassis']['name'] }} / {{ lldp[item]['port']['ifname'] }}" + with_items: "{{ lldp.keys() }}" + +# TASK: [Print each switch/port] *********************************************************** +# ok: [10.13.0.22] => (item=eth2) => {"item": "eth2", "msg": "switch1.example.com / Gi0/24"} +# ok: [10.13.0.22] => (item=eth1) => {"item": "eth1", "msg": "switch2.example.com / Gi0/3"} +# ok: [10.13.0.22] => (item=eth0) => {"item": "eth0", "msg": "switch3.example.com / Gi0/3"} +""" + +from ansible.module_utils.basic import AnsibleModule + + +def gather_lldp(module): + cmd = [module.get_bin_path('lldpctl'), '-f', 'keyvalue'] + rc, output, err = module.run_command(cmd) + if output: + output_dict = {} + current_dict = {} + lldp_entries = output.strip().split("\n") + + final = "" + for entry in lldp_entries: + if entry.startswith('lldp'): + path, value = entry.strip().split("=", 1) + path = path.split(".") + path_components, final = path[:-1], path[-1] + elif final in current_dict and isinstance(current_dict[final], str): + current_dict[final] += '\n' + entry + continue + elif final in current_dict and isinstance(current_dict[final], list): + current_dict[final][-1] += '\n' + entry + continue + else: + continue + + current_dict = output_dict + for path_component in path_components: + current_dict[path_component] = current_dict.get(path_component, {}) + if not isinstance(current_dict[path_component], dict): + current_dict[path_component] = {'value': current_dict[path_component]} + current_dict = current_dict[path_component] + + if final in current_dict and isinstance(current_dict[final], dict) and module.params['multivalues']: + current_dict = current_dict[final] + final = 'value' + + if final not in current_dict or not module.params['multivalues']: + current_dict[final] = value + 
elif isinstance(current_dict[final], str): + current_dict[final] = [current_dict[final], value] + elif isinstance(current_dict[final], list): + current_dict[final].append(value) + + return output_dict + + +def main(): + module_args = dict( + multivalues=dict(type='bool', default=False) + ) + module = AnsibleModule(module_args) + + lldp_output = gather_lldp(module) + try: + data = {'lldp': lldp_output['lldp']} + module.exit_json(ansible_facts=data) + except TypeError: + module.fail_json(msg="lldpctl command failed. is lldpd running?") + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/locale_gen.py b/plugins/modules/locale_gen.py new file mode 100644 index 0000000000..6cfbe81ccc --- /dev/null +++ b/plugins/modules/locale_gen.py @@ -0,0 +1,283 @@ +#!/usr/bin/python + +# Copyright (c) Ansible project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + +DOCUMENTATION = r""" +module: locale_gen +short_description: Creates or removes locales +description: + - Manages locales in Debian and Ubuntu systems. +author: + - Augustus Kling (@AugustusKling) +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: none +options: + name: + type: list + elements: str + description: + - Name and encoding of the locales, such as V(en_GB.UTF-8). + - Before community.general 9.3.0, this was a string. Using a string still works. + required: true + state: + type: str + description: + - Whether the locales shall be present. + choices: [absent, present] + default: present +notes: + - If C(/etc/locale.gen) exists, the module assumes to be using the B(glibc) mechanism, else if C(/var/lib/locales/supported.d/) + exists it assumes to be using the B(ubuntu_legacy) mechanism, else it raises an error. 
+ - When using glibc mechanism, it manages locales by editing C(/etc/locale.gen) and running C(locale-gen). + - When using ubuntu_legacy mechanism, it manages locales by editing C(/var/lib/locales/supported.d/local) and then running + C(locale-gen). + - Please note that the code path that uses ubuntu_legacy mechanism has not been tested for a while, because Ubuntu is already + using the glibc mechanism. There is no support for that, given our inability to test it. Therefore, that mechanism is + B(deprecated) and will be removed in community.general 13.0.0. + - Currently the module is B(only supported for Debian and Ubuntu) systems. + - This module requires the package C(locales) installed in Debian and Ubuntu systems. +""" + +EXAMPLES = r""" +- name: Ensure a locale exists + community.general.locale_gen: + name: de_CH.UTF-8 + state: present + +- name: Ensure multiple locales exist + community.general.locale_gen: + name: + - en_GB.UTF-8 + - nl_NL.UTF-8 + state: present +""" + +RETURN = r""" +mechanism: + description: Mechanism used to deploy the locales. 
+ type: str + choices: + - glibc + - ubuntu_legacy + returned: success + sample: glibc + version_added: 10.2.0 +""" + +import os +import re + +from ansible_collections.community.general.plugins.module_utils.module_helper import StateModuleHelper +from ansible_collections.community.general.plugins.module_utils.mh.deco import check_mode_skip + +from ansible_collections.community.general.plugins.module_utils.locale_gen import locale_runner, locale_gen_runner + + +ETC_LOCALE_GEN = "/etc/locale.gen" +VAR_LIB_LOCALES = "/var/lib/locales/supported.d" +VAR_LIB_LOCALES_LOCAL = os.path.join(VAR_LIB_LOCALES, "local") +SUPPORTED_LOCALES = "/usr/share/i18n/SUPPORTED" +LOCALE_NORMALIZATION = { + ".utf8": ".UTF-8", + ".eucjp": ".EUC-JP", + ".iso885915": ".ISO-8859-15", + ".cp1251": ".CP1251", + ".koi8r": ".KOI8-R", + ".armscii8": ".ARMSCII-8", + ".euckr": ".EUC-KR", + ".gbk": ".GBK", + ".gb18030": ".GB18030", + ".euctw": ".EUC-TW", +} + + +class LocaleGen(StateModuleHelper): + output_params = ["name"] + module = dict( + argument_spec=dict( + name=dict(type="list", elements="str", required=True), + state=dict(type='str', default='present', choices=['absent', 'present']), + ), + supports_check_mode=True, + ) + + def __init_module__(self): + self.MECHANISMS = dict( + ubuntu_legacy=dict( + available=SUPPORTED_LOCALES, + apply_change=self.apply_change_ubuntu_legacy, + ), + glibc=dict( + available=SUPPORTED_LOCALES, + apply_change=self.apply_change_glibc, + ), + ) + + if os.path.exists(ETC_LOCALE_GEN): + self.vars.ubuntu_mode = False + self.vars.mechanism = "glibc" + elif os.path.exists(VAR_LIB_LOCALES): + self.vars.ubuntu_mode = True + self.vars.mechanism = "ubuntu_legacy" + self.module.deprecate( + "On this machine mechanism=ubuntu_legacy is used. This mechanism is deprecated and will be removed from" + " in community.general 13.0.0. 
If you see this message on a modern Debian or Ubuntu version," + " please create an issue in the community.general repository", + version="13.0.0", collection_name="community.general" + ) + else: + self.do_raise('{0} and {1} are missing. Is the package "locales" installed?'.format( + VAR_LIB_LOCALES, ETC_LOCALE_GEN + )) + + self.runner = locale_runner(self.module) + + self.assert_available() + self.vars.set("is_present", self.is_present(), output=False) + self.vars.set("state_tracking", self._state_name(self.vars.is_present), output=False, change=True) + + def __quit_module__(self): + self.vars.state_tracking = self._state_name(self.is_present()) + + @staticmethod + def _state_name(present): + return "present" if present else "absent" + + def assert_available(self): + """Check if the given locales are available on the system. This is done by + checking either : + * if the locale is present in /etc/locales.gen + * or if the locale is present in /usr/share/i18n/SUPPORTED""" + regexp = r'^\s*#?\s*(?P\S+[\._\S]+) (?P\S+)\s*$' + locales_available = self.MECHANISMS[self.vars.mechanism]["available"] + + re_compiled = re.compile(regexp) + with open(locales_available, 'r') as fd: + lines = fd.readlines() + res = [re_compiled.match(line) for line in lines] + self.vars.set("available_lines", lines, verbosity=4) + + locales_not_found = [] + for locale in self.vars.name: + # Check if the locale is not found in any of the matches + if not any(match and match.group("locale") == locale for match in res): + locales_not_found.append(locale) + + # locale may be installed but not listed in the file, for example C.UTF-8 in some systems + locales_not_found = self.locale_get_not_present(locales_not_found) + + if locales_not_found: + self.do_raise("The following locales you have entered are not available on your system: {0}".format(', '.join(locales_not_found))) + + def is_present(self): + return not self.locale_get_not_present(self.vars.name) + + def locale_get_not_present(self, 
locales): + runner = locale_runner(self.module) + with runner() as ctx: + rc, out, err = ctx.run() + if self.verbosity >= 4: + self.vars.locale_run_info = ctx.run_info + + not_found = [] + for locale in locales: + if not any(self.fix_case(locale) == self.fix_case(line) for line in out.splitlines()): + not_found.append(locale) + + return not_found + + def fix_case(self, name): + """locale -a might return the encoding in either lower or upper case. + Passing through this function makes them uniform for comparisons.""" + for s, r in LOCALE_NORMALIZATION.items(): + name = name.replace(s, r) + return name + + def set_locale_glibc(self, names, enabled=True): + """ Sets the state of the locale. Defaults to enabled. """ + with open(ETC_LOCALE_GEN, 'r') as fr: + lines = fr.readlines() + + locale_regexes = [] + + for name in names: + search_string = r'^#?\s*%s (?P.+)' % re.escape(name) + if enabled: + new_string = r'%s \g' % (name) + else: + new_string = r'# %s \g' % (name) + re_search = re.compile(search_string) + locale_regexes.append([re_search, new_string]) + + for i in range(len(lines)): + for [search, replace] in locale_regexes: + lines[i] = search.sub(replace, lines[i]) + + # Write the modified content back to the file + with open(ETC_LOCALE_GEN, 'w') as fw: + fw.writelines(lines) + + def apply_change_glibc(self, targetState, names): + """Create or remove locale. + + Keyword arguments: + targetState -- Desired state, either present or absent. + names -- Names list including encoding such as de_CH.UTF-8. + """ + + self.set_locale_glibc(names, enabled=(targetState == "present")) + + runner = locale_gen_runner(self.module) + with runner() as ctx: + ctx.run() + + def apply_change_ubuntu_legacy(self, targetState, names): + """Create or remove locale. + + Keyword arguments: + targetState -- Desired state, either present or absent. + names -- Name list including encoding such as de_CH.UTF-8. 
+ """ + runner = locale_gen_runner(self.module) + + if targetState == "present": + # Create locale. + # Ubuntu's patched locale-gen automatically adds the new locale to /var/lib/locales/supported.d/local + with runner() as ctx: + ctx.run() + else: + # Delete locale involves discarding the locale from /var/lib/locales/supported.d/local and regenerating all locales. + with open(VAR_LIB_LOCALES_LOCAL, "r") as fr: + content = fr.readlines() + with open(VAR_LIB_LOCALES_LOCAL, "w") as fw: + for line in content: + locale, charset = line.split(' ') + if locale not in names: + fw.write(line) + # Purge locales and regenerate. + # Please provide a patch if you know how to avoid regenerating the locales to keep! + with runner("purge") as ctx: + ctx.run() + + @check_mode_skip + def __state_fallback__(self): + if self.vars.state_tracking == self.vars.state: + return + self.MECHANISMS[self.vars.mechanism]["apply_change"](self.vars.state, self.vars.name) + + +def main(): + LocaleGen.execute() + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/monitoring/logentries.py b/plugins/modules/logentries.py similarity index 71% rename from plugins/modules/monitoring/logentries.py rename to plugins/modules/logentries.py index 075752862d..535ef57a2a 100644 --- a/plugins/modules/monitoring/logentries.py +++ b/plugins/modules/logentries.py @@ -1,49 +1,55 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- -# (c) 2013, Ivan Vanderbyl -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# Copyright (c) 2013, Ivan Vanderbyl +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: logentries author: "Ivan Vanderbyl (@ivanvanderbyl)" -short_description: Module for 
tracking logs via logentries.com +short_description: Module for tracking logs using U(logentries.com) description: - - Sends logs to LogEntries in realtime + - Sends logs to LogEntries in realtime. +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: none options: - path: - type: str - description: - - path to a log file - required: true - state: - type: str - description: - - following state of the log - choices: [ 'present', 'absent', 'followed', 'unfollowed' ] - required: false - default: present - name: - type: str - description: - - name of the log - required: false - logtype: - type: str - description: - - type of the log - required: false - aliases: [type] + path: + type: str + description: + - Path to a log file. + required: true + state: + type: str + description: + - Following state of the log. + choices: ['present', 'absent', 'followed', 'unfollowed'] + required: false + default: present + name: + type: str + description: + - Name of the log. + required: false + logtype: + type: str + description: + - Type of the log. + required: false + aliases: [type] notes: - - Requires the LogEntries agent which can be installed following the instructions at logentries.com -''' -EXAMPLES = ''' + - Requires the LogEntries agent which can be installed following the instructions at U(logentries.com). 
+""" + +EXAMPLES = r""" - name: Track nginx logs community.general.logentries: path: /var/log/nginx/access.log @@ -54,7 +60,7 @@ EXAMPLES = ''' community.general.logentries: path: /var/log/nginx/error.log state: absent -''' +""" from ansible.module_utils.basic import AnsibleModule @@ -131,8 +137,8 @@ def main(): argument_spec=dict( path=dict(required=True), state=dict(default="present", choices=["present", "followed", "absent", "unfollowed"]), - name=dict(required=False, default=None, type='str'), - logtype=dict(required=False, default=None, type='str', aliases=['type']) + name=dict(type='str'), + logtype=dict(type='str', aliases=['type']) ), supports_check_mode=True ) @@ -143,7 +149,7 @@ def main(): # Handle multiple log files logs = p["path"].split(",") - logs = filter(None, logs) + logs = [_f for _f in logs if _f] if p["state"] in ["present", "followed"]: follow_log(module, le_path, logs, name=p['name'], logtype=p['logtype']) diff --git a/plugins/modules/notification/logentries_msg.py b/plugins/modules/logentries_msg.py similarity index 72% rename from plugins/modules/notification/logentries_msg.py rename to plugins/modules/logentries_msg.py index 59e0f32565..bbbaf9720d 100644 --- a/plugins/modules/notification/logentries_msg.py +++ b/plugins/modules/logentries_msg.py @@ -1,21 +1,24 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- -# Copyright: (c) 2017, Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# Copyright (c) 2017, Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: logentries_msg -short_description: Send a message to logentries. 
+short_description: Send a message to logentries description: - - Send a message to logentries -requirements: - - "python >= 2.6" + - Send a message to logentries. +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: none options: token: type: str @@ -30,24 +33,24 @@ options: api: type: str description: - - API endpoint + - API endpoint. default: data.logentries.com port: type: int description: - - API endpoint port + - API endpoint port. default: 80 author: "Jimmy Tang (@jcftang) " -''' +""" -RETURN = '''# ''' +RETURN = """#""" -EXAMPLES = ''' +EXAMPLES = r""" - name: Send a message to logentries community.general.logentries_msg: - token=00000000-0000-0000-0000-000000000000 - msg="{{ ansible_hostname }}" -''' + token: 00000000-0000-0000-0000-000000000000 + msg: "{{ ansible_hostname }}" +""" import socket diff --git a/plugins/modules/monitoring/logstash_plugin.py b/plugins/modules/logstash_plugin.py similarity index 69% rename from plugins/modules/monitoring/logstash_plugin.py rename to plugins/modules/logstash_plugin.py index 13b1233c1f..e0d112d334 100644 --- a/plugins/modules/monitoring/logstash_plugin.py +++ b/plugins/modules/logstash_plugin.py @@ -1,52 +1,56 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- -# (c) 2017, Loic Blot -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# Copyright (c) 2017, Loic Blot +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: logstash_plugin short_description: Manage Logstash plugins description: - - Manages Logstash plugins. + - Manages Logstash plugins. 
author: Loic Blot (@nerzhul) +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: none options: - name: - type: str - description: - - Install plugin with that name. - required: True - state: - type: str - description: - - Apply plugin state. - choices: ["present", "absent"] - default: present - plugin_bin: - type: path - description: - - Specify logstash-plugin to use for plugin management. - default: /usr/share/logstash/bin/logstash-plugin - proxy_host: - type: str - description: - - Proxy host to use during plugin installation. - proxy_port: - type: str - description: - - Proxy port to use during plugin installation. - version: - type: str - description: - - Specify plugin Version of the plugin to install. - If plugin exists with previous version, it will NOT be updated. -''' + name: + type: str + description: + - Install plugin with that name. + required: true + state: + type: str + description: + - Apply plugin state. + choices: ["present", "absent"] + default: present + plugin_bin: + type: path + description: + - Specify logstash-plugin to use for plugin management. + default: /usr/share/logstash/bin/logstash-plugin + proxy_host: + type: str + description: + - Proxy host to use during plugin installation. + proxy_port: + type: str + description: + - Proxy port to use during plugin installation. + version: + type: str + description: + - Specify version of the plugin to install. If the plugin exists with a previous version, it is B(not) updated. 
+""" -EXAMPLES = ''' +EXAMPLES = r""" - name: Install Logstash beats input plugin community.general.logstash_plugin: state: present @@ -69,7 +73,7 @@ EXAMPLES = ''' name: logstash-input-beats environment: LS_JAVA_OPTS: "-Xms256m -Xmx256m" -''' +""" from ansible.module_utils.basic import AnsibleModule @@ -98,17 +102,17 @@ def install_plugin(module, plugin_bin, plugin_name, version, proxy_host, proxy_p cmd_args = [plugin_bin, PACKAGE_STATE_MAP["present"], plugin_name] if version: - cmd_args.append("--version %s" % version) + cmd_args.extend(["--version", version]) if proxy_host and proxy_port: - cmd_args.append("-DproxyHost=%s -DproxyPort=%s" % (proxy_host, proxy_port)) + cmd_args.extend(["-DproxyHost=%s" % proxy_host, "-DproxyPort=%s" % proxy_port]) cmd = " ".join(cmd_args) if module.check_mode: rc, out, err = 0, "check mode", "" else: - rc, out, err = module.run_command(cmd) + rc, out, err = module.run_command(cmd_args) if rc != 0: reason = parse_error(out) @@ -125,7 +129,7 @@ def remove_plugin(module, plugin_bin, plugin_name): if module.check_mode: rc, out, err = 0, "check mode", "" else: - rc, out, err = module.run_command(cmd) + rc, out, err = module.run_command(cmd_args) if rc != 0: reason = parse_error(out) diff --git a/plugins/modules/lvg.py b/plugins/modules/lvg.py new file mode 100644 index 0000000000..bc165ad5f8 --- /dev/null +++ b/plugins/modules/lvg.py @@ -0,0 +1,560 @@ +#!/usr/bin/python + +# Copyright (c) 2013, Alexander Bulimov +# Based on lvol module by Jeroen Hoekx +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + +DOCUMENTATION = r""" +author: + - Alexander Bulimov (@abulimov) +module: lvg +short_description: Configure LVM volume groups +description: + - This module creates, removes or resizes volume groups. 
+extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: none +options: + vg: + description: + - The name of the volume group. + type: str + required: true + pvs: + description: + - List of comma-separated devices to use as physical devices in this volume group. + - Required when creating or resizing volume group. + - The module runs C(pvcreate) if needed. + - O(remove_extra_pvs) controls whether or not unspecified physical devices are removed from the volume group. + type: list + elements: str + pesize: + description: + - The size of the physical extent. O(pesize) must be a power of 2 of at least 1 sector (where the sector size is the + largest sector size of the PVs currently used in the VG), or at least 128KiB. + - O(pesize) can be optionally suffixed by a UNIT (k/K/m/M/g/G), default unit is megabyte. + type: str + default: "4" + pv_options: + description: + - Additional options to pass to C(pvcreate) when creating the volume group. + type: str + default: '' + pvresize: + description: + - If V(true), resize the physical volume to the maximum available size. + type: bool + default: false + version_added: '0.2.0' + vg_options: + description: + - Additional options to pass to C(vgcreate) when creating the volume group. + type: str + default: '' + state: + description: + - Control if the volume group exists and its state. + - The states V(active) and V(inactive) implies V(present) state. Added in 7.1.0. + - If V(active) or V(inactive), the module manages the VG's logical volumes current state. The module also handles the + VG's autoactivation state if supported unless when creating a volume group and the autoactivation option specified + in O(vg_options). + type: str + choices: [absent, present, active, inactive] + default: present + force: + description: + - If V(true), allows to remove volume group with logical volumes. 
+ type: bool + default: false + reset_vg_uuid: + description: + - Whether the volume group's UUID is regenerated. + - This is B(not idempotent). Specifying this parameter always results in a change. + type: bool + default: false + version_added: 7.1.0 + reset_pv_uuid: + description: + - Whether the volume group's physical volumes' UUIDs are regenerated. + - This is B(not idempotent). Specifying this parameter always results in a change. + type: bool + default: false + version_added: 7.1.0 + remove_extra_pvs: + description: + - Remove physical volumes from the volume group which are not in O(pvs). + type: bool + default: true + version_added: 10.4.0 +seealso: + - module: community.general.filesystem + - module: community.general.lvol + - module: community.general.parted +notes: + - This module does not modify PE size for already present volume group. +""" + +EXAMPLES = r""" +- name: Create a volume group on top of /dev/sda1 with physical extent size = 32MB + community.general.lvg: + vg: vg.services + pvs: /dev/sda1 + pesize: 32 + +- name: Create a volume group on top of /dev/sdb with physical extent size = 128KiB + community.general.lvg: + vg: vg.services + pvs: /dev/sdb + pesize: 128K + +# If, for example, we already have VG vg.services on top of /dev/sdb1, +# this VG will be extended by /dev/sdc5. Or if vg.services was created on +# top of /dev/sda5, we first extend it with /dev/sdb1 and /dev/sdc5, +# and then reduce by /dev/sda5. +- name: Create or resize a volume group on top of /dev/sdb1 and /dev/sdc5. 
+ community.general.lvg: + vg: vg.services + pvs: + - /dev/sdb1 + - /dev/sdc5 + +- name: Remove a volume group with name vg.services + community.general.lvg: + vg: vg.services + state: absent + +- name: Create a volume group on top of /dev/sda3 and resize the volume group /dev/sda3 to the maximum possible + community.general.lvg: + vg: resizableVG + pvs: /dev/sda3 + pvresize: true + +- name: Deactivate a volume group + community.general.lvg: + state: inactive + vg: vg.services + +- name: Activate a volume group + community.general.lvg: + state: active + vg: vg.services + +- name: Add new PVs to volume group without removing existing ones + community.general.lvg: + vg: vg.services + pvs: /dev/sdb1,/dev/sdc1 + remove_extra_pvs: false + state: present + +- name: Reset a volume group UUID + community.general.lvg: + state: inactive + vg: vg.services + reset_vg_uuid: true + +- name: Reset both volume group and pv UUID + community.general.lvg: + state: inactive + vg: vg.services + pvs: + - /dev/sdb1 + - /dev/sdc5 + reset_vg_uuid: true + reset_pv_uuid: true +""" + +import itertools +import os + +from ansible.module_utils.basic import AnsibleModule + +VG_AUTOACTIVATION_OPT = '--setautoactivation' + + +def parse_vgs(data): + vgs = [] + for line in data.splitlines(): + parts = line.strip().split(';') + vgs.append({ + 'name': parts[0], + 'pv_count': int(parts[1]), + 'lv_count': int(parts[2]), + }) + return vgs + + +def find_mapper_device_name(module, dm_device): + dmsetup_cmd = module.get_bin_path('dmsetup', True) + mapper_prefix = '/dev/mapper/' + rc, dm_name, err = module.run_command([dmsetup_cmd, "info", "-C", "--noheadings", "-o", "name", dm_device]) + if rc != 0: + module.fail_json(msg="Failed executing dmsetup command.", rc=rc, err=err) + mapper_device = mapper_prefix + dm_name.rstrip() + return mapper_device + + +def parse_pvs(module, data): + pvs = [] + dm_prefix = '/dev/dm-' + for line in data.splitlines(): + parts = line.strip().split(';') + if 
parts[0].startswith(dm_prefix): + parts[0] = find_mapper_device_name(module, parts[0]) + pvs.append({ + 'name': parts[0], + 'vg_name': parts[1], + }) + return pvs + + +def find_vg(module, vg): + if not vg: + return None + vgs_cmd = module.get_bin_path('vgs', True) + dummy, current_vgs, dummy = module.run_command([vgs_cmd, "--noheadings", "-o", "vg_name,pv_count,lv_count", "--separator", ";"], check_rc=True) + + vgs = parse_vgs(current_vgs) + + for test_vg in vgs: + if test_vg['name'] == vg: + this_vg = test_vg + break + else: + this_vg = None + + return this_vg + + +def is_autoactivation_supported(module, vg_cmd): + autoactivation_supported = False + dummy, vgchange_opts, dummy = module.run_command([vg_cmd, '--help'], check_rc=True) + + if VG_AUTOACTIVATION_OPT in vgchange_opts: + autoactivation_supported = True + + return autoactivation_supported + + +def activate_vg(module, vg, active): + changed = False + vgchange_cmd = module.get_bin_path('vgchange', True) + vgs_cmd = module.get_bin_path('vgs', True) + vgs_fields = ['lv_attr'] + + autoactivation_enabled = False + autoactivation_supported = is_autoactivation_supported(module=module, vg_cmd=vgchange_cmd) + + if autoactivation_supported: + vgs_fields.append('autoactivation') + + vgs_cmd_with_opts = [vgs_cmd, '--noheadings', '-o', ','.join(vgs_fields), '--separator', ';', vg] + dummy, current_vg_lv_states, dummy = module.run_command(vgs_cmd_with_opts, check_rc=True) + + lv_active_count = 0 + lv_inactive_count = 0 + + for line in current_vg_lv_states.splitlines(): + parts = line.strip().split(';') + if parts[0][4] == 'a': + lv_active_count += 1 + else: + lv_inactive_count += 1 + if autoactivation_supported: + autoactivation_enabled = autoactivation_enabled or parts[1] == 'enabled' + + activate_flag = None + if active and lv_inactive_count > 0: + activate_flag = 'y' + elif not active and lv_active_count > 0: + activate_flag = 'n' + + # Extra logic necessary because vgchange returns error when autoactivation is 
already set + if autoactivation_supported: + if active and not autoactivation_enabled: + if module.check_mode: + changed = True + else: + module.run_command([vgchange_cmd, VG_AUTOACTIVATION_OPT, 'y', vg], check_rc=True) + changed = True + elif not active and autoactivation_enabled: + if module.check_mode: + changed = True + else: + module.run_command([vgchange_cmd, VG_AUTOACTIVATION_OPT, 'n', vg], check_rc=True) + changed = True + + if activate_flag is not None: + if module.check_mode: + changed = True + else: + module.run_command([vgchange_cmd, '--activate', activate_flag, vg], check_rc=True) + changed = True + + return changed + + +def append_vgcreate_options(module, state, vgoptions): + vgcreate_cmd = module.get_bin_path('vgcreate', True) + + autoactivation_supported = is_autoactivation_supported(module=module, vg_cmd=vgcreate_cmd) + + if autoactivation_supported and state in ['active', 'inactive']: + if VG_AUTOACTIVATION_OPT not in vgoptions: + if state == 'active': + vgoptions += [VG_AUTOACTIVATION_OPT, 'y'] + else: + vgoptions += [VG_AUTOACTIVATION_OPT, 'n'] + + +def get_pv_values_for_resize(module, device): + pvdisplay_cmd = module.get_bin_path('pvdisplay', True) + pvdisplay_ops = ["--units", "b", "--columns", "--noheadings", "--nosuffix", "--separator", ";", "-o", "dev_size,pv_size,pe_start,vg_extent_size"] + pvdisplay_cmd_device_options = [pvdisplay_cmd, device] + pvdisplay_ops + + dummy, pv_values, dummy = module.run_command(pvdisplay_cmd_device_options, check_rc=True) + + values = pv_values.strip().split(';') + + dev_size = int(values[0]) + pv_size = int(values[1]) + pe_start = int(values[2]) + vg_extent_size = int(values[3]) + + return (dev_size, pv_size, pe_start, vg_extent_size) + + +def resize_pv(module, device): + changed = False + pvresize_cmd = module.get_bin_path('pvresize', True) + + dev_size, pv_size, pe_start, vg_extent_size = get_pv_values_for_resize(module=module, device=device) + if (dev_size - (pe_start + pv_size)) > vg_extent_size: + if 
module.check_mode: + changed = True + else: + # If there is a missing pv on the machine, versions of pvresize rc indicates failure. + rc, out, err = module.run_command([pvresize_cmd, device]) + dummy, new_pv_size, dummy, dummy = get_pv_values_for_resize(module=module, device=device) + if pv_size == new_pv_size: + module.fail_json(msg="Failed executing pvresize command.", rc=rc, err=err, out=out) + else: + changed = True + + return changed + + +def reset_uuid_pv(module, device): + changed = False + pvs_cmd = module.get_bin_path('pvs', True) + pvs_cmd_with_opts = [pvs_cmd, '--noheadings', '-o', 'uuid', device] + pvchange_cmd = module.get_bin_path('pvchange', True) + pvchange_cmd_with_opts = [pvchange_cmd, '-u', device] + + dummy, orig_uuid, dummy = module.run_command(pvs_cmd_with_opts, check_rc=True) + + if module.check_mode: + changed = True + else: + # If there is a missing pv on the machine, pvchange rc indicates failure. + pvchange_rc, pvchange_out, pvchange_err = module.run_command(pvchange_cmd_with_opts) + dummy, new_uuid, dummy = module.run_command(pvs_cmd_with_opts, check_rc=True) + if orig_uuid.strip() == new_uuid.strip(): + module.fail_json(msg="PV (%s) UUID change failed" % (device), rc=pvchange_rc, err=pvchange_err, out=pvchange_out) + else: + changed = True + + return changed + + +def reset_uuid_vg(module, vg): + changed = False + vgchange_cmd = module.get_bin_path('vgchange', True) + vgchange_cmd_with_opts = [vgchange_cmd, '-u', vg] + if module.check_mode: + changed = True + else: + module.run_command(vgchange_cmd_with_opts, check_rc=True) + changed = True + + return changed + + +def main(): + module = AnsibleModule( + argument_spec=dict( + vg=dict(type='str', required=True), + pvs=dict(type='list', elements='str'), + pesize=dict(type='str', default='4'), + pv_options=dict(type='str', default=''), + pvresize=dict(type='bool', default=False), + vg_options=dict(type='str', default=''), + state=dict(type='str', default='present', choices=['absent', 
'present', 'active', 'inactive']), + force=dict(type='bool', default=False), + reset_vg_uuid=dict(type='bool', default=False), + reset_pv_uuid=dict(type='bool', default=False), + remove_extra_pvs=dict(type="bool", default=True), + ), + required_if=[ + ['reset_pv_uuid', True, ['pvs']], + ], + supports_check_mode=True, + ) + + vg = module.params['vg'] + state = module.params['state'] + force = module.boolean(module.params['force']) + pvresize = module.boolean(module.params['pvresize']) + pesize = module.params['pesize'] + pvoptions = module.params['pv_options'].split() + vgoptions = module.params['vg_options'].split() + reset_vg_uuid = module.boolean(module.params['reset_vg_uuid']) + reset_pv_uuid = module.boolean(module.params['reset_pv_uuid']) + remove_extra_pvs = module.boolean(module.params["remove_extra_pvs"]) + + this_vg = find_vg(module=module, vg=vg) + present_state = state in ['present', 'active', 'inactive'] + pvs_required = present_state and this_vg is None + changed = False + + dev_list = [] + if module.params['pvs']: + dev_list = list(module.params['pvs']) + elif pvs_required: + module.fail_json(msg="No physical volumes given.") + + # LVM always uses real paths not symlinks so replace symlinks with actual path + for idx, dev in enumerate(dev_list): + dev_list[idx] = os.path.realpath(dev) + + if present_state: + # check given devices + for test_dev in dev_list: + if not os.path.exists(test_dev): + module.fail_json(msg="Device %s not found." 
% test_dev) + + # get pv list + pvs_cmd = module.get_bin_path('pvs', True) + if dev_list: + pvs_filter_pv_name = ' || '.join( + 'pv_name = {0}'.format(x) + for x in itertools.chain(dev_list, module.params['pvs']) + ) + pvs_filter_vg_name = 'vg_name = {0}'.format(vg) + pvs_filter = ["--select", "{0} || {1}".format(pvs_filter_pv_name, pvs_filter_vg_name)] + else: + pvs_filter = [] + rc, current_pvs, err = module.run_command([pvs_cmd, "--noheadings", "-o", "pv_name,vg_name", "--separator", ";"] + pvs_filter) + if rc != 0: + module.fail_json(msg="Failed executing pvs command.", rc=rc, err=err) + + # check pv for devices + pvs = parse_pvs(module, current_pvs) + used_pvs = [pv for pv in pvs if pv['name'] in dev_list and pv['vg_name'] and pv['vg_name'] != vg] + if used_pvs: + module.fail_json(msg="Device %s is already in %s volume group." % (used_pvs[0]['name'], used_pvs[0]['vg_name'])) + + if this_vg is None: + if present_state: + append_vgcreate_options(module=module, state=state, vgoptions=vgoptions) + # create VG + if module.check_mode: + changed = True + else: + # create PV + pvcreate_cmd = module.get_bin_path('pvcreate', True) + for current_dev in dev_list: + rc, dummy, err = module.run_command([pvcreate_cmd] + pvoptions + ['-f', str(current_dev)]) + if rc == 0: + changed = True + else: + module.fail_json(msg="Creating physical volume '%s' failed" % current_dev, rc=rc, err=err) + vgcreate_cmd = module.get_bin_path('vgcreate') + rc, dummy, err = module.run_command([vgcreate_cmd] + vgoptions + ['-s', pesize, vg] + dev_list) + if rc == 0: + changed = True + else: + module.fail_json(msg="Creating volume group '%s' failed" % vg, rc=rc, err=err) + else: + if state == 'absent': + if module.check_mode: + module.exit_json(changed=True) + else: + if this_vg['lv_count'] == 0 or force: + # remove VG + vgremove_cmd = module.get_bin_path('vgremove', True) + rc, dummy, err = module.run_command([vgremove_cmd, "--force", vg]) + if rc == 0: + module.exit_json(changed=True) + else: + 
module.fail_json(msg="Failed to remove volume group %s" % (vg), rc=rc, err=err) + else: + module.fail_json(msg="Refuse to remove non-empty volume group %s without force=true" % (vg)) + # activate/deactivate existing VG + elif state == 'active': + changed = activate_vg(module=module, vg=vg, active=True) + elif state == 'inactive': + changed = activate_vg(module=module, vg=vg, active=False) + + # reset VG uuid + if reset_vg_uuid: + changed = reset_uuid_vg(module=module, vg=vg) or changed + + # resize VG + if dev_list: + current_devs = [os.path.realpath(pv['name']) for pv in pvs if pv['vg_name'] == vg] + devs_to_remove = list(set(current_devs) - set(dev_list)) + devs_to_add = list(set(dev_list) - set(current_devs)) + + if not remove_extra_pvs: + devs_to_remove = [] + + if current_devs: + if present_state: + for device in current_devs: + if pvresize: + changed = resize_pv(module=module, device=device) or changed + if reset_pv_uuid: + changed = reset_uuid_pv(module=module, device=device) or changed + + if devs_to_add or devs_to_remove: + if module.check_mode: + changed = True + else: + if devs_to_add: + # create PV + pvcreate_cmd = module.get_bin_path('pvcreate', True) + for current_dev in devs_to_add: + rc, dummy, err = module.run_command([pvcreate_cmd] + pvoptions + ['-f', str(current_dev)]) + if rc == 0: + changed = True + else: + module.fail_json(msg="Creating physical volume '%s' failed" % current_dev, rc=rc, err=err) + # add PV to our VG + vgextend_cmd = module.get_bin_path('vgextend', True) + rc, dummy, err = module.run_command([vgextend_cmd, vg] + devs_to_add) + if rc == 0: + changed = True + else: + module.fail_json(msg="Unable to extend %s by %s." 
% (vg, ' '.join(devs_to_add)), rc=rc, err=err) + + # remove some PV from our VG + if devs_to_remove: + vgreduce_cmd = module.get_bin_path('vgreduce', True) + rc, dummy, err = module.run_command([vgreduce_cmd, "--force", vg] + devs_to_remove) + if rc == 0: + changed = True + else: + module.fail_json(msg="Unable to reduce %s by %s." % (vg, ' '.join(devs_to_remove)), rc=rc, err=err) + + module.exit_json(changed=changed) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/lvg_rename.py b/plugins/modules/lvg_rename.py new file mode 100644 index 0000000000..5c1b497f2b --- /dev/null +++ b/plugins/modules/lvg_rename.py @@ -0,0 +1,167 @@ +#!/usr/bin/python + +# Copyright (c) Contributors to the Ansible project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + +DOCUMENTATION = r""" +author: + - Laszlo Szomor (@lszomor) +module: lvg_rename +short_description: Renames LVM volume groups +description: + - This module renames volume groups using the C(vgchange) command. +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: full +version_added: 7.1.0 +options: + vg: + description: + - The name or UUID of the source VG. + - See V(vgrename(8\)) for valid values. + type: str + required: true + vg_new: + description: + - The new name of the VG. + - See V(lvm(8\)) for valid names. + type: str + required: true +seealso: + - module: community.general.lvg +notes: + - This module does not modify VG renaming-related configurations like C(fstab) entries or boot parameters. 
+""" + +EXAMPLES = r""" +- name: Rename a VG by name + community.general.lvg_rename: + vg: vg_orig_name + vg_new: vg_new_name + +- name: Rename a VG by UUID + community.general.lvg_rename: + vg_uuid: SNgd0Q-rPYa-dPB8-U1g6-4WZI-qHID-N7y9Vj + vg_new: vg_new_name +""" + +from ansible.module_utils.basic import AnsibleModule + +argument_spec = dict( + vg=dict(type='str', required=True), + vg_new=dict(type='str', required=True), +) + + +class LvgRename(object): + def __init__(self, module): + ''' + Orchestrates the lvg_rename module logic. + + :param module: An AnsibleModule instance. + ''' + self.module = module + self.result = {'changed': False} + self.vg_list = [] + self._load_params() + + def run(self): + """Performs the module logic.""" + + self._load_vg_list() + + old_vg_exists = self._is_vg_exists(vg=self.vg) + new_vg_exists = self._is_vg_exists(vg=self.vg_new) + + if old_vg_exists: + if new_vg_exists: + self.module.fail_json(msg='The new VG name (%s) is already in use.' % (self.vg_new)) + else: + self._rename_vg() + else: + if new_vg_exists: + self.result['msg'] = 'The new VG (%s) already exists, nothing to do.' % (self.vg_new) + self.module.exit_json(**self.result) + else: + self.module.fail_json(msg='Both current (%s) and new (%s) VG are missing.' 
% (self.vg, self.vg_new)) + + self.module.exit_json(**self.result) + + def _load_params(self): + """Load the parameters from the module.""" + + self.vg = self.module.params['vg'] + self.vg_new = self.module.params['vg_new'] + + def _load_vg_list(self): + """Load the VGs from the system.""" + + vgs_cmd = self.module.get_bin_path('vgs', required=True) + vgs_cmd_with_opts = [vgs_cmd, '--noheadings', '--separator', ';', '-o', 'vg_name,vg_uuid'] + dummy, vg_raw_list, dummy = self.module.run_command(vgs_cmd_with_opts, check_rc=True) + + for vg_info in vg_raw_list.splitlines(): + vg_name, vg_uuid = vg_info.strip().split(';') + self.vg_list.append(vg_name) + self.vg_list.append(vg_uuid) + + def _is_vg_exists(self, vg): + ''' + Checks VG existence by name or UUID. It removes the '/dev/' prefix before checking. + + :param vg: A string with the name or UUID of the VG. + :returns: A boolean indicates whether the VG exists or not. + ''' + + vg_found = False + dev_prefix = '/dev/' + + if vg.startswith(dev_prefix): + vg_id = vg[len(dev_prefix):] + else: + vg_id = vg + + vg_found = vg_id in self.vg_list + + return vg_found + + def _rename_vg(self): + """Renames the volume group.""" + + vgrename_cmd = self.module.get_bin_path('vgrename', required=True) + + if self.module._diff: + self.result['diff'] = {'before': {'vg': self.vg}, 'after': {'vg': self.vg_new}} + + if self.module.check_mode: + self.result['msg'] = "Running in check mode. The module would rename VG %s to %s." 
% (self.vg, self.vg_new) + self.result['changed'] = True + else: + vgrename_cmd_with_opts = [vgrename_cmd, self.vg, self.vg_new] + dummy, vg_rename_out, dummy = self.module.run_command(vgrename_cmd_with_opts, check_rc=True) + + self.result['msg'] = vg_rename_out + self.result['changed'] = True + + +def setup_module_object(): + module = AnsibleModule(argument_spec=argument_spec, + supports_check_mode=True) + return module + + +def main(): + module = setup_module_object() + lvg_rename = LvgRename(module=module) + lvg_rename.run() + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/lvm_pv.py b/plugins/modules/lvm_pv.py new file mode 100644 index 0000000000..3623109465 --- /dev/null +++ b/plugins/modules/lvm_pv.py @@ -0,0 +1,201 @@ +#!/usr/bin/python + +# Copyright (c) 2025, Klention Mali +# Based on lvol module by Jeroen Hoekx +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later +from __future__ import annotations + + +DOCUMENTATION = r""" +module: lvm_pv +short_description: Manage LVM Physical Volumes +version_added: "11.0.0" +description: + - Creates, resizes or removes LVM Physical Volumes. +author: + - Klention Mali (@klention) +options: + device: + description: + - Path to the block device to manage. + type: path + required: true + state: + description: + - Control if the physical volume exists. + type: str + choices: [present, absent] + default: present + force: + description: + - Force the operation. + - When O(state=present) (creating a PV), this uses C(pvcreate -f) to force creation. + - When O(state=absent) (removing a PV), this uses C(pvremove -ff) to force removal even if part of a volume group. + type: bool + default: false + resize: + description: + - Resize PV to device size when O(state=present). + type: bool + default: false +notes: + - Requires LVM2 utilities installed on the target system. 
+ - Device path must exist when creating a PV. +""" + +EXAMPLES = r""" +- name: Creating physical volume on /dev/sdb + community.general.lvm_pv: + device: /dev/sdb + +- name: Creating and resizing (if needed) physical volume + community.general.lvm_pv: + device: /dev/sdb + resize: true + +- name: Removing physical volume that is not part of any volume group + community.general.lvm_pv: + device: /dev/sdb + state: absent + +- name: Force removing physical volume that is already part of a volume group + community.general.lvm_pv: + device: /dev/sdb + force: true + state: absent +""" + +RETURN = r""" +""" + + +import os +from ansible.module_utils.basic import AnsibleModule + + +def get_pv_status(module, device): + """Check if the device is already a PV.""" + cmd = ['pvs', '--noheadings', '--readonly', device] + return module.run_command(cmd)[0] == 0 + + +def get_pv_size(module, device): + """Get current PV size in bytes.""" + cmd = ['pvs', '--noheadings', '--nosuffix', '--units', 'b', '-o', 'pv_size', device] + rc, out, err = module.run_command(cmd, check_rc=True) + return int(out.strip()) + + +def rescan_device(module, device): + """Perform storage rescan for the device.""" + base_device = os.path.basename(device) + is_partition = "/sys/class/block/{0}/partition".format(base_device) + + # Determine parent device if partition exists + parent_device = base_device + if os.path.exists(is_partition): + parent_device = ( + base_device.rpartition('p')[0] if base_device.startswith('nvme') + else base_device.rstrip('0123456789') + ) + + # Determine rescan path + rescan_path = "/sys/block/{0}/device/{1}".format( + parent_device, + "rescan_controller" if base_device.startswith('nvme') else "rescan" + ) + + if os.path.exists(rescan_path): + try: + with open(rescan_path, 'w') as f: + f.write('1') + return True + except IOError as e: + module.warn("Failed to rescan device {0}: {1}".format(device, str(e))) + else: + module.warn("Rescan path does not exist for device 
{0}".format(device)) + return False + + +def main(): + module = AnsibleModule( + argument_spec=dict( + device=dict(type='path', required=True), + state=dict(type='str', default='present', choices=['present', 'absent']), + force=dict(type='bool', default=False), + resize=dict(type='bool', default=False), + ), + supports_check_mode=True, + ) + + device = module.params['device'] + state = module.params['state'] + force = module.params['force'] + resize = module.params['resize'] + changed = False + actions = [] + + # Validate device existence for present state + if state == 'present' and not os.path.exists(device): + module.fail_json(msg="Device %s not found" % device) + + is_pv = get_pv_status(module, device) + + if state == 'present': + # Create PV if needed + if not is_pv: + if module.check_mode: + changed = True + actions.append('would be created') + else: + cmd = ['pvcreate'] + if force: + cmd.append('-f') + cmd.append(device) + rc, out, err = module.run_command(cmd, check_rc=True) + changed = True + actions.append('created') + is_pv = True + + # Handle resizing + elif resize and is_pv: + if module.check_mode: + # In check mode, assume resize would change + changed = True + actions.append('would be resized') + else: + # Perform device rescan if each time + if rescan_device(module, device): + actions.append('rescanned') + original_size = get_pv_size(module, device) + rc, out, err = module.run_command(['pvresize', device], check_rc=True) + new_size = get_pv_size(module, device) + if new_size != original_size: + changed = True + actions.append('resized') + + elif state == 'absent': + if is_pv: + if module.check_mode: + changed = True + actions.append('would be removed') + else: + cmd = ['pvremove', '-y'] + if force: + cmd.append('-ff') + changed = True + cmd.append(device) + rc, out, err = module.run_command(cmd, check_rc=True) + actions.append('removed') + + # Generate final message + if actions: + msg = "PV %s: %s" % (device, ', '.join(actions)) + else: + msg = "No 
changes needed for PV %s" % device + module.exit_json(changed=changed, msg=msg) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/lvm_pv_move_data.py b/plugins/modules/lvm_pv_move_data.py new file mode 100644 index 0000000000..d14434d66a --- /dev/null +++ b/plugins/modules/lvm_pv_move_data.py @@ -0,0 +1,218 @@ +#!/usr/bin/python + +# Copyright (c) 2025, Klention Mali +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later +from __future__ import annotations + + +DOCUMENTATION = r""" +module: lvm_pv_move_data +short_description: Move data between LVM Physical Volumes (PVs) +version_added: "11.2.0" +description: + - Moves data from one LVM Physical Volume (PV) to another. +author: + - Klention Mali (@klention) +options: + source: + description: + - Path to the source block device to move data from. + - Must be an existing PV. + type: path + required: true + destination: + description: + - Path to the destination block device to move data to. + - Must be an existing PV with enough free space. + type: path + required: true + auto_answer: + description: + - Answer yes to all prompts automatically. + type: bool + default: false + atomic: + description: + - Makes the C(pvmove) operation atomic, ensuring that all affected LVs are moved to the destination PV, + or none are if the operation is aborted. + type: bool + default: true + autobackup: + description: + - Automatically backup metadata before changes (strongly advised!). + type: bool + default: true +requirements: + - LVM2 utilities + - Both O(source) and O(destination) devices must exist, and the PVs must be in the same volume group. + - The O(destination) PV must have enough free space to accommodate the O(source) PV's allocated extents. + - Verbosity is automatically controlled by Ansible's verbosity level (using multiple C(-v) flags). 
+""" + +EXAMPLES = r""" +- name: Moving data from /dev/sdb to /dev/sdc + community.general.lvm_pv_move_data: + source: /dev/sdb + destination: /dev/sdc +""" + +RETURN = r""" +actions: + description: List of actions performed during module execution. + returned: success + type: list + elements: str + sample: [ + "moved data from /dev/sdb to /dev/sdc", + "no allocated extents to move", + "would move data from /dev/sdb to /dev/sdc" + ] +""" + + +import os +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.cmd_runner import CmdRunner, cmd_runner_fmt + + +def main(): + module = AnsibleModule( + argument_spec=dict( + source=dict(type='path', required=True), + destination=dict(type='path', required=True), + auto_answer=dict(type='bool', default=False), + atomic=dict(type='bool', default=True), + autobackup=dict(type='bool', default=True), + ), + supports_check_mode=True, + ) + + pvs_runner = CmdRunner( + module, + command="pvs", + arg_formats=dict( + noheadings=cmd_runner_fmt.as_fixed("--noheadings"), + readonly=cmd_runner_fmt.as_fixed("--readonly"), + vg_name=cmd_runner_fmt.as_fixed("-o", "vg_name"), + pv_pe_alloc_count=cmd_runner_fmt.as_fixed("-o", "pv_pe_alloc_count"), + pv_pe_count=cmd_runner_fmt.as_fixed("-o", "pv_pe_count"), + device=cmd_runner_fmt.as_list(), + ) + ) + + source = module.params['source'] + destination = module.params['destination'] + changed = False + actions = [] + result = {'changed': False} + + # Validate device existence + if not os.path.exists(source): + module.fail_json(msg="Source device %s not found" % source) + if not os.path.exists(destination): + module.fail_json(msg="Destination device %s not found" % destination) + if source == destination: + module.fail_json(msg="Source and destination devices must be different") + + def run_pvs_command(arguments, device): + with pvs_runner(arguments) as ctx: + rc, out, err = ctx.run(device=device) + if rc != 0: + module.fail_json( + 
msg="Command failed: %s" % err, + stdout=out, + stderr=err, + rc=rc, + cmd=ctx.cmd, + arguments=arguments, + device=device, + ) + return out.strip() + + def is_pv(device): + with pvs_runner("noheadings readonly device", check_rc=False) as ctx: + rc, out, err = ctx.run(device=device) + return rc == 0 + + if not is_pv(source): + module.fail_json(msg="Source device %s is not a PV" % source) + if not is_pv(destination): + module.fail_json(msg="Destination device %s is not a PV" % destination) + + vg_src = run_pvs_command("noheadings vg_name device", source) + vg_dest = run_pvs_command("noheadings vg_name device", destination) + if vg_src != vg_dest: + module.fail_json( + msg="Source and destination must be in the same VG. Source VG: '%s', Destination VG: '%s'." % (vg_src, vg_dest) + ) + + def get_allocated_pe(device): + try: + return int(run_pvs_command("noheadings pv_pe_alloc_count device", device)) + except ValueError: + module.fail_json(msg="Invalid allocated PE count for device %s" % device) + + allocated = get_allocated_pe(source) + if allocated == 0: + actions.append('no allocated extents to move') + else: + # Check destination has enough free space + def get_total_pe(device): + try: + return int(run_pvs_command("noheadings pv_pe_count device", device)) + except ValueError: + module.fail_json(msg="Invalid total PE count for device %s" % device) + + def get_free_pe(device): + return get_total_pe(device) - get_allocated_pe(device) + + free_pe_dest = get_free_pe(destination) + if free_pe_dest < allocated: + module.fail_json( + msg="Destination device %s has only %d free physical extents, but source device %s has %d allocated extents. Not enough space." 
% + (destination, free_pe_dest, source, allocated) + ) + + if module.check_mode: + changed = True + actions.append('would move data from %s to %s' % (source, destination)) + else: + pvmove_runner = CmdRunner( + module, + command="pvmove", + arg_formats=dict( + auto_answer=cmd_runner_fmt.as_bool("-y"), + atomic=cmd_runner_fmt.as_bool("--atomic"), + autobackup=cmd_runner_fmt.as_fixed("--autobackup", "y" if module.params['autobackup'] else "n"), + verbosity=cmd_runner_fmt.as_func(lambda v: ['-' + 'v' * v] if v > 0 else []), + source=cmd_runner_fmt.as_list(), + destination=cmd_runner_fmt.as_list(), + ) + ) + + verbosity = module._verbosity + with pvmove_runner("auto_answer atomic autobackup verbosity source destination") as ctx: + rc, out, err = ctx.run( + verbosity=verbosity, + source=source, + destination=destination + ) + result['stdout'] = out + result['stderr'] = err + + changed = True + actions.append('moved data from %s to %s' % (source, destination)) + + result['changed'] = changed + result['actions'] = actions + if actions: + result['msg'] = "PV data move: %s" % ", ".join(actions) + else: + result['msg'] = "No data to move from %s" % source + + module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/system/lvol.py b/plugins/modules/lvol.py similarity index 72% rename from plugins/modules/system/lvol.py rename to plugins/modules/lvol.py index b1b9dcb739..c2b18dd936 100644 --- a/plugins/modules/system/lvol.py +++ b/plugins/modules/lvol.py @@ -1,96 +1,102 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- -# Copyright: (c) 2013, Jeroen Hoekx , Alexander Bulimov -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# Copyright (c) 2013, Jeroen Hoekx , Alexander Bulimov +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import absolute_import, division, 
print_function -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" author: - - Jeroen Hoekx (@jhoekx) - - Alexander Bulimov (@abulimov) - - Raoul Baudach (@unkaputtbar112) - - Ziga Kern (@zigaSRC) + - Jeroen Hoekx (@jhoekx) + - Alexander Bulimov (@abulimov) + - Raoul Baudach (@unkaputtbar112) + - Ziga Kern (@zigaSRC) module: lvol short_description: Configure LVM logical volumes description: - This module creates, removes or resizes logical volumes. +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: none options: vg: type: str required: true description: - - The volume group this logical volume is part of. + - The volume group this logical volume is part of. lv: type: str description: - - The name of the logical volume. + - The name of the logical volume. size: type: str description: - - The size of the logical volume, according to lvcreate(8) --size, by - default in megabytes or optionally with one of [bBsSkKmMgGtTpPeE] units; or - according to lvcreate(8) --extents as a percentage of [VG|PVS|FREE]; - Float values must begin with a digit. - - When resizing, apart from specifying an absolute size you may, according to - lvextend(8)|lvreduce(8) C(--size), specify the amount to extend the logical volume with - the prefix C(+) or the amount to reduce the logical volume by with prefix C(-). - - Resizing using C(+) or C(-) was not supported prior to community.general 3.0.0. - - Please note that when using C(+) or C(-), the module is B(not idempotent). + - The size of the logical volume, according to lvcreate(8) C(--size), by default in megabytes or optionally with one + of [bBsSkKmMgGtTpPeE] units; or according to lvcreate(8) C(--extents) as a percentage of [VG|PVS|FREE|ORIGIN]; Float + values must begin with a digit. 
+ - When resizing, apart from specifying an absolute size you may, according to lvextend(8)|lvreduce(8) C(--size), specify + the amount to extend the logical volume with the prefix V(+) or the amount to reduce the logical volume by with prefix + V(-). + - Resizing using V(+) or V(-) was not supported prior to community.general 3.0.0. + - Please note that when using V(+), V(-), or percentage of FREE, the module is B(not idempotent). state: type: str description: - - Control if the logical volume exists. If C(present) and the - volume does not already exist then the C(size) option is required. - choices: [ absent, present ] + - Control if the logical volume exists. If V(present) and the volume does not already exist then the O(size) option + is required. + choices: [absent, present] default: present active: description: - - Whether the volume is active and visible to the host. + - Whether the volume is active and visible to the host. type: bool - default: 'yes' + default: true force: description: - - Shrink or remove operations of volumes requires this switch. Ensures that - that filesystems get never corrupted/destroyed by mistake. + - Shrink or remove operations of volumes requires this switch. Ensures that filesystems never get corrupted/destroyed + by mistake. type: bool - default: 'no' + default: false opts: type: str description: - - Free-form options to be passed to the lvcreate command. + - Free-form options to be passed to the lvcreate command. snapshot: type: str description: - - The name of the snapshot volume + - The name of a snapshot volume to be configured. When creating a snapshot volume, the O(lv) parameter specifies the + origin volume. pvs: - type: str + type: list + elements: str description: - - Comma separated list of physical volumes (e.g. /dev/sda,/dev/sdb). + - List of physical volumes (for example V(/dev/sda, /dev/sdb)). thinpool: type: str description: - - The thin pool volume name. 
When you want to create a thin provisioned volume, specify a thin pool volume name. + - The thin pool volume name. When you want to create a thin provisioned volume, specify a thin pool volume name. shrink: description: - - Shrink if current size is higher than size requested. + - Shrink if current size is higher than size requested. type: bool - default: 'yes' + default: true resizefs: description: - - Resize the underlying filesystem together with the logical volume. - - Supported for C(ext2), C(ext3), C(ext4), C(reiserfs) and C(XFS) filesystems. - Attempts to resize other filesystem types will fail. + - Resize the underlying filesystem together with the logical volume. + - Supported for C(ext2), C(ext3), C(ext4), C(reiserfs) and C(XFS) filesystems. Attempts to resize other filesystem types + result in failure. type: bool - default: 'no' + default: false notes: - You must specify lv (when managing the state of logical volumes) or thinpool (when managing a thin provisioned volume). -''' +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: Create a logical volume of 512m community.general.lvol: vg: firefly @@ -102,7 +108,9 @@ EXAMPLES = ''' vg: firefly lv: test size: 512 - pvs: /dev/sda,/dev/sdb + pvs: + - /dev/sda + - /dev/sdb - name: Create cache pool logical volume community.general.lvol: @@ -160,35 +168,35 @@ EXAMPLES = ''' vg: firefly lv: test size: 80%VG - force: yes + force: true - name: Reduce the logical volume to 512m community.general.lvol: vg: firefly lv: test size: 512 - force: yes + force: true - name: Reduce the logical volume by given space community.general.lvol: vg: firefly lv: test size: -512M - force: yes + force: true - name: Set the logical volume to 512m and do not try to shrink if size is lower than current one community.general.lvol: vg: firefly lv: test size: 512 - shrink: no + shrink: false - name: Remove the logical volume. 
community.general.lvol: vg: firefly lv: test state: absent - force: yes + force: true - name: Create a snapshot volume of the test logical volume. community.general.lvol: @@ -222,9 +230,10 @@ EXAMPLES = ''' lv: test thinpool: testpool size: 128g -''' +""" import re +import shlex from ansible.module_utils.basic import AnsibleModule @@ -270,7 +279,7 @@ def parse_vgs(data): def get_lvm_version(module): ver_cmd = module.get_bin_path("lvm", required=True) - rc, out, err = module.run_command("%s version" % (ver_cmd)) + rc, out, err = module.run_command([ver_cmd, "version"]) if rc != 0: return None m = re.search(r"LVM version:\s+(\d+)\.(\d+)\.(\d+).*(\d{4}-\d{2}-\d{2})", out) @@ -291,7 +300,7 @@ def main(): shrink=dict(type='bool', default=True), active=dict(type='bool', default=True), snapshot=dict(type='str'), - pvs=dict(type='str'), + pvs=dict(type='list', elements='str'), resizefs=dict(type='bool', default=False), thinpool=dict(type='str'), ), @@ -309,14 +318,14 @@ def main(): module.fail_json(msg="Failed to get LVM version number") version_yesopt = mkversion(2, 2, 99) # First LVM with the "--yes" option if version_found >= version_yesopt: - yesopt = "--yes" + yesopt = ["--yes"] else: - yesopt = "" + yesopt = [] vg = module.params['vg'] lv = module.params['lv'] size = module.params['size'] - opts = module.params['opts'] + opts = shlex.split(module.params['opts'] or '') state = module.params['state'] force = module.boolean(module.params['force']) shrink = module.boolean(module.params['shrink']) @@ -327,21 +336,13 @@ def main(): size_unit = 'm' size_operator = None snapshot = module.params['snapshot'] - pvs = module.params['pvs'] - - if pvs is None: - pvs = "" - else: - pvs = pvs.replace(",", " ") - - if opts is None: - opts = "" + pvs = module.params['pvs'] or [] # Add --test option when running in check-mode if module.check_mode: - test_opt = ' --test' + test_opt = ['--test'] else: - test_opt = '' + test_opt = [] if size: # LVEXTEND(8)/LVREDUCE(8) -l, -L options: 
Check for relative value for resizing @@ -360,10 +361,10 @@ def main(): if size_percent > 100: module.fail_json(msg="Size percentage cannot be larger than 100%") size_whole = size_parts[1] - if size_whole == 'ORIGIN': - module.fail_json(msg="Snapshot Volumes are not supported") - elif size_whole not in ['VG', 'PVS', 'FREE']: - module.fail_json(msg="Specify extents as a percentage of VG|PVS|FREE") + if size_whole == 'ORIGIN' and snapshot is None: + module.fail_json(msg="Percentage of ORIGIN supported only for snapshot volumes") + elif size_whole not in ['VG', 'PVS', 'FREE', 'ORIGIN']: + module.fail_json(msg="Specify extents as a percentage of VG|PVS|FREE|ORIGIN") size_opt = 'l' size_unit = '' @@ -389,7 +390,7 @@ def main(): # Get information on volume group requested vgs_cmd = module.get_bin_path("vgs", required=True) rc, current_vgs, err = module.run_command( - "%s --noheadings --nosuffix -o vg_name,size,free,vg_extent_size --units %s --separator ';' %s" % (vgs_cmd, unit.lower(), vg)) + [vgs_cmd, "--noheadings", "--nosuffix", "-o", "vg_name,size,free,vg_extent_size", "--units", unit.lower(), "--separator", ";", vg]) if rc != 0: if state == 'absent': @@ -403,7 +404,7 @@ def main(): # Get information on logical volume requested lvs_cmd = module.get_bin_path("lvs", required=True) rc, current_lvs, err = module.run_command( - "%s -a --noheadings --nosuffix -o lv_name,size,lv_attr --units %s --separator ';' %s" % (lvs_cmd, unit.lower(), vg)) + [lvs_cmd, "-a", "--noheadings", "--nosuffix", "-o", "lv_name,size,lv_attr", "--units", unit.lower(), "--separator", ";", vg]) if rc != 0: if state == 'absent': @@ -463,20 +464,23 @@ def main(): # create LV lvcreate_cmd = module.get_bin_path("lvcreate", required=True) + cmd = [lvcreate_cmd] + test_opt + yesopt if snapshot is not None: if size: - cmd = "%s %s %s -%s %s%s -s -n %s %s %s/%s" % (lvcreate_cmd, test_opt, yesopt, size_opt, size, size_unit, snapshot, opts, vg, lv) - else: - cmd = "%s %s %s -s -n %s %s %s/%s" % 
(lvcreate_cmd, test_opt, yesopt, snapshot, opts, vg, lv) - elif thinpool and lv: - if size_opt == 'l': - module.fail_json(changed=False, msg="Thin volume sizing with percentage not supported.") - size_opt = 'V' - cmd = "%s %s %s -n %s -%s %s%s %s -T %s/%s" % (lvcreate_cmd, test_opt, yesopt, lv, size_opt, size, size_unit, opts, vg, thinpool) - elif thinpool and not lv: - cmd = "%s %s %s -%s %s%s %s -T %s/%s" % (lvcreate_cmd, test_opt, yesopt, size_opt, size, size_unit, opts, vg, thinpool) + cmd += ["-%s" % size_opt, "%s%s" % (size, size_unit)] + cmd += ["-s", "-n", snapshot] + opts + ["%s/%s" % (vg, lv)] + elif thinpool: + if lv: + if size_opt == 'l': + module.fail_json(changed=False, msg="Thin volume sizing with percentage not supported.") + size_opt = 'V' + cmd += ["-n", lv] + cmd += ["-%s" % size_opt, "%s%s" % (size, size_unit)] + cmd += opts + ["-T", "%s/%s" % (vg, thinpool)] else: - cmd = "%s %s %s -n %s -%s %s%s %s %s %s" % (lvcreate_cmd, test_opt, yesopt, lv, size_opt, size, size_unit, opts, vg, pvs) + cmd += ["-n", lv] + cmd += ["-%s" % size_opt, "%s%s" % (size, size_unit)] + cmd += opts + [vg] + pvs rc, dummy, err = module.run_command(cmd) if rc == 0: changed = True @@ -486,9 +490,9 @@ def main(): if state == 'absent': # remove LV if not force: - module.fail_json(msg="Sorry, no removal of logical volume %s without force=yes." % (this_lv['name'])) + module.fail_json(msg="Sorry, no removal of logical volume %s without force=true." 
% (this_lv['name'])) lvremove_cmd = module.get_bin_path("lvremove", required=True) - rc, dummy, err = module.run_command("%s %s --force %s/%s" % (lvremove_cmd, test_opt, vg, this_lv['name'])) + rc, dummy, err = module.run_command([lvremove_cmd] + test_opt + ["--force", "%s/%s" % (vg, this_lv['name'])]) if rc == 0: module.exit_json(changed=True) else: @@ -516,7 +520,7 @@ def main(): if this_lv['size'] < size_requested: if (size_free > 0) and (size_free >= (size_requested - this_lv['size'])): - tool = module.get_bin_path("lvextend", required=True) + tool = [module.get_bin_path("lvextend", required=True)] else: module.fail_json( msg="Logical Volume %s could not be extended. Not enough free space left (%s%s required / %s%s available)" % @@ -526,27 +530,28 @@ def main(): if size_requested < 1: module.fail_json(msg="Sorry, no shrinking of %s to 0 permitted." % (this_lv['name'])) elif not force: - module.fail_json(msg="Sorry, no shrinking of %s without force=yes" % (this_lv['name'])) + module.fail_json(msg="Sorry, no shrinking of %s without force=true" % (this_lv['name'])) else: - tool = module.get_bin_path("lvreduce", required=True) - tool = '%s %s' % (tool, '--force') + tool = [module.get_bin_path("lvreduce", required=True), '--force'] if tool: if resizefs: - tool = '%s %s' % (tool, '--resizefs') + tool += ['--resizefs'] + cmd = tool + test_opt if size_operator: - cmd = "%s %s -%s %s%s%s %s/%s %s" % (tool, test_opt, size_opt, size_operator, size, size_unit, vg, this_lv['name'], pvs) + cmd += ["-%s" % size_opt, "%s%s%s" % (size_operator, size, size_unit)] else: - cmd = "%s %s -%s %s%s %s/%s %s" % (tool, test_opt, size_opt, size, size_unit, vg, this_lv['name'], pvs) + cmd += ["-%s" % size_opt, "%s%s" % (size, size_unit)] + cmd += ["%s/%s" % (vg, this_lv['name'])] + pvs rc, out, err = module.run_command(cmd) if "Reached maximum COW size" in out: module.fail_json(msg="Unable to resize %s to %s%s" % (lv, size, size_unit), rc=rc, err=err, out=out) elif rc == 0: changed = True 
msg = "Volume %s resized to %s%s" % (this_lv['name'], size_requested, unit) - elif "matches existing size" in err: + elif "matches existing size" in err or "matches existing size" in out: module.exit_json(changed=False, vg=vg, lv=this_lv['name'], size=this_lv['size']) - elif "not larger than existing size" in err: + elif "not larger than existing size" in err or "not larger than existing size" in out: module.exit_json(changed=False, vg=vg, lv=this_lv['name'], size=this_lv['size'], msg="Original size is larger than requested size", err=err) else: module.fail_json(msg="Unable to resize %s to %s%s" % (lv, size, size_unit), rc=rc, err=err) @@ -555,31 +560,32 @@ def main(): # resize LV based on absolute values tool = None if float(size) > this_lv['size'] or size_operator == '+': - tool = module.get_bin_path("lvextend", required=True) + tool = [module.get_bin_path("lvextend", required=True)] elif shrink and float(size) < this_lv['size'] or size_operator == '-': if float(size) == 0: module.fail_json(msg="Sorry, no shrinking of %s to 0 permitted." % (this_lv['name'])) if not force: - module.fail_json(msg="Sorry, no shrinking of %s without force=yes." % (this_lv['name'])) + module.fail_json(msg="Sorry, no shrinking of %s without force=true." 
% (this_lv['name'])) else: - tool = module.get_bin_path("lvreduce", required=True) - tool = '%s %s' % (tool, '--force') + tool = [module.get_bin_path("lvreduce", required=True), '--force'] if tool: if resizefs: - tool = '%s %s' % (tool, '--resizefs') + tool += ['--resizefs'] + cmd = tool + test_opt if size_operator: - cmd = "%s %s -%s %s%s%s %s/%s %s" % (tool, test_opt, size_opt, size_operator, size, size_unit, vg, this_lv['name'], pvs) + cmd += ["-%s" % size_opt, "%s%s%s" % (size_operator, size, size_unit)] else: - cmd = "%s %s -%s %s%s %s/%s %s" % (tool, test_opt, size_opt, size, size_unit, vg, this_lv['name'], pvs) + cmd += ["-%s" % size_opt, "%s%s" % (size, size_unit)] + cmd += ["%s/%s" % (vg, this_lv['name'])] + pvs rc, out, err = module.run_command(cmd) if "Reached maximum COW size" in out: module.fail_json(msg="Unable to resize %s to %s%s" % (lv, size, size_unit), rc=rc, err=err, out=out) elif rc == 0: changed = True - elif "matches existing size" in err: + elif "matches existing size" in err or "matches existing size" in out: module.exit_json(changed=False, vg=vg, lv=this_lv['name'], size=this_lv['size']) - elif "not larger than existing size" in err: + elif "not larger than existing size" in err or "not larger than existing size" in out: module.exit_json(changed=False, vg=vg, lv=this_lv['name'], size=this_lv['size'], msg="Original size is larger than requested size", err=err) else: module.fail_json(msg="Unable to resize %s to %s%s" % (lv, size, size_unit), rc=rc, err=err) @@ -587,14 +593,14 @@ def main(): if this_lv is not None: if active: lvchange_cmd = module.get_bin_path("lvchange", required=True) - rc, dummy, err = module.run_command("%s -ay %s/%s" % (lvchange_cmd, vg, this_lv['name'])) + rc, dummy, err = module.run_command([lvchange_cmd, "-ay", "%s/%s" % (vg, this_lv['name'])]) if rc == 0: module.exit_json(changed=((not this_lv['active']) or changed), vg=vg, lv=this_lv['name'], size=this_lv['size']) else: module.fail_json(msg="Failed to activate 
logical volume %s" % (lv), rc=rc, err=err) else: lvchange_cmd = module.get_bin_path("lvchange", required=True) - rc, dummy, err = module.run_command("%s -an %s/%s" % (lvchange_cmd, vg, this_lv['name'])) + rc, dummy, err = module.run_command([lvchange_cmd, "-an", "%s/%s" % (vg, this_lv['name'])]) if rc == 0: module.exit_json(changed=(this_lv['active'] or changed), vg=vg, lv=this_lv['name'], size=this_lv['size']) else: diff --git a/plugins/modules/cloud/lxc/lxc_container.py b/plugins/modules/lxc_container.py similarity index 83% rename from plugins/modules/cloud/lxc/lxc_container.py rename to plugins/modules/lxc_container.py index c8c577aba6..6c4ff64f9c 100644 --- a/plugins/modules/cloud/lxc/lxc_container.py +++ b/plugins/modules/lxc_container.py @@ -1,193 +1,187 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- -# Copyright: (c) 2014, Kevin Carter -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# Copyright (c) 2014, Kevin Carter +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = r''' ---- +DOCUMENTATION = r""" module: lxc_container short_description: Manage LXC Containers description: - Management of LXC containers. author: "Kevin Carter (@cloudnull)" +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: none + diff_mode: + support: none options: - name: - description: - - Name of a container. - type: str - required: true - backing_store: - choices: - - dir - - lvm - - loop - - btrfs - - overlayfs - - zfs - description: - - Backend storage type for the container. - type: str - default: dir - template: - description: - - Name of the template to use within an LXC create. 
- type: str - default: ubuntu - template_options: - description: - - Template options when building the container. - type: str - config: - description: - - Path to the LXC configuration file. - type: path - lv_name: - description: - - Name of the logical volume, defaults to the container name. - - If not specified, it defaults to C($CONTAINER_NAME). - type: str - vg_name: - description: - - If backend store is lvm, specify the name of the volume group. - type: str - default: lxc - thinpool: - description: - - Use LVM thin pool called TP. - type: str - fs_type: - description: - - Create fstype TYPE. - type: str - default: ext4 - fs_size: - description: - - File system Size. - type: str - default: 5G - directory: - description: - - Place rootfs directory under DIR. - type: path - zfs_root: - description: - - Create zfs under given zfsroot. - type: str - container_command: - description: - - Run a command within a container. - type: str - lxc_path: - description: - - Place container under PATH. - type: path - container_log: - description: - - Enable a container log for host actions to the container. - type: bool - default: 'no' - container_log_level: - choices: - - Info - - info - - INFO - - Error - - error - - ERROR - - Debug - - debug - - DEBUG - description: - - Set the log level for a container where *container_log* was set. - type: str - required: false - default: INFO - clone_name: - description: - - Name of the new cloned server. - - This is only used when state is clone. - type: str - clone_snapshot: - description: - - Create a snapshot a container when cloning. - - This is not supported by all container storage backends. - - Enabling this may fail if the backing store does not support snapshots. - type: bool - default: 'no' - archive: - description: - - Create an archive of a container. - - This will create a tarball of the running container. - type: bool - default: 'no' - archive_path: - description: - - Path the save the archived container. 
- - If the path does not exist the archive method will attempt to create it. - type: path - archive_compression: - choices: - - gzip - - bzip2 - - none - description: - - Type of compression to use when creating an archive of a running - container. - type: str - default: gzip - state: - choices: - - started - - stopped - - restarted - - absent - - frozen - - clone - description: - - Define the state of a container. - - If you clone a container using I(clone_name) the newly cloned - container created in a stopped state. - - The running container will be stopped while the clone operation is - happening and upon completion of the clone the original container - state will be restored. - type: str - default: started - container_config: - description: - - A list of C(key=value) options to use when configuring a container. - type: list - elements: str + name: + description: + - Name of a container. + type: str + required: true + backing_store: + choices: + - dir + - lvm + - loop + - btrfs + - overlayfs + - zfs + description: + - Backend storage type for the container. + type: str + default: dir + template: + description: + - Name of the template to use within an LXC create. + type: str + default: ubuntu + template_options: + description: + - Template options when building the container. + type: str + config: + description: + - Path to the LXC configuration file. + type: path + lv_name: + description: + - Name of the logical volume, defaults to the container name. + - If not specified, it defaults to E(CONTAINER_NAME). + type: str + vg_name: + description: + - If backend store is lvm, specify the name of the volume group. + type: str + default: lxc + thinpool: + description: + - Use LVM thin pool called TP. + type: str + fs_type: + description: + - Create fstype TYPE. + type: str + default: ext4 + fs_size: + description: + - File system Size. + type: str + default: 5G + directory: + description: + - Place rootfs directory under DIR. 
+ type: path + zfs_root: + description: + - Create zfs under given zfsroot. + type: str + container_command: + description: + - Run a command within a container. + type: str + lxc_path: + description: + - Place container under E(PATH). + type: path + container_log: + description: + - Enable a container log for host actions to the container. + type: bool + default: false + container_log_level: + choices: + - Info + - info + - INFO + - Error + - error + - ERROR + - Debug + - debug + - DEBUG + description: + - Set the log level for a container where O(container_log) was set. + type: str + required: false + default: INFO + clone_name: + description: + - Name of the new cloned server. + - This is only used when state is clone. + type: str + clone_snapshot: + description: + - Create a snapshot a container when cloning. + - This is not supported by all container storage backends. + - Enabling this may fail if the backing store does not support snapshots. + type: bool + default: false + archive: + description: + - Create an archive of a container. + - This creates a tarball of the running container. + type: bool + default: false + archive_path: + description: + - Path the save the archived container. + - If the path does not exist the archive method attempts to create it. + type: path + archive_compression: + choices: + - gzip + - bzip2 + - none + description: + - Type of compression to use when creating an archive of a running container. + type: str + default: gzip + state: + choices: + - started + - stopped + - restarted + - absent + - frozen + - clone + description: + - Define the state of a container. + - If you clone a container using O(clone_name) the newly cloned container created in a stopped state. + - The running container is stopped while the clone operation is happening and upon completion of the clone the original + container state is restored. 
+ type: str + default: started + container_config: + description: + - A list of C(key=value) options to use when configuring a container. + type: list + elements: str requirements: - - 'lxc >= 1.0 # OS package' - - 'python >= 2.6 # OS Package' - - 'lxc-python2 >= 0.1 # PIP Package from https://github.com/lxc/python2-lxc' + - 'lxc >= 2.0 # OS package' + - 'python3 >= 3.5 # OS Package' + - 'python3-lxc # OS Package' notes: - - Containers must have a unique name. If you attempt to create a container - with a name that already exists in the users namespace the module will - simply return as "unchanged". - - The "container_command" can be used with any state except "absent". If - used with state "stopped" the container will be "started", the command - executed, and then the container "stopped" again. Likewise if the state - is "stopped" and the container does not exist it will be first created, - "started", the command executed, and then "stopped". If you use a "|" - in the variable you can use common script formatting within the variable - itself The "container_command" option will always execute as BASH. - When using "container_command" a log file is created in the /tmp/ directory - which contains both stdout and stderr of any command executed. - - If "archive" is **true** the system will attempt to create a compressed - tarball of the running container. The "archive" option supports LVM backed - containers and will create a snapshot of the running container when - creating the archive. - - If your distro does not have a package for "python2-lxc", which is a - requirement for this module, it can be installed from source at - "https://github.com/lxc/python2-lxc" or installed via pip using the package - name lxc-python2. -''' + - Containers must have a unique name. If you attempt to create a container with a name that already exists in the users + namespace the module simply returns as "unchanged". 
+ - The O(container_command) can be used with any state except V(absent). If used with state V(stopped) the container is V(started), + the command executed, and then the container V(stopped) again. Likewise if O(state=stopped) and the container does not + exist it is first created, V(started), the command executed, and then V(stopped). If you use a C(|) in the variable you + can use common script formatting within the variable itself. The O(container_command) option always execute as C(bash). + When using O(container_command), a log file is created in the C(/tmp/) directory which contains both C(stdout) and C(stderr) + of any command executed. + - If O(archive=true) the system attempts to create a compressed tarball of the running container. The O(archive) option + supports LVM backed containers and creates a snapshot of the running container when creating the archive. + - If your distro does not have a package for C(python3-lxc), which is a requirement for this module, it can be installed + from source at U(https://github.com/lxc/python3-lxc) or installed using C(pip install lxc). +""" EXAMPLES = r""" - name: Create a started container @@ -260,14 +254,14 @@ EXAMPLES = r""" ansible.builtin.debug: var: lvm_container_info -- name: Run a command in a container and ensure its in a "stopped" state. +- name: Run a command in a container and ensure it is in a "stopped" state. community.general.lxc_container: name: test-container-started state: stopped container_command: | echo 'hello world.' | tee /opt/stopped -- name: Run a command in a container and ensure its it in a "frozen" state. +- name: Run a command in a container and ensure it is in a "frozen" state. 
community.general.lxc_container: name: test-container-stopped state: frozen @@ -374,45 +368,45 @@ EXAMPLES = r""" RETURN = r""" lxc_container: - description: container information - returned: success - type: complex - contains: - name: - description: name of the lxc container - returned: success - type: str - sample: test_host - init_pid: - description: pid of the lxc init process - returned: success - type: int - sample: 19786 - interfaces: - description: list of the container's network interfaces - returned: success - type: list - sample: [ "eth0", "lo" ] - ips: - description: list of ips - returned: success - type: list - sample: [ "10.0.3.3" ] - state: - description: resulting state of the container - returned: success - type: str - sample: "running" - archive: - description: resulting state of the container - returned: success, when archive is true - type: str - sample: "/tmp/test-container-config.tar" - clone: - description: if the container was cloned - returned: success, when clone_name is specified - type: bool - sample: True + description: Container information. + returned: success + type: complex + contains: + name: + description: Name of the LXC container. + returned: success + type: str + sample: test_host + init_pid: + description: Pid of the LXC init process. + returned: success + type: int + sample: 19786 + interfaces: + description: List of the container's network interfaces. + returned: success + type: list + sample: ["eth0", "lo"] + ips: + description: List of IPs. + returned: success + type: list + sample: ["10.0.3.3"] + state: + description: Resulting state of the container. + returned: success + type: str + sample: "running" + archive: + description: Resulting state of the container. + returned: success, when archive is true + type: str + sample: "/tmp/test-container-config.tar" + clone: + description: If the container was cloned. 
+ returned: success, when clone_name is specified + type: bool + sample: true """ import os @@ -432,8 +426,7 @@ else: HAS_LXC = True from ansible.module_utils.basic import AnsibleModule -from ansible.module_utils.parsing.convert_bool import BOOLEANS_FALSE, BOOLEANS_TRUE -from ansible.module_utils.six.moves import xrange +from ansible.module_utils.parsing.convert_bool import BOOLEANS_FALSE from ansible.module_utils.common.text.converters import to_text, to_bytes @@ -558,7 +551,7 @@ popd def create_script(command): """Write out a script onto a target. - This method should be backward compatible with Python 2.4+ when executing + This method should be backward compatible with Python when executing from within the container. :param command: command to run, this can be a script and can use spacing @@ -607,10 +600,10 @@ class LxcContainerManagement(object): :type module: ``object`` """ self.module = module - self.state = self.module.params.get('state', None) + self.state = self.module.params['state'] self.state_change = False self.lxc_vg = None - self.lxc_path = self.module.params.get('lxc_path', None) + self.lxc_path = self.module.params['lxc_path'] self.container_name = self.module.params['name'] self.container = self.get_container_bind() self.archive_info = None @@ -643,10 +636,7 @@ class LxcContainerManagement(object): :returns: True or False if the container is found. 
:rtype: ``bol`` """ - if [i for i in lxc.list_containers(config_path=lxc_path) if i == container_name]: - return True - else: - return False + return any(c == container_name for c in lxc.list_containers(config_path=lxc_path)) @staticmethod def _add_variables(variables_dict, build_command): @@ -678,23 +668,23 @@ class LxcContainerManagement(object): for v in LXC_BACKING_STORE[self.module.params['backing_store']]: variables.pop(v, None) - return_dict = dict() false_values = BOOLEANS_FALSE.union([None, '']) - for k, v in variables.items(): - _var = self.module.params.get(k) - if _var not in false_values: - return_dict[v] = _var - return return_dict + result = { + v: self.module.params[k] + for k, v in variables.items() + if self.module.params[k] not in false_values + } + return result def _config(self): """Configure an LXC container. Write new configuration values to the lxc config file. This will - stop the container if it's running write the new options and then + stop the container if it is running write the new options and then restart the container upon completion. """ - _container_config = self.module.params.get('container_config') + _container_config = self.module.params['container_config'] if not _container_config: return False @@ -784,12 +774,12 @@ class LxcContainerManagement(object): ) # Load logging for the instance when creating it. - if self.module.params.get('clone_snapshot') in BOOLEANS_TRUE: + if self.module.params['clone_snapshot']: build_command.append('--snapshot') # Check for backing_store == overlayfs if so force the use of snapshot # If overlay fs is used and snapshot is unset the clone command will # fail with an unsupported type. 
- elif self.module.params.get('backing_store') == 'overlayfs': + elif self.module.params['backing_store'] == 'overlayfs': build_command.append('--snapshot') rc, return_data, err = self.module.run_command(build_command) @@ -837,7 +827,7 @@ class LxcContainerManagement(object): ) # Load logging for the instance when creating it. - if self.module.params.get('container_log') in BOOLEANS_TRUE: + if self.module.params['container_log']: # Set the logging path to the /var/log/lxc if uid is root. else # set it to the home folder of the user executing. try: @@ -862,7 +852,7 @@ class LxcContainerManagement(object): ]) # Add the template commands to the end of the command if there are any - template_options = self.module.params.get('template_options', None) + template_options = self.module.params['template_options'] if template_options: build_command.append('--') build_command += shlex.split(template_options) @@ -919,7 +909,7 @@ class LxcContainerManagement(object): def _execute_command(self): """Execute a shell command.""" - container_command = self.module.params.get('container_command') + container_command = self.module.params['container_command'] if container_command: container_state = self._get_state() if container_state == 'frozen': @@ -938,18 +928,17 @@ class LxcContainerManagement(object): """ self.container = self.get_container_bind() - for dummy in xrange(timeout): - if self._get_state() != 'running': - self.container.start() - self.state_change = True - # post startup sleep for 1 second. - time.sleep(1) - else: + for dummy in range(timeout): + if self._get_state() == 'running': return True + + self.container.start() + self.state_change = True + # post startup sleep for 1 second. + time.sleep(1) self.failure( lxc_container=self._container_data(), - error='Failed to start container' - ' [ %s ]' % self.container_name, + error='Failed to start container [ %s ]' % self.container_name, rc=1, msg='The container [ %s ] failed to start. 
Check to lxc is' ' available and that the container is in a functional' @@ -962,7 +951,7 @@ class LxcContainerManagement(object): This will store archive_info in as self.archive_info """ - if self.module.params.get('archive') in BOOLEANS_TRUE: + if self.module.params['archive']: self.archive_info = { 'archive': self._container_create_tar() } @@ -973,7 +962,7 @@ class LxcContainerManagement(object): This will store archive_info in as self.archive_info """ - clone_name = self.module.params.get('clone_name') + clone_name = self.module.params['clone_name'] if clone_name: if not self._container_exists(container_name=clone_name, lxc_path=self.lxc_path): self.clone_info = { @@ -991,7 +980,7 @@ class LxcContainerManagement(object): :type timeout: ``int`` """ - for dummy in xrange(timeout): + for dummy in range(timeout): if not self._container_exists(container_name=self.container_name, lxc_path=self.lxc_path): break @@ -1274,7 +1263,7 @@ class LxcContainerManagement(object): """ vg = self._get_lxc_vg() - free_space, messurement = self._get_vg_free_pe(vg_name=vg) + free_space, measurement = self._get_vg_free_pe(vg_name=vg) if free_space < float(snapshot_size_gb): message = ( @@ -1339,11 +1328,11 @@ class LxcContainerManagement(object): old_umask = os.umask(int('0077', 8)) - archive_path = self.module.params.get('archive_path') + archive_path = self.module.params['archive_path'] if not os.path.isdir(archive_path): os.makedirs(archive_path) - archive_compression = self.module.params.get('archive_compression') + archive_compression = self.module.params['archive_compression'] compression_type = LXC_COMPRESSION_MAP[archive_compression] # remove trailing / if present. @@ -1357,9 +1346,7 @@ class LxcContainerManagement(object): build_command = [ self.module.get_bin_path('tar', True), - '--directory=%s' % os.path.realpath( - os.path.expanduser(source_dir) - ), + '--directory=%s' % os.path.realpath(source_dir), compression_type['argument'], archive_name, '.' 
@@ -1702,7 +1689,6 @@ def main(): ), clone_name=dict( type='str', - required=False ), clone_snapshot=dict( type='bool', @@ -1731,9 +1717,8 @@ def main(): msg='The `lxc` module is not importable. Check the requirements.' ) - lv_name = module.params.get('lv_name') - if not lv_name: - module.params['lv_name'] = module.params.get('name') + if not module.params['lv_name']: + module.params['lv_name'] = module.params['name'] lxc_manage = LxcContainerManagement(module=module) lxc_manage.run() diff --git a/plugins/modules/remote_management/lxca/lxca_cmms.py b/plugins/modules/lxca_cmms.py similarity index 75% rename from plugins/modules/remote_management/lxca/lxca_cmms.py rename to plugins/modules/lxca_cmms.py index b3bb6c2a8c..9078cd272a 100644 --- a/plugins/modules/remote_management/lxca/lxca_cmms.py +++ b/plugins/modules/lxca_cmms.py @@ -1,49 +1,49 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- -# GNU General Public License v3.0+ (see COPYING or -# https://www.gnu.org/licenses/gpl-3.0.txt) +# Copyright (c) Ansible project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later # -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" author: - Naval Patel (@navalkp) - Prashant Bhosale (@prabhosa) module: lxca_cmms short_description: Custom module for lxca cmms inventory utility description: - - This module returns/displays a inventory details of cmms + - This module returns/displays a inventory details of cmms. +attributes: + check_mode: + support: none + diff_mode: + support: none options: uuid: - description: - uuid of device, this is string with length greater than 16. + description: UUID of device, this is string with length greater than 16. 
type: str command_options: - description: - options to filter nodes information + description: Options to filter nodes information. default: cmms choices: - - cmms - - cmms_by_uuid - - cmms_by_chassis_uuid + - cmms + - cmms_by_uuid + - cmms_by_chassis_uuid type: str chassis: - description: - uuid of chassis, this is string with length greater than 16. + description: UUID of chassis, this is string with length greater than 16. type: str extends_documentation_fragment: -- community.general.lxca_common + - community.general.lxca_common + - community.general.attributes +""" -''' - -EXAMPLES = ''' +EXAMPLES = r""" # get all cmms info - name: Get nodes data from LXCA community.general.lxca_cmms: @@ -68,28 +68,27 @@ EXAMPLES = ''' auth_url: "https://10.243.15.168" chassis: "3C737AA5E31640CE949B10C129A8B01F" command_options: cmms_by_chassis_uuid +""" -''' - -RETURN = r''' +RETURN = r""" result: - description: cmms detail from lxca - returned: success - type: dict - sample: - cmmList: - - machineType: '' - model: '' - type: 'CMM' - uuid: '118D2C88C8FD11E4947B6EAE8B4BDCDF' + description: Cmms detail from lxca. 
+ returned: success + type: dict + sample: + cmmList: + - machineType: '' + model: '' + type: 'CMM' + uuid: '118D2C88C8FD11E4947B6EAE8B4BDCDF' # bunch of properties - - machineType: '' - model: '' - type: 'CMM' - uuid: '223D2C88C8FD11E4947B6EAE8B4BDCDF' + - machineType: '' + model: '' + type: 'CMM' + uuid: '223D2C88C8FD11E4947B6EAE8B4BDCDF' # bunch of properties # Multiple cmms details -''' +""" import traceback from ansible.module_utils.basic import AnsibleModule @@ -143,8 +142,8 @@ FUNC_DICT = { INPUT_ARG_SPEC = dict( command_options=dict(default='cmms', choices=['cmms', 'cmms_by_uuid', 'cmms_by_chassis_uuid']), - uuid=dict(default=None), - chassis=dict(default=None) + uuid=dict(), + chassis=dict() ) diff --git a/plugins/modules/remote_management/lxca/lxca_nodes.py b/plugins/modules/lxca_nodes.py similarity index 78% rename from plugins/modules/remote_management/lxca/lxca_nodes.py rename to plugins/modules/lxca_nodes.py index 62b8e334d8..010f189629 100644 --- a/plugins/modules/remote_management/lxca/lxca_nodes.py +++ b/plugins/modules/lxca_nodes.py @@ -1,51 +1,51 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- -# GNU General Public License v3.0+ (see COPYING or -# https://www.gnu.org/licenses/gpl-3.0.txt) +# Copyright (c) Ansible project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later # -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" author: - Naval Patel (@navalkp) - Prashant Bhosale (@prabhosa) module: lxca_nodes short_description: Custom module for lxca nodes inventory utility description: - - This module returns/displays a inventory details of nodes + - This module returns/displays a inventory details of nodes. 
+attributes: + check_mode: + support: none + diff_mode: + support: none options: uuid: - description: - uuid of device, this is string with length greater than 16. + description: UUID of device, this is string with length greater than 16. type: str command_options: - description: - options to filter nodes information + description: Options to filter nodes information. default: nodes choices: - - nodes - - nodes_by_uuid - - nodes_by_chassis_uuid - - nodes_status_managed - - nodes_status_unmanaged + - nodes + - nodes_by_uuid + - nodes_by_chassis_uuid + - nodes_status_managed + - nodes_status_unmanaged type: str chassis: - description: - uuid of chassis, this is string with length greater than 16. + description: UUID of chassis, this is string with length greater than 16. type: str extends_documentation_fragment: -- community.general.lxca_common + - community.general.lxca_common + - community.general.attributes +""" -''' - -EXAMPLES = ''' +EXAMPLES = r""" # get all nodes info - name: Get nodes data from LXCA community.general.lxca_nodes: @@ -87,28 +87,27 @@ EXAMPLES = ''' login_password: Password auth_url: "https://10.243.15.168" command_options: nodes_status_unmanaged +""" -''' - -RETURN = r''' +RETURN = r""" result: - description: nodes detail from lxca - returned: always - type: dict - sample: - nodeList: - - machineType: '6241' - model: 'AC1' - type: 'Rack-TowerServer' - uuid: '118D2C88C8FD11E4947B6EAE8B4BDCDF' + description: Nodes detail from lxca. 
+ returned: always + type: dict + sample: + nodeList: + - machineType: '6241' + model: 'AC1' + type: 'Rack-TowerServer' + uuid: '118D2C88C8FD11E4947B6EAE8B4BDCDF' # bunch of properties - - machineType: '8871' - model: 'AC1' - type: 'Rack-TowerServer' - uuid: '223D2C88C8FD11E4947B6EAE8B4BDCDF' + - machineType: '8871' + model: 'AC1' + type: 'Rack-TowerServer' + uuid: '223D2C88C8FD11E4947B6EAE8B4BDCDF' # bunch of properties # Multiple nodes details -''' +""" import traceback from ansible.module_utils.basic import AnsibleModule @@ -174,7 +173,7 @@ INPUT_ARG_SPEC = dict( 'nodes_by_chassis_uuid', 'nodes_status_managed', 'nodes_status_unmanaged']), - uuid=dict(default=None), chassis=dict(default=None) + uuid=dict(), chassis=dict() ) diff --git a/plugins/modules/cloud/lxd/lxd_container.py b/plugins/modules/lxd_container.py similarity index 59% rename from plugins/modules/cloud/lxd/lxd_container.py rename to plugins/modules/lxd_container.py index 27f8409bc8..22e4315150 100644 --- a/plugins/modules/cloud/lxd/lxd_container.py +++ b/plugins/modules/lxd_container.py @@ -1,204 +1,197 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- -# Copyright: (c) 2016, Hiroaki Nakamura -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# Copyright (c) 2016, Hiroaki Nakamura +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: lxd_container short_description: Manage LXD instances description: - Management of LXD containers and virtual machines. 
author: "Hiroaki Nakamura (@hnakamur)" +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + version_added: 6.4.0 + diff_mode: + support: full + version_added: 6.4.0 options: - name: - description: - - Name of an instance. - type: str - required: true - project: - description: - - 'Project of an instance. - See U(https://github.com/lxc/lxd/blob/master/doc/projects.md).' - required: false - type: str - version_added: 4.8.0 - architecture: - description: - - 'The architecture for the instance (for example C(x86_64) or C(i686)). - See U(https://github.com/lxc/lxd/blob/master/doc/rest-api.md#post-1).' - type: str - required: false - config: - description: - - 'The config for the instance (for example C({"limits.cpu": "2"})). - See U(https://github.com/lxc/lxd/blob/master/doc/rest-api.md#post-1).' - - If the instance already exists and its "config" values in metadata - obtained from the LXD API U(https://github.com/lxc/lxd/blob/master/doc/rest-api.md#instances-containers-and-virtual-machines) - are different, this module tries to apply the configurations. - - The keys starting with C(volatile.) are ignored for this comparison when I(ignore_volatile_options=true). - type: dict - required: false - ignore_volatile_options: - description: - - If set to C(true), options starting with C(volatile.) are ignored. As a result, - they are reapplied for each execution. - - This default behavior can be changed by setting this option to C(false). - - The current default value C(true) is deprecated since community.general 4.0.0, - and will change to C(false) in community.general 6.0.0. - type: bool - required: false - version_added: 3.7.0 - profiles: - description: - - Profile to be used by the instance. - type: list - elements: str - devices: - description: - - 'The devices for the instance - (for example C({ "rootfs": { "path": "/dev/kvm", "type": "unix-char" }})). 
- See U(https://github.com/lxc/lxd/blob/master/doc/rest-api.md#post-1).' - type: dict - required: false - ephemeral: - description: - - Whether or not the instance is ephemeral (for example C(true) or C(false)). - See U(https://github.com/lxc/lxd/blob/master/doc/rest-api.md#post-1). - required: false - type: bool - source: - description: - - 'The source for the instance - (e.g. { "type": "image", - "mode": "pull", - "server": "https://images.linuxcontainers.org", - "protocol": "lxd", - "alias": "ubuntu/xenial/amd64" }).' - - 'See U(https://github.com/lxc/lxd/blob/master/doc/rest-api.md#post-1) for complete API documentation.' - - 'Note that C(protocol) accepts two choices: C(lxd) or C(simplestreams).' - required: false - type: dict - state: - choices: - - started - - stopped - - restarted - - absent - - frozen - description: - - Define the state of an instance. - required: false - default: started - type: str - target: - description: - - For cluster deployments. Will attempt to create an instance on a target node. - If the instance exists elsewhere in a cluster, then it will not be replaced or moved. - The name should respond to same name of the node you see in C(lxc cluster list). - type: str - required: false - version_added: 1.0.0 - timeout: - description: - - A timeout for changing the state of the instance. - - This is also used as a timeout for waiting until IPv4 addresses - are set to the all network interfaces in the instance after - starting or restarting. - required: false - default: 30 - type: int - type: - description: - - Instance type can be either C(virtual-machine) or C(container). - required: false - default: container - choices: - - container - - virtual-machine - type: str - version_added: 4.1.0 - wait_for_ipv4_addresses: - description: - - If this is true, the C(lxd_container) waits until IPv4 addresses - are set to the all network interfaces in the instance after - starting or restarting. 
- required: false - default: false - type: bool - wait_for_container: - description: - - If set to C(true), the tasks will wait till the task reports a - success status when performing container operations. - default: false - type: bool - version_added: 4.4.0 - force_stop: - description: - - If this is true, the C(lxd_container) forces to stop the instance - when it stops or restarts the instance. - required: false - default: false - type: bool - url: - description: - - The unix domain socket path or the https URL for the LXD server. - required: false - default: unix:/var/lib/lxd/unix.socket - type: str - snap_url: - description: - - The unix domain socket path when LXD is installed by snap package manager. - required: false - default: unix:/var/snap/lxd/common/lxd/unix.socket - type: str - client_key: - description: - - The client certificate key file path. - - If not specified, it defaults to C(${HOME}/.config/lxc/client.key). - required: false - aliases: [ key_file ] - type: path - client_cert: - description: - - The client certificate file path. - - If not specified, it defaults to C(${HOME}/.config/lxc/client.crt). - required: false - aliases: [ cert_file ] - type: path - trust_password: - description: - - The client trusted password. - - 'You need to set this password on the LXD server before - running this module using the following command: - C(lxc config set core.trust_password ). - See U(https://www.stgraber.org/2016/04/18/lxd-api-direct-interaction/).' - - If trust_password is set, this module send a request for - authentication before sending any requests. - required: false - type: str + name: + description: + - Name of an instance. + type: str + required: true + project: + description: + - Project of an instance. + - See U(https://documentation.ubuntu.com/lxd/en/latest/projects/). + required: false + type: str + version_added: 4.8.0 + architecture: + description: + - The architecture for the instance (for example V(x86_64) or V(i686)). 
+ - See U(https://documentation.ubuntu.com/lxd/en/latest/api/#/instances/instance_get). + type: str + required: false + config: + description: + - 'The config for the instance (for example V({"limits.cpu": "2"})).' + - See U(https://documentation.ubuntu.com/lxd/en/latest/api/#/instances/instance_get). + - If the instance already exists and its "config" values in metadata obtained from the LXD API + U(https://documentation.ubuntu.com/lxd/en/latest/api/#/instances/instance_get) + are different, then this module tries to apply the configurations U(https://documentation.ubuntu.com/lxd/en/latest/api/#/instances/instance_put). + - The keys starting with C(volatile.) are ignored for this comparison when O(ignore_volatile_options=true). + type: dict + required: false + ignore_volatile_options: + description: + - If set to V(true), options starting with C(volatile.) are ignored. As a result, they are reapplied for each execution. + - This default behavior can be changed by setting this option to V(false). + - The default value changed from V(true) to V(false) in community.general 6.0.0. + type: bool + required: false + default: false + version_added: 3.7.0 + profiles: + description: + - Profile to be used by the instance. + type: list + elements: str + devices: + description: + - 'The devices for the instance (for example V({ "rootfs": { "path": "/dev/kvm", "type": "unix-char" }})).' + - See U(https://documentation.ubuntu.com/lxd/en/latest/api/#/instances/instance_get). + type: dict + required: false + ephemeral: + description: + - Whether or not the instance is ephemeral (for example V(true) or V(false)). + - See U(https://documentation.ubuntu.com/lxd/en/latest/api/#/instances/instance_get). + required: false + type: bool + source: + description: + - 'The source for the instance (for example V({ "type": "image", "mode": "pull", "server": "https://cloud-images.ubuntu.com/releases/", + "protocol": "simplestreams", "alias": "22.04" })).' 
+ - See U(https://documentation.ubuntu.com/lxd/en/latest/api/) for complete API documentation. + - 'Note that C(protocol) accepts two choices: V(lxd) or V(simplestreams).' + required: false + type: dict + state: + choices: + - started + - stopped + - restarted + - absent + - frozen + description: + - Define the state of an instance. + required: false + default: started + type: str + target: + description: + - For cluster deployments. It attempts to create an instance on a target node. If the instance exists elsewhere in a + cluster, then it is not replaced nor moved. The name should respond to same name of the node you see in C(lxc cluster + list). + type: str + required: false + version_added: 1.0.0 + timeout: + description: + - A timeout for changing the state of the instance. + - This is also used as a timeout for waiting until IPv4 addresses are set to the all network interfaces in the instance + after starting or restarting. + required: false + default: 30 + type: int + type: + description: + - Instance type can be either V(virtual-machine) or V(container). + required: false + default: container + choices: + - container + - virtual-machine + type: str + version_added: 4.1.0 + wait_for_ipv4_addresses: + description: + - If this is V(true), the C(lxd_container) waits until IPv4 addresses are set to the all network interfaces in the instance + after starting or restarting. + required: false + default: false + type: bool + wait_for_container: + description: + - If set to V(true), the tasks wait until the task reports a success status when performing container operations. + default: false + type: bool + version_added: 4.4.0 + force_stop: + description: + - If this is V(true), the C(lxd_container) forces to stop the instance when it stops or restarts the instance. + required: false + default: false + type: bool + url: + description: + - The unix domain socket path or the https URL for the LXD server. 
+ required: false + default: unix:/var/lib/lxd/unix.socket + type: str + snap_url: + description: + - The unix domain socket path when LXD is installed by snap package manager. + required: false + default: unix:/var/snap/lxd/common/lxd/unix.socket + type: str + client_key: + description: + - The client certificate key file path. + - If not specified, it defaults to C(${HOME}/.config/lxc/client.key). + required: false + aliases: [key_file] + type: path + client_cert: + description: + - The client certificate file path. + - If not specified, it defaults to C(${HOME}/.config/lxc/client.crt). + required: false + aliases: [cert_file] + type: path + trust_password: + description: + - The client trusted password. + - 'You need to set this password on the LXD server before running this module using the following command: C(lxc config + set core.trust_password ). See U(https://www.stgraber.org/2016/04/18/lxd-api-direct-interaction/).' + - If trust_password is set, this module send a request for authentication before sending any requests. + required: false + type: str notes: - Instances can be a container or a virtual machine, both of them must have unique name. If you attempt to create an instance - with a name that already existed in the users namespace the module will - simply return as "unchanged". - - There are two ways to run commands inside a container or virtual machine, using the command - module or using the ansible lxd connection plugin bundled in Ansible >= - 2.1, the later requires python to be installed in the instance which can - be done with the command module. - - You can copy a file from the host to the instance - with the Ansible M(ansible.builtin.copy) and M(ansible.builtin.template) module and the `lxd` connection plugin. - See the example below. - - You can copy a file in the created instance to the localhost - with `command=lxc file pull instance_name/dir/filename filename`. 
+ with a name that already existed in the users namespace, the module simply returns as "unchanged". + - There are two ways to run commands inside a container or virtual machine, using the command module or using the ansible + lxd connection plugin bundled in Ansible >= 2.1, the later requires python to be installed in the instance which can be + done with the command module. + - You can copy a file from the host to the instance with the Ansible M(ansible.builtin.copy) and M(ansible.builtin.template) + module and the P(community.general.lxd#connection) connection plugin. See the example below. + - You can copy a file in the created instance to the localhost with C(command=lxc file pull instance_name/dir/filename filename). See the first example below. -''' + - Linuxcontainers.org has phased out LXC/LXD support with March 2024 + (U(https://discuss.linuxcontainers.org/t/important-notice-for-lxd-users-image-server/18479)). + Currently only Ubuntu is still providing images. +""" -EXAMPLES = ''' +EXAMPLES = r""" # An example for creating a Ubuntu container and install python - hosts: localhost connection: local @@ -211,9 +204,9 @@ EXAMPLES = ''' source: type: image mode: pull - server: https://images.linuxcontainers.org - protocol: lxd # if you get a 404, try setting protocol: simplestreams - alias: ubuntu/xenial/amd64 + server: https://cloud-images.ubuntu.com/releases/ + protocol: simplestreams + alias: "22.04" profiles: ["default"] wait_for_ipv4_addresses: true timeout: 600 @@ -255,6 +248,26 @@ EXAMPLES = ''' wait_for_ipv4_addresses: true timeout: 600 +# An example of creating a ubuntu-minial container +- hosts: localhost + connection: local + tasks: + - name: Create a started container + community.general.lxd_container: + name: mycontainer + ignore_volatile_options: true + state: started + source: + type: image + mode: pull + # Provides Ubuntu minimal images + server: https://cloud-images.ubuntu.com/minimal/releases/ + protocol: simplestreams + alias: "22.04" + 
profiles: ["default"] + wait_for_ipv4_addresses: true + timeout: 600 + # An example for creating container in project other than default - hosts: localhost connection: local @@ -269,8 +282,8 @@ EXAMPLES = ''' protocol: simplestreams type: image mode: pull - server: https://images.linuxcontainers.org - alias: ubuntu/20.04/cloud + server: https://cloud-images.ubuntu.com/releases/ + alias: "22.04" profiles: ["default"] wait_for_ipv4_addresses: true timeout: 600 @@ -303,8 +316,8 @@ EXAMPLES = ''' community.general.lxd_container: url: https://127.0.0.1:8443 # These client_cert and client_key values are equal to the default values. - #client_cert: "{{ lookup('env', 'HOME') }}/.config/lxc/client.crt" - #client_key: "{{ lookup('env', 'HOME') }}/.config/lxc/client.key" + # client_cert: "{{ lookup('env', 'HOME') }}/.config/lxc/client.crt" + # client_key: "{{ lookup('env', 'HOME') }}/.config/lxc/client.key" trust_password: mypassword name: mycontainer state: restarted @@ -327,7 +340,7 @@ EXAMPLES = ''' # nodes - 'node01' and 'node02'. In 'target:', 'node01' and 'node02' are names of LXD cluster # members that LXD cluster recognizes, not ansible inventory names, see: 'lxc cluster list'. # LXD API calls can be made to any LXD member, in this example, we send API requests to -#'node01.example.com', which matches ansible inventory name. +# 'node01.example.com', which matches ansible inventory name. 
- hosts: node01.example.com tasks: - name: Create LXD container @@ -338,7 +351,7 @@ EXAMPLES = ''' source: type: image mode: pull - alias: ubuntu/xenial/amd64 + alias: "22.04" target: node01 - name: Create container on another node @@ -349,7 +362,7 @@ EXAMPLES = ''' source: type: image mode: pull - alias: ubuntu/xenial/amd64 + alias: "22.04" target: node02 # An example for creating a virtual machine @@ -368,17 +381,22 @@ EXAMPLES = ''' protocol: simplestreams type: image mode: pull - server: https://images.linuxcontainers.org + server: ['...'] # URL to the image server alias: debian/11 timeout: 600 -''' +""" -RETURN = ''' +RETURN = r""" addresses: description: Mapping from the network device name to a list of IPv4 addresses in the instance. returned: when state is started or restarted type: dict - sample: {"eth0": ["10.155.92.191"]} + sample: + { + "eth0": [ + "10.155.92.191" + ] + } old_state: description: The old state of the instance. returned: when state is started or restarted @@ -393,15 +411,17 @@ actions: description: List of actions performed for the instance. returned: success type: list - sample: '["create", "start"]' -''' + sample: ["create", "start"] +""" + +import copy import datetime import os import time +from urllib.parse import urlencode from ansible.module_utils.basic import AnsibleModule from ansible_collections.community.general.plugins.module_utils.lxd import LXDClient, LXDClientException -from ansible.module_utils.six.moves.urllib.parse import urlencode # LXD_ANSIBLE_STATES is a map of states that contain values of methods used # when a particular state is evoked. @@ -410,7 +430,7 @@ LXD_ANSIBLE_STATES = { 'stopped': '_stopped', 'restarted': '_restarted', 'absent': '_destroyed', - 'frozen': '_frozen' + 'frozen': '_frozen', } # ANSIBLE_LXD_STATES is a map of states of lxd containers to the Ansible @@ -426,9 +446,13 @@ ANSIBLE_LXD_DEFAULT_URL = 'unix:/var/lib/lxd/unix.socket' # CONFIG_PARAMS is a list of config attribute names. 
CONFIG_PARAMS = [ - 'architecture', 'config', 'devices', 'ephemeral', 'profiles', 'source' + 'architecture', 'config', 'devices', 'ephemeral', 'profiles', 'source', 'type' ] +# CONFIG_CREATION_PARAMS is a list of attribute names that are only applied +# on instance creation. +CONFIG_CREATION_PARAMS = ['source', 'type'] + class LXDContainerManagement(object): def __init__(self, module): @@ -453,13 +477,6 @@ class LXDContainerManagement(object): self.type = self.module.params['type'] - # LXD Rest API provides additional endpoints for creating containers and virtual-machines. - self.api_endpoint = None - if self.type == 'container': - self.api_endpoint = '/1.0/containers' - elif self.type == 'virtual-machine': - self.api_endpoint = '/1.0/virtual-machines' - self.key_file = self.module.params.get('client_key') if self.key_file is None: self.key_file = '{0}/.config/lxc/client.key'.format(os.environ['HOME']) @@ -485,8 +502,23 @@ class LXDContainerManagement(object): ) except LXDClientException as e: self.module.fail_json(msg=e.msg) + + # LXD (3.19) Rest API provides instances endpoint, failback to containers and virtual-machines + # https://documentation.ubuntu.com/lxd/en/latest/rest-api/#instances-containers-and-virtual-machines + self.api_endpoint = '/1.0/instances' + check_api_endpoint = self.client.do('GET', '{0}?project='.format(self.api_endpoint), ok_error_codes=[404]) + + if check_api_endpoint['error_code'] == 404: + if self.type == 'container': + self.api_endpoint = '/1.0/containers' + elif self.type == 'virtual-machine': + self.api_endpoint = '/1.0/virtual-machines' + self.trust_password = self.module.params.get('trust_password', None) self.actions = [] + self.diff = {'before': {}, 'after': {}} + self.old_instance_json = {} + self.old_sections = {} def _build_config(self): self.config = {} @@ -520,7 +552,8 @@ class LXDContainerManagement(object): body_json = {'action': action, 'timeout': self.timeout} if force_stop: body_json['force'] = True - return 
self.client.do('PUT', url, body_json=body_json) + if not self.module.check_mode: + return self.client.do('PUT', url, body_json=body_json) def _create_instance(self): url = self.api_endpoint @@ -533,7 +566,10 @@ class LXDContainerManagement(object): url = '{0}?{1}'.format(url, urlencode(url_params)) config = self.config.copy() config['name'] = self.name - self.client.do('POST', url, config, wait_for_container=self.wait_for_container) + if self.type not in self.api_endpoint: + config['type'] = self.type + if not self.module.check_mode: + self.client.do('POST', url, config, wait_for_container=self.wait_for_container) self.actions.append('create') def _start_instance(self): @@ -552,7 +588,8 @@ class LXDContainerManagement(object): url = '{0}/{1}'.format(self.api_endpoint, self.name) if self.project: url = '{0}?{1}'.format(url, urlencode(dict(project=self.project))) - self.client.do('DELETE', url) + if not self.module.check_mode: + self.client.do('DELETE', url) self.actions.append('delete') def _freeze_instance(self): @@ -561,15 +598,20 @@ class LXDContainerManagement(object): def _unfreeze_instance(self): self._change_state('unfreeze') - self.actions.append('unfreez') + self.actions.append('unfreeze') def _instance_ipv4_addresses(self, ignore_devices=None): ignore_devices = ['lo'] if ignore_devices is None else ignore_devices - - resp_json = self._get_instance_state_json() - network = resp_json['metadata']['network'] or {} - network = dict((k, v) for k, v in network.items() if k not in ignore_devices) or {} - addresses = dict((k, [a['address'] for a in v['addresses'] if a['family'] == 'inet']) for k, v in network.items()) or {} + data = (self._get_instance_state_json() or {}).get('metadata', None) or {} + network = { + k: v + for k, v in (data.get('network') or {}).items() + if k not in ignore_devices + } + addresses = { + k: [a['address'] for a in v['addresses'] if a['family'] == 'inet'] + for k, v in network.items() + } return addresses @staticmethod @@ -582,7 +624,7 
@@ class LXDContainerManagement(object): while datetime.datetime.now() < due: time.sleep(1) addresses = self._instance_ipv4_addresses() - if self._has_all_ipv4_addresses(addresses): + if self._has_all_ipv4_addresses(addresses) or self.module.check_mode: self.addresses = addresses return except LXDClientException as e: @@ -655,16 +697,10 @@ class LXDContainerManagement(object): def _needs_to_change_instance_config(self, key): if key not in self.config: return False - if key == 'config' and self.ignore_volatile_options: # the old behavior is to ignore configurations by keyword "volatile" - old_configs = dict((k, v) for k, v in self.old_instance_json['metadata'][key].items() if not k.startswith('volatile.')) - for k, v in self.config['config'].items(): - if k not in old_configs: - return True - if old_configs[k] != v: - return True - return False - elif key == 'config': # next default behavior - old_configs = dict((k, v) for k, v in self.old_instance_json['metadata'][key].items()) + + if key == 'config': + # self.old_sections is already filtered for volatile keys if necessary + old_configs = dict(self.old_sections.get(key, None) or {}) for k, v in self.config['config'].items(): if k not in old_configs: return True @@ -672,55 +708,65 @@ class LXDContainerManagement(object): return True return False else: - old_configs = self.old_instance_json['metadata'][key] + old_configs = self.old_sections.get(key, {}) return self.config[key] != old_configs def _needs_to_apply_instance_configs(self): - return ( - self._needs_to_change_instance_config('architecture') or - self._needs_to_change_instance_config('config') or - self._needs_to_change_instance_config('ephemeral') or - self._needs_to_change_instance_config('devices') or - self._needs_to_change_instance_config('profiles') - ) + for param in set(CONFIG_PARAMS) - set(CONFIG_CREATION_PARAMS): + if self._needs_to_change_instance_config(param): + return True + return False def _apply_instance_configs(self): - old_metadata = 
self.old_instance_json['metadata'] - body_json = { - 'architecture': old_metadata['architecture'], - 'config': old_metadata['config'], - 'devices': old_metadata['devices'], - 'profiles': old_metadata['profiles'] - } - - if self._needs_to_change_instance_config('architecture'): - body_json['architecture'] = self.config['architecture'] - if self._needs_to_change_instance_config('config'): - for k, v in self.config['config'].items(): - body_json['config'][k] = v - if self._needs_to_change_instance_config('ephemeral'): - body_json['ephemeral'] = self.config['ephemeral'] - if self._needs_to_change_instance_config('devices'): - body_json['devices'] = self.config['devices'] - if self._needs_to_change_instance_config('profiles'): - body_json['profiles'] = self.config['profiles'] + old_metadata = copy.deepcopy(self.old_instance_json).get('metadata', None) or {} + body_json = {} + for param in set(CONFIG_PARAMS) - set(CONFIG_CREATION_PARAMS): + if param in old_metadata: + body_json[param] = old_metadata[param] + if self._needs_to_change_instance_config(param): + if param == 'config': + body_json['config'] = body_json.get('config', None) or {} + for k, v in self.config['config'].items(): + body_json['config'][k] = v + else: + body_json[param] = self.config[param] + self.diff['after']['instance'] = body_json url = '{0}/{1}'.format(self.api_endpoint, self.name) if self.project: url = '{0}?{1}'.format(url, urlencode(dict(project=self.project))) - self.client.do('PUT', url, body_json=body_json) + if not self.module.check_mode: + self.client.do('PUT', url, body_json=body_json) self.actions.append('apply_instance_configs') def run(self): """Run the main method.""" + def adjust_content(content): + return content if not isinstance(content, dict) else { + k: v for k, v in content.items() if not (self.ignore_volatile_options and k.startswith('volatile.')) + } + try: if self.trust_password is not None: self.client.authenticate(self.trust_password) self.ignore_volatile_options = 
self.module.params.get('ignore_volatile_options') self.old_instance_json = self._get_instance_json() + self.old_sections = { + section: adjust_content(content) + for section, content in (self.old_instance_json.get('metadata') or {}).items() + if section in set(CONFIG_PARAMS) - set(CONFIG_CREATION_PARAMS) + } + + self.diff['before']['instance'] = self.old_sections + # preliminary, will be overwritten in _apply_instance_configs() if called + self.diff['after']['instance'] = self.config + self.old_state = self._instance_json_to_module_state(self.old_instance_json) + self.diff['before']['state'] = self.old_state + self.diff['after']['state'] = self.state + action = getattr(self, LXD_ANSIBLE_STATES[self.state]) action() @@ -729,7 +775,8 @@ class LXDContainerManagement(object): 'log_verbosity': self.module._verbosity, 'changed': state_changed, 'old_state': self.old_state, - 'actions': self.actions + 'actions': self.actions, + 'diff': self.diff, } if self.client.debug: result_json['logs'] = self.client.logs @@ -741,7 +788,8 @@ class LXDContainerManagement(object): fail_params = { 'msg': e.msg, 'changed': state_changed, - 'actions': self.actions + 'actions': self.actions, + 'diff': self.diff, } if self.client.debug: fail_params['logs'] = e.kwargs['logs'] @@ -755,7 +803,7 @@ def main(): argument_spec=dict( name=dict( type='str', - required=True + required=True, ), project=dict( type='str', @@ -768,6 +816,7 @@ def main(): ), ignore_volatile_options=dict( type='bool', + default=False, ), devices=dict( type='dict', @@ -784,7 +833,7 @@ def main(): ), state=dict( choices=list(LXD_ANSIBLE_STATES.keys()), - default='started' + default='started', ), target=dict( type='str', @@ -800,47 +849,37 @@ def main(): ), wait_for_container=dict( type='bool', - default=False + default=False, ), wait_for_ipv4_addresses=dict( type='bool', - default=False + default=False, ), force_stop=dict( type='bool', - default=False + default=False, ), url=dict( type='str', - default=ANSIBLE_LXD_DEFAULT_URL + 
default=ANSIBLE_LXD_DEFAULT_URL, ), snap_url=dict( type='str', - default='unix:/var/snap/lxd/common/lxd/unix.socket' + default='unix:/var/snap/lxd/common/lxd/unix.socket', ), client_key=dict( type='path', - aliases=['key_file'] + aliases=['key_file'], ), client_cert=dict( type='path', - aliases=['cert_file'] + aliases=['cert_file'], ), - trust_password=dict(type='str', no_log=True) + trust_password=dict(type='str', no_log=True), ), - supports_check_mode=False, + supports_check_mode=True, ) - if module.params['ignore_volatile_options'] is None: - module.params['ignore_volatile_options'] = True - module.deprecate( - 'If the keyword "volatile" is used in a playbook in the config' - 'section, a "changed" message will appear with every run, even without a change' - 'to the playbook.' - 'This will change in the future. Please test your scripts' - 'by "ignore_volatile_options: false". To keep the old behavior, set that option explicitly to "true"', - version='6.0.0', collection_name='community.general') - lxd_manage = LXDContainerManagement(module=module) lxd_manage.run() diff --git a/plugins/modules/cloud/lxd/lxd_profile.py b/plugins/modules/lxd_profile.py similarity index 72% rename from plugins/modules/cloud/lxd/lxd_profile.py rename to plugins/modules/lxd_profile.py index 82244f0bac..8a6fd19aa6 100644 --- a/plugins/modules/cloud/lxd/lxd_profile.py +++ b/plugins/modules/lxd_profile.py @@ -1,125 +1,120 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- -# Copyright: (c) 2016, Hiroaki Nakamura -# Copyright: (c) 2020, Frank Dornheim -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# Copyright (c) 2016, Hiroaki Nakamura +# Copyright (c) 2020, Frank Dornheim +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import 
annotations -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: lxd_profile short_description: Manage LXD profiles description: - - Management of LXD profiles + - Management of LXD profiles. author: "Hiroaki Nakamura (@hnakamur)" +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: none + diff_mode: + support: none options: - name: - description: - - Name of a profile. - required: true - type: str - project: - description: - - 'Project of a profile. - See U(https://github.com/lxc/lxd/blob/master/doc/projects.md).' - type: str - required: false - version_added: 4.8.0 + name: description: - description: - - Description of the profile. - type: str - config: - description: - - 'The config for the container (e.g. {"limits.memory": "4GB"}). - See U(https://github.com/lxc/lxd/blob/master/doc/rest-api.md#patch-3)' - - If the profile already exists and its "config" value in metadata - obtained from - GET /1.0/profiles/ - U(https://github.com/lxc/lxd/blob/master/doc/rest-api.md#get-19) - are different, they this module tries to apply the configurations. - - Not all config values are supported to apply the existing profile. - Maybe you need to delete and recreate a profile. - required: false - type: dict - devices: - description: - - 'The devices for the profile - (e.g. {"rootfs": {"path": "/dev/kvm", "type": "unix-char"}). - See U(https://github.com/lxc/lxd/blob/master/doc/rest-api.md#patch-3)' - required: false - type: dict - new_name: - description: - - A new name of a profile. - - If this parameter is specified a profile will be renamed to this name. - See U(https://github.com/lxc/lxd/blob/master/doc/rest-api.md#post-11) - required: false - type: str - merge_profile: - description: - - Merge the configuration of the present profile with the new desired configuration, - instead of replacing it. 
- required: false - default: false - type: bool - version_added: 2.1.0 - state: - choices: - - present - - absent - description: - - Define the state of a profile. - required: false - default: present - type: str - url: - description: - - The unix domain socket path or the https URL for the LXD server. - required: false - default: unix:/var/lib/lxd/unix.socket - type: str - snap_url: - description: - - The unix domain socket path when LXD is installed by snap package manager. - required: false - default: unix:/var/snap/lxd/common/lxd/unix.socket - type: str - client_key: - description: - - The client certificate key file path. - - If not specified, it defaults to C($HOME/.config/lxc/client.key). - required: false - aliases: [ key_file ] - type: path - client_cert: - description: - - The client certificate file path. - - If not specified, it defaults to C($HOME/.config/lxc/client.crt). - required: false - aliases: [ cert_file ] - type: path - trust_password: - description: - - The client trusted password. - - You need to set this password on the LXD server before - running this module using the following command. - lxc config set core.trust_password - See U(https://www.stgraber.org/2016/04/18/lxd-api-direct-interaction/) - - If trust_password is set, this module send a request for - authentication before sending any requests. - required: false - type: str + - Name of a profile. + required: true + type: str + project: + description: + - Project of a profile. See U(https://documentation.ubuntu.com/lxd/en/latest/projects/). + type: str + required: false + version_added: 4.8.0 + description: + description: + - Description of the profile. + type: str + config: + description: + - 'The config for the instance (for example V({"limits.memory": "4GB"})).' + - See U(https://documentation.ubuntu.com/lxd/en/latest/api/#/profiles/profile_get). 
+ - If the profile already exists and its C(config) value in metadata obtained from GET /1.0/profiles/ + U(https://documentation.ubuntu.com/lxd/en/latest/api/#/profiles/profile_get) + are different, then this module tries to apply the configurations U(https://documentation.ubuntu.com/lxd/en/latest/api/#/profiles/profile_put). + - Not all config values are supported to apply the existing profile. Maybe you need to delete and recreate a profile. + required: false + type: dict + devices: + description: + - 'The devices for the profile (for example V({"rootfs": {"path": "/dev/kvm", "type": "unix-char"})).' + - See U(https://documentation.ubuntu.com/lxd/en/latest/api/#/profiles/profile_get). + required: false + type: dict + new_name: + description: + - A new name of a profile. + - If this parameter is specified a profile is renamed to this name. + - See U(https://documentation.ubuntu.com/lxd/en/latest/api/#/profiles/profile_post). + required: false + type: str + merge_profile: + description: + - Merge the configuration of the present profile with the new desired configuration, instead of replacing it. + required: false + default: false + type: bool + version_added: 2.1.0 + state: + choices: + - present + - absent + description: + - Define the state of a profile. + required: false + default: present + type: str + url: + description: + - The unix domain socket path or the https URL for the LXD server. + required: false + default: unix:/var/lib/lxd/unix.socket + type: str + snap_url: + description: + - The unix domain socket path when LXD is installed by snap package manager. + required: false + default: unix:/var/snap/lxd/common/lxd/unix.socket + type: str + client_key: + description: + - The client certificate key file path. + - If not specified, it defaults to C($HOME/.config/lxc/client.key). + required: false + aliases: [key_file] + type: path + client_cert: + description: + - The client certificate file path. 
+ - If not specified, it defaults to C($HOME/.config/lxc/client.crt). + required: false + aliases: [cert_file] + type: path + trust_password: + description: + - The client trusted password. + - 'You need to set this password on the LXD server before running this module using the following command: C(lxc config + set core.trust_password ). See U(https://www.stgraber.org/2016/04/18/lxd-api-direct-interaction/).' + - If O(trust_password) is set, this module send a request for authentication before sending any requests. + required: false + type: str notes: - - Profiles must have a unique name. If you attempt to create a profile - with a name that already existed in the users namespace the module will - simply return as "unchanged". -''' + - Profiles must have a unique name. If you attempt to create a profile with a name that already existed in the users namespace + the module simply returns as "unchanged". +""" -EXAMPLES = ''' +EXAMPLES = r""" # An example for creating a profile - hosts: localhost connection: local @@ -153,22 +148,22 @@ EXAMPLES = ''' - hosts: localhost connection: local tasks: - - name: Create macvlan profile - community.general.lxd_profile: - url: https://127.0.0.1:8443 - # These client_cert and client_key values are equal to the default values. - #client_cert: "{{ lookup('env', 'HOME') }}/.config/lxc/client.crt" - #client_key: "{{ lookup('env', 'HOME') }}/.config/lxc/client.key" - trust_password: mypassword - name: macvlan - state: present - config: {} - description: my macvlan profile - devices: - eth0: - nictype: macvlan - parent: br0 - type: nic + - name: Create macvlan profile + community.general.lxd_profile: + url: https://127.0.0.1:8443 + # These client_cert and client_key values are equal to the default values. 
+ # client_cert: "{{ lookup('env', 'HOME') }}/.config/lxc/client.crt" + # client_key: "{{ lookup('env', 'HOME') }}/.config/lxc/client.key" + trust_password: mypassword + name: macvlan + state: present + config: {} + description: my macvlan profile + devices: + eth0: + nictype: macvlan + parent: br0 + type: nic # An example for modify/merge a profile - hosts: localhost @@ -205,11 +200,11 @@ EXAMPLES = ''' name: macvlan new_name: macvlan2 state: present -''' +""" -RETURN = ''' +RETURN = r""" old_state: - description: The old state of the profile + description: The old state of the profile. returned: success type: str sample: "absent" @@ -222,13 +217,14 @@ actions: description: List of actions performed for the profile. returned: success type: list - sample: '["create"]' -''' + sample: ["create"] +""" import os +from urllib.parse import urlencode + from ansible.module_utils.basic import AnsibleModule from ansible_collections.community.general.plugins.module_utils.lxd import LXDClient, LXDClientException -from ansible.module_utils.six.moves.urllib.parse import urlencode # ANSIBLE_LXD_DEFAULT_URL is a default value of the lxd endpoint ANSIBLE_LXD_DEFAULT_URL = 'unix:/var/lib/lxd/unix.socket' @@ -246,7 +242,7 @@ CONFIG_PARAMS = [ class LXDProfileManagement(object): def __init__(self, module): - """Management of LXC containers via Ansible. + """Management of LXC profiles via Ansible. :param module: Processed Ansible Module. :type module: ``object`` @@ -360,7 +356,7 @@ class LXDProfileManagement(object): ) def _merge_dicts(self, source, destination): - """Merge Dictionarys + """Merge Dictionaries Get a list of filehandle numbers from logger to be handed to DaemonContext.files_preserve @@ -411,7 +407,7 @@ class LXDProfileManagement(object): Rebuild the Profile by the configuration provided in the play. Existing configurations are discarded. - This ist the default behavior. + This is the default behavior. 
Args: dict(config): Dict with the old config in 'metadata' and new config in 'config' diff --git a/plugins/modules/cloud/lxd/lxd_project.py b/plugins/modules/lxd_project.py similarity index 74% rename from plugins/modules/cloud/lxd/lxd_project.py rename to plugins/modules/lxd_project.py index d1488272c8..a0bd710547 100644 --- a/plugins/modules/cloud/lxd/lxd_project.py +++ b/plugins/modules/lxd_project.py @@ -1,106 +1,105 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- +# Copyright (c) Ansible project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +from __future__ import annotations -from __future__ import absolute_import, division, print_function -__metaclass__ = type - -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: lxd_project short_description: Manage LXD projects version_added: 4.8.0 description: - Management of LXD projects. author: "Raymond Chang (@we10710aa)" +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: none + diff_mode: + support: none options: - name: - description: - - Name of the project. - required: true - type: str + name: description: - description: - - Description of the project. - type: str - config: - description: - - 'The config for the project (for example C({"features.profiles": "true"})). - See U(https://linuxcontainers.org/lxd/docs/master/projects/).' - - If the project already exists and its "config" value in metadata - obtained from - C(GET /1.0/projects/) - U(https://linuxcontainers.org/lxd/docs/master/api/#/projects/project_get) - are different, then this module tries to apply the configurations. - type: dict - new_name: - description: - - A new name of a project. - - If this parameter is specified a project will be renamed to this name. 
- See U(https://linuxcontainers.org/lxd/docs/master/api/#/projects/project_post). - required: false - type: str - merge_project: - description: - - Merge the configuration of the present project with the new desired configuration, - instead of replacing it. If configuration is the same after merged, no change will be made. - required: false - default: false - type: bool - state: - choices: - - present - - absent - description: - - Define the state of a project. - required: false - default: present - type: str - url: - description: - - The Unix domain socket path or the https URL for the LXD server. - required: false - default: unix:/var/lib/lxd/unix.socket - type: str - snap_url: - description: - - The Unix domain socket path when LXD is installed by snap package manager. - required: false - default: unix:/var/snap/lxd/common/lxd/unix.socket - type: str - client_key: - description: - - The client certificate key file path. - - If not specified, it defaults to C($HOME/.config/lxc/client.key). - required: false - aliases: [ key_file ] - type: path - client_cert: - description: - - The client certificate file path. - - If not specified, it defaults to C($HOME/.config/lxc/client.crt). - required: false - aliases: [ cert_file ] - type: path - trust_password: - description: - - The client trusted password. - - 'You need to set this password on the LXD server before - running this module using the following command: - C(lxc config set core.trust_password ) - See U(https://www.stgraber.org/2016/04/18/lxd-api-direct-interaction/).' - - If I(trust_password) is set, this module send a request for - authentication before sending any requests. - required: false - type: str + - Name of the project. + required: true + type: str + description: + description: + - Description of the project. + type: str + config: + description: + - 'The config for the project (for example V({"features.profiles": "true"})).' 
+ - See U(https://documentation.ubuntu.com/lxd/en/latest/api/#/projects/project_get). + - If the project already exists and its "config" value in metadata obtained from C(GET /1.0/projects/) + U(https://documentation.ubuntu.com/lxd/en/latest/api/#/projects/project_get) + are different, then this module tries to apply the configurations U(https://documentation.ubuntu.com/lxd/en/latest/api/#/projects/project_put). + type: dict + new_name: + description: + - A new name of a project. + - If this parameter is specified a project is renamed to this name. + - See U(https://documentation.ubuntu.com/lxd/en/latest/api/#/projects/project_post). + required: false + type: str + merge_project: + description: + - Merge the configuration of the present project with the new desired configuration, instead of replacing it. If configuration + is the same after merged, no change is made. + required: false + default: false + type: bool + state: + choices: + - present + - absent + description: + - Define the state of a project. + required: false + default: present + type: str + url: + description: + - The Unix domain socket path or the https URL for the LXD server. + required: false + default: unix:/var/lib/lxd/unix.socket + type: str + snap_url: + description: + - The Unix domain socket path when LXD is installed by snap package manager. + required: false + default: unix:/var/snap/lxd/common/lxd/unix.socket + type: str + client_key: + description: + - The client certificate key file path. + - If not specified, it defaults to C($HOME/.config/lxc/client.key). + required: false + aliases: [key_file] + type: path + client_cert: + description: + - The client certificate file path. + - If not specified, it defaults to C($HOME/.config/lxc/client.crt). + required: false + aliases: [cert_file] + type: path + trust_password: + description: + - The client trusted password. 
+ - 'You need to set this password on the LXD server before running this module using the following command: C(lxc config + set core.trust_password ) See U(https://www.stgraber.org/2016/04/18/lxd-api-direct-interaction/).' + - If O(trust_password) is set, this module send a request for authentication before sending any requests. + required: false + type: str notes: - - Projects must have a unique name. If you attempt to create a project - with a name that already existed in the users namespace the module will - simply return as "unchanged". -''' + - Projects must have a unique name. If you attempt to create a project with a name that already existed in the users namespace + the module simply returns as "unchanged". +""" -EXAMPLES = ''' +EXAMPLES = r""" # An example for creating a project - hosts: localhost connection: local @@ -123,9 +122,9 @@ EXAMPLES = ''' state: present config: {} description: my new project -''' +""" -RETURN = ''' +RETURN = r""" old_state: description: The old state of the project. returned: success @@ -138,7 +137,7 @@ logs: elements: dict contains: type: - description: Type of actions performed, currently only C(sent request). + description: Type of actions performed, currently only V(sent request). type: str sample: "sent request" request: @@ -158,7 +157,7 @@ logs: type: str sample: "(too long to be placed here)" timeout: - description: Timeout of HTTP request, C(null) if unset. + description: Timeout of HTTP request, V(null) if unset. 
type: int sample: null response: @@ -175,9 +174,11 @@ actions: type: list elements: str sample: ["create"] -''' +""" -from ansible_collections.community.general.plugins.module_utils.lxd import LXDClient, LXDClientException +from ansible_collections.community.general.plugins.module_utils.lxd import ( + LXDClient, LXDClientException, default_key_file, default_cert_file +) from ansible.module_utils.basic import AnsibleModule import os @@ -210,10 +211,10 @@ class LXDProjectManagement(object): self.key_file = self.module.params.get('client_key') if self.key_file is None: - self.key_file = os.path.expanduser('~/.config/lxc/client.key') + self.key_file = default_key_file() self.cert_file = self.module.params.get('client_cert') if self.cert_file is None: - self.cert_file = os.path.expanduser('~/.config/lxc/client.crt') + self.cert_file = default_cert_file() self.debug = self.module._verbosity >= 4 try: @@ -303,7 +304,7 @@ class LXDProjectManagement(object): ) def _merge_dicts(self, source, destination): - """ Return a new dict taht merge two dict, + """ Return a new dict that merge two dict, with values in source dict overwrite destination dict Args: diff --git a/plugins/modules/packaging/os/macports.py b/plugins/modules/macports.py similarity index 77% rename from plugins/modules/packaging/os/macports.py rename to plugins/modules/macports.py index 1d3f47a240..7e9e3c0b57 100644 --- a/plugins/modules/packaging/os/macports.py +++ b/plugins/modules/macports.py @@ -1,57 +1,63 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- -# (c) 2013, Jimmy Tang +# Copyright (c) 2013, Jimmy Tang # Based on okpg (Patrick Pelletier ), pacman # (Afterburn) and pkgin (Shaun Zinck) modules # -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import absolute_import, division, 
print_function -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: macports author: "Jimmy Tang (@jcftang)" short_description: Package manager for MacPorts description: - - Manages MacPorts packages (ports) + - Manages MacPorts packages (ports). +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: none + diff_mode: + support: none options: - name: - description: - - A list of port names. - aliases: ['port'] - type: list - elements: str - selfupdate: - description: - - Update Macports and the ports tree, either prior to installing ports or as a separate step. - - Equivalent to running C(port selfupdate). - aliases: ['update_cache', 'update_ports'] - default: "no" - type: bool - state: - description: - - Indicates the desired state of the port. - choices: [ 'present', 'absent', 'active', 'inactive', 'installed', 'removed'] - default: present - type: str - upgrade: - description: - - Upgrade all outdated ports, either prior to installing ports or as a separate step. - - Equivalent to running C(port upgrade outdated). - default: "no" - type: bool - variant: - description: - - A port variant specification. - - 'C(variant) is only supported with state: I(installed)/I(present).' - aliases: ['variants'] - type: str -''' -EXAMPLES = ''' + name: + description: + - A list of port names. + aliases: ['port'] + type: list + elements: str + selfupdate: + description: + - Update Macports and the ports tree, either prior to installing ports or as a separate step. + - Equivalent to running C(port selfupdate). + aliases: ['update_cache', 'update_ports'] + default: false + type: bool + state: + description: + - Indicates the desired state of the port. + choices: ['present', 'absent', 'active', 'inactive', 'installed', 'removed'] + default: present + type: str + upgrade: + description: + - Upgrade all outdated ports, either prior to installing ports or as a separate step. 
+ - Equivalent to running C(port upgrade outdated). + default: false + type: bool + variant: + description: + - A port variant specification. + - O(variant) is only supported with O(state=installed) and O(state=present). + aliases: ['variants'] + type: str +""" + +EXAMPLES = r""" - name: Install the foo port community.general.macports: name: foo @@ -66,18 +72,18 @@ EXAMPLES = ''' name: "{{ ports }}" vars: ports: - - foo - - foo-tools + - foo + - foo-tools - name: Update Macports and the ports tree, then upgrade all outdated ports community.general.macports: - selfupdate: yes - upgrade: yes + selfupdate: true + upgrade: true - name: Update Macports and the ports tree, then install the foo port community.general.macports: name: foo - selfupdate: yes + selfupdate: true - name: Remove the foo port community.general.macports: @@ -93,18 +99,17 @@ EXAMPLES = ''' community.general.macports: name: foo state: inactive -''' +""" import re from ansible.module_utils.basic import AnsibleModule -from ansible.module_utils.six.moves import shlex_quote def selfupdate(module, port_path): """ Update Macports and the ports tree. """ - rc, out, err = module.run_command("%s -v selfupdate" % port_path) + rc, out, err = module.run_command([port_path, "-v", "selfupdate"]) if rc == 0: updated = any( @@ -128,7 +133,7 @@ def selfupdate(module, port_path): def upgrade(module, port_path): """ Upgrade outdated ports. """ - rc, out, err = module.run_command("%s upgrade outdated" % port_path) + rc, out, err = module.run_command([port_path, "upgrade", "outdated"]) # rc is 1 when nothing to upgrade so check stdout first. 
if out.strip() == "Nothing to upgrade.": @@ -175,7 +180,7 @@ def remove_ports(module, port_path, ports, stdout, stderr): if not query_port(module, port_path, port): continue - rc, out, err = module.run_command("%s uninstall %s" % (port_path, port)) + rc, out, err = module.run_command([port_path, "uninstall", port]) stdout += out stderr += err if query_port(module, port_path, port): @@ -199,7 +204,7 @@ def install_ports(module, port_path, ports, variant, stdout, stderr): if query_port(module, port_path, port): continue - rc, out, err = module.run_command("%s install %s %s" % (port_path, port, variant)) + rc, out, err = module.run_command([port_path, "install", port, variant]) stdout += out stderr += err if not query_port(module, port_path, port): @@ -214,7 +219,7 @@ def install_ports(module, port_path, ports, variant, stdout, stderr): def activate_ports(module, port_path, ports, stdout, stderr): - """ Activate a port if it's inactive. """ + """ Activate a port if it is inactive. """ activate_c = 0 @@ -225,7 +230,7 @@ def activate_ports(module, port_path, ports, stdout, stderr): if query_port(module, port_path, port, state="active"): continue - rc, out, err = module.run_command("%s activate %s" % (port_path, port)) + rc, out, err = module.run_command([port_path, "activate", port]) stdout += out stderr += err @@ -241,7 +246,7 @@ def activate_ports(module, port_path, ports, stdout, stderr): def deactivate_ports(module, port_path, ports, stdout, stderr): - """ Deactivate a port if it's active. """ + """ Deactivate a port if it is active. 
""" deactivated_c = 0 @@ -252,7 +257,7 @@ def deactivate_ports(module, port_path, ports, stdout, stderr): if not query_port(module, port_path, port, state="active"): continue - rc, out, err = module.run_command("%s deactivate %s" % (port_path, port)) + rc, out, err = module.run_command([port_path, "deactivate", port]) stdout += out stderr += err if query_port(module, port_path, port, state="active"): @@ -273,7 +278,7 @@ def main(): selfupdate=dict(aliases=["update_cache", "update_ports"], default=False, type='bool'), state=dict(default="present", choices=["present", "installed", "absent", "removed", "active", "inactive"]), upgrade=dict(default=False, type='bool'), - variant=dict(aliases=["variants"], default=None, type='str') + variant=dict(aliases=["variants"], type='str') ) ) diff --git a/plugins/modules/notification/mail.py b/plugins/modules/mail.py similarity index 72% rename from plugins/modules/notification/mail.py rename to plugins/modules/mail.py index 82ca6d52b2..4365f56a33 100644 --- a/plugins/modules/notification/mail.py +++ b/plugins/modules/mail.py @@ -1,138 +1,146 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- -# Copyright: (c) 2012, Dag Wieers (@dagwieers) -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# Copyright (c) 2012, Dag Wieers (@dagwieers) +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = r''' ---- +DOCUMENTATION = r""" author: -- Dag Wieers (@dagwieers) + - Dag Wieers (@dagwieers) module: mail short_description: Send an email description: -- This module is useful for sending emails from playbooks. -- One may wonder why automate sending emails? 
In complex environments - there are from time to time processes that cannot be automated, either - because you lack the authority to make it so, or because not everyone - agrees to a common approach. -- If you cannot automate a specific step, but the step is non-blocking, - sending out an email to the responsible party to make them perform their - part of the bargain is an elegant way to put the responsibility in - someone else's lap. -- Of course sending out a mail can be equally useful as a way to notify - one or more people in a team that a specific action has been - (successfully) taken. + - This module is useful for sending emails from playbooks. + - One may wonder why automate sending emails? In complex environments there are from time to time processes that cannot + be automated, either because you lack the authority to make it so, or because not everyone agrees to a common approach. + - If you cannot automate a specific step, but the step is non-blocking, sending out an email to the responsible party to + make them perform their part of the bargain is an elegant way to put the responsibility in someone else's lap. + - Of course sending out a mail can be equally useful as a way to notify one or more people in a team that a specific action + has been (successfully) taken. +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: none + diff_mode: + support: none options: sender: description: - - The email-address the mail is sent from. May contain address and phrase. + - The email-address the mail is sent from. May contain address and phrase. type: str default: root - aliases: [ from ] + aliases: [from] to: description: - - The email-address(es) the mail is being sent to. - - This is a list, which may contain address and phrase portions. + - The email-address(es) the mail is being sent to. + - This is a list, which may contain address and phrase portions. 
type: list elements: str default: root - aliases: [ recipients ] + aliases: [recipients] cc: description: - - The email-address(es) the mail is being copied to. - - This is a list, which may contain address and phrase portions. + - The email-address(es) the mail is being copied to. + - This is a list, which may contain address and phrase portions. type: list elements: str + default: [] bcc: description: - - The email-address(es) the mail is being 'blind' copied to. - - This is a list, which may contain address and phrase portions. + - The email-address(es) the mail is being 'blind' copied to. + - This is a list, which may contain address and phrase portions. type: list elements: str + default: [] subject: description: - - The subject of the email being sent. - required: yes + - The subject of the email being sent. + required: true type: str - aliases: [ msg ] + aliases: [msg] body: description: - - The body of the email being sent. + - The body of the email being sent. type: str username: description: - - If SMTP requires username. + - If SMTP requires username. type: str password: description: - - If SMTP requires password. + - If SMTP requires password. type: str host: description: - - The mail server. + - The mail server. type: str default: localhost port: description: - - The mail server port. - - This must be a valid integer between 1 and 65534 + - The mail server port. + - This must be a valid integer between V(1) and V(65534). type: int default: 25 attach: description: - - A list of pathnames of files to attach to the message. - - Attached files will have their content-type set to C(application/octet-stream). + - A list of pathnames of files to attach to the message. + - Attached files have their content-type set to C(application/octet-stream). type: list elements: path default: [] headers: description: - - A list of headers which should be added to the message. - - Each individual header is specified as C(header=value) (see example below). 
+ - A list of headers which should be added to the message. + - Each individual header is specified as V(header=value) (see example below). type: list elements: str default: [] charset: description: - - The character set of email being sent. + - The character set of email being sent. type: str default: utf-8 subtype: description: - - The minor mime type, can be either C(plain) or C(html). - - The major type is always C(text). + - The minor mime type, can be either V(plain) or V(html). + - The major type is always V(text). type: str - choices: [ html, plain ] + choices: [html, plain] default: plain secure: description: - - If C(always), the connection will only send email if the connection is Encrypted. - If the server doesn't accept the encrypted connection it will fail. - - If C(try), the connection will attempt to setup a secure SSL/TLS session, before trying to send. - - If C(never), the connection will not attempt to setup a secure SSL/TLS session, before sending - - If C(starttls), the connection will try to upgrade to a secure SSL/TLS connection, before sending. - If it is unable to do so it will fail. + - If V(always), the connection only sends email if the connection is Encrypted. If the server does not accept the encrypted + connection it fails. + - If V(try), the connection attempts to setup a secure SSL/TLS session, before trying to send. + - If V(never), the connection does not attempt to setup a secure SSL/TLS session, before sending. + - If V(starttls), the connection tries to upgrade to a secure SSL/TLS connection, before sending. If it is unable to + do so it fails. type: str - choices: [ always, never, starttls, try ] + choices: [always, never, starttls, try] default: try timeout: description: - - Sets the timeout in seconds for connection attempts. + - Sets the timeout in seconds for connection attempts. type: int default: 20 ehlohost: description: - - Allows for manual specification of host for EHLO. 
+ - Allows for manual specification of host for EHLO. type: str version_added: 3.8.0 -''' + message_id_domain: + description: + - The domain name to use for the L(Message-ID header, https://en.wikipedia.org/wiki/Message-ID). + type: str + default: ansible + version_added: 8.2.0 +""" -EXAMPLES = r''' +EXAMPLES = r""" - name: Example playbook sending mail to root community.general.mail: subject: System {{ ansible_hostname }} has been successfully provisioned. @@ -157,15 +165,15 @@ EXAMPLES = r''' body: Hello, this is an e-mail. I hope you like it ;-) from: jane@example.net (Jane Jolie) to: - - John Doe - - Suzie Something + - John Doe + - Suzie Something cc: Charlie Root attach: - - /etc/group - - /tmp/avatar2.png + - /etc/group + - /tmp/avatar2.png headers: - - Reply-To=john@example.com - - X-Special="Something or other" + - Reply-To=john@example.com + - X-Special="Something or other" charset: us-ascii delegate_to: localhost @@ -195,30 +203,30 @@ EXAMPLES = r''' body: System {{ ansible_hostname }} has been successfully provisioned. secure: starttls -- name: Sending an e-mail using StartTLS, remote server, custom EHLO +- name: Sending an e-mail using StartTLS, remote server, custom EHLO, and timeout of 10 seconds community.general.mail: host: some.smtp.host.tld port: 25 + timeout: 10 ehlohost: my-resolvable-hostname.tld to: John Smith subject: Ansible-report body: System {{ ansible_hostname }} has been successfully provisioned. 
secure: starttls -''' +""" import os import smtplib import ssl import traceback from email import encoders -from email.utils import parseaddr, formataddr, formatdate +from email.utils import parseaddr, formataddr, formatdate, make_msgid from email.mime.base import MIMEBase from email.mime.multipart import MIMEMultipart from email.mime.text import MIMEText from email.header import Header from ansible.module_utils.basic import AnsibleModule -from ansible.module_utils.six import PY3 from ansible.module_utils.common.text.converters import to_native @@ -230,7 +238,7 @@ def main(): password=dict(type='str', no_log=True), host=dict(type='str', default='localhost'), port=dict(type='int', default=25), - ehlohost=dict(type='str', default=None), + ehlohost=dict(type='str'), sender=dict(type='str', default='root', aliases=['from']), to=dict(type='list', elements='str', default=['root'], aliases=['recipients']), cc=dict(type='list', elements='str', default=[]), @@ -243,6 +251,7 @@ def main(): subtype=dict(type='str', default='plain', choices=['html', 'plain']), secure=dict(type='str', default='try', choices=['always', 'never', 'starttls', 'try']), timeout=dict(type='int', default=20), + message_id_domain=dict(type='str', default='ansible'), ), required_together=[['password', 'username']], ) @@ -264,6 +273,7 @@ def main(): subtype = module.params.get('subtype') secure = module.params.get('secure') timeout = module.params.get('timeout') + message_id_domain = module.params['message_id_domain'] code = 0 secure_state = False @@ -275,10 +285,7 @@ def main(): try: if secure != 'never': try: - if PY3: - smtp = smtplib.SMTP_SSL(host=host, port=port, local_hostname=local_hostname, timeout=timeout) - else: - smtp = smtplib.SMTP_SSL(local_hostname=local_hostname, timeout=timeout) + smtp = smtplib.SMTP_SSL(host=host, port=port, local_hostname=local_hostname, timeout=timeout) code, smtpmessage = smtp.connect(host, port) secure_state = True except ssl.SSLError as e: @@ -289,10 +296,7 @@ def 
main(): pass if not secure_state: - if PY3: - smtp = smtplib.SMTP(host=host, port=port, local_hostname=local_hostname, timeout=timeout) - else: - smtp = smtplib.SMTP(local_hostname=local_hostname, timeout=timeout) + smtp = smtplib.SMTP(host=host, port=port, local_hostname=local_hostname, timeout=timeout) code, smtpmessage = smtp.connect(host, port) except smtplib.SMTPException as e: @@ -338,13 +342,14 @@ def main(): msg['From'] = formataddr((sender_phrase, sender_addr)) msg['Date'] = formatdate(localtime=True) msg['Subject'] = Header(subject, charset) + msg['Message-ID'] = make_msgid(domain=message_id_domain) msg.preamble = "Multipart message" for header in headers: # NOTE: Backward compatible with old syntax using '|' as delimiter for hdr in [x.strip() for x in header.split('|')]: try: - h_key, h_val = hdr.split('=') + h_key, h_val = hdr.split('=', 1) h_val = to_native(Header(h_val, charset)) msg.add_header(h_key, h_val) except Exception: @@ -372,7 +377,7 @@ def main(): part = MIMEText(body + "\n\n", _subtype=subtype, _charset=charset) msg.attach(part) - # NOTE: Backware compatibility with old syntax using space as delimiter is not retained + # NOTE: Backward compatibility with old syntax using space as delimiter is not retained # This breaks files with spaces in it :-( for filename in attach_files: try: diff --git a/plugins/modules/system/make.py b/plugins/modules/make.py similarity index 63% rename from plugins/modules/system/make.py rename to plugins/modules/make.py index 6a66973369..141dd2df4d 100644 --- a/plugins/modules/system/make.py +++ b/plugins/modules/make.py @@ -1,31 +1,27 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- -# Copyright: (c) 2015, Linus Unnebäck -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# Copyright (c) 2015, Linus Unnebäck +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -from 
__future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = r''' ---- +DOCUMENTATION = r""" module: make short_description: Run targets in a Makefile requirements: -- make + - make author: Linus Unnebäck (@LinusU) description: - Run targets in a Makefile. +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: none options: - target: - description: - - The target to run. - - Typically this would be something like C(install),C(test) or C(all)." - type: str - params: - description: - - Any extra parameters to pass to make. - type: dict chdir: description: - Change to this directory before running make. @@ -35,11 +31,6 @@ options: description: - Use a custom Makefile. type: path - make: - description: - - Use a specific make binary. - type: path - version_added: '0.2.0' jobs: description: - Set the number of make jobs to run concurrently. @@ -47,9 +38,33 @@ options: - This is not supported by all make implementations. type: int version_added: 2.0.0 -''' + make: + description: + - Use a specific make binary. + type: path + version_added: '0.2.0' + params: + description: + - Any extra parameters to pass to make. + - If the value is empty, only the key is used. For example, V(FOO:) produces V(FOO), not V(FOO=). + type: dict + target: + description: + - The target to run. + - Typically this would be something like V(install), V(test), or V(all). + - O(target) and O(targets) are mutually exclusive. + type: str + targets: + description: + - The list of targets to run. + - Typically this would be something like V(install), V(test), or V(all). + - O(target) and O(targets) are mutually exclusive. 
+ type: list + elements: str + version_added: 7.2.0 +""" -EXAMPLES = r''' +EXAMPLES = r""" - name: Build the default target community.general.make: chdir: /home/ubuntu/cool-project @@ -58,7 +73,7 @@ EXAMPLES = r''' community.general.make: chdir: /home/ubuntu/cool-project target: install - become: yes + become: true - name: Build 'all' target with extra arguments community.general.make: @@ -73,11 +88,61 @@ EXAMPLES = r''' chdir: /home/ubuntu/cool-project target: all file: /some-project/Makefile -''' -RETURN = r'''# ''' +- name: build arm64 kernel on FreeBSD, with 16 parallel jobs + community.general.make: + chdir: /usr/src + jobs: 16 + target: buildkernel + params: + # This adds -DWITH_FDT to the command line: + -DWITH_FDT: + # The following adds TARGET=arm64 TARGET_ARCH=aarch64 to the command line: + TARGET: arm64 + TARGET_ARCH: aarch64 +""" -from ansible.module_utils.six import iteritems +RETURN = r""" +chdir: + description: + - The value of the module parameter O(chdir). + type: str + returned: success +command: + description: + - The command built and executed by the module. + type: str + returned: success + version_added: 6.5.0 +file: + description: + - The value of the module parameter O(file). + type: str + returned: success +jobs: + description: + - The value of the module parameter O(jobs). + type: int + returned: success +params: + description: + - The value of the module parameter O(params). + type: dict + returned: success +target: + description: + - The value of the module parameter O(target). + type: str + returned: success +targets: + description: + - The value of the module parameter O(targets). 
+ type: str + returned: success + version_added: 7.2.0 +""" + +from shlex import quote as shlex_quote from ansible.module_utils.basic import AnsibleModule @@ -114,12 +179,14 @@ def main(): module = AnsibleModule( argument_spec=dict( target=dict(type='str'), + targets=dict(type='list', elements='str'), params=dict(type='dict'), chdir=dict(type='path', required=True), file=dict(type='path'), make=dict(type='path'), jobs=dict(type='int'), ), + mutually_exclusive=[('target', 'targets')], supports_check_mode=True, ) @@ -131,9 +198,8 @@ def main(): if not make_path: # Fall back to system make make_path = module.get_bin_path('make', required=True) - make_target = module.params['target'] if module.params['params'] is not None: - make_parameters = [k + '=' + str(v) for k, v in iteritems(module.params['params'])] + make_parameters = [k + (('=' + str(v)) if v is not None else '') for k, v in module.params['params'].items()] else: make_parameters = [] @@ -147,7 +213,10 @@ def main(): base_command.extend(["-f", module.params['file']]) # add make target - base_command.append(make_target) + if module.params['target']: + base_command.append(module.params['target']) + elif module.params['targets']: + base_command.extend(module.params['targets']) # add makefile parameters base_command.extend(make_parameters) @@ -165,8 +234,7 @@ def main(): changed = False else: # The target isn't up to date, so we need to run it - rc, out, err = run_command(base_command, module, - check_rc=True) + rc, out, err = run_command(base_command, module, check_rc=True) changed = True # We don't report the return code, as if this module failed @@ -180,10 +248,12 @@ def main(): stdout=out, stderr=err, target=module.params['target'], + targets=module.params['targets'], params=module.params['params'], chdir=module.params['chdir'], file=module.params['file'], jobs=module.params['jobs'], + command=' '.join([shlex_quote(part) for part in base_command]), ) diff --git 
a/plugins/modules/remote_management/manageiq/manageiq_alert_profiles.py b/plugins/modules/manageiq_alert_profiles.py similarity index 88% rename from plugins/modules/remote_management/manageiq/manageiq_alert_profiles.py rename to plugins/modules/manageiq_alert_profiles.py index d76c334259..6f04309eff 100644 --- a/plugins/modules/remote_management/manageiq/manageiq_alert_profiles.py +++ b/plugins/modules/manageiq_alert_profiles.py @@ -1,57 +1,59 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- # Copyright (c) 2017 Red Hat Inc. -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = ''' - +DOCUMENTATION = r""" module: manageiq_alert_profiles short_description: Configuration of alert profiles for ManageIQ extends_documentation_fragment: -- community.general.manageiq + - community.general.manageiq + - community.general.attributes author: Elad Alfassa (@elad661) description: - The manageiq_alert_profiles module supports adding, updating and deleting alert profiles in ManageIQ. +attributes: + check_mode: + support: none + diff_mode: + support: none options: state: type: str description: - - absent - alert profile should not exist, - - present - alert profile should exist, + - V(absent) - alert profile should not exist, + - V(present) - alert profile should exist. choices: ['absent', 'present'] default: 'present' name: type: str description: - The unique alert profile name in ManageIQ. - - Required when state is "absent" or "present". + required: true resource_type: type: str description: - - The resource type for the alert profile in ManageIQ. Required when state is "present". 
- choices: ['Vm', 'ContainerNode', 'MiqServer', 'Host', 'Storage', 'EmsCluster', - 'ExtManagementSystem', 'MiddlewareServer'] + - The resource type for the alert profile in ManageIQ. Required when O(state=present). + choices: ['Vm', 'ContainerNode', 'MiqServer', 'Host', 'Storage', 'EmsCluster', 'ExtManagementSystem', 'MiddlewareServer'] alerts: type: list elements: str description: - List of alert descriptions to assign to this profile. - - Required if state is "present" + - Required if O(state=present). notes: type: str description: - - Optional notes for this profile + - Optional notes for this profile. +""" -''' - -EXAMPLES = ''' +EXAMPLES = r""" - name: Add an alert profile to ManageIQ community.general.manageiq_alert_profiles: state: present @@ -64,7 +66,7 @@ EXAMPLES = ''' url: 'http://127.0.0.1:3000' username: 'admin' password: 'smartvm' - validate_certs: False + validate_certs: false # only do this when you trust the network! - name: Delete an alert profile from ManageIQ community.general.manageiq_alert_profiles: @@ -74,11 +76,11 @@ EXAMPLES = ''' url: 'http://127.0.0.1:3000' username: 'admin' password: 'smartvm' - validate_certs: False -''' + validate_certs: false # only do this when you trust the network! 
+""" -RETURN = ''' -''' +RETURN = r""" +""" from ansible.module_utils.basic import AnsibleModule from ansible_collections.community.general.plugins.module_utils.manageiq import ManageIQ, manageiq_argument_spec @@ -110,8 +112,7 @@ class ManageIQAlertProfiles(object): """ alerts = [] for alert_description in alert_descriptions: - alert = self.manageiq.find_collection_resource_or_fail("alert_definitions", - description=alert_description) + alert = self.manageiq.find_collection_resource_or_fail("alert_definitions", description=alert_description) alerts.append(alert['href']) return alerts @@ -198,7 +199,7 @@ class ManageIQAlertProfiles(object): # alert which currently exist in the profile if 'alert_definitions' in old_profile: # we use get_alert_href to have a direct href to the alert - existing_alerts = set([self.get_alert_href(alert) for alert in old_profile['alert_definitions']]) + existing_alerts = set(self.get_alert_href(alert) for alert in old_profile['alert_definitions']) else: # no alerts in this profile existing_alerts = set() @@ -238,7 +239,7 @@ class ManageIQAlertProfiles(object): except Exception as e: msg = "Updating profile '{name}' failed: {error}" msg = msg.format(name=old_profile['name'], error=e) - self.module.fail_json(msg=msg, result=result) + self.module.fail_json(msg=msg) if changed: msg = "Profile {name} updated successfully".format(name=desired_profile['name']) @@ -249,7 +250,7 @@ class ManageIQAlertProfiles(object): def main(): argument_spec = dict( - name=dict(type='str'), + name=dict(type='str', required=True), resource_type=dict(type='str', choices=['Vm', 'ContainerNode', 'MiqServer', @@ -266,8 +267,7 @@ def main(): argument_spec.update(manageiq_argument_spec()) module = AnsibleModule(argument_spec=argument_spec, - required_if=[('state', 'present', ['name', 'resource_type']), - ('state', 'absent', ['name'])]) + required_if=[('state', 'present', ['resource_type', 'alerts'])]) state = module.params['state'] name = module.params['name'] @@ 
-275,8 +275,7 @@ def main(): manageiq = ManageIQ(module) manageiq_alert_profiles = ManageIQAlertProfiles(manageiq) - existing_profile = manageiq.find_collection_resource_by("alert_definition_profiles", - name=name) + existing_profile = manageiq.find_collection_resource_by("alert_definition_profiles", name=name) # we need to add or update the alert profile if state == "present": diff --git a/plugins/modules/remote_management/manageiq/manageiq_alerts.py b/plugins/modules/manageiq_alerts.py similarity index 90% rename from plugins/modules/remote_management/manageiq/manageiq_alerts.py rename to plugins/modules/manageiq_alerts.py index de85e96fcb..dec3dfad57 100644 --- a/plugins/modules/remote_management/manageiq/manageiq_alerts.py +++ b/plugins/modules/manageiq_alerts.py @@ -1,31 +1,35 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- # Copyright (c) 2017 Red Hat Inc. -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = ''' - +DOCUMENTATION = r""" module: manageiq_alerts short_description: Configuration of alerts in ManageIQ extends_documentation_fragment: -- community.general.manageiq + - community.general.manageiq + - community.general.attributes author: Elad Alfassa (@elad661) description: - The manageiq_alerts module supports adding, updating and deleting alerts in ManageIQ. +attributes: + check_mode: + support: none + diff_mode: + support: none options: state: type: str description: - - absent - alert should not exist, - - present - alert should exist, - required: False + - V(absent) - alert should not exist, + - V(present) - alert should exist. 
+ required: false choices: ['absent', 'present'] default: 'present' description: @@ -36,9 +40,8 @@ options: resource_type: type: str description: - - The entity type for the alert in ManageIQ. Required when state is "present". - choices: ['Vm', 'ContainerNode', 'MiqServer', 'Host', 'Storage', 'EmsCluster', - 'ExtManagementSystem', 'MiddlewareServer'] + - The entity type for the alert in ManageIQ. Required when O(state=present). + choices: ['Vm', 'ContainerNode', 'MiqServer', 'Host', 'Storage', 'EmsCluster', 'ExtManagementSystem', 'MiddlewareServer'] expression_type: type: str description: @@ -50,20 +53,18 @@ options: description: - The alert expression for ManageIQ. - Can either be in the "Miq Expression" format or the "Hash Expression format". - - Required if state is "present". + - Required if O(state=present). enabled: description: - - Enable or disable the alert. Required if state is "present". + - Enable or disable the alert. Required if O(state=present). type: bool options: type: dict description: - - Additional alert options, such as notification type and frequency + - Additional alert options, such as notification type and frequency. +""" - -''' - -EXAMPLES = ''' +EXAMPLES = r""" - name: Add an alert with a "hash expression" to ManageIQ community.general.manageiq_alerts: state: present @@ -75,15 +76,15 @@ EXAMPLES = ''' from: "example@example.com" resource_type: ContainerNode expression: - eval_method: hostd_log_threshold - mode: internal - options: {} + eval_method: hostd_log_threshold + mode: internal + options: {} enabled: true manageiq_connection: url: 'http://127.0.0.1:3000' username: 'admin' password: 'smartvm' - validate_certs: False + validate_certs: false # only do this when you trust the network! 
- name: Add an alert with a "miq expression" to ManageIQ community.general.manageiq_alerts: @@ -97,20 +98,20 @@ EXAMPLES = ''' resource_type: Vm expression_type: miq expression: - and: - - CONTAINS: - tag: Vm.managed-environment - value: prod - - not: - CONTAINS: - tag: Vm.host.managed-environment - value: prod + and: + - CONTAINS: + tag: Vm.managed-environment + value: prod + - not: + CONTAINS: + tag: Vm.host.managed-environment + value: prod enabled: true manageiq_connection: url: 'http://127.0.0.1:3000' username: 'admin' password: 'smartvm' - validate_certs: False + validate_certs: false # only do this when you trust the network! - name: Delete an alert from ManageIQ community.general.manageiq_alerts: @@ -120,11 +121,11 @@ EXAMPLES = ''' url: 'http://127.0.0.1:3000' username: 'admin' password: 'smartvm' - validate_certs: False -''' + validate_certs: false # only do this when you trust the network! +""" -RETURN = ''' -''' +RETURN = r""" +""" from ansible.module_utils.basic import AnsibleModule from ansible_collections.community.general.plugins.module_utils.manageiq import ManageIQ, manageiq_argument_spec @@ -148,7 +149,7 @@ class ManageIQAlert(object): self.miq_expression = alert['miq_expression'] if 'exp' in self.miq_expression: # miq_expression is a field that needs a special case, because - # it's returned surrounded by a dict named exp even though we don't + # it is returned surrounded by a dict named exp even though we don't # send it with that dict. 
self.miq_expression = self.miq_expression['exp'] @@ -297,7 +298,7 @@ def main(): expression=dict(type='dict'), options=dict(type='dict'), enabled=dict(type='bool'), - state=dict(required=False, default='present', + state=dict(default='present', choices=['present', 'absent']), ) # add the manageiq connection arguments to the arguments diff --git a/plugins/modules/remote_management/manageiq/manageiq_group.py b/plugins/modules/manageiq_group.py similarity index 84% rename from plugins/modules/remote_management/manageiq/manageiq_group.py rename to plugins/modules/manageiq_group.py index 2452e101d1..39bc641967 100644 --- a/plugins/modules/remote_management/manageiq/manageiq_group.py +++ b/plugins/modules/manageiq_group.py @@ -1,119 +1,108 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- -# -# (c) 2018, Evert Mulder (base on manageiq_user.py by Daniel Korn ) -# -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . +# Copyright (c) 2018, Evert Mulder (base on manageiq_user.py by Daniel Korn ) +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import (absolute_import, division, print_function) +from __future__ import annotations -__metaclass__ = type - -DOCUMENTATION = ''' +DOCUMENTATION = r""" module: manageiq_group -short_description: Management of groups in ManageIQ. 
+short_description: Management of groups in ManageIQ extends_documentation_fragment: -- community.general.manageiq + - community.general.manageiq + - community.general.attributes author: Evert Mulder (@evertmulder) description: - The manageiq_group module supports adding, updating and deleting groups in ManageIQ. requirements: -- manageiq-client + - manageiq-client + +attributes: + check_mode: + support: none + diff_mode: + support: none options: state: type: str description: - - absent - group should not exist, present - group should be. + - V(absent) - group should not exist, + - V(present) - group should exist. choices: ['absent', 'present'] default: 'present' description: type: str description: - - The group description. + - The group description. required: true - default: null role_id: type: int description: - - The the group role id + - The the group role ID. required: false - default: null role: type: str description: - - The the group role name - - The C(role_id) has precedence over the C(role) when supplied. + - The the group role name. + - The O(role_id) has precedence over the O(role) when supplied. required: false - default: null + default: tenant_id: type: int description: - - The tenant for the group identified by the tenant id. + - The tenant for the group identified by the tenant ID. required: false - default: null + default: tenant: type: str description: - - The tenant for the group identified by the tenant name. - - The C(tenant_id) has precedence over the C(tenant) when supplied. - - Tenant names are case sensitive. + - The tenant for the group identified by the tenant name. + - The O(tenant_id) has precedence over the O(tenant) when supplied. + - Tenant names are case sensitive. required: false - default: null + default: managed_filters: - description: The tag values per category + description: The tag values per category. 
type: dict required: false - default: null + default: managed_filters_merge_mode: type: str description: - - In merge mode existing categories are kept or updated, new categories are added. - - In replace mode all categories will be replaced with the supplied C(managed_filters). - choices: [ merge, replace ] + - In V(merge) mode existing categories are kept or updated, new categories are added. + - In V(replace) mode all categories are replaced with the supplied O(managed_filters). + choices: [merge, replace] default: replace belongsto_filters: - description: A list of strings with a reference to the allowed host, cluster or folder + description: A list of strings with a reference to the allowed host, cluster or folder. type: list elements: str required: false - default: null + default: belongsto_filters_merge_mode: type: str description: - - In merge mode existing settings are merged with the supplied C(belongsto_filters). - - In replace mode current values are replaced with the supplied C(belongsto_filters). - choices: [ merge, replace ] + - In merge mode existing settings are merged with the supplied O(belongsto_filters). + - In replace mode current values are replaced with the supplied O(belongsto_filters). + choices: [merge, replace] default: replace -''' +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: Create a group in ManageIQ with the role EvmRole-user and tenant 'my_tenant' community.general.manageiq_group: description: 'MyGroup-user' role: 'EvmRole-user' tenant: 'my_tenant' manageiq_connection: - url: 'https://manageiq_server' + url: 'http://127.0.0.1:3000' username: 'admin' password: 'smartvm' - validate_certs: False + validate_certs: false # only do this when you trust the network! 
- name: Create a group in ManageIQ with the role EvmRole-user and tenant with tenant_id 4 community.general.manageiq_group: @@ -121,36 +110,36 @@ EXAMPLES = ''' role: 'EvmRole-user' tenant_id: 4 manageiq_connection: - url: 'https://manageiq_server' + url: 'http://127.0.0.1:3000' username: 'admin' password: 'smartvm' - validate_certs: False + validate_certs: false # only do this when you trust the network! - name: - - Create or update a group in ManageIQ with the role EvmRole-user and tenant my_tenant. - - Apply 3 prov_max_cpu and 2 department tags to the group. - - Limit access to a cluster for the group. + - Create or update a group in ManageIQ with the role EvmRole-user and tenant my_tenant. + - Apply 3 prov_max_cpu and 2 department tags to the group. + - Limit access to a cluster for the group. community.general.manageiq_group: description: 'MyGroup-user' role: 'EvmRole-user' tenant: my_tenant managed_filters: prov_max_cpu: - - '1' - - '2' - - '4' + - '1' + - '2' + - '4' department: - - defense - - engineering + - defense + - engineering managed_filters_merge_mode: replace belongsto_filters: - - "/belongsto/ExtManagementSystem|ProviderName/EmsFolder|Datacenters/EmsFolder|dc_name/EmsFolder|host/EmsCluster|Cluster name" + - "/belongsto/ExtManagementSystem|ProviderName/EmsFolder|Datacenters/EmsFolder|dc_name/EmsFolder|host/EmsCluster|Cluster name" belongsto_filters_merge_mode: merge manageiq_connection: - url: 'https://manageiq_server' + url: 'http://127.0.0.1:3000' username: 'admin' password: 'smartvm' - validate_certs: False + validate_certs: false # only do this when you trust the network! - name: Delete a group in ManageIQ community.general.manageiq_group: @@ -168,53 +157,53 @@ EXAMPLES = ''' manageiq_connection: url: 'http://127.0.0.1:3000' token: 'sometoken' -''' +""" -RETURN = ''' +RETURN = r""" group: description: The group. returned: success type: complex contains: description: - description: The group description + description: The group description. 
returned: success type: str id: - description: The group id + description: The group ID. returned: success type: int group_type: - description: The group type, system or user + description: The group type, system or user. returned: success type: str role: - description: The group role name + description: The group role name. returned: success type: str tenant: - description: The group tenant name + description: The group tenant name. returned: success type: str managed_filters: - description: The tag values per category + description: The tag values per category. returned: success type: dict belongsto_filters: - description: A list of strings with a reference to the allowed host, cluster or folder + description: A list of strings with a reference to the allowed host, cluster or folder. returned: success type: list created_on: - description: Group creation date + description: Group creation date. returned: success type: str sample: "2018-08-12T08:37:55+00:00" updated_on: - description: Group update date + description: Group update date. 
returned: success type: int sample: "2018-08-12T08:37:55+00:00" -''' +""" from ansible.module_utils.basic import AnsibleModule from ansible_collections.community.general.plugins.module_utils.manageiq import ManageIQ, manageiq_argument_spec @@ -260,7 +249,7 @@ class ManageIQgroup(object): if not tenant_res: self.module.fail_json(msg="Tenant '%s' not found in manageiq" % tenant_name) if len(tenant_res) > 1: - self.module.fail_json(msg="Multiple tenants found in manageiq with name '%s" % tenant_name) + self.module.fail_json(msg="Multiple tenants found in manageiq with name '%s'" % tenant_name) tenant = tenant_res[0] return tenant else: @@ -285,7 +274,7 @@ class ManageIQgroup(object): if not role_res: self.module.fail_json(msg="Role '%s' not found in manageiq" % role_name) if len(role_res) > 1: - self.module.fail_json(msg="Multiple roles found in manageiq with name '%s" % role_name) + self.module.fail_json(msg="Multiple roles found in manageiq with name '%s'" % role_name) return role_res[0] else: # No role name or role id supplied @@ -579,14 +568,14 @@ def main(): argument_spec = dict( description=dict(required=True, type='str'), state=dict(choices=['absent', 'present'], default='present'), - role_id=dict(required=False, type='int'), - role=dict(required=False, type='str'), - tenant_id=dict(required=False, type='int'), - tenant=dict(required=False, type='str'), - managed_filters=dict(required=False, type='dict'), - managed_filters_merge_mode=dict(required=False, choices=['merge', 'replace'], default='replace'), - belongsto_filters=dict(required=False, type='list', elements='str'), - belongsto_filters_merge_mode=dict(required=False, choices=['merge', 'replace'], default='replace'), + role_id=dict(type='int'), + role=dict(type='str'), + tenant_id=dict(type='int'), + tenant=dict(type='str'), + managed_filters=dict(type='dict'), + managed_filters_merge_mode=dict(choices=['merge', 'replace'], default='replace'), + belongsto_filters=dict(type='list', elements='str'), + 
belongsto_filters_merge_mode=dict(choices=['merge', 'replace'], default='replace'), ) # add the manageiq connection arguments to the arguments argument_spec.update(manageiq_argument_spec()) diff --git a/plugins/modules/manageiq_policies.py b/plugins/modules/manageiq_policies.py new file mode 100644 index 0000000000..a5539724dc --- /dev/null +++ b/plugins/modules/manageiq_policies.py @@ -0,0 +1,184 @@ +#!/usr/bin/python +# Copyright (c) 2017, Daniel Korn +# Copyright (c) 2017, Yaacov Zamir +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + + +DOCUMENTATION = r""" +module: manageiq_policies + +short_description: Management of resource policy_profiles in ManageIQ +extends_documentation_fragment: + - community.general.manageiq + - community.general.attributes + +author: Daniel Korn (@dkorn) +description: + - The manageiq_policies module supports adding and deleting policy_profiles in ManageIQ. +attributes: + check_mode: + support: none + diff_mode: + support: none + +options: + state: + type: str + description: + - V(absent) - policy_profiles should not exist, + - V(present) - policy_profiles should exist. + choices: ['absent', 'present'] + default: 'present' + policy_profiles: + type: list + elements: dict + description: + - List of dictionaries, each includes the policy_profile V(name) key. + - Required if O(state) is V(present) or V(absent). + resource_type: + type: str + description: + - The type of the resource to which the profile should be [un]assigned. + required: true + choices: + - provider + - host + - vm + - blueprint + - category + - cluster + - data store + - group + - resource pool + - service + - service template + - template + - tenant + - user + resource_name: + type: str + description: + - The name of the resource to which the profile should be [un]assigned. 
+ - Must be specified if O(resource_id) is not set. Both options are mutually exclusive. + resource_id: + type: int + description: + - The ID of the resource to which the profile should be [un]assigned. + - Must be specified if O(resource_name) is not set. Both options are mutually exclusive. + version_added: 2.2.0 +""" + +EXAMPLES = r""" +- name: Assign new policy_profile for a provider in ManageIQ + community.general.manageiq_policies: + resource_name: 'EngLab' + resource_type: 'provider' + policy_profiles: + - name: openscap profile + manageiq_connection: + url: 'http://127.0.0.1:3000' + username: 'admin' + password: 'smartvm' + validate_certs: false # only do this when you trust the network! + +- name: Unassign a policy_profile for a provider in ManageIQ + community.general.manageiq_policies: + state: absent + resource_name: 'EngLab' + resource_type: 'provider' + policy_profiles: + - name: openscap profile + manageiq_connection: + url: 'http://127.0.0.1:3000' + username: 'admin' + password: 'smartvm' + validate_certs: false # only do this when you trust the network! +""" + +RETURN = r""" +manageiq_policies: + description: + - List current policy_profile and policies for a provider in ManageIQ. 
+ returned: always + type: dict + sample: + { + "changed": false, + "profiles": [ + { + "policies": [ + { + "active": true, + "description": "OpenSCAP", + "name": "openscap policy" + }, + { + "active": true, + "description": "Analyse incoming container images", + "name": "analyse incoming container images" + }, + { + "active": true, + "description": "Schedule compliance after smart state analysis", + "name": "schedule compliance after smart state analysis" + } + ], + "profile_description": "OpenSCAP profile", + "profile_name": "openscap profile" + } + ] + } +""" + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.manageiq import ManageIQ, manageiq_argument_spec, manageiq_entities + + +def main(): + actions = {'present': 'assign', 'absent': 'unassign'} + argument_spec = dict( + policy_profiles=dict(type='list', elements='dict'), + resource_id=dict(type='int'), + resource_name=dict(type='str'), + resource_type=dict(required=True, type='str', + choices=list(manageiq_entities().keys())), + state=dict(type='str', + choices=['present', 'absent'], default='present'), + ) + # add the manageiq connection arguments to the arguments + argument_spec.update(manageiq_argument_spec()) + + module = AnsibleModule( + argument_spec=argument_spec, + mutually_exclusive=[["resource_id", "resource_name"]], + required_one_of=[["resource_id", "resource_name"]], + required_if=[ + ('state', 'present', ['policy_profiles']), + ('state', 'absent', ['policy_profiles']) + ], + ) + + policy_profiles = module.params['policy_profiles'] + resource_id = module.params['resource_id'] + resource_type_key = module.params['resource_type'] + resource_name = module.params['resource_name'] + state = module.params['state'] + + # get the action and resource type + action = actions[state] + resource_type = manageiq_entities()[resource_type_key] + + manageiq = ManageIQ(module) + manageiq_policies = manageiq.policies(resource_id, resource_type, 
resource_name) + + # assign or unassign the profiles + res_args = manageiq_policies.assign_or_unassign_profiles(policy_profiles, action) + + module.exit_json(**res_args) + + +if __name__ == "__main__": + main() diff --git a/plugins/modules/manageiq_policies_info.py b/plugins/modules/manageiq_policies_info.py new file mode 100644 index 0000000000..bf96679e29 --- /dev/null +++ b/plugins/modules/manageiq_policies_info.py @@ -0,0 +1,129 @@ +#!/usr/bin/python +# Copyright (c) 2022, Alexei Znamensky +# Copyright (c) 2017, Daniel Korn +# Copyright (c) 2017, Yaacov Zamir +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + + +DOCUMENTATION = r""" +module: manageiq_policies_info +version_added: 5.8.0 + +short_description: Listing of resource policy_profiles in ManageIQ +extends_documentation_fragment: + - community.general.manageiq + - community.general.attributes + - community.general.attributes.info_module + +author: Alexei Znamensky (@russoz) +description: + - The manageiq_policies module supports listing policy_profiles in ManageIQ. +options: + resource_type: + type: str + description: + - The type of the resource to obtain the profile for. + required: true + choices: + - provider + - host + - vm + - blueprint + - category + - cluster + - data store + - group + - resource pool + - service + - service template + - template + - tenant + - user + resource_name: + type: str + description: + - The name of the resource to obtain the profile for. + - Must be specified if O(resource_id) is not set. Both options are mutually exclusive. + resource_id: + type: int + description: + - The ID of the resource to obtain the profile for. + - Must be specified if O(resource_name) is not set. Both options are mutually exclusive. 
+""" + +EXAMPLES = r""" +- name: List current policy_profile and policies for a provider in ManageIQ + community.general.manageiq_policies_info: + resource_name: 'EngLab' + resource_type: 'provider' + manageiq_connection: + url: 'http://127.0.0.1:3000' + username: 'admin' + password: 'smartvm' + register: result +""" + +RETURN = r""" +profiles: + description: + - List current policy_profile and policies for a provider in ManageIQ. + returned: always + type: list + elements: dict + sample: + - policies: + - active: true + description: OpenSCAP + name: openscap policy + - active: true, + description: Analyse incoming container images + name: analyse incoming container images + - active: true + description: Schedule compliance after smart state analysis + name: schedule compliance after smart state analysis + profile_description: OpenSCAP profile + profile_name: openscap profile +""" + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.manageiq import ManageIQ, manageiq_argument_spec, manageiq_entities + + +def main(): + argument_spec = dict( + resource_id=dict(type='int'), + resource_name=dict(type='str'), + resource_type=dict(required=True, type='str', + choices=list(manageiq_entities().keys())), + ) + # add the manageiq connection arguments to the arguments + argument_spec.update(manageiq_argument_spec()) + + module = AnsibleModule( + argument_spec=argument_spec, + mutually_exclusive=[["resource_id", "resource_name"]], + required_one_of=[["resource_id", "resource_name"]], + supports_check_mode=True, + ) + + resource_id = module.params['resource_id'] + resource_type_key = module.params['resource_type'] + resource_name = module.params['resource_name'] + + # get the resource type + resource_type = manageiq_entities()[resource_type_key] + + manageiq_policies = ManageIQ(module).policies(resource_id, resource_type, resource_name) + + # return a list of current profiles for this object + current_profiles 
= manageiq_policies.query_resource_profiles() + res_args = dict(changed=False, profiles=current_profiles) + + module.exit_json(**res_args) + + +if __name__ == "__main__": + main() diff --git a/plugins/modules/remote_management/manageiq/manageiq_provider.py b/plugins/modules/manageiq_provider.py similarity index 66% rename from plugins/modules/remote_management/manageiq/manageiq_provider.py rename to plugins/modules/manageiq_provider.py index f17cbec910..0268baa5c8 100644 --- a/plugins/modules/remote_management/manageiq/manageiq_provider.py +++ b/plugins/modules/manageiq_provider.py @@ -1,28 +1,35 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- -# (c) 2017, Daniel Korn -# (c) 2017, Yaacov Zamir -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# Copyright (c) 2017, Daniel Korn +# Copyright (c) 2017, Yaacov Zamir +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = ''' +DOCUMENTATION = r""" module: manageiq_provider -short_description: Management of provider in ManageIQ. +short_description: Management of provider in ManageIQ extends_documentation_fragment: -- community.general.manageiq + - community.general.manageiq + - community.general.attributes author: Daniel Korn (@dkorn) description: - The manageiq_provider module supports adding, updating, and deleting provider in ManageIQ. +attributes: + check_mode: + support: none + diff_mode: + support: none options: state: type: str description: - - absent - provider should not exist, present - provider should be present, refresh - provider will be refreshed + - V(absent) - provider should not exist, + - V(present) - provider should be present, + - V(refresh) - provider is refreshed. 
choices: ['absent', 'present', 'refresh'] default: 'present' name: @@ -35,134 +42,194 @@ options: choices: ['Openshift', 'Amazon', 'oVirt', 'VMware', 'Azure', 'Director', 'OpenStack', 'GCE'] zone: type: str - description: The ManageIQ zone name that will manage the provider. + description: The ManageIQ zone name that manages the provider. default: 'default' provider_region: type: str - description: The provider region name to connect to (e.g. AWS region for Amazon). + description: The provider region name to connect to (for example AWS region for Amazon). host_default_vnc_port_start: type: str - description: The first port in the host VNC range. defaults to None. + description: The first port in the host VNC range. host_default_vnc_port_end: type: str - description: The last port in the host VNC range. defaults to None. + description: The last port in the host VNC range. subscription: type: str - description: Microsoft Azure subscription ID. defaults to None. + description: Microsoft Azure subscription ID. project: type: str - description: Google Compute Engine Project ID. defaults to None. + description: Google Compute Engine Project ID. azure_tenant_id: type: str - description: Tenant ID. defaults to None. - aliases: [ keystone_v3_domain_id ] + description: Tenant ID. Defaults to V(null). + aliases: [keystone_v3_domain_id] tenant_mapping_enabled: type: bool - default: 'no' - description: Whether to enable mapping of existing tenants. defaults to False. + default: false + description: Whether to enable mapping of existing tenants. api_version: type: str - description: The OpenStack Keystone API version. defaults to None. + description: The OpenStack Keystone API version. choices: ['v2', 'v3'] provider: description: Default endpoint connection information, required if state is true. + type: dict suboptions: hostname: type: str - description: The provider's api hostname. + description: The provider's API hostname. 
required: true port: type: int - description: The provider's api port. + description: The provider's API port. userid: type: str - description: Provider's api endpoint authentication userid. defaults to None. + description: Provider's API endpoint authentication userid. password: type: str - description: Provider's api endpoint authentication password. defaults to None. + description: Provider's API endpoint authentication password. auth_key: type: str - description: Provider's api endpoint authentication bearer token. defaults to None. + description: Provider's API endpoint authentication bearer token. validate_certs: - description: Whether SSL certificates should be verified for HTTPS requests (deprecated). defaults to True. + description: Whether SSL certificates should be verified for HTTPS requests (deprecated). type: bool - default: 'yes' + default: true + aliases: [verify_ssl] security_protocol: type: str - description: How SSL certificates should be used for HTTPS requests. defaults to None. - choices: ['ssl-with-validation','ssl-with-validation-custom-ca','ssl-without-validation','non-ssl'] + description: How SSL certificates should be used for HTTPS requests. + choices: ['ssl-with-validation', 'ssl-with-validation-custom-ca', 'ssl-without-validation', 'non-ssl'] certificate_authority: type: str - description: The CA bundle string with custom certificates. defaults to None. - - metrics: - description: Metrics endpoint connection information. - suboptions: - hostname: - type: str - description: The provider's api hostname. - required: true - port: - type: int - description: The provider's api port. - userid: - type: str - description: Provider's api endpoint authentication userid. defaults to None. - password: - type: str - description: Provider's api endpoint authentication password. defaults to None. - auth_key: - type: str - description: Provider's api endpoint authentication bearer token. defaults to None. 
- validate_certs: - description: Whether SSL certificates should be verified for HTTPS requests (deprecated). defaults to True. - type: bool - default: 'yes' - security_protocol: - type: str - choices: ['ssl-with-validation','ssl-with-validation-custom-ca','ssl-without-validation','non-ssl'] - description: How SSL certificates should be used for HTTPS requests. defaults to None. - certificate_authority: - type: str - description: The CA bundle string with custom certificates. defaults to None. + description: The CA bundle string with custom certificates. path: type: str - description: Database name for oVirt metrics. Defaults to C(ovirt_engine_history). - - alerts: - description: Alerts endpoint connection information. + description: + - TODO needs documentation. + project: + type: str + description: + - TODO needs documentation. + role: + type: str + description: + - TODO needs documentation. + subscription: + type: str + description: + - TODO needs documentation. + uid_ems: + type: str + description: + - TODO needs documentation. + metrics: + description: Metrics endpoint connection information. + type: dict suboptions: hostname: type: str - description: The provider's api hostname. + description: The provider's API hostname. required: true port: type: int - description: The provider's api port. + description: The provider's API port. userid: type: str - description: Provider's api endpoint authentication userid. defaults to None. + description: Provider's API endpoint authentication userid. password: type: str - description: Provider's api endpoint authentication password. defaults to None. + description: Provider's API endpoint authentication password. auth_key: type: str - description: Provider's api endpoint authentication bearer token. defaults to None. + description: Provider's API endpoint authentication bearer token. validate_certs: + description: Whether SSL certificates should be verified for HTTPS requests (deprecated). 
type: bool - description: Whether SSL certificates should be verified for HTTPS requests (deprecated). defaults to True. default: true + aliases: [verify_ssl] security_protocol: type: str - choices: ['ssl-with-validation','ssl-with-validation-custom-ca','ssl-without-validation', 'non-ssl'] - description: How SSL certificates should be used for HTTPS requests. defaults to None. + choices: ['ssl-with-validation', 'ssl-with-validation-custom-ca', 'ssl-without-validation', 'non-ssl'] + description: How SSL certificates should be used for HTTPS requests. certificate_authority: type: str - description: The CA bundle string with custom certificates. defaults to None. - + description: The CA bundle string with custom certificates. + path: + type: str + description: Database name for oVirt metrics. Defaults to V(ovirt_engine_history). + project: + type: str + description: + - TODO needs documentation. + role: + type: str + description: + - TODO needs documentation. + subscription: + type: str + description: + - TODO needs documentation. + uid_ems: + type: str + description: + - TODO needs documentation. + alerts: + description: Alerts endpoint connection information. + type: dict + suboptions: + hostname: + type: str + description: The provider's API hostname. + required: true + port: + type: int + description: The provider's API port. + userid: + type: str + description: Provider's API endpoint authentication userid. Defaults to V(null). + password: + type: str + description: Provider's API endpoint authentication password. Defaults to V(null). + auth_key: + type: str + description: Provider's API endpoint authentication bearer token. Defaults to V(null). + validate_certs: + type: bool + description: Whether SSL certificates should be verified for HTTPS requests (deprecated). Defaults to V(true). 
+ default: true + aliases: [verify_ssl] + security_protocol: + type: str + choices: ['ssl-with-validation', 'ssl-with-validation-custom-ca', 'ssl-without-validation', 'non-ssl'] + description: How SSL certificates should be used for HTTPS requests. Defaults to V(null). + certificate_authority: + type: str + description: The CA bundle string with custom certificates. Defaults to V(null). + path: + type: str + description: + - TODO needs documentation. + project: + type: str + description: + - TODO needs documentation. + role: + type: str + description: + - TODO needs documentation. + subscription: + type: str + description: + - TODO needs documentation. + uid_ems: + type: str + description: + - TODO needs documentation. ssh_keypair: description: SSH key pair used for SSH connections to all hosts in this provider. + type: dict suboptions: hostname: type: str @@ -178,11 +245,48 @@ options: description: - Whether certificates should be verified for connections. type: bool - default: yes - aliases: [ verify_ssl ] -''' + default: true + aliases: [verify_ssl] + security_protocol: + type: str + choices: ['ssl-with-validation', 'ssl-with-validation-custom-ca', 'ssl-without-validation', 'non-ssl'] + description: + - TODO needs documentation. + certificate_authority: + type: str + description: + - TODO needs documentation. + password: + type: str + description: + - TODO needs documentation. + path: + type: str + description: + - TODO needs documentation. + project: + type: str + description: + - TODO needs documentation. + role: + type: str + description: + - TODO needs documentation. + subscription: + type: str + description: + - TODO needs documentation. + uid_ems: + type: str + description: + - TODO needs documentation. + port: + type: int + description: + - TODO needs documentation. 
+""" -EXAMPLES = ''' +EXAMPLES = r""" - name: Create a new provider in ManageIQ ('Hawkular' metrics) community.general.manageiq_provider: name: 'EngLab' @@ -196,22 +300,7 @@ EXAMPLES = ''' security_protocol: 'ssl-with-validation-custom-ca' certificate_authority: | -----BEGIN CERTIFICATE----- - FAKECERTsdKgAwIBAgIBATANBgkqhkiG9w0BAQsFADAmMSQwIgYDVQQDDBtvcGVu - c2hpZnQtc2lnbmVyQDE1MDMzMjAxMTkwHhcNMTcwODIxMTI1NTE5WhcNMjIwODIw - MTI1NTIwWjAmMSQwIgYDVQQDDBtvcGVuc2hpZnQtc2lnbmVyQDE1MDMzMjAxMTkw - ggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDUDnL2tQ2xf/zO7F7hmZ4S - ZuwKENdI4IYuWSxye4i3hPhKg6eKPzGzmDNWkIMDOrDAj1EgVSNPtPwsOL8OWvJm - AaTjr070D7ZGWWnrrDrWEClBx9Rx/6JAM38RT8Pu7c1hXBm0J81KufSLLYiZ/gOw - Znks5v5RUSGcAXvLkBJeATbsbh6fKX0RgQ3fFTvqQaE/r8LxcTN1uehPX1g5AaRa - z/SNDHaFtQlE3XcqAAukyMn4N5kdNcuwF3GlQ+tJnJv8SstPkfQcZbTMUQ7I2KpJ - ajXnMxmBhV5fCN4rb0QUNCrk2/B+EUMBY4MnxIakqNxnN1kvgI7FBbFgrHUe6QvJ - AgMBAAGjIzAhMA4GA1UdDwEB/wQEAwICpDAPBgNVHRMBAf8EBTADAQH/MA0GCSqG - SIb3DQEBCwUAA4IBAQAYRV57LUsqznSLZHA77o9+0fQetIE115DYP7wea42PODJI - QJ+JETEfoCr0+YOMAbVmznP9GH5cMTKEWHExcIpbMBU7nMZp6A3htcJgF2fgPzOA - aTUtzkuVCSrV//mbbYVxoFOc6sR3Br0wBs5+5iz3dBSt7xmgpMzZvqsQl655i051 - gGSTIY3z5EJmBZBjwuTjal9mMoPGA4eoTPqlITJDHQ2bdCV2oDbc7zqupGrUfZFA - qzgieEyGzdCSRwjr1/PibA3bpwHyhD9CGD0PRVVTLhw6h6L5kuN1jA20OfzWxf/o - XUsdmRaWiF+l4s6Dcd56SuRp5SGNa2+vP9Of/FX5 + FAKECERTsdKgAwI... 
-----END CERTIFICATE----- metrics: auth_key: 'topSecret' @@ -222,22 +311,7 @@ EXAMPLES = ''' security_protocol: 'ssl-with-validation-custom-ca' certificate_authority: | -----BEGIN CERTIFICATE----- - FAKECERTsdKgAwIBAgIBATANBgkqhkiG9w0BAQsFADAmMSQwIgYDVQQDDBtvcGVu - c2hpZnQtc2lnbmVyQDE1MDMzMjAxMTkwHhcNMTcwODIxMTI1NTE5WhcNMjIwODIw - MTI1NTIwWjAmMSQwIgYDVQQDDBtvcGVuc2hpZnQtc2lnbmVyQDE1MDMzMjAxMTkw - ggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDUDnL2tQ2xf/zO7F7hmZ4S - ZuwKENdI4IYuWSxye4i3hPhKg6eKPzGzmDNWkIMDOrDAj1EgVSNPtPwsOL8OWvJm - AaTjr070D7ZGWWnrrDrWEClBx9Rx/6JAM38RT8Pu7c1hXBm0J81KufSLLYiZ/gOw - Znks5v5RUSGcAXvLkBJeATbsbh6fKX0RgQ3fFTvqQaE/r8LxcTN1uehPX1g5AaRa - z/SNDHaFtQlE3XcqAAukyMn4N5kdNcuwF3GlQ+tJnJv8SstPkfQcZbTMUQ7I2KpJ - ajXnMxmBhV5fCN4rb0QUNCrk2/B+EUMBY4MnxIakqNxnN1kvgI7FBbFgrHUe6QvJ - AgMBAAGjIzAhMA4GA1UdDwEB/wQEAwICpDAPBgNVHRMBAf8EBTADAQH/MA0GCSqG - SIb3DQEBCwUAA4IBAQAYRV57LUsqznSLZHA77o9+0fQetIE115DYP7wea42PODJI - QJ+JETEfoCr0+YOMAbVmznP9GH5cMTKEWHExcIpbMBU7nMZp6A3htcJgF2fgPzOA - aTUtzkuVCSrV//mbbYVxoFOc6sR3Br0wBs5+5iz3dBSt7xmgpMzZvqsQl655i051 - gGSTIY3z5EJmBZBjwuTjal9mMoPGA4eoTPqlITJDHQ2bdCV2oDbc7zqupGrUfZFA - qzgieEyGzdCSRwjr1/PibA3bpwHyhD9CGD0PRVVTLhw6h6L5kuN1jA20OfzWxf/o - XUsdmRaWiF+l4s6Dcd56SuRp5SGNa2+vP9Of/FX5 + FAKECERTsdKgAwI... 
-----END CERTIFICATE----- manageiq_connection: url: 'https://127.0.0.1:80' @@ -259,22 +333,7 @@ EXAMPLES = ''' security_protocol: 'ssl-with-validation-custom-ca' certificate_authority: | -----BEGIN CERTIFICATE----- - FAKECERTsdKgAwIBAgIBATANBgkqhkiG9w0BAQsFADAmMSQwIgYDVQQDDBtvcGVu - c2hpZnQtc2lnbmVyQDE1MDMzMjAxMTkwHhcNMTcwODIxMTI1NTE5WhcNMjIwODIw - MTI1NTIwWjAmMSQwIgYDVQQDDBtvcGVuc2hpZnQtc2lnbmVyQDE1MDMzMjAxMTkw - ggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDUDnL2tQ2xf/zO7F7hmZ4S - ZuwKENdI4IYuWSxye4i3hPhKg6eKPzGzmDNWkIMDOrDAj1EgVSNPtPwsOL8OWvJm - AaTjr070D7ZGWWnrrDrWEClBx9Rx/6JAM38RT8Pu7c1hXBm0J81KufSLLYiZ/gOw - Znks5v5RUSGcAXvLkBJeATbsbh6fKX0RgQ3fFTvqQaE/r8LxcTN1uehPX1g5AaRa - z/SNDHaFtQlE3XcqAAukyMn4N5kdNcuwF3GlQ+tJnJv8SstPkfQcZbTMUQ7I2KpJ - ajXnMxmBhV5fCN4rb0QUNCrk2/B+EUMBY4MnxIakqNxnN1kvgI7FBbFgrHUe6QvJ - AgMBAAGjIzAhMA4GA1UdDwEB/wQEAwICpDAPBgNVHRMBAf8EBTADAQH/MA0GCSqG - SIb3DQEBCwUAA4IBAQAYRV57LUsqznSLZHA77o9+0fQetIE115DYP7wea42PODJI - QJ+JETEfoCr0+YOMAbVmznP9GH5cMTKEWHExcIpbMBU7nMZp6A3htcJgF2fgPzOA - aTUtzkuVCSrV//mbbYVxoFOc6sR3Br0wBs5+5iz3dBSt7xmgpMzZvqsQl655i051 - gGSTIY3z5EJmBZBjwuTjal9mMoPGA4eoTPqlITJDHQ2bdCV2oDbc7zqupGrUfZFA - qzgieEyGzdCSRwjr1/PibA3bpwHyhD9CGD0PRVVTLhw6h6L5kuN1jA20OfzWxf/o - XUsdmRaWiF+l4s6Dcd56SuRp5SGNa2+vP9Of/FX5 + FAKECERTsdKgAwI... 
-----END CERTIFICATE----- metrics: auth_key: 'topSecret' @@ -284,22 +343,7 @@ EXAMPLES = ''' security_protocol: 'ssl-with-validation-custom-ca' certificate_authority: | -----BEGIN CERTIFICATE----- - FAKECERTsdKgAwIBAgIBATANBgkqhkiG9w0BAQsFADAmMSQwIgYDVQQDDBtvcGVu - c2hpZnQtc2lnbmVyQDE1MDMzMjAxMTkwHhcNMTcwODIxMTI1NTE5WhcNMjIwODIw - MTI1NTIwWjAmMSQwIgYDVQQDDBtvcGVuc2hpZnQtc2lnbmVyQDE1MDMzMjAxMTkw - ggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDUDnL2tQ2xf/zO7F7hmZ4S - ZuwKENdI4IYuWSxye4i3hPhKg6eKPzGzmDNWkIMDOrDAj1EgVSNPtPwsOL8OWvJm - AaTjr070D7ZGWWnrrDrWEClBx9Rx/6JAM38RT8Pu7c1hXBm0J81KufSLLYiZ/gOw - Znks5v5RUSGcAXvLkBJeATbsbh6fKX0RgQ3fFTvqQaE/r8LxcTN1uehPX1g5AaRa - z/SNDHaFtQlE3XcqAAukyMn4N5kdNcuwF3GlQ+tJnJv8SstPkfQcZbTMUQ7I2KpJ - ajXnMxmBhV5fCN4rb0QUNCrk2/B+EUMBY4MnxIakqNxnN1kvgI7FBbFgrHUe6QvJ - AgMBAAGjIzAhMA4GA1UdDwEB/wQEAwICpDAPBgNVHRMBAf8EBTADAQH/MA0GCSqG - SIb3DQEBCwUAA4IBAQAYRV57LUsqznSLZHA77o9+0fQetIE115DYP7wea42PODJI - QJ+JETEfoCr0+YOMAbVmznP9GH5cMTKEWHExcIpbMBU7nMZp6A3htcJgF2fgPzOA - aTUtzkuVCSrV//mbbYVxoFOc6sR3Br0wBs5+5iz3dBSt7xmgpMzZvqsQl655i051 - gGSTIY3z5EJmBZBjwuTjal9mMoPGA4eoTPqlITJDHQ2bdCV2oDbc7zqupGrUfZFA - qzgieEyGzdCSRwjr1/PibA3bpwHyhD9CGD0PRVVTLhw6h6L5kuN1jA20OfzWxf/o - XUsdmRaWiF+l4s6Dcd56SuRp5SGNa2+vP9Of/FX5 + FAKECERTsdKgAwI... 
-----END CERTIFICATE----- manageiq_connection: url: 'https://127.0.0.1' @@ -347,22 +391,7 @@ EXAMPLES = ''' validate_certs: true certificate_authority: | -----BEGIN CERTIFICATE----- - FAKECERTsdKgAwIBAgIBATANBgkqhkiG9w0BAQsFADAmMSQwIgYDVQQDDBtvcGVu - c2hpZnQtc2lnbmVyQDE1MDMzMjAxMTkwHhcNMTcwODIxMTI1NTE5WhcNMjIwODIw - MTI1NTIwWjAmMSQwIgYDVQQDDBtvcGVuc2hpZnQtc2lnbmVyQDE1MDMzMjAxMTkw - ggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDUDnL2tQ2xf/zO7F7hmZ4S - ZuwKENdI4IYuWSxye4i3hPhKg6eKPzGzmDNWkIMDOrDAj1EgVSNPtPwsOL8OWvJm - AaTjr070D7ZGWWnrrDrWEClBx9Rx/6JAM38RT8Pu7c1hXBm0J81KufSLLYiZ/gOw - Znks5v5RUSGcAXvLkBJeATbsbh6fKX0RgQ3fFTvqQaE/r8LxcTN1uehPX1g5AaRa - z/SNDHaFtQlE3XcqAAukyMn4N5kdNcuwF3GlQ+tJnJv8SstPkfQcZbTMUQ7I2KpJ - ajXnMxmBhV5fCN4rb0QUNCrk2/B+EUMBY4MnxIakqNxnN1kvgI7FBbFgrHUe6QvJ - AgMBAAGjIzAhMA4GA1UdDwEB/wQEAwICpDAPBgNVHRMBAf8EBTADAQH/MA0GCSqG - SIb3DQEBCwUAA4IBAQAYRV57LUsqznSLZHA77o9+0fQetIE115DYP7wea42PODJI - QJ+JETEfoCr0+YOMAbVmznP9GH5cMTKEWHExcIpbMBU7nMZp6A3htcJgF2fgPzOA - aTUtzkuVCSrV//mbbYVxoFOc6sR3Br0wBs5+5iz3dBSt7xmgpMzZvqsQl655i051 - gGSTIY3z5EJmBZBjwuTjal9mMoPGA4eoTPqlITJDHQ2bdCV2oDbc7zqupGrUfZFA - qzgieEyGzdCSRwjr1/PibA3bpwHyhD9CGD0PRVVTLhw6h6L5kuN1jA20OfzWxf/o - XUsdmRaWiF+l4s6Dcd56SuRp5SGNa2+vP9Of/FX5 + FAKECERTsdKgAwI... 
-----END CERTIFICATE----- metrics: hostname: 'metrics.example.com' @@ -372,22 +401,7 @@ EXAMPLES = ''' validate_certs: true certificate_authority: | -----BEGIN CERTIFICATE----- - FAKECERTsdKgAwIBAgIBATANBgkqhkiG9w0BAQsFADAmMSQwIgYDVQQDDBtvcGVu - c2hpZnQtc2lnbmVyQDE1MDMzMjAxMTkwHhcNMTcwODIxMTI1NTE5WhcNMjIwODIw - MTI1NTIwWjAmMSQwIgYDVQQDDBtvcGVuc2hpZnQtc2lnbmVyQDE1MDMzMjAxMTkw - ggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDUDnL2tQ2xf/zO7F7hmZ4S - ZuwKENdI4IYuWSxye4i3hPhKg6eKPzGzmDNWkIMDOrDAj1EgVSNPtPwsOL8OWvJm - AaTjr070D7ZGWWnrrDrWEClBx9Rx/6JAM38RT8Pu7c1hXBm0J81KufSLLYiZ/gOw - Znks5v5RUSGcAXvLkBJeATbsbh6fKX0RgQ3fFTvqQaE/r8LxcTN1uehPX1g5AaRa - z/SNDHaFtQlE3XcqAAukyMn4N5kdNcuwF3GlQ+tJnJv8SstPkfQcZbTMUQ7I2KpJ - ajXnMxmBhV5fCN4rb0QUNCrk2/B+EUMBY4MnxIakqNxnN1kvgI7FBbFgrHUe6QvJ - AgMBAAGjIzAhMA4GA1UdDwEB/wQEAwICpDAPBgNVHRMBAf8EBTADAQH/MA0GCSqG - SIb3DQEBCwUAA4IBAQAYRV57LUsqznSLZHA77o9+0fQetIE115DYP7wea42PODJI - QJ+JETEfoCr0+YOMAbVmznP9GH5cMTKEWHExcIpbMBU7nMZp6A3htcJgF2fgPzOA - aTUtzkuVCSrV//mbbYVxoFOc6sR3Br0wBs5+5iz3dBSt7xmgpMzZvqsQl655i051 - gGSTIY3z5EJmBZBjwuTjal9mMoPGA4eoTPqlITJDHQ2bdCV2oDbc7zqupGrUfZFA - qzgieEyGzdCSRwjr1/PibA3bpwHyhD9CGD0PRVVTLhw6h6L5kuN1jA20OfzWxf/o - XUsdmRaWiF+l4s6Dcd56SuRp5SGNa2+vP9Of/FX5 + FAKECERTsdKgAwI... 
-----END CERTIFICATE----- manageiq_connection: url: 'https://127.0.0.1' @@ -427,7 +441,7 @@ EXAMPLES = ''' url: 'https://cf-6af0.rhpds.opentlc.com' username: 'admin' password: 'password' - validate_certs: false + validate_certs: true - name: Create a new OpenStack Director provider in ManageIQ with rsa keypair community.general.manageiq_provider: @@ -443,22 +457,7 @@ EXAMPLES = ''' validate_certs: 'true' certificate_authority: | -----BEGIN CERTIFICATE----- - FAKECERTsdKgAwIBAgIBATANBgkqhkiG9w0BAQsFADAmMSQwIgYDVQQDDBtvcGVu - c2hpZnQtc2lnbmVyQDE1MDMzMjAxMTkwHhcNMTcwODIxMTI1NTE5WhcNMjIwODIw - MTI1NTIwWjAmMSQwIgYDVQQDDBtvcGVuc2hpZnQtc2lnbmVyQDE1MDMzMjAxMTkw - ggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDUDnL2tQ2xf/zO7F7hmZ4S - ZuwKENdI4IYuWSxye4i3hPhKg6eKPzGzmDNWkIMDOrDAj1EgVSNPtPwsOL8OWvJm - AaTjr070D7ZGWWnrrDrWEClBx9Rx/6JAM38RT8Pu7c1hXBm0J81KufSLLYiZ/gOw - Znks5v5RUSGcAXvLkBJeATbsbh6fKX0RgQ3fFTvqQaE/r8LxcTN1uehPX1g5AaRa - z/SNDHaFtQlE3XcqAAukyMn4N5kdNcuwF3GlQ+tJnJv8SstPkfQcZbTMUQ7I2KpJ - ajXnMxmBhV5fCN4rb0QUNCrk2/B+EUMBY4MnxIakqNxnN1kvgI7FBbFgrHUe6QvJ - AgMBAAGjIzAhMA4GA1UdDwEB/wQEAwICpDAPBgNVHRMBAf8EBTADAQH/MA0GCSqG - SIb3DQEBCwUAA4IBAQAYRV57LUsqznSLZHA77o9+0fQetIE115DYP7wea42PODJI - QJ+JETEfoCr0+YOMAbVmznP9GH5cMTKEWHExcIpbMBU7nMZp6A3htcJgF2fgPzOA - aTUtzkuVCSrV//mbbYVxoFOc6sR3Br0wBs5+5iz3dBSt7xmgpMzZvqsQl655i051 - gGSTIY3z5EJmBZBjwuTjal9mMoPGA4eoTPqlITJDHQ2bdCV2oDbc7zqupGrUfZFA - qzgieEyGzdCSRwjr1/PibA3bpwHyhD9CGD0PRVVTLhw6h6L5kuN1jA20OfzWxf/o - XUsdmRaWiF+l4s6Dcd56SuRp5SGNa2+vP9Of/FX5 + FAKECERTsdKgAwI... 
-----END CERTIFICATE----- ssh_keypair: hostname: director.example.com @@ -482,22 +481,7 @@ EXAMPLES = ''' validate_certs: 'true' certificate_authority: | -----BEGIN CERTIFICATE----- - FAKECERTsdKgAwIBAgIBATANBgkqhkiG9w0BAQsFADAmMSQwIgYDVQQDDBtvcGVu - c2hpZnQtc2lnbmVyQDE1MDMzMjAxMTkwHhcNMTcwODIxMTI1NTE5WhcNMjIwODIw - MTI1NTIwWjAmMSQwIgYDVQQDDBtvcGVuc2hpZnQtc2lnbmVyQDE1MDMzMjAxMTkw - ggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDUDnL2tQ2xf/zO7F7hmZ4S - ZuwKENdI4IYuWSxye4i3hPhKg6eKPzGzmDNWkIMDOrDAj1EgVSNPtPwsOL8OWvJm - AaTjr070D7ZGWWnrrDrWEClBx9Rx/6JAM38RT8Pu7c1hXBm0J81KufSLLYiZ/gOw - Znks5v5RUSGcAXvLkBJeATbsbh6fKX0RgQ3fFTvqQaE/r8LxcTN1uehPX1g5AaRa - z/SNDHaFtQlE3XcqAAukyMn4N5kdNcuwF3GlQ+tJnJv8SstPkfQcZbTMUQ7I2KpJ - ajXnMxmBhV5fCN4rb0QUNCrk2/B+EUMBY4MnxIakqNxnN1kvgI7FBbFgrHUe6QvJ - AgMBAAGjIzAhMA4GA1UdDwEB/wQEAwICpDAPBgNVHRMBAf8EBTADAQH/MA0GCSqG - SIb3DQEBCwUAA4IBAQAYRV57LUsqznSLZHA77o9+0fQetIE115DYP7wea42PODJI - QJ+JETEfoCr0+YOMAbVmznP9GH5cMTKEWHExcIpbMBU7nMZp6A3htcJgF2fgPzOA - aTUtzkuVCSrV//mbbYVxoFOc6sR3Br0wBs5+5iz3dBSt7xmgpMzZvqsQl655i051 - gGSTIY3z5EJmBZBjwuTjal9mMoPGA4eoTPqlITJDHQ2bdCV2oDbc7zqupGrUfZFA - qzgieEyGzdCSRwjr1/PibA3bpwHyhD9CGD0PRVVTLhw6h6L5kuN1jA20OfzWxf/o - XUsdmRaWiF+l4s6Dcd56SuRp5SGNa2+vP9Of/FX5 + FAKECERTsdKgAwI... 
-----END CERTIFICATE----- metrics: role: amqp @@ -519,10 +503,10 @@ EXAMPLES = ''' hostname: 'gce.example.com' auth_key: 'google_json_key' validate_certs: 'false' -''' +""" -RETURN = ''' -''' +RETURN = r""" +""" from ansible.module_utils.basic import AnsibleModule from ansible_collections.community.general.plugins.module_utils.manageiq import ManageIQ, manageiq_argument_spec @@ -607,7 +591,7 @@ def delete_nulls(h): if isinstance(h, list): return [delete_nulls(i) for i in h] if isinstance(h, dict): - return dict((k, delete_nulls(v)) for k, v in h.items() if v is not None) + return {k: delete_nulls(v) for k, v in h.items() if v is not None} return h diff --git a/plugins/modules/manageiq_tags.py b/plugins/modules/manageiq_tags.py new file mode 100644 index 0000000000..7715a04288 --- /dev/null +++ b/plugins/modules/manageiq_tags.py @@ -0,0 +1,180 @@ +#!/usr/bin/python +# Copyright (c) 2017, Daniel Korn +# Copyright (c) 2017, Yaacov Zamir +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + + +DOCUMENTATION = r""" +module: manageiq_tags + +short_description: Management of resource tags in ManageIQ +extends_documentation_fragment: + - community.general.manageiq + - community.general.attributes + +author: Daniel Korn (@dkorn) +description: + - The manageiq_tags module supports adding, updating and deleting tags in ManageIQ. +attributes: + check_mode: + support: none + diff_mode: + support: none + +options: + state: + type: str + description: + - V(absent) - tags should not exist, + - V(present) - tags should exist. + choices: ['absent', 'present'] + default: 'present' + tags: + type: list + elements: dict + description: + - V(tags) - list of dictionaries, each includes C(name) and C(category) keys. + - Required if O(state) is V(present) or V(absent). 
+ resource_type: + type: str + description: + - The relevant resource type in manageiq. + required: true + choices: + - provider + - host + - vm + - blueprint + - category + - cluster + - data store + - group + - resource pool + - service + - service template + - template + - tenant + - user + resource_name: + type: str + description: + - The name of the resource at which tags are controlled. + - Must be specified if O(resource_id) is not set. Both options are mutually exclusive. + resource_id: + description: + - The ID of the resource at which tags are controlled. + - Must be specified if O(resource_name) is not set. Both options are mutually exclusive. + type: int + version_added: 2.2.0 +""" + +EXAMPLES = r""" +- name: Create new tags for a provider in ManageIQ. + community.general.manageiq_tags: + resource_name: 'EngLab' + resource_type: 'provider' + tags: + - category: environment + name: prod + - category: owner + name: prod_ops + manageiq_connection: + url: 'http://127.0.0.1:3000' + username: 'admin' + password: 'smartvm' + validate_certs: false # only do this when connecting to localhost! + +- name: Create new tags for a provider in ManageIQ. + community.general.manageiq_tags: + resource_id: 23000000790497 + resource_type: 'provider' + tags: + - category: environment + name: prod + - category: owner + name: prod_ops + manageiq_connection: + url: 'http://127.0.0.1:3000' + username: 'admin' + password: 'smartvm' + validate_certs: false # only do this when connecting to localhost! + +- name: Remove tags for a provider in ManageIQ. + community.general.manageiq_tags: + state: absent + resource_name: 'EngLab' + resource_type: 'provider' + tags: + - category: environment + name: prod + - category: owner + name: prod_ops + manageiq_connection: + url: 'http://127.0.0.1:3000' + username: 'admin' + password: 'smartvm' + validate_certs: false # only do this when connecting to localhost! 
+""" + +RETURN = r""" +""" + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.manageiq import ( + ManageIQ, ManageIQTags, manageiq_argument_spec, manageiq_entities +) + + +def main(): + actions = {'present': 'assign', 'absent': 'unassign'} + argument_spec = dict( + tags=dict(type='list', elements='dict'), + resource_id=dict(type='int'), + resource_name=dict(type='str'), + resource_type=dict(required=True, type='str', + choices=list(manageiq_entities().keys())), + state=dict(type='str', + choices=['present', 'absent'], default='present'), + ) + # add the manageiq connection arguments to the arguments + argument_spec.update(manageiq_argument_spec()) + + module = AnsibleModule( + argument_spec=argument_spec, + mutually_exclusive=[["resource_id", "resource_name"]], + required_one_of=[["resource_id", "resource_name"]], + required_if=[ + ('state', 'present', ['tags']), + ('state', 'absent', ['tags']) + ], + ) + + tags = module.params['tags'] + resource_id = module.params['resource_id'] + resource_type_key = module.params['resource_type'] + resource_name = module.params['resource_name'] + state = module.params['state'] + + # get the action and resource type + action = actions[state] + resource_type = manageiq_entities()[resource_type_key] + + manageiq = ManageIQ(module) + + # query resource id, fail if resource does not exist + if resource_id is None: + resource_id = manageiq.query_resource_id(resource_type, resource_name) + + manageiq_tags = ManageIQTags(manageiq, resource_type, resource_id) + + # assign or unassign the tags + res_args = manageiq_tags.assign_or_unassign_tags(tags, action) + + module.exit_json(**res_args) + + +if __name__ == "__main__": + main() diff --git a/plugins/modules/manageiq_tags_info.py b/plugins/modules/manageiq_tags_info.py new file mode 100644 index 0000000000..eeb2e74685 --- /dev/null +++ b/plugins/modules/manageiq_tags_info.py @@ -0,0 +1,121 @@ +#!/usr/bin/python +# 
Copyright (c) 2017, Daniel Korn +# Copyright (c) 2017, Yaacov Zamir +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + + +DOCUMENTATION = r""" +module: manageiq_tags_info +version_added: 5.8.0 +short_description: Retrieve resource tags in ManageIQ +extends_documentation_fragment: + - community.general.manageiq + - community.general.attributes + - community.general.attributes.info_module + +author: Alexei Znamensky (@russoz) +description: + - This module supports retrieving resource tags from ManageIQ. +options: + resource_type: + type: str + description: + - The relevant resource type in ManageIQ. + required: true + choices: + - provider + - host + - vm + - blueprint + - category + - cluster + - data store + - group + - resource pool + - service + - service template + - template + - tenant + - user + resource_name: + type: str + description: + - The name of the resource at which tags are controlled. + - Must be specified if O(resource_id) is not set. Both options are mutually exclusive. + resource_id: + description: + - The ID of the resource at which tags are controlled. + - Must be specified if O(resource_name) is not set. Both options are mutually exclusive. + type: int +""" + +EXAMPLES = r""" +- name: List current tags for a provider in ManageIQ. + community.general.manageiq_tags_info: + resource_name: 'EngLab' + resource_type: 'provider' + manageiq_connection: + url: 'http://127.0.0.1:3000' + username: 'admin' + password: 'smartvm' + register: result +""" + +RETURN = r""" +tags: + description: List of tags associated with the resource. 
+ returned: on success + type: list + elements: dict +""" + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.manageiq import ( + ManageIQ, ManageIQTags, manageiq_argument_spec, manageiq_entities +) + + +def main(): + argument_spec = dict( + resource_id=dict(type='int'), + resource_name=dict(type='str'), + resource_type=dict(required=True, type='str', + choices=list(manageiq_entities().keys())), + ) + # add the manageiq connection arguments to the arguments + argument_spec.update(manageiq_argument_spec()) + + module = AnsibleModule( + argument_spec=argument_spec, + mutually_exclusive=[["resource_id", "resource_name"]], + required_one_of=[["resource_id", "resource_name"]], + supports_check_mode=True, + ) + + resource_id = module.params['resource_id'] + resource_type_key = module.params['resource_type'] + resource_name = module.params['resource_name'] + + # get the action and resource type + resource_type = manageiq_entities()[resource_type_key] + + manageiq = ManageIQ(module) + + # query resource id, fail if resource does not exist + if resource_id is None: + resource_id = manageiq.query_resource_id(resource_type, resource_name) + + manageiq_tags = ManageIQTags(manageiq, resource_type, resource_id) + + # return a list of current tags for this object + current_tags = manageiq_tags.query_resource_tags() + res_args = dict(changed=False, tags=current_tags) + + module.exit_json(**res_args) + + +if __name__ == "__main__": + main() diff --git a/plugins/modules/remote_management/manageiq/manageiq_tenant.py b/plugins/modules/manageiq_tenant.py similarity index 87% rename from plugins/modules/remote_management/manageiq/manageiq_tenant.py rename to plugins/modules/manageiq_tenant.py index 58c2e1ed71..0ba54bbc91 100644 --- a/plugins/modules/remote_management/manageiq/manageiq_tenant.py +++ b/plugins/modules/manageiq_tenant.py @@ -1,45 +1,35 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- -# -# (c) 2018, Evert 
Mulder (base on manageiq_user.py by Daniel Korn ) -# -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . +# Copyright (c) 2018, Evert Mulder (base on manageiq_user.py by Daniel Korn ) +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import (absolute_import, division, print_function) +from __future__ import annotations -__metaclass__ = type - -DOCUMENTATION = ''' +DOCUMENTATION = r""" module: manageiq_tenant -short_description: Management of tenants in ManageIQ. +short_description: Management of tenants in ManageIQ extends_documentation_fragment: -- community.general.manageiq + - community.general.manageiq + - community.general.attributes author: Evert Mulder (@evertmulder) description: - The manageiq_tenant module supports adding, updating and deleting tenants in ManageIQ. requirements: -- manageiq-client + - manageiq-client +attributes: + check_mode: + support: none + diff_mode: + support: none options: state: type: str description: - - absent - tenant should not exist, present - tenant should be. + - V(absent) - tenant should not exist, + - V(present) - tenant should be. choices: ['absent', 'present'] default: 'present' name: @@ -47,42 +37,42 @@ options: description: - The tenant name. 
 required: true - default: null + default: description: type: str description: - - The tenant description. + - The tenant description. required: true - default: null + default: parent_id: type: int description: - - The id of the parent tenant. If not supplied the root tenant is used. - - The C(parent_id) takes president over C(parent) when supplied + - The ID of the parent tenant. If not supplied the root tenant is used. + - The O(parent_id) takes precedence over O(parent) when supplied. required: false - default: null + default: parent: type: str description: - - The name of the parent tenant. If not supplied and no C(parent_id) is supplied the root tenant is used. + - The name of the parent tenant. If not supplied and no O(parent_id) is supplied the root tenant is used. required: false - default: null + default: quotas: type: dict description: - - The tenant quotas. - - All parameters case sensitive. - - 'Valid attributes are:' - - ' - C(cpu_allocated) (int): use null to remove the quota.' - - ' - C(mem_allocated) (GB): use null to remove the quota.' - - ' - C(storage_allocated) (GB): use null to remove the quota.' - - ' - C(vms_allocated) (int): use null to remove the quota.' - - ' - C(templates_allocated) (int): use null to remove the quota.' + - The tenant quotas. + - All parameters case sensitive. + - 'Valid attributes are:' + - '- V(cpu_allocated) (int): use null to remove the quota.' + - '- V(mem_allocated) (GB): use null to remove the quota.' + - '- V(storage_allocated) (GB): use null to remove the quota.' + - '- V(vms_allocated) (int): use null to remove the quota.' + - '- V(templates_allocated) (int): use null to remove the quota.' 
required: false - default: null -''' + default: {} +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: Update the root tenant in ManageIQ community.general.manageiq_tenant: name: 'My Company' @@ -91,7 +81,7 @@ EXAMPLES = ''' url: 'http://127.0.0.1:3000' username: 'admin' password: 'smartvm' - validate_certs: False + validate_certs: false # only do this when you trust the network! - name: Create a tenant in ManageIQ community.general.manageiq_tenant: @@ -102,7 +92,7 @@ EXAMPLES = ''' url: 'http://127.0.0.1:3000' username: 'admin' password: 'smartvm' - validate_certs: False + validate_certs: false # only do this when you trust the network! - name: Delete a tenant in ManageIQ community.general.manageiq_tenant: @@ -113,7 +103,7 @@ EXAMPLES = ''' url: 'http://127.0.0.1:3000' username: 'admin' password: 'smartvm' - validate_certs: False + validate_certs: false # only do this when you trust the network! - name: Set tenant quota for cpu_allocated, mem_allocated, remove quota for vms_allocated community.general.manageiq_tenant: @@ -122,12 +112,12 @@ EXAMPLES = ''' quotas: - cpu_allocated: 100 - mem_allocated: 50 - - vms_allocated: null + - vms_allocated: manageiq_connection: url: 'http://127.0.0.1:3000' username: 'admin' password: 'smartvm' - validate_certs: False + validate_certs: false # only do this when you trust the network! - name: Delete a tenant in ManageIQ using a token @@ -138,39 +128,39 @@ EXAMPLES = ''' manageiq_connection: url: 'http://127.0.0.1:3000' token: 'sometoken' - validate_certs: False -''' + validate_certs: false # only do this when you trust the network! +""" -RETURN = ''' +RETURN = r""" tenant: description: The tenant. returned: success type: complex contains: id: - description: The tenant id + description: The tenant ID. returned: success type: int name: - description: The tenant name + description: The tenant name. returned: success type: str description: - description: The tenant description + description: The tenant description. 
returned: success type: str parent_id: - description: The id of the parent tenant + description: The ID of the parent tenant. returned: success type: int quotas: - description: List of tenant quotas + description: List of tenant quotas. returned: success type: list sample: cpu_allocated: 100 mem_allocated: 50 -''' +""" from ansible.module_utils.basic import AnsibleModule from ansible_collections.community.general.plugins.module_utils.manageiq import ManageIQ, manageiq_argument_spec @@ -222,7 +212,7 @@ class ManageIQTenant(object): self.module.fail_json(msg="Parent tenant '%s' not found in manageiq" % parent) if len(parent_tenant_res) > 1: - self.module.fail_json(msg="Multiple parent tenants not found in manageiq with name '%s" % parent) + self.module.fail_json(msg="Multiple parent tenants not found in manageiq with name '%s'" % parent) parent_tenant = parent_tenant_res[0] parent_id = int(parent_tenant['id']) @@ -490,8 +480,8 @@ def main(): argument_spec = dict( name=dict(required=True, type='str'), description=dict(required=True, type='str'), - parent_id=dict(required=False, type='int'), - parent=dict(required=False, type='str'), + parent_id=dict(type='int'), + parent=dict(type='str'), state=dict(choices=['absent', 'present'], default='present'), quotas=dict(type='dict', default={}) ) diff --git a/plugins/modules/remote_management/manageiq/manageiq_user.py b/plugins/modules/manageiq_user.py similarity index 87% rename from plugins/modules/remote_management/manageiq/manageiq_user.py rename to plugins/modules/manageiq_user.py index f3dc8103f7..c116387e65 100644 --- a/plugins/modules/remote_management/manageiq/manageiq_user.py +++ b/plugins/modules/manageiq_user.py @@ -1,44 +1,34 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- -# -# (c) 2017, Daniel Korn -# -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either 
version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . +# Copyright (c) 2017, Daniel Korn +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = ''' - +DOCUMENTATION = r""" module: manageiq_user -short_description: Management of users in ManageIQ. +short_description: Management of users in ManageIQ extends_documentation_fragment: -- community.general.manageiq + - community.general.manageiq + - community.general.attributes author: Daniel Korn (@dkorn) description: - The manageiq_user module supports adding, updating and deleting users in ManageIQ. +attributes: + check_mode: + support: none + diff_mode: + support: none options: state: type: str description: - - absent - user should not exist, present - user should be. + - V(absent) - user should not exist, + - V(present) - user should be. choices: ['absent', 'present'] default: 'present' userid: @@ -67,10 +57,11 @@ options: default: always choices: ['always', 'on_create'] description: - - C(always) will update passwords unconditionally. C(on_create) will only set the password for a newly created user. -''' + - V(always) updates passwords unconditionally. + - V(on_create) only sets the password for a newly created user. 
+""" -EXAMPLES = ''' +EXAMPLES = r""" - name: Create a new user in ManageIQ community.general.manageiq_user: userid: 'jdoe' @@ -82,7 +73,7 @@ EXAMPLES = ''' url: 'http://127.0.0.1:3000' username: 'admin' password: 'smartvm' - validate_certs: False + validate_certs: false # only do this when you trust the network! - name: Create a new user in ManageIQ using a token community.general.manageiq_user: @@ -94,7 +85,7 @@ EXAMPLES = ''' manageiq_connection: url: 'http://127.0.0.1:3000' token: 'sometoken' - validate_certs: False + validate_certs: false # only do this when you trust the network! - name: Delete a user in ManageIQ community.general.manageiq_user: @@ -104,7 +95,7 @@ EXAMPLES = ''' url: 'http://127.0.0.1:3000' username: 'admin' password: 'smartvm' - validate_certs: False + validate_certs: false # only do this when you trust the network! - name: Delete a user in ManageIQ using a token community.general.manageiq_user: @@ -113,7 +104,7 @@ EXAMPLES = ''' manageiq_connection: url: 'http://127.0.0.1:3000' token: 'sometoken' - validate_certs: False + validate_certs: false # only do this when you trust the network! - name: Update email of user in ManageIQ community.general.manageiq_user: @@ -123,7 +114,7 @@ EXAMPLES = ''' url: 'http://127.0.0.1:3000' username: 'admin' password: 'smartvm' - validate_certs: False + validate_certs: false # only do this when you trust the network! - name: Update email of user in ManageIQ using a token community.general.manageiq_user: @@ -132,11 +123,11 @@ EXAMPLES = ''' manageiq_connection: url: 'http://127.0.0.1:3000' token: 'sometoken' - validate_certs: False -''' + validate_certs: false # only do this when you trust the network! 
+""" -RETURN = ''' -''' +RETURN = r""" +""" from ansible.module_utils.basic import AnsibleModule from ansible_collections.community.general.plugins.module_utils.manageiq import ManageIQ, manageiq_argument_spec diff --git a/plugins/modules/packaging/os/mas.py b/plugins/modules/mas.py similarity index 73% rename from plugins/modules/packaging/os/mas.py rename to plugins/modules/mas.py index 0afd858add..2e851f9ab6 100644 --- a/plugins/modules/packaging/os/mas.py +++ b/plugins/modules/mas.py @@ -1,55 +1,61 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- -# Copyright: (c) 2020, Lukas Bestle -# Copyright: (c) 2017, Michael Heap -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# Copyright (c) 2020, Lukas Bestle +# Copyright (c) 2017, Michael Heap +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = ''' +DOCUMENTATION = r""" module: mas short_description: Manage Mac App Store applications with mas-cli description: - - Installs, uninstalls and updates macOS applications from the Mac App Store using the C(mas-cli). + - Installs, uninstalls and updates macOS applications from the Mac App Store using the C(mas-cli). version_added: '0.2.0' author: - - Michael Heap (@mheap) - - Lukas Bestle (@lukasbestle) + - Michael Heap (@mheap) + - Lukas Bestle (@lukasbestle) +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: none options: - id: - description: - - The Mac App Store identifier of the app(s) you want to manage. - - This can be found by running C(mas search APP_NAME) on your machine. - type: list - elements: int - state: - description: - - Desired state of the app installation. 
- - The C(absent) value requires root permissions, also see the examples. - type: str - choices: - - absent - - latest - - present - default: present - upgrade_all: - description: - - Upgrade all installed Mac App Store apps. - type: bool - default: "no" - aliases: ["upgrade"] + id: + description: + - The Mac App Store identifier of the app(s) you want to manage. + - This can be found by running C(mas search APP_NAME) on your machine. + type: list + elements: int + state: + description: + - Desired state of the app installation. + - The V(absent) value requires root permissions, also see the examples. + type: str + choices: + - absent + - latest + - present + default: present + upgrade_all: + description: + - Upgrade all installed Mac App Store apps. + type: bool + default: false + aliases: ["upgrade"] requirements: - - macOS 10.11+ - - "mas-cli (U(https://github.com/mas-cli/mas)) 1.5.0+ available as C(mas) in the bin path" - - The Apple ID to use already needs to be signed in to the Mac App Store (check with C(mas account)). -notes: - - This module supports C(check_mode). -''' + - macOS 10.11 or higher. + - "mas-cli (U(https://github.com/mas-cli/mas)) 1.5.0+ available as C(mas) in the bin path" + - The Apple ID to use already needs to be signed in to the Mac App Store (check with C(mas account)). + - The feature of "checking if user is signed in" is disabled for anyone using macOS 12.0+. + - Users need to sign in to the Mac App Store GUI beforehand for anyone using macOS 12.0+ due to U(https://github.com/mas-cli/mas/issues/417). 
+""" -EXAMPLES = ''' +EXAMPLES = r""" - name: Install Keynote community.general.mas: id: 409183694 @@ -76,7 +82,7 @@ EXAMPLES = ''' - name: Upgrade all installed Mac App Store apps community.general.mas: - upgrade_all: yes + upgrade_all: true - name: Install specific apps and also upgrade all others community.general.mas: @@ -84,23 +90,25 @@ EXAMPLES = ''' - 409183694 # Keynote - 413857545 # Divvy state: present - upgrade_all: yes + upgrade_all: true - name: Uninstall Divvy community.general.mas: id: 413857545 state: absent - become: yes # Uninstallation requires root permissions -''' + become: true # Uninstallation requires root permissions +""" -RETURN = r''' # ''' +RETURN = r""" # """ from ansible.module_utils.basic import AnsibleModule -from ansible.module_utils.common.text.converters import to_native import os from ansible_collections.community.general.plugins.module_utils.version import LooseVersion +import platform +NOT_WORKING_MAC_VERSION_MAS_ACCOUNT = '12.0' + class Mas(object): @@ -110,6 +118,7 @@ class Mas(object): # Initialize data properties self.mas_path = self.module.get_bin_path('mas') self._checked_signin = False + self._mac_version = platform.mac_ver()[0] or '0.0' self._installed = None # Populated only if needed self._outdated = None # Populated only if needed self.count_install = 0 @@ -151,14 +160,16 @@ class Mas(object): def check_signin(self): ''' Verifies that the user is signed in to the Mac App Store ''' - # Only check this once per execution if self._checked_signin: return - - rc, out, err = self.run(['account']) - if out.split("\n", 1)[0].rstrip() == 'Not signed in': - self.module.fail_json(msg='You must be signed in to the Mac App Store') + if LooseVersion(self._mac_version) >= LooseVersion(NOT_WORKING_MAC_VERSION_MAS_ACCOUNT): + # Checking if user is signed-in is disabled due to https://github.com/mas-cli/mas/issues/417 + self.module.log('WARNING: You must be signed in via the Mac App Store GUI beforehand else error will occur') + else: 
+ rc, out, err = self.run(['account']) + if out.split("\n", 1)[0].rstrip() == 'Not signed in': + self.module.fail_json(msg='You must be signed in to the Mac App Store') self._checked_signin = True @@ -273,7 +284,7 @@ def main(): if mas.is_installed(app): # Ensure we are root if os.getuid() != 0: - module.fail_json(msg="Uninstalling apps requires root permissions ('become: yes')") + module.fail_json(msg="Uninstalling apps requires root permissions ('become: true')") mas.app_command('uninstall', app) diff --git a/plugins/modules/notification/matrix.py b/plugins/modules/matrix.py similarity index 61% rename from plugins/modules/notification/matrix.py rename to plugins/modules/matrix.py index d94ed2b8de..5b643357f5 100644 --- a/plugins/modules/notification/matrix.py +++ b/plugins/modules/matrix.py @@ -1,57 +1,63 @@ #!/usr/bin/python # coding: utf-8 -# (c) 2018, Jan Christian Grünhage -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# Copyright (c) 2018, Jan Christian Grünhage +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" author: "Jan Christian Grünhage (@jcgruenhage)" module: matrix short_description: Send notifications to matrix description: - - This module sends html formatted notifications to matrix rooms. + - This module sends HTML formatted notifications to matrix rooms. 
+extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: none options: - msg_plain: - type: str - description: - - Plain text form of the message to send to matrix, usually markdown - required: true - msg_html: - type: str - description: - - HTML form of the message to send to matrix - required: true - room_id: - type: str - description: - - ID of the room to send the notification to - required: true - hs_url: - type: str - description: - - URL of the homeserver, where the CS-API is reachable - required: true - token: - type: str - description: - - Authentication token for the API call. If provided, user_id and password are not required - user_id: - type: str - description: - - The user id of the user - password: - type: str - description: - - The password to log in with + msg_plain: + type: str + description: + - Plain text form of the message to send to matrix, usually markdown. + required: true + msg_html: + type: str + description: + - HTML form of the message to send to matrix. + required: true + room_id: + type: str + description: + - ID of the room to send the notification to. + required: true + hs_url: + type: str + description: + - URL of the homeserver, where the CS-API is reachable. + required: true + token: + type: str + description: + - Authentication token for the API call. If provided, O(user_id) and O(password) are not required. + user_id: + type: str + description: + - The user ID of the user. + password: + type: str + description: + - The password to log in with. 
requirements: - - matrix-client (Python library) -''' + - matrix-client (Python library) +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: Send matrix notification with token community.general.matrix: msg_plain: "**hello world**" @@ -68,10 +74,10 @@ EXAMPLES = ''' hs_url: "https://matrix.org" user_id: "ansible_notification_bot" password: "{{ matrix_auth_password }}" -''' +""" -RETURN = ''' -''' +RETURN = r""" +""" import traceback from ansible.module_utils.basic import AnsibleModule, missing_required_lib @@ -92,9 +98,9 @@ def run_module(): msg_html=dict(type='str', required=True), room_id=dict(type='str', required=True), hs_url=dict(type='str', required=True), - token=dict(type='str', required=False, no_log=True), - user_id=dict(type='str', required=False), - password=dict(type='str', required=False, no_log=True), + token=dict(type='str', no_log=True), + user_id=dict(type='str'), + password=dict(type='str', no_log=True), ) result = dict( diff --git a/plugins/modules/notification/mattermost.py b/plugins/modules/mattermost.py similarity index 64% rename from plugins/modules/notification/mattermost.py rename to plugins/modules/mattermost.py index efee4c33eb..7739d62851 100644 --- a/plugins/modules/notification/mattermost.py +++ b/plugins/modules/mattermost.py @@ -1,75 +1,85 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- -# Benjamin Jolivot +# Copyright (c) Benjamin Jolivot # Inspired by slack module : -# # (c) 2017, Steve Pletcher -# # (c) 2016, René Moser -# # (c) 2015, Stefan Berggren -# # (c) 2014, Ramon de la Fuente ) +# # Copyright (c) 2017, Steve Pletcher +# # Copyright (c) 2016, René Moser +# # Copyright (c) 2015, Stefan Berggren +# # Copyright (c) 2014, Ramon de la Fuente ) # -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import 
absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = ''' +DOCUMENTATION = r""" module: mattermost short_description: Send Mattermost notifications description: - - Sends notifications to U(http://your.mattermost.url) via the Incoming WebHook integration. + - Sends notifications to U(http://your.mattermost.url) using the Incoming WebHook integration. author: "Benjamin Jolivot (@bjolivot)" +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: none options: url: type: str description: - - Mattermost url (i.e. http://mattermost.yourcompany.com). + - Mattermost URL (for example V(http://mattermost.yourcompany.com)). required: true api_key: type: str description: - - Mattermost webhook api key. Log into your mattermost site, go to - Menu -> Integration -> Incoming Webhook -> Add Incoming Webhook. - This will give you full URL. api_key is the last part. - http://mattermost.example.com/hooks/C(API_KEY) + - Mattermost webhook API key. Log into your Mattermost site, go to Menu -> Integration -> Incoming Webhook -> Add Incoming + Webhook. This gives you a full URL. O(api_key) is the last part. U(http://mattermost.example.com/hooks/API_KEY). required: true text: type: str description: - Text to send. Note that the module does not handle escaping characters. - - Required when I(attachments) is not set. + - Required when O(attachments) is not set. attachments: type: list elements: dict description: - Define a list of attachments. - For more information, see U(https://developers.mattermost.com/integrate/admin-guide/admin-message-attachments/). - - Required when I(text) is not set. + - Required when O(text) is not set. version_added: 4.3.0 channel: type: str description: - - Channel to send the message to. If absent, the message goes to the channel selected for the I(api_key). + - Channel to send the message to. 
If absent, the message goes to the channel selected for the O(api_key). username: type: str description: - - This is the sender of the message (Username Override need to be enabled by mattermost admin, see mattermost doc. + - This is the sender of the message (Username Override need to be enabled by mattermost admin, see mattermost doc). default: Ansible icon_url: type: str description: - - Url for the message sender's icon. - default: https://www.ansible.com/favicon.ico + - URL for the message sender's icon. + default: https://docs.ansible.com/favicon.ico + priority: + type: str + description: + - Set a priority for the message. + choices: [important, urgent] + version_added: 10.0.0 validate_certs: description: - - If C(no), SSL certificates will not be validated. This should only be used - on personally controlled sites using self-signed certificates. - default: yes + - If V(false), SSL certificates are not validated. This should only be used on personally controlled sites using self-signed + certificates. + default: true type: bool -''' +""" -EXAMPLES = """ +EXAMPLES = r""" - name: Send notification message via Mattermost community.general.mattermost: url: http://mattermost.example.com @@ -84,6 +94,7 @@ EXAMPLES = """ channel: notifications username: 'Ansible on {{ inventory_hostname }}' icon_url: http://www.example.com/some-image-file.png + priority: important - name: Send attachments message via Mattermost community.general.mattermost: @@ -96,22 +107,22 @@ EXAMPLES = """ fields: - title: System A value: "load average: 0,74, 0,66, 0,63" - short: True + short: true - title: System B value: 'load average: 5,16, 4,64, 2,43' - short: True + short: true """ -RETURN = ''' +RETURN = r""" payload: - description: Mattermost payload - returned: success - type: str + description: Mattermost payload. + returned: success + type: str webhook_url: - description: URL the webhook is sent to - returned: success - type: str -''' + description: URL the webhook is sent to. 
+ returned: success + type: str +""" from ansible.module_utils.basic import AnsibleModule from ansible.module_utils.urls import fetch_url @@ -124,9 +135,10 @@ def main(): url=dict(type='str', required=True), api_key=dict(type='str', required=True, no_log=True), text=dict(type='str'), - channel=dict(type='str', default=None), + channel=dict(type='str'), username=dict(type='str', default='Ansible'), - icon_url=dict(type='str', default='https://www.ansible.com/favicon.ico'), + icon_url=dict(type='str', default='https://docs.ansible.com/favicon.ico'), + priority=dict(type='str', choices=['important', 'urgent']), validate_certs=dict(default=True, type='bool'), attachments=dict(type='list', elements='dict'), ), @@ -146,6 +158,8 @@ def main(): for param in ['text', 'channel', 'username', 'icon_url', 'attachments']: if module.params[param] is not None: payload[param] = module.params[param] + if module.params['priority'] is not None: + payload['priority'] = {'priority': module.params['priority']} payload = module.jsonify(payload) result['payload'] = payload diff --git a/plugins/modules/packaging/language/maven_artifact.py b/plugins/modules/maven_artifact.py similarity index 75% rename from plugins/modules/packaging/language/maven_artifact.py rename to plugins/modules/maven_artifact.py index eee3e2f67d..9b0d787a3e 100644 --- a/plugins/modules/packaging/language/maven_artifact.py +++ b/plugins/modules/maven_artifact.py @@ -1,164 +1,176 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- # Copyright (c) 2014, Chris Schmidt # # Built using https://github.com/hamnis/useful-scripts/blob/master/python/download-maven-artifact # as a reference and starting point. 
# -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import absolute_import, division, print_function -__metaclass__ = type +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later +from __future__ import annotations -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: maven_artifact short_description: Downloads an Artifact from a Maven Repository description: - - Downloads an artifact from a maven repository given the maven coordinates provided to the module. - - Can retrieve snapshots or release versions of the artifact and will resolve the latest available - version if one is not available. + - Downloads an artifact from a maven repository given the maven coordinates provided to the module. + - Can retrieve snapshots or release versions of the artifact and resolve the latest available version if one is not available. author: "Chris Schmidt (@chrisisbeef)" requirements: - - lxml - - boto if using a S3 repository (s3://...) + - lxml + - boto if using a S3 repository (V(s3://...)) +attributes: + check_mode: + support: none + diff_mode: + support: none options: - group_id: - type: str - description: - - The Maven groupId coordinate - required: true - artifact_id: - type: str - description: - - The maven artifactId coordinate - required: true - version: - type: str - description: - - The maven version coordinate - - Mutually exclusive with I(version_by_spec). - version_by_spec: - type: str - description: - - The maven dependency version ranges. - - See supported version ranges on U(https://cwiki.apache.org/confluence/display/MAVENOLD/Dependency+Mediation+and+Conflict+Resolution) - - The range type "(,1.0],[1.2,)" and "(,1.1),(1.1,)" is not supported. - - Mutually exclusive with I(version). 
- version_added: '0.2.0' - classifier: - type: str - description: - - The maven classifier coordinate - extension: - type: str - description: - - The maven type/extension coordinate - default: jar - repository_url: - type: str - description: - - The URL of the Maven Repository to download from. - - Use s3://... if the repository is hosted on Amazon S3, added in version 2.2. - - Use file://... if the repository is local, added in version 2.6 - default: https://repo1.maven.org/maven2 - username: - type: str - description: - - The username to authenticate as to the Maven Repository. Use AWS secret key of the repository is hosted on S3 - aliases: [ "aws_secret_key" ] - password: - type: str - description: - - The password to authenticate with to the Maven Repository. Use AWS secret access key of the repository is hosted on S3 - aliases: [ "aws_secret_access_key" ] - headers: - description: - - Add custom HTTP headers to a request in hash/dict format. - type: dict - force_basic_auth: - description: - - httplib2, the library used by the uri module only sends authentication information when a webservice - responds to an initial request with a 401 status. Since some basic auth services do not properly - send a 401, logins will fail. This option forces the sending of the Basic authentication header - upon initial request. - default: 'no' - type: bool - version_added: '0.2.0' - dest: - type: path - description: - - The path where the artifact should be written to - - If file mode or ownerships are specified and destination path already exists, they affect the downloaded file - required: true - state: - type: str - description: - - The desired state of the artifact - default: present - choices: [present,absent] - timeout: - type: int - description: - - Specifies a timeout in seconds for the connection attempt - default: 10 - validate_certs: - description: - - If C(no), SSL certificates will not be validated. This should only be set to C(no) when no other option exists. 
- type: bool - default: 'yes' - client_cert: - description: - - PEM formatted certificate chain file to be used for SSL client authentication. - - This file can also include the key as well, and if the key is included, I(client_key) is not required. - type: path - version_added: '1.3.0' - client_key: - description: - - PEM formatted file that contains your private key to be used for SSL client authentication. - - If I(client_cert) contains both the certificate and key, this option is not required. - type: path - version_added: '1.3.0' - keep_name: - description: - - If C(yes), the downloaded artifact's name is preserved, i.e the version number remains part of it. - - This option only has effect when C(dest) is a directory and C(version) is set to C(latest) or C(version_by_spec) - is defined. - type: bool - default: 'no' - verify_checksum: - type: str - description: - - If C(never), the MD5/SHA1 checksum will never be downloaded and verified. - - If C(download), the MD5/SHA1 checksum will be downloaded and verified only after artifact download. This is the default. - - If C(change), the MD5/SHA1 checksum will be downloaded and verified if the destination already exist, - to verify if they are identical. This was the behaviour before 2.6. Since it downloads the checksum before (maybe) - downloading the artifact, and since some repository software, when acting as a proxy/cache, return a 404 error - if the artifact has not been cached yet, it may fail unexpectedly. - If you still need it, you should consider using C(always) instead - if you deal with a checksum, it is better to - use it to verify integrity after download. - - C(always) combines C(download) and C(change). - required: false - default: 'download' - choices: ['never', 'download', 'change', 'always'] - checksum_alg: - type: str - description: - - If C(md5), checksums will use the MD5 algorithm. This is the default. - - If C(sha1), checksums will use the SHA1 algorithm. 
This can be used on systems configured to use - FIPS-compliant algorithms, since MD5 will be blocked on such systems. - default: 'md5' - choices: ['md5', 'sha1'] - version_added: 3.2.0 - directory_mode: - type: str - description: - - Filesystem permission mode applied recursively to I(dest) when it is a directory. + group_id: + type: str + description: + - The Maven groupId coordinate. + required: true + artifact_id: + type: str + description: + - The maven artifactId coordinate. + required: true + version: + type: str + description: + - The maven version coordinate. + - Mutually exclusive with O(version_by_spec). + version_by_spec: + type: str + description: + - The maven dependency version ranges. + - See supported version ranges on U(https://cwiki.apache.org/confluence/display/MAVENOLD/Dependency+Mediation+and+Conflict+Resolution). + - The range type V((,1.0],[1.2,\)) and V((,1.1\),(1.1,\)) is not supported. + - Mutually exclusive with O(version). + version_added: '0.2.0' + classifier: + type: str + description: + - The maven classifier coordinate. + default: '' + extension: + type: str + description: + - The maven type/extension coordinate. + default: jar + repository_url: + type: str + description: + - The URL of the Maven Repository to download from. + - Use V(s3://...) if the repository is hosted on Amazon S3. + - Use V(file://...) if the repository is local. + default: https://repo1.maven.org/maven2 + username: + type: str + description: + - The username to authenticate as to the Maven Repository. Use AWS secret key of the repository is hosted on S3. + aliases: ["aws_secret_key"] + password: + type: str + description: + - The password to authenticate with to the Maven Repository. Use AWS secret access key of the repository is hosted on + S3. + aliases: ["aws_secret_access_key"] + headers: + description: + - Add custom HTTP headers to a request in hash/dict format. 
+ type: dict + force_basic_auth: + description: + - C(httplib2), the library used by the URI module only sends authentication information when a webservice responds to + an initial request with a 401 status. Since some basic auth services do not properly send a 401, logins fail. This + option forces the sending of the Basic authentication header upon initial request. + default: false + type: bool + version_added: '0.2.0' + dest: + type: path + description: + - The path where the artifact should be written to. + - If file mode or ownerships are specified and destination path already exists, they affect the downloaded file. + required: true + state: + type: str + description: + - The desired state of the artifact. + default: present + choices: [present, absent] + timeout: + type: int + description: + - Specifies a timeout in seconds for the connection attempt. + default: 10 + validate_certs: + description: + - If V(false), SSL certificates are not validated. This should only be set to V(false) when no other option exists. + type: bool + default: true + client_cert: + description: + - PEM formatted certificate chain file to be used for SSL client authentication. + - This file can also include the key as well, and if the key is included, O(client_key) is not required. + type: path + version_added: '1.3.0' + client_key: + description: + - PEM formatted file that contains your private key to be used for SSL client authentication. + - If O(client_cert) contains both the certificate and key, this option is not required. + type: path + version_added: '1.3.0' + keep_name: + description: + - If V(true), the downloaded artifact's name is preserved, in other words the version number remains part of it. + - This option only has effect when O(dest) is a directory and O(version) is set to V(latest) or O(version_by_spec) is + defined. + type: bool + default: false + verify_checksum: + type: str + description: + - If V(never), the MD5/SHA1 checksum is never downloaded and verified. 
+ - If V(download), the MD5/SHA1 checksum is downloaded and verified only after artifact download. This is the default. + - If V(change), the MD5/SHA1 checksum is downloaded and verified if the destination already exist, to verify if they + are identical. This was the behaviour before 2.6. Since it downloads the checksum before (maybe) downloading the artifact, + and since some repository software, when acting as a proxy/cache, return a 404 error if the artifact has not been + cached yet, it may fail unexpectedly. If you still need it, you should consider using V(always) instead - if you deal + with a checksum, it is better to use it to verify integrity after download. + - V(always) combines V(download) and V(change). + required: false + default: 'download' + choices: ['never', 'download', 'change', 'always'] + checksum_alg: + type: str + description: + - If V(md5), checksums use the MD5 algorithm. This is the default. + - If V(sha1), checksums use the SHA1 algorithm. This can be used on systems configured to use FIPS-compliant algorithms, + since MD5 is blocked on such systems. + default: 'md5' + choices: ['md5', 'sha1'] + version_added: 3.2.0 + unredirected_headers: + type: list + elements: str + version_added: 5.2.0 + description: + - A list of headers that should not be included in the redirection. This headers are sent to the C(fetch_url) function. + - On ansible-core version 2.12 or later, the default of this option is V([Authorization, Cookie]). + - Useful if the redirection URL does not need to have sensitive headers in the request. + - Requires ansible-core version 2.12 or later. + directory_mode: + type: str + description: + - Filesystem permission mode applied recursively to O(dest) when it is a directory. 
extends_documentation_fragment: - - files -''' + - ansible.builtin.files + - community.general.attributes +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: Download the latest version of the JUnit framework artifact from Maven Central community.general.maven_artifact: group_id: junit @@ -204,7 +216,7 @@ EXAMPLES = ''' artifact_id: spring-core group_id: org.springframework dest: /tmp/ - keep_name: yes + keep_name: true - name: Download the latest version of the JUnit framework artifact from Maven local community.general.maven_artifact: @@ -219,7 +231,7 @@ EXAMPLES = ''' artifact_id: junit version_by_spec: "[3.8,4.0)" dest: /tmp/ -''' +""" import hashlib import os @@ -232,6 +244,7 @@ import re from ansible.module_utils.ansible_release import __version__ as ansible_version from re import match +from urllib.parse import urlparse LXML_ETREE_IMP_ERR = None try: @@ -259,7 +272,6 @@ except ImportError: from ansible.module_utils.basic import AnsibleModule, missing_required_lib -from ansible.module_utils.six.moves.urllib.parse import urlparse from ansible.module_utils.urls import fetch_url from ansible.module_utils.common.text.converters import to_bytes, to_native, to_text @@ -509,7 +521,18 @@ class MavenDownloader: self.module.params['url_password'] = self.module.params.get('password', '') self.module.params['http_agent'] = self.user_agent - response, info = fetch_url(self.module, url_to_use, timeout=req_timeout, headers=self.headers) + kwargs = {} + if self.module.params['unredirected_headers']: + kwargs['unredirected_headers'] = self.module.params['unredirected_headers'] + + response, info = fetch_url( + self.module, + url_to_use, + timeout=req_timeout, + headers=self.headers, + **kwargs + ) + if info['status'] == 200: return response if force: @@ -596,30 +619,35 @@ def main(): argument_spec=dict( group_id=dict(required=True), artifact_id=dict(required=True), - version=dict(default=None), - version_by_spec=dict(default=None), + version=dict(), + version_by_spec=dict(), 
classifier=dict(default=''), extension=dict(default='jar'), repository_url=dict(default='https://repo1.maven.org/maven2'), - username=dict(default=None, aliases=['aws_secret_key']), - password=dict(default=None, no_log=True, aliases=['aws_secret_access_key']), + username=dict(aliases=['aws_secret_key']), + password=dict(no_log=True, aliases=['aws_secret_access_key']), headers=dict(type='dict'), force_basic_auth=dict(default=False, type='bool'), - state=dict(default="present", choices=["present", "absent"]), # TODO - Implement a "latest" state + state=dict(default="present", choices=["present", "absent"]), timeout=dict(default=10, type='int'), dest=dict(type="path", required=True), - validate_certs=dict(required=False, default=True, type='bool'), - client_cert=dict(type="path", required=False), - client_key=dict(type="path", required=False), - keep_name=dict(required=False, default=False, type='bool'), - verify_checksum=dict(required=False, default='download', choices=['never', 'download', 'change', 'always']), - checksum_alg=dict(required=False, default='md5', choices=['md5', 'sha1']), + validate_certs=dict(default=True, type='bool'), + client_cert=dict(type="path"), + client_key=dict(type="path"), + keep_name=dict(default=False, type='bool'), + verify_checksum=dict(default='download', choices=['never', 'download', 'change', 'always']), + checksum_alg=dict(default='md5', choices=['md5', 'sha1']), + unredirected_headers=dict(type='list', elements='str'), directory_mode=dict(type='str'), ), add_file_common_args=True, mutually_exclusive=([('version', 'version_by_spec')]) ) + if module.params['unredirected_headers'] is None: + # if the user did not supply unredirected params, we use the default + module.params['unredirected_headers'] = ['Authorization', 'Cookie'] + if not HAS_LXML_ETREE: module.fail_json(msg=missing_required_lib('lxml'), exception=LXML_ETREE_IMP_ERR) diff --git a/plugins/modules/cloud/memset/memset_dns_reload.py b/plugins/modules/memset_dns_reload.py 
similarity index 71% rename from plugins/modules/cloud/memset/memset_dns_reload.py rename to plugins/modules/memset_dns_reload.py index 6eefe133fd..e7c9c70ea4 100644 --- a/plugins/modules/cloud/memset/memset_dns_reload.py +++ b/plugins/modules/memset_dns_reload.py @@ -1,52 +1,53 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- # # Copyright (c) 2018, Simon Weald -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: memset_dns_reload author: "Simon Weald (@glitchcrab)" short_description: Request reload of Memset's DNS infrastructure, notes: - - DNS reload requests are a best-effort service provided by Memset; these generally - happen every 15 minutes by default, however you can request an immediate reload if - later tasks rely on the records being created. An API key generated via the - Memset customer control panel is required with the following minimum scope - - I(dns.reload). If you wish to poll the job status to wait until the reload has - completed, then I(job.status) is also required. + - DNS reload requests are a best-effort service provided by Memset; these generally happen every 15 minutes by default, + however you can request an immediate reload if later tasks rely on the records being created. An API key generated using + the Memset customer control panel is required with the following minimum scope - C(dns.reload). If you wish to poll the + job status to wait until the reload has completed, then C(job.status) is also required. description: - - Request a reload of Memset's DNS infrastructure, and optionally poll until it finishes. 
+ - Request a reload of Memset's DNS infrastructure, and optionally poll until it finishes. +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: none + diff_mode: + support: none options: - api_key: - required: true - type: str - description: - - The API key obtained from the Memset control panel. - poll: - default: false - type: bool - description: - - Boolean value, if set will poll the reload job's status and return - when the job has completed (unless the 30 second timeout is reached first). - If the timeout is reached then the task will not be marked as failed, but - stderr will indicate that the polling failed. -''' + api_key: + required: true + type: str + description: + - The API key obtained from the Memset control panel. + poll: + default: false + type: bool + description: + - If V(true), it polls the reload job's status and return when the job has completed (unless the 30 second timeout is + reached first). If the timeout is reached then the task does not return as failed, but stderr indicates that the polling + failed. +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: Submit DNS reload and poll community.general.memset_dns_reload: api_key: 5eb86c9196ab03919abcf03857163741 - poll: True + poll: true delegate_to: localhost -''' +""" -RETURN = ''' ---- +RETURN = r""" memset_api: description: Raw response from the Memset API. returned: always @@ -77,7 +78,7 @@ memset_api: returned: always type: str sample: "dns" -''' +""" from time import sleep @@ -111,7 +112,7 @@ def poll_reload_status(api_key=None, job_id=None, payload=None): memset_api = response.json() msg = None - return(memset_api, msg, stderr) + return memset_api, msg, stderr def reload_dns(args=None): @@ -131,9 +132,12 @@ def reload_dns(args=None): # manifest themselves at this point so we need to ensure the user is # informed of the reason. 
retvals['failed'] = has_failed - retvals['memset_api'] = response.json() + if response.status_code is not None: + retvals['memset_api'] = response.json() + else: + retvals['stderr'] = response.stderr retvals['msg'] = msg - return(retvals) + return retvals # set changed to true if the reload request was accepted. has_changed = True @@ -153,7 +157,7 @@ def reload_dns(args=None): if val is not None: retvals[val] = eval(val) - return(retvals) + return retvals def main(): @@ -161,15 +165,13 @@ def main(): module = AnsibleModule( argument_spec=dict( api_key=dict(required=True, type='str', no_log=True), - poll=dict(required=False, default=False, type='bool') + poll=dict(default=False, type='bool') ), supports_check_mode=False ) # populate the dict with the user-provided vars. - args = dict() - for key, arg in module.params.items(): - args[key] = arg + args = dict(module.params) retvals = reload_dns(args) diff --git a/plugins/modules/cloud/memset/memset_memstore_info.py b/plugins/modules/memset_memstore_info.py similarity index 64% rename from plugins/modules/cloud/memset/memset_memstore_info.py rename to plugins/modules/memset_memstore_info.py index e880b46009..bda8cf0435 100644 --- a/plugins/modules/cloud/memset/memset_memstore_info.py +++ b/plugins/modules/memset_memstore_info.py @@ -1,107 +1,109 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- # # Copyright (c) 2018, Simon Weald -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: memset_memstore_info author: "Simon Weald (@glitchcrab)" -short_description: Retrieve Memstore product usage information. 
+short_description: Retrieve Memstore product usage information notes: - - An API key generated via the Memset customer control panel is needed with the - following minimum scope - I(memstore.usage). + - An API key generated using the Memset customer control panel is needed with the following minimum scope - C(memstore.usage). description: - - Retrieve Memstore product usage information. - - This module was called C(memset_memstore_facts) before Ansible 2.9. The usage did not change. + - Retrieve Memstore product usage information. +extends_documentation_fragment: + - community.general.attributes + - community.general.attributes.info_module +attributes: + check_mode: + version_added: 3.3.0 + # This was backported to 2.5.4 and 1.3.11 as well, since this was a bugfix options: - api_key: - required: true - type: str - description: - - The API key obtained from the Memset control panel. - name: - required: true - type: str - description: - - The Memstore product name (i.e. C(mstestyaa1)). -''' + api_key: + required: true + type: str + description: + - The API key obtained from the Memset control panel. + name: + required: true + type: str + description: + - The Memstore product name (that is, V(mstestyaa1)). +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: Get usage for mstestyaa1 community.general.memset_memstore_info: name: mstestyaa1 api_key: 5eb86c9896ab03919abcf03857163741 delegate_to: localhost -''' +""" -RETURN = ''' ---- +RETURN = r""" memset_api: - description: Info from the Memset API + description: Info from the Memset API. returned: always type: complex contains: cdn_bandwidth: - description: Dictionary of CDN bandwidth facts + description: Dictionary of CDN bandwidth facts. returned: always type: complex contains: bytes_out: - description: Outbound CDN bandwidth for the last 24 hours in bytes + description: Outbound CDN bandwidth for the last 24 hours in bytes. 
returned: always type: int sample: 1000 requests: - description: Number of requests in the last 24 hours + description: Number of requests in the last 24 hours. returned: always type: int sample: 10 bytes_in: - description: Inbound CDN bandwidth for the last 24 hours in bytes + description: Inbound CDN bandwidth for the last 24 hours in bytes. returned: always type: int sample: 1000 containers: - description: Number of containers + description: Number of containers. returned: always type: int sample: 10 bytes: - description: Space used in bytes + description: Space used in bytes. returned: always type: int sample: 3860997965 objs: - description: Number of objects + description: Number of objects. returned: always type: int sample: 1000 bandwidth: - description: Dictionary of CDN bandwidth facts + description: Dictionary of CDN bandwidth facts. returned: always type: complex contains: bytes_out: - description: Outbound bandwidth for the last 24 hours in bytes + description: Outbound bandwidth for the last 24 hours in bytes. returned: always type: int sample: 1000 requests: - description: Number of requests in the last 24 hours + description: Number of requests in the last 24 hours. returned: always type: int sample: 10 bytes_in: - description: Inbound bandwidth for the last 24 hours in bytes + description: Inbound bandwidth for the last 24 hours in bytes. returned: always type: int sample: 1000 -''' +""" from ansible.module_utils.basic import AnsibleModule from ansible_collections.community.general.plugins.module_utils.memset import memset_api_call @@ -126,8 +128,11 @@ def get_facts(args=None): # informed of the reason. retvals['failed'] = has_failed retvals['msg'] = msg - retvals['stderr'] = "API returned an error: {0}" . format(response.status_code) - return(retvals) + if response.status_code is not None: + retvals['stderr'] = "API returned an error: {0}" . format(response.status_code) + else: + retvals['stderr'] = "{0}" . 
format(response.stderr) + return retvals # we don't want to return the same thing twice msg = None @@ -139,7 +144,7 @@ def get_facts(args=None): if val is not None: retvals[val] = eval(val) - return(retvals) + return retvals def main(): @@ -153,9 +158,7 @@ def main(): ) # populate the dict with the user-provided vars. - args = dict() - for key, arg in module.params.items(): - args[key] = arg + args = dict(module.params) retvals = get_facts(args) diff --git a/plugins/modules/cloud/memset/memset_server_info.py b/plugins/modules/memset_server_info.py similarity index 65% rename from plugins/modules/cloud/memset/memset_server_info.py rename to plugins/modules/memset_server_info.py index 853e2c884d..3869edb98a 100644 --- a/plugins/modules/cloud/memset/memset_server_info.py +++ b/plugins/modules/memset_server_info.py @@ -1,48 +1,50 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- # # Copyright (c) 2018, Simon Weald -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: memset_server_info author: "Simon Weald (@glitchcrab)" -short_description: Retrieve server information. +short_description: Retrieve server information notes: - - An API key generated via the Memset customer control panel is needed with the - following minimum scope - I(server.info). + - An API key generated using the Memset customer control panel is needed with the following minimum scope - C(server.info). description: - - Retrieve server information. - - This module was called C(memset_server_facts) before Ansible 2.9. The usage did not change. + - Retrieve server information. 
+extends_documentation_fragment: + - community.general.attributes + - community.general.attributes.info_module +attributes: + check_mode: + version_added: 3.3.0 + # This was backported to 2.5.4 and 1.3.11 as well, since this was a bugfix options: - api_key: - required: true - type: str - description: - - The API key obtained from the Memset control panel. - name: - required: true - type: str - description: - - The server product name (i.e. C(testyaa1)). -''' + api_key: + required: true + type: str + description: + - The API key obtained from the Memset control panel. + name: + required: true + type: str + description: + - The server product name (that is, C(testyaa1)). +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: Get details for testyaa1 community.general.memset_server_info: name: testyaa1 api_key: 5eb86c9896ab03919abcf03857163741 delegate_to: localhost -''' +""" -RETURN = ''' ---- +RETURN = r""" memset_api: - description: Info from the Memset API + description: Info from the Memset API. returned: always type: complex contains: @@ -52,7 +54,7 @@ memset_api: type: bool sample: true control_panel: - description: Whether the server has a control panel (i.e. cPanel). + description: Whether the server has a control panel (for example cPanel). returned: always type: str sample: 'cpanel' @@ -70,33 +72,34 @@ memset_api: description: Details about the firewall group this server is in. 
returned: always type: dict - sample: { - "default_outbound_policy": "RETURN", - "name": "testyaa-fw1", - "nickname": "testyaa cPanel rules", - "notes": "", - "public": false, - "rules": { - "51d7db54d39c3544ef7c48baa0b9944f": { - "action": "ACCEPT", - "comment": "", - "dest_ip6s": "any", - "dest_ips": "any", - "dest_ports": "any", - "direction": "Inbound", - "ip_version": "any", - "ordering": 2, - "protocols": "icmp", - "rule_group_name": "testyaa-fw1", - "rule_id": "51d7db54d39c3544ef7c48baa0b9944f", - "source_ip6s": "any", - "source_ips": "any", - "source_ports": "any" + sample: + { + "default_outbound_policy": "RETURN", + "name": "testyaa-fw1", + "nickname": "testyaa cPanel rules", + "notes": "", + "public": false, + "rules": { + "51d7db54d39c3544ef7c48baa0b9944f": { + "action": "ACCEPT", + "comment": "", + "dest_ip6s": "any", + "dest_ips": "any", + "dest_ports": "any", + "direction": "Inbound", + "ip_version": "any", + "ordering": 2, + "protocols": "icmp", + "rule_group_name": "testyaa-fw1", + "rule_id": "51d7db54d39c3544ef7c48baa0b9944f", + "source_ip6s": "any", + "source_ips": "any", + "source_ports": "any" + } } } - } firewall_type: - description: The type of firewall the server has (i.e. self-managed, managed). + description: The type of firewall the server has (for example self-managed, managed). returned: always type: str sample: 'managed' @@ -106,7 +109,7 @@ memset_api: type: str sample: 'testyaa1.miniserver.com' ignore_monitoring_off: - description: When true, Memset won't remind the customer that monitoring is disabled. + description: When true, Memset does not remind the customer that monitoring is disabled. returned: always type: bool sample: true @@ -114,22 +117,23 @@ memset_api: description: List of dictionaries of all IP addresses assigned to the server. 
returned: always type: list - sample: [ - { - "address": "1.2.3.4", - "bytes_in_today": 1000.0, - "bytes_in_yesterday": 2000.0, - "bytes_out_today": 1000.0, - "bytes_out_yesterday": 2000.0 - } - ] + sample: + [ + { + "address": "1.2.3.4", + "bytes_in_today": 1000.0, + "bytes_in_yesterday": 2000.0, + "bytes_out_today": 1000.0, + "bytes_out_yesterday": 2000.0 + } + ] monitor: description: Whether the server has monitoring enabled. returned: always type: bool sample: true monitoring_level: - description: The server's monitoring level (i.e. basic). + description: The server's monitoring level (for example V(basic)). returned: always type: str sample: 'basic' @@ -142,7 +146,7 @@ memset_api: description: The network zone(s) the server is in. returned: always type: list - sample: [ 'reading' ] + sample: ["reading"] nickname: description: Customer-set nickname for the server. returned: always @@ -189,7 +193,7 @@ memset_api: type: str sample: 'GBP' renewal_price_vat: - description: VAT rate for renewal payments + description: VAT rate for renewal payments. returned: always type: str sample: '20' @@ -199,7 +203,7 @@ memset_api: type: str sample: '2013-04-10' status: - description: Current status of the server (i.e. live, onhold). + description: Current status of the server (for example live, onhold). returned: always type: str sample: 'LIVE' @@ -209,7 +213,7 @@ memset_api: type: str sample: 'managed' type: - description: What this server is (i.e. dedicated) + description: What this server is (for example V(dedicated)). returned: always type: str sample: 'miniserver' @@ -217,16 +221,20 @@ memset_api: description: Dictionary of tagged and untagged VLANs this server is in. returned: always type: dict - sample: { - tagged: [], - untagged: [ 'testyaa-vlan1', 'testyaa-vlan2' ] - } + sample: + { + "tagged": [], + "untagged": [ + "testyaa-vlan1", + "testyaa-vlan2" + ] + } vulnscan: description: Vulnerability scanning level. 
returned: always type: str sample: 'basic' -''' +""" from ansible.module_utils.basic import AnsibleModule from ansible_collections.community.general.plugins.module_utils.memset import memset_api_call @@ -251,8 +259,11 @@ def get_facts(args=None): # informed of the reason. retvals['failed'] = has_failed retvals['msg'] = msg - retvals['stderr'] = "API returned an error: {0}" . format(response.status_code) - return(retvals) + if response.status_code is not None: + retvals['stderr'] = "API returned an error: {0}" . format(response.status_code) + else: + retvals['stderr'] = "{0}" . format(response.stderr) + return retvals # we don't want to return the same thing twice msg = None @@ -264,7 +275,7 @@ def get_facts(args=None): if val is not None: retvals[val] = eval(val) - return(retvals) + return retvals def main(): @@ -278,9 +289,7 @@ def main(): ) # populate the dict with the user-provided vars. - args = dict() - for key, arg in module.params.items(): - args[key] = arg + args = dict(module.params) retvals = get_facts(args) diff --git a/plugins/modules/cloud/memset/memset_zone.py b/plugins/modules/memset_zone.py similarity index 78% rename from plugins/modules/cloud/memset/memset_zone.py rename to plugins/modules/memset_zone.py index 9ef798bd74..3255e07a61 100644 --- a/plugins/modules/cloud/memset/memset_zone.py +++ b/plugins/modules/memset_zone.py @@ -1,58 +1,61 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- # # Copyright (c) 2018, Simon Weald -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: memset_zone author: "Simon Weald (@glitchcrab)" -short_description: Creates and deletes Memset DNS 
zones. +short_description: Creates and deletes Memset DNS zones notes: - - Zones can be thought of as a logical group of domains, all of which share the - same DNS records (i.e. they point to the same IP). An API key generated via the - Memset customer control panel is needed with the following minimum scope - - I(dns.zone_create), I(dns.zone_delete), I(dns.zone_list). + - Zones can be thought of as a logical group of domains, all of which share the same DNS records (in other words they point + to the same IP). An API key generated using the Memset customer control panel is needed with the following minimum scope + - C(dns.zone_create), C(dns.zone_delete), C(dns.zone_list). description: - - Manage DNS zones in a Memset account. + - Manage DNS zones in a Memset account. +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: none options: - state: - required: true - description: - - Indicates desired state of resource. - type: str - choices: [ absent, present ] - api_key: - required: true - description: - - The API key obtained from the Memset control panel. - type: str - name: - required: true - description: - - The zone nickname; usually the same as the main domain. Ensure this - value has at most 250 characters. - type: str - aliases: [ nickname ] - ttl: - description: - - The default TTL for all records created in the zone. This must be a - valid int from U(https://www.memset.com/apidocs/methods_dns.html#dns.zone_create). - type: int - choices: [ 0, 300, 600, 900, 1800, 3600, 7200, 10800, 21600, 43200, 86400 ] - force: - required: false - default: false - type: bool - description: - - Forces deletion of a zone and all zone domains/zone records it contains. -''' + state: + required: true + description: + - Indicates desired state of resource. + type: str + choices: [absent, present] + api_key: + required: true + description: + - The API key obtained from the Memset control panel. 
+ type: str + name: + required: true + description: + - The zone nickname; usually the same as the main domain. Ensure this value has at most 250 characters. + type: str + aliases: [nickname] + ttl: + description: + - The default TTL for all records created in the zone. This must be a valid int from U(https://www.memset.com/apidocs/methods_dns.html#dns.zone_create). + type: int + default: 0 + choices: [0, 300, 600, 900, 1800, 3600, 7200, 10800, 21600, 43200, 86400] + force: + required: false + default: false + type: bool + description: + - Forces deletion of a zone and all zone domains/zone records it contains. +""" -EXAMPLES = ''' +EXAMPLES = r""" # Create the zone 'test' - name: Create zone community.general.memset_zone: @@ -70,40 +73,40 @@ EXAMPLES = ''' api_key: 5eb86c9196ab03919abcf03857163741 force: true delegate_to: localhost -''' +""" -RETURN = ''' +RETURN = r""" memset_api: - description: Zone info from the Memset API + description: Zone info from the Memset API. returned: when state == present type: complex contains: domains: - description: List of domains in this zone + description: List of domains in this zone. returned: always type: list sample: [] id: - description: Zone id + description: Zone ID. returned: always type: str sample: "b0bb1ce851aeea6feeb2dc32fe83bf9c" nickname: - description: Zone name + description: Zone name. returned: always type: str sample: "example.com" records: - description: List of DNS records for domains in this zone + description: List of DNS records for domains in this zone. returned: always type: list sample: [] ttl: - description: Default TTL for domains in this zone + description: Default TTL for domains in this zone. 
returned: always type: int sample: 300 -''' +""" from ansible.module_utils.basic import AnsibleModule from ansible_collections.community.general.plugins.module_utils.memset import check_zone @@ -139,7 +142,7 @@ def check(args=None): retvals['changed'] = has_changed retvals['failed'] = has_failed - return(retvals) + return retvals def create_zone(args=None, zone_exists=None, payload=None): @@ -185,7 +188,7 @@ def create_zone(args=None, zone_exists=None, payload=None): _has_failed, _msg, response = memset_api_call(api_key=args['api_key'], api_method=api_method, payload=payload) memset_api = response.json() - return(has_failed, has_changed, memset_api, msg) + return has_failed, has_changed, memset_api, msg def delete_zone(args=None, zone_exists=None, payload=None): @@ -233,7 +236,7 @@ def delete_zone(args=None, zone_exists=None, payload=None): else: has_failed, has_changed = False, False - return(has_failed, has_changed, memset_api, msg) + return has_failed, has_changed, memset_api, msg def create_or_delete(args=None): @@ -255,7 +258,10 @@ def create_or_delete(args=None): retvals['failed'] = _has_failed retvals['msg'] = _msg - return(retvals) + if response.stderr is not None: + retvals['stderr'] = response.stderr + + return retvals zone_exists, _msg, counter, _zone_id = get_zone_id(zone_name=args['name'], current_zones=response.json()) @@ -271,7 +277,7 @@ def create_or_delete(args=None): if val is not None: retvals[val] = eval(val) - return(retvals) + return retvals def main(): @@ -281,16 +287,14 @@ def main(): state=dict(required=True, choices=['present', 'absent'], type='str'), api_key=dict(required=True, type='str', no_log=True), name=dict(required=True, aliases=['nickname'], type='str'), - ttl=dict(required=False, default=0, choices=[0, 300, 600, 900, 1800, 3600, 7200, 10800, 21600, 43200, 86400], type='int'), - force=dict(required=False, default=False, type='bool') + ttl=dict(default=0, choices=[0, 300, 600, 900, 1800, 3600, 7200, 10800, 21600, 43200, 86400], 
type='int'), + force=dict(default=False, type='bool') ), supports_check_mode=True ) # populate the dict with the user-provided vars. - args = dict() - for key, arg in module.params.items(): - args[key] = arg + args = dict(module.params) args['check_mode'] = module.check_mode # validate some API-specific limitations. diff --git a/plugins/modules/cloud/memset/memset_zone_domain.py b/plugins/modules/memset_zone_domain.py similarity index 79% rename from plugins/modules/cloud/memset/memset_zone_domain.py rename to plugins/modules/memset_zone_domain.py index 4aa0eada92..d8b8618862 100644 --- a/plugins/modules/cloud/memset/memset_zone_domain.py +++ b/plugins/modules/memset_zone_domain.py @@ -1,52 +1,55 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- # # Copyright (c) 2018, Simon Weald -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: memset_zone_domain author: "Simon Weald (@glitchcrab)" -short_description: Create and delete domains in Memset DNS zones. +short_description: Create and delete domains in Memset DNS zones notes: - - Zone domains can be thought of as a collection of domains, all of which share the - same DNS records (i.e. they point to the same IP). An API key generated via the - Memset customer control panel is needed with the following minimum scope - - I(dns.zone_domain_create), I(dns.zone_domain_delete), I(dns.zone_domain_list). - - Currently this module can only create one domain at a time. Multiple domains should - be created using C(with_items). 
+ - Zone domains can be thought of as a collection of domains, all of which share the same DNS records (in other words, they + point to the same IP). An API key generated using the Memset customer control panel is needed with the following minimum + scope - C(dns.zone_domain_create), C(dns.zone_domain_delete), C(dns.zone_domain_list). + - Currently this module can only create one domain at a time. Multiple domains should be created using C(loop). description: - - Manage DNS zone domains in a Memset account. + - Manage DNS zone domains in a Memset account. +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: none options: - state: - default: present - description: - - Indicates desired state of resource. - type: str - choices: [ absent, present ] - api_key: - required: true - description: - - The API key obtained from the Memset control panel. - type: str - domain: - required: true - description: - - The zone domain name. Ensure this value has at most 250 characters. - type: str - aliases: ['name'] - zone: - required: true - description: - - The zone to add the domain to (this must already exist). - type: str -''' + state: + default: present + description: + - Indicates desired state of resource. + type: str + choices: [absent, present] + api_key: + required: true + description: + - The API key obtained from the Memset control panel. + type: str + domain: + required: true + description: + - The zone domain name. Ensure this value has at most 250 characters. + type: str + aliases: ['name'] + zone: + required: true + description: + - The zone to add the domain to (this must already exist). 
+ type: str +""" -EXAMPLES = ''' +EXAMPLES = r""" # Create the zone domain 'test.com' - name: Create zone domain community.general.memset_zone_domain: @@ -55,25 +58,25 @@ EXAMPLES = ''' state: present api_key: 5eb86c9196ab03919abcf03857163741 delegate_to: localhost -''' +""" -RETURN = ''' +RETURN = r""" memset_api: - description: Domain info from the Memset API + description: Domain info from the Memset API. returned: when changed or state == present type: complex contains: domain: - description: Domain name + description: Domain name. returned: always type: str sample: "example.com" id: - description: Domain ID + description: Domain ID. returned: always type: str sample: "b0bb1ce851aeea6feeb2dc32fe83bf9c" -''' +""" from ansible.module_utils.basic import AnsibleModule from ansible_collections.community.general.plugins.module_utils.memset import get_zone_id @@ -110,7 +113,7 @@ def check(args=None): retvals['changed'] = has_changed retvals['failed'] = has_failed - return(retvals) + return retvals def create_zone_domain(args=None, zone_exists=None, zone_id=None, payload=None): @@ -138,7 +141,7 @@ def create_zone_domain(args=None, zone_exists=None, zone_id=None, payload=None): if not has_failed: has_changed = True - return(has_failed, has_changed, msg) + return has_failed, has_changed, msg def delete_zone_domain(args=None, payload=None): @@ -165,7 +168,7 @@ def delete_zone_domain(args=None, payload=None): # unset msg as we don't want to return unnecessary info to the user. msg = None - return(has_failed, has_changed, memset_api, msg) + return has_failed, has_changed, memset_api, msg def create_or_delete_domain(args=None): @@ -187,8 +190,11 @@ def create_or_delete_domain(args=None): # informed of the reason. retvals['failed'] = has_failed retvals['msg'] = msg - retvals['stderr'] = "API returned an error: {0}" . format(response.status_code) - return(retvals) + if response.status_code is not None: + retvals['stderr'] = "API returned an error: {0}" . 
format(response.status_code) + else: + retvals['stderr'] = response.stderr + return retvals zone_exists, msg, counter, zone_id = get_zone_id(zone_name=args['zone'], current_zones=response.json()) @@ -203,7 +209,7 @@ def create_or_delete_domain(args=None): retvals['failed'] = has_failed retvals['msg'] = stderr - return(retvals) + return retvals if args['state'] == 'present': has_failed, has_changed, msg = create_zone_domain(args=args, zone_exists=zone_exists, zone_id=zone_id, payload=payload) @@ -217,7 +223,7 @@ def create_or_delete_domain(args=None): if val is not None: retvals[val] = eval(val) - return(retvals) + return retvals def main(): @@ -233,9 +239,7 @@ def main(): ) # populate the dict with the user-provided vars. - args = dict() - for key, arg in module.params.items(): - args[key] = arg + args = dict(module.params) args['check_mode'] = module.check_mode # validate some API-specific limitations. @@ -247,7 +251,7 @@ def main(): retvals = create_or_delete_domain(args) # we would need to populate the return values with the API's response - # in several places so it's easier to do it at the end instead. + # in several places so it is easier to do it at the end instead. 
if not retvals['failed']: if args['state'] == 'present' and not module.check_mode: payload = dict() diff --git a/plugins/modules/cloud/memset/memset_zone_record.py b/plugins/modules/memset_zone_record.py similarity index 74% rename from plugins/modules/cloud/memset/memset_zone_record.py rename to plugins/modules/memset_zone_record.py index 981d2ac47c..71d7b841c9 100644 --- a/plugins/modules/cloud/memset/memset_zone_record.py +++ b/plugins/modules/memset_zone_record.py @@ -1,79 +1,84 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- # # Copyright (c) 2018, Simon Weald -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: memset_zone_record author: "Simon Weald (@glitchcrab)" -short_description: Create and delete records in Memset DNS zones. +short_description: Create and delete records in Memset DNS zones notes: - - Zones can be thought of as a logical group of domains, all of which share the - same DNS records (i.e. they point to the same IP). An API key generated via the - Memset customer control panel is needed with the following minimum scope - - I(dns.zone_create), I(dns.zone_delete), I(dns.zone_list). - - Currently this module can only create one DNS record at a time. Multiple records - should be created using C(with_items). + - Zones can be thought of as a logical group of domains, all of which share the same DNS records (in other words they point + to the same IP). An API key generated using the Memset customer control panel is needed with the following minimum scope + - C(dns.zone_create), C(dns.zone_delete), C(dns.zone_list). 
+ - Currently this module can only create one DNS record at a time. Multiple records should be created using C(loop). description: - - Manage DNS records in a Memset account. + - Manage DNS records in a Memset account. +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: none options: - state: - default: present - description: - - Indicates desired state of resource. - type: str - choices: [ absent, present ] - api_key: - required: true - description: - - The API key obtained from the Memset control panel. - type: str - address: - required: true - description: - - The address for this record (can be IP or text string depending on record type). - type: str - aliases: [ ip, data ] - priority: - description: - - C(SRV) and C(TXT) record priority, in the range 0 > 999 (inclusive). - type: int - record: - required: false - description: - - The subdomain to create. - type: str - type: - required: true - description: - - The type of DNS record to create. - choices: [ A, AAAA, CNAME, MX, NS, SRV, TXT ] - type: str - relative: - type: bool - default: false - description: - - If set then the current domain is added onto the address field for C(CNAME), C(MX), C(NS) - and C(SRV)record types. - ttl: - description: - - The record's TTL in seconds (will inherit zone's TTL if not explicitly set). This must be a - valid int from U(https://www.memset.com/apidocs/methods_dns.html#dns.zone_record_create). - choices: [ 0, 300, 600, 900, 1800, 3600, 7200, 10800, 21600, 43200, 86400 ] - type: int - zone: - required: true - description: - - The name of the zone to which to add the record to. - type: str -''' + state: + default: present + description: + - Indicates desired state of resource. + type: str + choices: [absent, present] + api_key: + required: true + description: + - The API key obtained from the Memset control panel. 
+ type: str + address: + required: true + description: + - The address for this record (can be IP or text string depending on record type). + type: str + aliases: [ip, data] + priority: + description: + - C(SRV) and C(TXT) record priority, in the range 0 > 999 (inclusive). + type: int + default: 0 + record: + required: false + description: + - The subdomain to create. + type: str + default: '' + type: + required: true + description: + - The type of DNS record to create. + choices: [A, AAAA, CNAME, MX, NS, SRV, TXT] + type: str + relative: + type: bool + default: false + description: + - If set then the current domain is added onto the address field for C(CNAME), C(MX), C(NS) and C(SRV)record types. + ttl: + description: + - The record's TTL in seconds (will inherit zone's TTL if not explicitly set). This must be a valid int from + U(https://www.memset.com/apidocs/methods_dns.html#dns.zone_record_create). + default: 0 + choices: [0, 300, 600, 900, 1800, 3600, 7200, 10800, 21600, 43200, 86400] + type: int + zone: + required: true + description: + - The name of the zone to which to add the record to. + type: str +""" -EXAMPLES = ''' +EXAMPLES = r""" # Create DNS record for www.domain.com - name: Create DNS record community.general.memset_zone_record: @@ -107,11 +112,11 @@ EXAMPLES = ''' address: "{{ item.address }}" delegate_to: localhost with_items: - - { 'zone': 'domain1.com', 'type': 'A', 'record': 'www', 'address': '1.2.3.4' } - - { 'zone': 'domain2.com', 'type': 'A', 'record': 'mail', 'address': '4.3.2.1' } -''' + - {'zone': 'domain1.com', 'type': 'A', 'record': 'www', 'address': '1.2.3.4'} + - {'zone': 'domain2.com', 'type': 'A', 'record': 'mail', 'address': '4.3.2.1'} +""" -RETURN = ''' +RETURN = r""" memset_api: description: Record info from the Memset API. returned: when state == present @@ -141,7 +146,7 @@ memset_api: description: Adds the current domain onto the address field for C(CNAME), C(MX), C(NS) and C(SRV) types. 
returned: always type: bool - sample: False + sample: false ttl: description: Record TTL. returned: always @@ -157,12 +162,11 @@ memset_api: returned: always type: str sample: "b0bb1ce851aeea6feeb2dc32fe83bf9c" -''' +""" from ansible.module_utils.basic import AnsibleModule from ansible_collections.community.general.plugins.module_utils.memset import get_zone_id from ansible_collections.community.general.plugins.module_utils.memset import memset_api_call -from ansible_collections.community.general.plugins.module_utils.memset import get_zone_id def api_validation(args=None): @@ -171,6 +175,7 @@ def api_validation(args=None): https://www.memset.com/apidocs/methods_dns.html#dns.zone_record_create) ''' failed_validation = False + error = None # priority can only be integer 0 > 999 if not 0 <= args['priority'] <= 999: @@ -221,7 +226,7 @@ def create_zone_record(args=None, zone_id=None, records=None, payload=None): # nothing to do; record is already correct so we populate # the return var with the existing record's details. memset_api = zone_record - return(has_changed, has_failed, memset_api, msg) + return has_changed, has_failed, memset_api, msg else: # merge dicts ensuring we change any updated values payload = zone_record.copy() @@ -231,7 +236,7 @@ def create_zone_record(args=None, zone_id=None, records=None, payload=None): has_changed = True # return the new record to the user in the returned var. memset_api = new_record - return(has_changed, has_failed, memset_api, msg) + return has_changed, has_failed, memset_api, msg has_failed, msg, response = memset_api_call(api_key=args['api_key'], api_method=api_method, payload=payload) if not has_failed: has_changed = True @@ -246,7 +251,7 @@ def create_zone_record(args=None, zone_id=None, records=None, payload=None): has_changed = True # populate the return var with the new record's details. 
memset_api = new_record - return(has_changed, has_failed, memset_api, msg) + return has_changed, has_failed, memset_api, msg has_failed, msg, response = memset_api_call(api_key=args['api_key'], api_method=api_method, payload=payload) if not has_failed: has_changed = True @@ -254,7 +259,7 @@ def create_zone_record(args=None, zone_id=None, records=None, payload=None): # empty msg as we don't want to return a boatload of json to the user. msg = None - return(has_changed, has_failed, memset_api, msg) + return has_changed, has_failed, memset_api, msg def delete_zone_record(args=None, records=None, payload=None): @@ -270,7 +275,7 @@ def delete_zone_record(args=None, records=None, payload=None): for zone_record in records: if args['check_mode']: has_changed = True - return(has_changed, has_failed, memset_api, msg) + return has_changed, has_failed, memset_api, msg payload['id'] = zone_record['id'] api_method = 'dns.zone_record_delete' has_failed, msg, response = memset_api_call(api_key=args['api_key'], api_method=api_method, payload=payload) @@ -280,7 +285,7 @@ def delete_zone_record(args=None, records=None, payload=None): # empty msg as we don't want to return a boatload of json to the user. msg = None - return(has_changed, has_failed, memset_api, msg) + return has_changed, has_failed, memset_api, msg def create_or_delete(args=None): @@ -303,8 +308,11 @@ def create_or_delete(args=None): # informed of the reason. retvals['failed'] = _has_failed retvals['msg'] = msg - retvals['stderr'] = "API returned an error: {0}" . format(response.status_code) - return(retvals) + if response.status_code is not None: + retvals['stderr'] = "API returned an error: {0}" . 
format(response.status_code) + else: + retvals['stderr'] = response.stderr + return retvals zone_exists, _msg, counter, zone_id = get_zone_id(zone_name=args['zone'], current_zones=response.json()) @@ -317,7 +325,7 @@ def create_or_delete(args=None): retvals['failed'] = has_failed retvals['msg'] = stderr retvals['stderr'] = stderr - return(retvals) + return retvals # get a list of all records ( as we can't limit records by zone) api_method = 'dns.zone_record_list' @@ -339,30 +347,28 @@ def create_or_delete(args=None): if val is not None: retvals[val] = eval(val) - return(retvals) + return retvals def main(): global module module = AnsibleModule( argument_spec=dict( - state=dict(required=False, default='present', choices=['present', 'absent'], type='str'), + state=dict(default='present', choices=['present', 'absent'], type='str'), api_key=dict(required=True, type='str', no_log=True), zone=dict(required=True, type='str'), type=dict(required=True, choices=['A', 'AAAA', 'CNAME', 'MX', 'NS', 'SRV', 'TXT'], type='str'), address=dict(required=True, aliases=['ip', 'data'], type='str'), - record=dict(required=False, default='', type='str'), - ttl=dict(required=False, default=0, choices=[0, 300, 600, 900, 1800, 3600, 7200, 10800, 21600, 43200, 86400], type='int'), - priority=dict(required=False, default=0, type='int'), - relative=dict(required=False, default=False, type='bool') + record=dict(default='', type='str'), + ttl=dict(default=0, choices=[0, 300, 600, 900, 1800, 3600, 7200, 10800, 21600, 43200, 86400], type='int'), + priority=dict(default=0, type='int'), + relative=dict(default=False, type='bool') ), supports_check_mode=True ) # populate the dict with the user-provided vars. 
- args = dict() - for key, arg in module.params.items(): - args[key] = arg + args = dict(module.params) args['check_mode'] = module.check_mode # perform some Memset API-specific validation diff --git a/plugins/modules/mksysb.py b/plugins/modules/mksysb.py new file mode 100644 index 0000000000..c9a7eb7b60 --- /dev/null +++ b/plugins/modules/mksysb.py @@ -0,0 +1,168 @@ +#!/usr/bin/python + +# Copyright (c) 2021, Alexei Znamensky (@russoz) +# Copyright (c) 2017, Kairo Araujo +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + + +DOCUMENTATION = r""" +author: Kairo Araujo (@kairoaraujo) +module: mksysb +short_description: Generates AIX mksysb rootvg backups +description: + - This module manages a basic AIX mksysb (image) of rootvg. +seealso: + - name: C(mksysb) command manual page + description: Manual page for the command. + link: https://www.ibm.com/docs/en/aix/7.3?topic=m-mksysb-command + +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: none +options: + backup_crypt_files: + description: + - Backup encrypted files. + type: bool + default: true + backup_dmapi_fs: + description: + - Back up DMAPI filesystem files. + type: bool + default: true + create_map_files: + description: + - Creates a new MAP files. + type: bool + default: false + exclude_files: + description: + - Excludes files using C(/etc/rootvg.exclude). + type: bool + default: false + exclude_wpar_files: + description: + - Excludes WPAR files. + type: bool + default: false + extended_attrs: + description: + - Backup extended attributes. + type: bool + default: true + name: + type: str + description: + - Backup name. + required: true + new_image_data: + description: + - Creates a new file data. 
+ type: bool + default: true + software_packing: + description: + - Exclude files from packing option listed in C(/etc/exclude_packing.rootvg). + type: bool + default: false + storage_path: + type: str + description: + - Storage path where the mksysb backup is stored. + required: true + use_snapshot: + description: + - Creates backup using snapshots. + type: bool + default: false +""" + +EXAMPLES = r""" +- name: Running a backup image mksysb + community.general.mksysb: + name: myserver + storage_path: /repository/images + exclude_files: true + exclude_wpar_files: true +""" + +RETURN = r""" +msg: + description: Return message regarding the action. + returned: always + type: str +""" + +import os + +from ansible_collections.community.general.plugins.module_utils.cmd_runner import CmdRunner, cmd_runner_fmt +from ansible_collections.community.general.plugins.module_utils.module_helper import ModuleHelper + + +class MkSysB(ModuleHelper): + module = dict( + argument_spec=dict( + backup_crypt_files=dict(type='bool', default=True), + backup_dmapi_fs=dict(type='bool', default=True), + create_map_files=dict(type='bool', default=False), + exclude_files=dict(type='bool', default=False), + exclude_wpar_files=dict(type='bool', default=False), + extended_attrs=dict(type='bool', default=True), + name=dict(type='str', required=True), + new_image_data=dict(type='bool', default=True), + software_packing=dict(type='bool', default=False), + storage_path=dict(type='str', required=True), + use_snapshot=dict(type='bool', default=False) + ), + supports_check_mode=True, + ) + command_args_formats = dict( + create_map_files=cmd_runner_fmt.as_bool("-m"), + use_snapshot=cmd_runner_fmt.as_bool("-T"), + exclude_files=cmd_runner_fmt.as_bool("-e"), + exclude_wpar_files=cmd_runner_fmt.as_bool("-G"), + new_image_data=cmd_runner_fmt.as_bool("-i"), + software_packing=cmd_runner_fmt.as_bool_not("-p"), + extended_attrs=cmd_runner_fmt.as_bool("-a"), + backup_crypt_files=cmd_runner_fmt.as_bool_not("-Z"), 
+ backup_dmapi_fs=cmd_runner_fmt.as_bool("-A"), + combined_path=cmd_runner_fmt.as_func(cmd_runner_fmt.unpack_args(lambda p, n: ["%s/%s" % (p, n)])), + ) + + def __init_module__(self): + if not os.path.isdir(self.vars.storage_path): + self.do_raise("Storage path %s is not valid." % self.vars.storage_path) + + def __run__(self): + def process(rc, out, err): + if rc != 0: + self.do_raise("mksysb failed: {0}".format(out)) + + runner = CmdRunner( + self.module, + ['mksysb', '-X'], + self.command_args_formats, + ) + with runner(['create_map_files', 'use_snapshot', 'exclude_files', 'exclude_wpar_files', 'software_packing', + 'extended_attrs', 'backup_crypt_files', 'backup_dmapi_fs', 'new_image_data', 'combined_path'], + output_process=process, check_mode_skip=True) as ctx: + ctx.run(combined_path=[self.vars.storage_path, self.vars.name]) + if self.verbosity >= 4: + self.vars.run_info = ctx.run_info + + self.changed = True + + +def main(): + MkSysB.execute() + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/modprobe.py b/plugins/modules/modprobe.py new file mode 100644 index 0000000000..fddf0643bd --- /dev/null +++ b/plugins/modules/modprobe.py @@ -0,0 +1,326 @@ +#!/usr/bin/python + +# Copyright (c) 2013, David Stygstra +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + +DOCUMENTATION = r""" +module: modprobe +short_description: Load or unload kernel modules +author: + - David Stygstra (@stygstra) + - Julien Dauphant (@jdauphant) + - Matt Jeffery (@mattjeffery) +description: + - Load or unload kernel modules. +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: none +options: + name: + type: str + required: true + description: + - Name of kernel module to manage. 
+ state: + type: str + description: + - Whether the module should be present or absent. + choices: [absent, present] + default: present + params: + type: str + description: + - Modules parameters. + default: '' + persistent: + type: str + choices: [disabled, absent, present] + default: disabled + version_added: 7.0.0 + description: + - Persistency between reboots for configured module. + - This option creates files in C(/etc/modules-load.d/) and C(/etc/modprobe.d/) that make your module configuration persistent + during reboots. + - If V(present), adds module name to C(/etc/modules-load.d/) and params to C(/etc/modprobe.d/) so the module is loaded + on next reboot. + - If V(absent), comments out module name from C(/etc/modules-load.d/) and comments out params from C(/etc/modprobe.d/) + so the module is not loaded on next reboot. + - If V(disabled), does not touch anything and leaves C(/etc/modules-load.d/) and C(/etc/modprobe.d/) as it is. + - Note that it is usually a better idea to rely on the automatic module loading by PCI IDs, USB IDs, DMI IDs or similar + triggers encoded in the kernel modules themselves instead of configuration like this. + - In fact, most modern kernel modules are prepared for automatic loading already. + - B(Note:) This option works only with distributions that use C(systemd) when set to values other than V(disabled). 
+""" + +EXAMPLES = r""" +- name: Add the 802.1q module + community.general.modprobe: + name: 8021q + state: present + +- name: Add the dummy module + community.general.modprobe: + name: dummy + state: present + params: 'numdummies=2' + +- name: Add the dummy module and make sure it is loaded after reboots + community.general.modprobe: + name: dummy + state: present + params: 'numdummies=2' + persistent: present +""" + +import os.path +import platform +import shlex +import traceback +import re + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.common.text.converters import to_native + +RELEASE_VER = platform.release() +MODULES_LOAD_LOCATION = '/etc/modules-load.d' +PARAMETERS_FILES_LOCATION = '/etc/modprobe.d' + + +class Modprobe(object): + + def __init__(self, module): + self.module = module + self.modprobe_bin = module.get_bin_path('modprobe', True) + + self.check_mode = module.check_mode + self.desired_state = module.params['state'] + self.name = module.params['name'] + self.params = module.params['params'] + self.persistent = module.params['persistent'] + + self.changed = False + + self.re_find_module = re.compile(r'^ *{0} *(?:[#;].*)?\n?\Z'.format(self.name)) + self.re_find_params = re.compile(r'^options {0} \w+=\S+ *(?:[#;].*)?\n?\Z'.format(self.name)) + self.re_get_params_and_values = re.compile(r'^options {0} (\w+=\S+) *(?:[#;].*)?\n?\Z'.format(self.name)) + + def load_module(self): + command = [self.modprobe_bin] + if self.check_mode: + command.append('-n') + command.extend([self.name] + shlex.split(self.params)) + + rc, out, err = self.module.run_command(command) + + if rc != 0: + return self.module.fail_json(msg=err, rc=rc, stdout=out, stderr=err, **self.result) + + if self.check_mode or self.module_loaded(): + self.changed = True + else: + rc, stdout, stderr = self.module.run_command( + [self.modprobe_bin, '-n', '--first-time', self.name] + shlex.split(self.params) + ) + if rc != 0: + self.module.warn(stderr) + + @property 
+ def module_is_loaded_persistently(self): + for module_file in self.modules_files: + with open(module_file) as file: + for line in file: + if self.re_find_module.match(line): + return True + + return False + + @property + def params_is_set(self): + desired_params = set(self.params.split()) + + return desired_params == self.permanent_params + + @property + def permanent_params(self): + params = set() + + for modprobe_file in self.modprobe_files: + with open(modprobe_file) as file: + for line in file: + match = self.re_get_params_and_values.match(line) + if match: + params.add(match.group(1)) + + return params + + def create_module_file(self): + file_path = os.path.join(MODULES_LOAD_LOCATION, + self.name + '.conf') + if not self.check_mode: + with open(file_path, 'w') as file: + file.write(self.name + '\n') + + @property + def module_options_file_content(self): + file_content = ['options {0} {1}'.format(self.name, param) + for param in self.params.split()] + return '\n'.join(file_content) + '\n' + + def create_module_options_file(self): + new_file_path = os.path.join(PARAMETERS_FILES_LOCATION, + self.name + '.conf') + if not self.check_mode: + with open(new_file_path, 'w') as file: + file.write(self.module_options_file_content) + + def disable_old_params(self): + + for modprobe_file in self.modprobe_files: + with open(modprobe_file) as file: + file_content = file.readlines() + + content_changed = False + for index, line in enumerate(file_content): + if self.re_find_params.match(line): + file_content[index] = '#' + line + content_changed = True + + if not self.check_mode and content_changed: + with open(modprobe_file, 'w') as file: + file.write('\n'.join(file_content)) + + def disable_module_permanent(self): + + for module_file in self.modules_files: + with open(module_file) as file: + file_content = file.readlines() + + content_changed = False + for index, line in enumerate(file_content): + if self.re_find_module.match(line): + file_content[index] = '#' + line + 
content_changed = True + + if not self.check_mode and content_changed: + with open(module_file, 'w') as file: + file.write('\n'.join(file_content)) + + def load_module_permanent(self): + + if not self.module_is_loaded_persistently: + self.create_module_file() + self.changed = True + + if not self.params_is_set: + self.disable_old_params() + self.create_module_options_file() + self.changed = True + + def unload_module_permanent(self): + if self.module_is_loaded_persistently: + self.disable_module_permanent() + self.changed = True + + if self.permanent_params: + self.disable_old_params() + self.changed = True + + @property + def modules_files(self): + if not os.path.isdir(MODULES_LOAD_LOCATION): + return [] + modules_paths = [os.path.join(MODULES_LOAD_LOCATION, path) + for path in os.listdir(MODULES_LOAD_LOCATION)] + return [path for path in modules_paths if os.path.isfile(path)] + + @property + def modprobe_files(self): + if not os.path.isdir(PARAMETERS_FILES_LOCATION): + return [] + modules_paths = [os.path.join(PARAMETERS_FILES_LOCATION, path) + for path in os.listdir(PARAMETERS_FILES_LOCATION)] + return [path for path in modules_paths if os.path.isfile(path)] + + def module_loaded(self): + is_loaded = False + try: + with open('/proc/modules') as modules: + module_name = self.name.replace('-', '_') + ' ' + for line in modules: + if line.startswith(module_name): + is_loaded = True + break + + if not is_loaded: + module_file = '/' + self.name + '.ko' + builtin_path = os.path.join('/lib/modules/', RELEASE_VER, 'modules.builtin') + with open(builtin_path) as builtins: + for line in builtins: + if line.rstrip().endswith(module_file): + is_loaded = True + break + except (IOError, OSError) as e: + self.module.fail_json(msg=to_native(e), exception=traceback.format_exc(), **self.result) + + return is_loaded + + def unload_module(self): + command = [self.modprobe_bin, '-r', self.name] + if self.check_mode: + command.append('-n') + + rc, out, err = 
self.module.run_command(command) + if rc != 0: + return self.module.fail_json(msg=err, rc=rc, stdout=out, stderr=err, **self.result) + + self.changed = True + + @property + def result(self): + return { + 'changed': self.changed, + 'name': self.name, + 'params': self.params, + 'state': self.desired_state, + } + + +def build_module(): + return AnsibleModule( + argument_spec=dict( + name=dict(type='str', required=True), + state=dict(type='str', default='present', choices=['absent', 'present']), + params=dict(type='str', default=''), + persistent=dict(type='str', default='disabled', choices=['disabled', 'present', 'absent']), + ), + supports_check_mode=True, + ) + + +def main(): + module = build_module() + + modprobe = Modprobe(module) + + if modprobe.desired_state == 'present' and not modprobe.module_loaded(): + modprobe.load_module() + elif modprobe.desired_state == 'absent' and modprobe.module_loaded(): + modprobe.unload_module() + + if modprobe.persistent == 'present' and not (modprobe.module_is_loaded_persistently and modprobe.params_is_set): + modprobe.load_module_permanent() + elif modprobe.persistent == 'absent' and (modprobe.module_is_loaded_persistently or modprobe.permanent_params): + modprobe.unload_module_permanent() + + module.exit_json(**modprobe.result) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/monitoring/monit.py b/plugins/modules/monit.py similarity index 89% rename from plugins/modules/monitoring/monit.py rename to plugins/modules/monit.py index dfbe9cee35..63c83741b5 100644 --- a/plugins/modules/monitoring/monit.py +++ b/plugins/modules/monit.py @@ -1,49 +1,53 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- -# (c) 2013, Darryl Stoflet -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# Copyright (c) 2013, Darryl Stoflet +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later 
-from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: monit -short_description: Manage the state of a program monitored via Monit +short_description: Manage the state of a program monitored using Monit description: - - Manage the state of a program monitored via I(Monit). + - Manage the state of a program monitored using Monit. +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: none options: name: description: - - The name of the I(monit) program/process to manage. + - The name of the C(monit) program/process to manage. required: true type: str state: description: - The state of service. required: true - choices: [ "present", "started", "stopped", "restarted", "monitored", "unmonitored", "reloaded" ] + choices: ["present", "started", "stopped", "restarted", "monitored", "unmonitored", "reloaded"] type: str timeout: description: - - If there are pending actions for the service monitored by monit, then Ansible will check - for up to this many seconds to verify the requested action has been performed. - Ansible will sleep for five seconds between each check. + - If there are pending actions for the service monitored by monit, then it checks for up to this many seconds to verify + the requested action has been performed. The module sleeps for five seconds between each check. 
default: 300 type: int author: - - Darryl Stoflet (@dstoflet) - - Simon Kelly (@snopoke) -''' + - Darryl Stoflet (@dstoflet) + - Simon Kelly (@snopoke) +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: Manage the state of program httpd to be in started state community.general.monit: name: httpd state: started -''' +""" import time import re @@ -51,7 +55,6 @@ import re from collections import namedtuple from ansible.module_utils.basic import AnsibleModule -from ansible.module_utils.six import python_2_unicode_compatible STATE_COMMAND_MAP = { @@ -66,7 +69,6 @@ MONIT_SERVICES = ['Process', 'File', 'Fifo', 'Filesystem', 'Directory', 'Remote 'Network'] -@python_2_unicode_compatible class StatusValue(namedtuple("Status", "value, is_pending")): MISSING = 'missing' OK = 'ok' @@ -168,7 +170,11 @@ class Monit(object): status_val = status_val.split(' | ')[0] if ' - ' not in status_val: status_val = status_val.replace(' ', '_') - return getattr(Status, status_val) + try: + return getattr(Status, status_val) + except AttributeError: + self.module.warn("Unknown monit status '%s', treating as execution failed" % status_val) + return Status.EXECUTION_FAILED else: status_val, substatus = status_val.split(' - ') action, state = substatus.split() @@ -210,7 +216,7 @@ class Monit(object): return running_status def wait_for_monit_to_stop_pending(self, current_status=None): - """Fails this run if there is no status or it's pending/initializing for timeout""" + """Fails this run if there is no status or it is pending/initializing for timeout""" timeout_time = time.time() + self.timeout if not current_status: diff --git a/plugins/modules/monitoring/newrelic_deployment.py b/plugins/modules/monitoring/newrelic_deployment.py deleted file mode 100644 index af953e0a75..0000000000 --- a/plugins/modules/monitoring/newrelic_deployment.py +++ /dev/null @@ -1,146 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Copyright 2013 Matt Coddington -# GNU General Public License v3.0+ (see COPYING or 
https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' ---- -module: newrelic_deployment -author: "Matt Coddington (@mcodd)" -short_description: Notify newrelic about app deployments -description: - - Notify newrelic about app deployments (see https://docs.newrelic.com/docs/apm/new-relic-apm/maintenance/deployment-notifications#api) -options: - token: - type: str - description: - - API token, to place in the x-api-key header. - required: true - app_name: - type: str - description: - - (one of app_name or application_id are required) The value of app_name in the newrelic.yml file used by the application - required: false - application_id: - type: str - description: - - (one of app_name or application_id are required) The application id, found in the URL when viewing the application in RPM - required: false - changelog: - type: str - description: - - A list of changes for this deployment - required: false - description: - type: str - description: - - Text annotation for the deployment - notes for you - required: false - revision: - type: str - description: - - A revision number (e.g., git commit SHA) - required: false - user: - type: str - description: - - The name of the user/process that triggered this deployment - required: false - appname: - type: str - description: - - Name of the application - required: false - environment: - type: str - description: - - The environment for this deployment - required: false - validate_certs: - description: - - If C(no), SSL certificates will not be validated. This should only be used - on personally controlled sites using self-signed certificates. 
- required: false - default: 'yes' - type: bool - -requirements: [] -''' - -EXAMPLES = ''' -- name: Notify newrelic about an app deployment - community.general.newrelic_deployment: - token: AAAAAA - app_name: myapp - user: ansible deployment - revision: '1.0' -''' - -from ansible.module_utils.basic import AnsibleModule -from ansible.module_utils.urls import fetch_url -from ansible.module_utils.six.moves.urllib.parse import urlencode - -# =========================================== -# Module execution. -# - - -def main(): - - module = AnsibleModule( - argument_spec=dict( - token=dict(required=True, no_log=True), - app_name=dict(required=False), - application_id=dict(required=False), - changelog=dict(required=False), - description=dict(required=False), - revision=dict(required=False), - user=dict(required=False), - appname=dict(required=False), - environment=dict(required=False), - validate_certs=dict(default=True, type='bool'), - ), - required_one_of=[['app_name', 'application_id']], - supports_check_mode=True - ) - - # build list of params - params = {} - if module.params["app_name"] and module.params["application_id"]: - module.fail_json(msg="only one of 'app_name' or 'application_id' can be set") - - if module.params["app_name"]: - params["app_name"] = module.params["app_name"] - elif module.params["application_id"]: - params["application_id"] = module.params["application_id"] - else: - module.fail_json(msg="you must set one of 'app_name' or 'application_id'") - - for item in ["changelog", "description", "revision", "user", "appname", "environment"]: - if module.params[item]: - params[item] = module.params[item] - - # If we're in check mode, just exit pretending like we succeeded - if module.check_mode: - module.exit_json(changed=True) - - # Send the data to NewRelic - url = "https://rpm.newrelic.com/deployments.xml" - data = urlencode(params) - headers = { - 'x-api-key': module.params["token"], - } - response, info = fetch_url(module, url, data=data, 
headers=headers) - if info['status'] in (200, 201): - module.exit_json(changed=True) - else: - module.fail_json(msg="unable to update newrelic: %s" % info['msg']) - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/monitoring/pagerduty_alert.py b/plugins/modules/monitoring/pagerduty_alert.py deleted file mode 100644 index 58a1f260fb..0000000000 --- a/plugins/modules/monitoring/pagerduty_alert.py +++ /dev/null @@ -1,255 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# Copyright: Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' - -module: pagerduty_alert -short_description: Trigger, acknowledge or resolve PagerDuty incidents -description: - - This module will let you trigger, acknowledge or resolve a PagerDuty incident by sending events -author: - - "Amanpreet Singh (@ApsOps)" -requirements: - - PagerDuty API access -options: - name: - type: str - description: - - PagerDuty unique subdomain. Obsolete. It is not used with PagerDuty REST v2 API. - service_id: - type: str - description: - - ID of PagerDuty service when incidents will be triggered, acknowledged or resolved. - required: true - service_key: - type: str - description: - - The GUID of one of your "Generic API" services. Obsolete. Please use I(integration_key). - integration_key: - type: str - description: - - The GUID of one of your "Generic API" services. - - This is the "integration key" listed on a "Integrations" tab of PagerDuty service. - state: - type: str - description: - - Type of event to be sent. - required: true - choices: - - 'triggered' - - 'acknowledged' - - 'resolved' - api_key: - type: str - description: - - The pagerduty API key (readonly access), generated on the pagerduty site. - required: true - desc: - type: str - description: - - For C(triggered) I(state) - Required. 
Short description of the problem that led to this trigger. This field (or a truncated version) - will be used when generating phone calls, SMS messages and alert emails. It will also appear on the incidents tables in the PagerDuty UI. - The maximum length is 1024 characters. - - For C(acknowledged) or C(resolved) I(state) - Text that will appear in the incident's log associated with this event. - required: false - default: Created via Ansible - incident_key: - type: str - description: - - Identifies the incident to which this I(state) should be applied. - - For C(triggered) I(state) - If there's no open (i.e. unresolved) incident with this key, a new one will be created. If there's already an - open incident with a matching key, this event will be appended to that incident's log. The event key provides an easy way to "de-dup" - problem reports. - - For C(acknowledged) or C(resolved) I(state) - This should be the incident_key you received back when the incident was first opened by a - trigger event. Acknowledge events referencing resolved or nonexistent incidents will be discarded. - required: false - client: - type: str - description: - - The name of the monitoring client that is triggering this event. - required: false - client_url: - type: str - description: - - The URL of the monitoring client that is triggering this event. 
- required: false -''' - -EXAMPLES = ''' -- name: Trigger an incident with just the basic options - community.general.pagerduty_alert: - name: companyabc - integration_key: xxx - api_key: yourapikey - service_id: PDservice - state: triggered - desc: problem that led to this trigger - -- name: Trigger an incident with more options - community.general.pagerduty_alert: - integration_key: xxx - api_key: yourapikey - service_id: PDservice - state: triggered - desc: problem that led to this trigger - incident_key: somekey - client: Sample Monitoring Service - client_url: http://service.example.com - -- name: Acknowledge an incident based on incident_key - community.general.pagerduty_alert: - integration_key: xxx - api_key: yourapikey - service_id: PDservice - state: acknowledged - incident_key: somekey - desc: "some text for incident's log" - -- name: Resolve an incident based on incident_key - community.general.pagerduty_alert: - integration_key: xxx - api_key: yourapikey - service_id: PDservice - state: resolved - incident_key: somekey - desc: "some text for incident's log" -''' -import json - -from ansible.module_utils.basic import AnsibleModule -from ansible.module_utils.urls import fetch_url -from ansible.module_utils.six.moves.urllib.parse import urlparse, urlencode, urlunparse - - -def check(module, name, state, service_id, integration_key, api_key, incident_key=None, http_call=fetch_url): - url = 'https://api.pagerduty.com/incidents' - headers = { - "Content-type": "application/json", - "Authorization": "Token token=%s" % api_key, - 'Accept': 'application/vnd.pagerduty+json;version=2' - } - - params = { - 'service_ids[]': service_id, - 'sort_by': 'incident_number:desc', - 'time_zone': 'UTC' - } - if incident_key: - params['incident_key'] = incident_key - - url_parts = list(urlparse(url)) - url_parts[4] = urlencode(params, True) - - url = urlunparse(url_parts) - - response, info = http_call(module, url, method='get', headers=headers) - - if info['status'] != 200: 
- module.fail_json(msg="failed to check current incident status." - "Reason: %s" % info['msg']) - - incidents = json.loads(response.read())["incidents"] - msg = "No corresponding incident" - - if len(incidents) == 0: - if state in ('acknowledged', 'resolved'): - return msg, False - return msg, True - elif state != incidents[0]["status"]: - return incidents[0], True - - return incidents[0], False - - -def send_event(module, service_key, event_type, desc, - incident_key=None, client=None, client_url=None): - url = "https://events.pagerduty.com/generic/2010-04-15/create_event.json" - headers = { - "Content-type": "application/json" - } - - data = { - "service_key": service_key, - "event_type": event_type, - "incident_key": incident_key, - "description": desc, - "client": client, - "client_url": client_url - } - - response, info = fetch_url(module, url, method='post', - headers=headers, data=json.dumps(data)) - if info['status'] != 200: - module.fail_json(msg="failed to %s. Reason: %s" % - (event_type, info['msg'])) - json_out = json.loads(response.read()) - return json_out - - -def main(): - module = AnsibleModule( - argument_spec=dict( - name=dict(required=False), - service_id=dict(required=True), - service_key=dict(required=False, no_log=True), - integration_key=dict(required=False, no_log=True), - api_key=dict(required=True, no_log=True), - state=dict(required=True, - choices=['triggered', 'acknowledged', 'resolved']), - client=dict(required=False, default=None), - client_url=dict(required=False, default=None), - desc=dict(required=False, default='Created via Ansible'), - incident_key=dict(required=False, default=None, no_log=False) - ), - supports_check_mode=True - ) - - name = module.params['name'] - service_id = module.params['service_id'] - integration_key = module.params['integration_key'] - service_key = module.params['service_key'] - api_key = module.params['api_key'] - state = module.params['state'] - client = module.params['client'] - client_url = 
module.params['client_url'] - desc = module.params['desc'] - incident_key = module.params['incident_key'] - - if integration_key is None: - if service_key is not None: - integration_key = service_key - module.warn('"service_key" is obsolete parameter and will be removed.' - ' Please, use "integration_key" instead') - else: - module.fail_json(msg="'integration_key' is required parameter") - - state_event_dict = { - 'triggered': 'trigger', - 'acknowledged': 'acknowledge', - 'resolved': 'resolve' - } - - event_type = state_event_dict[state] - - if event_type != 'trigger' and incident_key is None: - module.fail_json(msg="incident_key is required for " - "acknowledge or resolve events") - - out, changed = check(module, name, state, service_id, - integration_key, api_key, incident_key) - - if not module.check_mode and changed is True: - out = send_event(module, integration_key, event_type, desc, - incident_key, client, client_url) - - module.exit_json(result=out, changed=changed) - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/monitoring/stackdriver.py b/plugins/modules/monitoring/stackdriver.py deleted file mode 100644 index fa6bacb951..0000000000 --- a/plugins/modules/monitoring/stackdriver.py +++ /dev/null @@ -1,215 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# Copyright: Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' - -module: stackdriver -short_description: Send code deploy and annotation events to stackdriver -description: - - Send code deploy and annotation events to Stackdriver -author: "Ben Whaley (@bwhaley)" -options: - key: - type: str - description: - - API key. 
- required: true - event: - type: str - description: - - The type of event to send, either annotation or deploy - choices: ['annotation', 'deploy'] - required: true - revision_id: - type: str - description: - - The revision of the code that was deployed. Required for deploy events - deployed_by: - type: str - description: - - The person or robot responsible for deploying the code - default: "Ansible" - deployed_to: - type: str - description: - - "The environment code was deployed to. (ie: development, staging, production)" - repository: - type: str - description: - - The repository (or project) deployed - msg: - type: str - description: - - The contents of the annotation message, in plain text. Limited to 256 characters. Required for annotation. - annotated_by: - type: str - description: - - The person or robot who the annotation should be attributed to. - default: "Ansible" - level: - type: str - description: - - one of INFO/WARN/ERROR, defaults to INFO if not supplied. May affect display. - choices: ['INFO', 'WARN', 'ERROR'] - default: 'INFO' - instance_id: - type: str - description: - - id of an EC2 instance that this event should be attached to, which will limit the contexts where this event is shown - event_epoch: - type: str - description: - - "Unix timestamp of where the event should appear in the timeline, defaults to now. Be careful with this." -''' - -EXAMPLES = ''' -- name: Send a code deploy event to stackdriver - community.general.stackdriver: - key: AAAAAA - event: deploy - deployed_to: production - deployed_by: leeroyjenkins - repository: MyWebApp - revision_id: abcd123 - -- name: Send an annotation event to stackdriver - community.general.stackdriver: - key: AAAAAA - event: annotation - msg: Greetings from Ansible - annotated_by: leeroyjenkins - level: WARN - instance_id: i-abcd1234 -''' - -# =========================================== -# Stackdriver module specific support methods. 
-# - -import json -import traceback - -from ansible.module_utils.basic import AnsibleModule -from ansible.module_utils.common.text.converters import to_native -from ansible.module_utils.urls import fetch_url - - -def send_deploy_event(module, key, revision_id, deployed_by='Ansible', deployed_to=None, repository=None): - """Send a deploy event to Stackdriver""" - deploy_api = "https://event-gateway.stackdriver.com/v1/deployevent" - - params = {} - params['revision_id'] = revision_id - params['deployed_by'] = deployed_by - if deployed_to: - params['deployed_to'] = deployed_to - if repository: - params['repository'] = repository - - return do_send_request(module, deploy_api, params, key) - - -def send_annotation_event(module, key, msg, annotated_by='Ansible', level=None, instance_id=None, event_epoch=None): - """Send an annotation event to Stackdriver""" - annotation_api = "https://event-gateway.stackdriver.com/v1/annotationevent" - - params = {} - params['message'] = msg - if annotated_by: - params['annotated_by'] = annotated_by - if level: - params['level'] = level - if instance_id: - params['instance_id'] = instance_id - if event_epoch: - params['event_epoch'] = event_epoch - - return do_send_request(module, annotation_api, params, key) - - -def do_send_request(module, url, params, key): - data = json.dumps(params) - headers = { - 'Content-Type': 'application/json', - 'x-stackdriver-apikey': key - } - response, info = fetch_url(module, url, headers=headers, data=data, method='POST') - if info['status'] != 200: - module.fail_json(msg="Unable to send msg: %s" % info['msg']) - - -# =========================================== -# Module execution. 
-# - -def main(): - - module = AnsibleModule( - argument_spec=dict( # @TODO add types - key=dict(required=True, no_log=True), - event=dict(required=True, choices=['deploy', 'annotation']), - msg=dict(), - revision_id=dict(), - annotated_by=dict(default='Ansible'), - level=dict(default='INFO', choices=['INFO', 'WARN', 'ERROR']), - instance_id=dict(), - event_epoch=dict(), # @TODO int? - deployed_by=dict(default='Ansible'), - deployed_to=dict(), - repository=dict(), - ), - supports_check_mode=True - ) - - key = module.params["key"] - event = module.params["event"] - - # Annotation params - msg = module.params["msg"] - annotated_by = module.params["annotated_by"] - level = module.params["level"] - instance_id = module.params["instance_id"] - event_epoch = module.params["event_epoch"] - - # Deploy params - revision_id = module.params["revision_id"] - deployed_by = module.params["deployed_by"] - deployed_to = module.params["deployed_to"] - repository = module.params["repository"] - - ################################################################## - # deploy requires revision_id - # annotation requires msg - # We verify these manually - ################################################################## - - if event == 'deploy': - if not revision_id: - module.fail_json(msg="revision_id required for deploy events") - try: - send_deploy_event(module, key, revision_id, deployed_by, deployed_to, repository) - except Exception as e: - module.fail_json(msg="unable to sent deploy event: %s" % to_native(e), - exception=traceback.format_exc()) - - if event == 'annotation': - if not msg: - module.fail_json(msg="msg required for annotation events") - try: - send_annotation_event(module, key, msg, annotated_by, level, instance_id, event_epoch) - except Exception as e: - module.fail_json(msg="unable to sent annotation event: %s" % to_native(e), - exception=traceback.format_exc()) - - changed = True - module.exit_json(changed=changed, deployed_by=deployed_by) - - -if __name__ == 
'__main__': - main() diff --git a/plugins/modules/notification/mqtt.py b/plugins/modules/mqtt.py similarity index 67% rename from plugins/modules/notification/mqtt.py rename to plugins/modules/mqtt.py index 6099196102..ab1fe59cdc 100644 --- a/plugins/modules/notification/mqtt.py +++ b/plugins/modules/mqtt.py @@ -1,29 +1,34 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- -# (c) 2013, 2014, Jan-Piet Mens -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# Copyright (c) 2013, 2014, Jan-Piet Mens +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: mqtt short_description: Publish a message on an MQTT topic for the IoT description: - - Publish a message on an MQTT topic. + - Publish a message on an MQTT topic. +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: none options: server: type: str description: - - MQTT broker address/name + - MQTT broker address/name. default: localhost port: type: int description: - - MQTT broker port number + - MQTT broker port number. default: 1883 username: type: str @@ -32,89 +37,81 @@ options: password: type: str description: - - Password for C(username) to authenticate against the broker. + - Password for O(username) to authenticate against the broker. client_id: type: str description: - - MQTT client identifier - - If not specified, a value C(hostname + pid) will be used. + - MQTT client identifier. + - If not specified, it uses a value C(hostname + pid). topic: type: str description: - - MQTT topic name + - MQTT topic name. required: true payload: type: str description: - - Payload. 
The special string C("None") may be used to send a NULL - (i.e. empty) payload which is useful to simply notify with the I(topic) - or to clear previously retained messages. + - Payload. The special string V("None") may be used to send a NULL (that is, empty) payload which is useful to simply + notify with the O(topic) or to clear previously retained messages. required: true qos: type: str description: - - QoS (Quality of Service) + - QoS (Quality of Service). default: "0" - choices: [ "0", "1", "2" ] + choices: ["0", "1", "2"] retain: description: - - Setting this flag causes the broker to retain (i.e. keep) the message so that - applications that subsequently subscribe to the topic can received the last - retained message immediately. + - Setting this flag causes the broker to retain (in other words keep) the message so that applications that subsequently + subscribe to the topic can received the last retained message immediately. type: bool - default: 'no' + default: false ca_cert: type: path description: - - The path to the Certificate Authority certificate files that are to be - treated as trusted by this client. If this is the only option given - then the client will operate in a similar manner to a web browser. That - is to say it will require the broker to have a certificate signed by the - Certificate Authorities in ca_certs and will communicate using TLS v1, - but will not attempt any form of authentication. This provides basic - network encryption but may not be sufficient depending on how the broker - is configured. - aliases: [ ca_certs ] + - The path to the Certificate Authority certificate files that are to be treated as trusted by this client. If this + is the only option given then the client operates in a similar manner to a web browser. That is to say it requires + the broker to have a certificate signed by the Certificate Authorities in ca_certs and communicates using TLS v1, + but does not attempt any form of authentication. 
This provides basic network encryption but may not be sufficient + depending on how the broker is configured. + aliases: [ca_certs] client_cert: type: path description: - - The path pointing to the PEM encoded client certificate. If this is not - None it will be used as client information for TLS based + - The path pointing to the PEM encoded client certificate. If this is set it is used as client information for TLS based authentication. Support for this feature is broker dependent. - aliases: [ certfile ] + aliases: [certfile] client_key: type: path description: - - The path pointing to the PEM encoded client private key. If this is not - None it will be used as client information for TLS based + - The path pointing to the PEM encoded client private key. If this is set it is used as client information for TLS based authentication. Support for this feature is broker dependent. - aliases: [ keyfile ] + aliases: [keyfile] tls_version: description: - Specifies the version of the SSL/TLS protocol to be used. - - By default (if the python version supports it) the highest TLS version is - detected. If unavailable, TLS v1 is used. + - By default (if the python version supports it) the highest TLS version is detected. If unavailable, TLS v1 is used. type: str choices: - tlsv1.1 - tlsv1.2 -requirements: [ mosquitto ] +requirements: [mosquitto] notes: - - This module requires a connection to an MQTT broker such as Mosquitto - U(http://mosquitto.org) and the I(Paho) C(mqtt) Python client (U(https://pypi.org/project/paho-mqtt/)). + - This module requires a connection to an MQTT broker such as Mosquitto U(http://mosquitto.org) and the I(Paho) C(mqtt) + Python client (U(https://pypi.org/project/paho-mqtt/)). 
author: "Jan-Piet Mens (@jpmens)" -''' +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: Publish a message on an MQTT topic community.general.mqtt: topic: 'service/ansible/{{ ansible_hostname }}' payload: 'Hello at {{ ansible_date_time.iso8601 }}' qos: 0 - retain: False + retain: false client_id: ans001 delegate_to: localhost -''' +""" # =========================================== # MQTT module support methods. @@ -163,15 +160,15 @@ def main(): port=dict(default=1883, type='int'), topic=dict(required=True), payload=dict(required=True), - client_id=dict(default=None), + client_id=dict(), qos=dict(default="0", choices=["0", "1", "2"]), retain=dict(default=False, type='bool'), - username=dict(default=None), - password=dict(default=None, no_log=True), - ca_cert=dict(default=None, type='path', aliases=['ca_certs']), - client_cert=dict(default=None, type='path', aliases=['certfile']), - client_key=dict(default=None, type='path', aliases=['keyfile']), - tls_version=dict(default=None, choices=['tlsv1.1', 'tlsv1.2']) + username=dict(), + password=dict(no_log=True), + ca_cert=dict(type='path', aliases=['ca_certs']), + client_cert=dict(type='path', aliases=['certfile']), + client_key=dict(type='path', aliases=['keyfile']), + tls_version=dict(choices=['tlsv1.1', 'tlsv1.2']) ), supports_check_mode=True ) diff --git a/plugins/modules/database/mssql/mssql_db.py b/plugins/modules/mssql_db.py similarity index 81% rename from plugins/modules/database/mssql/mssql_db.py rename to plugins/modules/mssql_db.py index e6c5f183fa..767cb2f554 100644 --- a/plugins/modules/database/mssql/mssql_db.py +++ b/plugins/modules/mssql_db.py @@ -1,72 +1,77 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- -# (c) 2014, Vedit Firat Arig +# Copyright (c) 2014, Vedit Firat Arig # Outline and parts are reused from Mark Theunissen's mysql_db module -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or 
https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: mssql_db -short_description: Add or remove MSSQL databases from a remote host. +short_description: Add or remove MSSQL databases from a remote host description: - - Add or remove MSSQL databases from a remote host. + - Add or remove MSSQL databases from a remote host. +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: none + diff_mode: + support: none options: name: description: - - name of the database to add or remove + - Name of the database to add or remove. required: true - aliases: [ db ] + aliases: [db] type: str login_user: description: - - The username used to authenticate with + - The username used to authenticate with. type: str + default: '' login_password: description: - - The password used to authenticate with + - The password used to authenticate with. type: str + default: '' login_host: description: - - Host running the database + - Host running the database. type: str required: true login_port: description: - - Port of the MSSQL server. Requires login_host be defined as other than localhost if login_port is used + - Port of the MSSQL server. Requires login_host be defined as other than localhost if login_port is used. default: '1433' type: str state: description: - - The database state + - The database state. default: present - choices: [ "present", "absent", "import" ] + choices: ["present", "absent", "import"] type: str target: description: - - Location, on the remote host, of the dump file to read from or write to. Uncompressed SQL - files (C(.sql)) files are supported. + - Location, on the remote host, of the dump file to read from or write to. Uncompressed SQL files (C(.sql)) files are + supported. 
type: str autocommit: description: - - Automatically commit the change only if the import succeed. Sometimes it is necessary to use autocommit=true, since some content can't be changed - within a transaction. + - Automatically commit the change only if the import succeed. Sometimes it is necessary to use autocommit=true, since + some content can not be changed within a transaction. type: bool - default: 'no' + default: false notes: - - Requires the pymssql Python package on the remote host. For Ubuntu, this - is as easy as pip install pymssql (See M(ansible.builtin.pip).) + - Requires the pymssql Python package on the remote host. For Ubuntu, this is as easy as pip install pymssql (See M(ansible.builtin.pip)). requirements: - - python >= 2.7 - - pymssql + - pymssql author: Vedit Firat Arig (@vedit) -''' +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: Create a new database with name 'jackdata' community.general.mssql_db: name: jackdata @@ -83,11 +88,11 @@ EXAMPLES = ''' name: my_db state: import target: /tmp/dump.sql -''' +""" -RETURN = ''' +RETURN = r""" # -''' +""" import os import traceback @@ -151,7 +156,7 @@ def main(): login_password=dict(default='', no_log=True), login_host=dict(required=True), login_port=dict(default='1433'), - target=dict(default=None), + target=dict(), autocommit=dict(type='bool', default=False), state=dict( default='present', choices=['present', 'absent', 'import']) @@ -203,7 +208,7 @@ def main(): rc, stdout, stderr = db_import(conn, cursor, module, db, target) if rc != 0: - module.fail_json(msg="%s" % stderr) + module.fail_json(msg=stderr) else: module.exit_json(changed=True, db=db, msg=stdout) else: @@ -222,7 +227,7 @@ def main(): rc, stdout, stderr = db_import(conn, cursor, module, db, target) if rc != 0: - module.fail_json(msg="%s" % stderr) + module.fail_json(msg=stderr) else: module.exit_json(changed=True, db=db, msg=stdout) diff --git a/plugins/modules/mssql_script.py b/plugins/modules/mssql_script.py new file mode 100644 index 
0000000000..ab367203c9 --- /dev/null +++ b/plugins/modules/mssql_script.py @@ -0,0 +1,410 @@ +#!/usr/bin/python + +# Copyright (c) 2021, Kris Budde 0: + queries.append(''.join(current_batch)) + + result['changed'] = True + if module.check_mode: + module.exit_json(**result) + + query_results = [] + for query in queries: + # Catch and exit on any bad query errors + try: + cursor.execute(query, sql_params) + qry_result = [] + rows = cursor.fetchall() + while rows: + qry_result.append(rows) + rows = cursor.fetchall() + query_results.append(qry_result) + except Exception as e: + # We know we executed the statement so this error just means we have no resultset + # which is ok (eg UPDATE/INSERT) + if ( + type(e).__name__ == 'OperationalError' and + str(e) == 'Statement not executed or executed statement has no resultset' + ): + query_results.append([]) + else: + # Rollback transaction before failing the module in case of error + if transaction: + conn.rollback() + error_msg = '%s: %s' % (type(e).__name__, str(e)) + module.fail_json(msg="query failed", query=query, error=error_msg, **result) + + # Commit transaction before exiting the module in case of no error + if transaction: + conn.commit() + + # ensure that the result is json serializable + qry_results = json.loads(json.dumps(query_results, default=clean_output)) + + result[query_results_key] = qry_results + module.exit_json(**result) + + +def main(): + run_module() + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/monitoring/nagios.py b/plugins/modules/nagios.py similarity index 84% rename from plugins/modules/monitoring/nagios.py rename to plugins/modules/nagios.py index 248fd1051d..ecf15d764a 100644 --- a/plugins/modules/monitoring/nagios.py +++ b/plugins/modules/nagios.py @@ -1,5 +1,4 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- # # This file is largely copied from the Nagios module included in the # Func project. 
Original copyright follows: @@ -7,37 +6,54 @@ # func-nagios - Schedule downtime and enables/disable notifications # Copyright 2011, Red Hat, Inc. # Tim Bielawa -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: nagios -short_description: Perform common tasks in Nagios related to downtime and notifications. +short_description: Perform common tasks in Nagios related to downtime and notifications description: - - "The C(nagios) module has two basic functions: scheduling downtime and toggling alerts for services or hosts." + - 'The C(nagios) module has two basic functions: scheduling downtime and toggling alerts for services or hosts.' - The C(nagios) module is not idempotent. - - All actions require the I(host) parameter to be given explicitly. In playbooks you can use the C({{inventory_hostname}}) variable to refer - to the host the playbook is currently running on. - - You can specify multiple services at once by separating them with commas, .e.g., C(services=httpd,nfs,puppet). - - When specifying what service to handle there is a special service value, I(host), which will handle alerts/downtime/acknowledge for the I(host itself), - e.g., C(service=host). This keyword may not be given with other services at the same time. - I(Setting alerts/downtime/acknowledge for a host does not affect alerts/downtime/acknowledge for any of the services running on it.) - To schedule downtime for all services on particular host use keyword "all", e.g., C(service=all). + - All actions require the O(host) parameter to be given explicitly. 
In playbooks you can use the C({{inventory_hostname}}) + variable to refer to the host the playbook is currently running on. + - The module executes commands and needs to be run directly on the Nagios server + with a user that has appropriate access rights. It does not use Nagios' HTTP API. + - Searches for a I(nagios.cfg) in I(/etc/nagios), I(/etc/nagios2), I(/etc/nagios3), I(/usr/local/etc/nagios), + I(/usr/local/groundwork/nagios/etc), I(/omd/sites/oppy/tmp/nagios), I(/usr/local/nagios/etc), + I(/usr/local/nagios), I(/opt/nagios/etc), and I(/opt/nagios), + or a I(icinga.cfg) in I(/etc/icinga) and I(/usr/local/icinga/etc). + (The Nagios configuration file should be readable by the Ansible user.) +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: none + diff_mode: + support: none options: action: description: - Action to take. - - servicegroup options were added in 2.0. - - delete_downtime options were added in 2.2. - - The C(acknowledge) and C(forced_check) actions were added in community.general 1.2.0. + - The V(acknowledge) and V(forced_check) actions were added in community.general 1.2.0. required: true - choices: [ "downtime", "delete_downtime", "enable_alerts", "disable_alerts", "silence", "unsilence", - "silence_nagios", "unsilence_nagios", "command", "servicegroup_service_downtime", - "servicegroup_host_downtime", "acknowledge", "forced_check" ] + choices: + - downtime + - delete_downtime + - enable_alerts + - disable_alerts + - silence + - unsilence + - silence_nagios + - unsilence_nagios + - command + - servicegroup_service_downtime + - servicegroup_host_downtime + - acknowledge + - forced_check type: str host: description: @@ -45,54 +61,59 @@ options: type: str cmdfile: description: - - Path to the nagios I(command file) (FIFO pipe). - Only required if auto-detection fails. + - Path to the nagios I(command file) (FIFO pipe). Only required if auto-detection fails. 
type: str author: description: - - Author to leave downtime comments as. - Only usable with the C(downtime) and C(acknowledge) action. + - Author to leave downtime comments as. Only used when O(action) is V(downtime) or V(acknowledge). type: str default: Ansible comment: description: - - Comment for C(downtime) and C(acknowledge)action. + - Comment when O(action) is V(downtime) or V(acknowledge). type: str default: Scheduling downtime start: description: - - When downtime should start, in time_t format (epoch seconds). + - When downtime should start, in C(time_t) format (epoch seconds). version_added: '0.2.0' type: str minutes: description: - Minutes to schedule downtime for. - - Only usable with the C(downtime) action. + - Only usable with O(action=downtime). type: int default: 30 services: description: - What to manage downtime/alerts for. Separate multiple services with commas. - C(service) is an alias for C(services). - B(Required) option when using the C(downtime), C(acknowledge), C(forced_check), C(enable_alerts), and C(disable_alerts) actions. - aliases: [ "service" ] - type: str + - 'B(Required) option when O(action) is one of: V(downtime), V(acknowledge), V(forced_check), V(enable_alerts), V(disable_alerts).' + - When specifying what O(services) to handle there is a special service value, V(host), which handles alerts/downtime/acknowledge + for the I(host itself), for example O(services=host). This keyword may not be given with other services at the same + time. B(Setting alerts/downtime/acknowledge for a host does not affect alerts/downtime/acknowledge for any of the + services running on it.) To schedule downtime for all O(services) on particular host use keyword V(all), for example + O(services=all). + - Before community.general 11.2.0, one could specify multiple services at once by separating them with commas, for example + O(services=httpd,nfs,puppet). 
Since community.general 11.2.0, there can be spaces around the commas, and an actual + list can be provided. + aliases: ["service"] + type: list + elements: str servicegroup: description: - The Servicegroup we want to set downtimes/alerts for. - B(Required) option when using the C(servicegroup_service_downtime) amd C(servicegroup_host_downtime). + - B(Required) option when using the V(servicegroup_service_downtime) and V(servicegroup_host_downtime) O(action). type: str command: description: - - The raw command to send to nagios, which - should not include the submitted time header or the line-feed - B(Required) option when using the C(command) action. + - The raw command to send to Nagios, which should not include the submitted time header or the line-feed. + - B(Required) option when O(action=command). type: str author: "Tim Bielawa (@tbielawa)" -''' +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: Set 30 minutes of apache downtime community.general.nagios: action: downtime @@ -210,7 +231,9 @@ EXAMPLES = ''' - name: Disable httpd and nfs alerts community.general.nagios: action: disable_alerts - service: httpd,nfs + service: + - httpd + - nfs host: '{{ inventory_hostname }}' - name: Disable HOST alerts @@ -241,7 +264,7 @@ EXAMPLES = ''' community.general.nagios: action: command command: DISABLE_FAILURE_PREDICTION -''' +""" import time import os.path @@ -250,8 +273,6 @@ import stat from ansible.module_utils.basic import AnsibleModule -###################################################################### - def which_cmdfile(): locations = [ # rhel @@ -285,8 +306,6 @@ def which_cmdfile(): return None -###################################################################### - def main(): ACTION_CHOICES = [ @@ -307,95 +326,42 @@ def main(): module = AnsibleModule( argument_spec=dict( - action=dict(required=True, choices=ACTION_CHOICES), - author=dict(default='Ansible'), - comment=dict(default='Scheduling downtime'), - host=dict(required=False, default=None), - 
servicegroup=dict(required=False, default=None), - start=dict(required=False, default=None), - minutes=dict(default=30, type='int'), - cmdfile=dict(default=which_cmdfile()), - services=dict(default=None, aliases=['service']), - command=dict(required=False, default=None), - ) + action=dict(type='str', required=True, choices=ACTION_CHOICES), + author=dict(type='str', default='Ansible'), + comment=dict(type='str', default='Scheduling downtime'), + host=dict(type='str'), + servicegroup=dict(type='str'), + start=dict(type='str'), + minutes=dict(type='int', default=30), + cmdfile=dict(type='str', default=which_cmdfile()), + services=dict(type='list', elements='str', aliases=['service']), + command=dict(type='str'), + ), + required_if=[ + ('action', 'downtime', ['host', 'services']), + ('action', 'delete_downtime', ['host', 'services']), + ('action', 'silence', ['host']), + ('action', 'unsilence', ['host']), + ('action', 'enable_alerts', ['host', 'services']), + ('action', 'disable_alerts', ['host', 'services']), + ('action', 'command', ['command']), + ('action', 'servicegroup_host_downtime', ['host', 'servicegroup']), + ('action', 'servicegroup_service_downtime', ['host', 'servicegroup']), + ('action', 'acknowledge', ['host', 'services']), + ('action', 'forced_check', ['host', 'services']), + ], ) - action = module.params['action'] - host = module.params['host'] - servicegroup = module.params['servicegroup'] - start = module.params['start'] - services = module.params['services'] - cmdfile = module.params['cmdfile'] - command = module.params['command'] - - ################################################################## - # Required args per action: - # downtime = (minutes, service, host) - # acknowledge = (service, host) - # (un)silence = (host) - # (enable/disable)_alerts = (service, host) - # command = command - # - # AnsibleModule will verify most stuff, we need to verify - # 'service' manually. 
- - ################################################################## - if action not in ['command', 'silence_nagios', 'unsilence_nagios']: - if not host: - module.fail_json(msg='no host specified for action requiring one') - ###################################################################### - if action == 'downtime': - # Make sure there's an actual service selected - if not services: - module.fail_json(msg='no service selected to set downtime for') - - ###################################################################### - if action == 'delete_downtime': - # Make sure there's an actual service selected - if not services: - module.fail_json(msg='no service selected to set downtime for') - - ###################################################################### - - if action in ['servicegroup_service_downtime', 'servicegroup_host_downtime']: - # Make sure there's an actual servicegroup selected - if not servicegroup: - module.fail_json(msg='no servicegroup selected to set downtime for') - - ################################################################## - if action in ['enable_alerts', 'disable_alerts']: - if not services: - module.fail_json(msg='a service is required when setting alerts') - - if action in ['command']: - if not command: - module.fail_json(msg='no command passed for command action') - ###################################################################### - if action == 'acknowledge': - # Make sure there's an actual service selected - if not services: - module.fail_json(msg='no service selected to acknowledge') - - ################################################################## - if action == 'forced_check': - # Make sure there's an actual service selected - if not services: - module.fail_json(msg='no service selected to check') - - ################################################################## - if not cmdfile: + if not module.params['cmdfile']: module.fail_json(msg='unable to locate nagios.cfg') - 
################################################################## ansible_nagios = Nagios(module, **module.params) if module.check_mode: module.exit_json(changed=True) else: ansible_nagios.act() - ################################################################## -###################################################################### class Nagios(object): """ Perform common tasks in Nagios related to downtime and @@ -426,10 +392,12 @@ class Nagios(object): self.cmdfile = kwargs['cmdfile'] self.command = kwargs['command'] - if (kwargs['services'] is None) or (kwargs['services'] == 'host') or (kwargs['services'] == 'all'): + if kwargs['services'] is None : self.services = kwargs['services'] + elif len(kwargs['services']) == 1 and kwargs['services'][0] in ['host', 'all']: + self.services = kwargs['services'][0] else: - self.services = kwargs['services'].split(',') + self.services = kwargs['services'] self.command_results = [] @@ -452,10 +420,9 @@ class Nagios(object): self.module.fail_json(msg='nagios command file is not a fifo file', cmdfile=self.cmdfile) try: - fp = open(self.cmdfile, 'w') - fp.write(cmd) - fp.flush() - fp.close() + with open(self.cmdfile, 'w') as fp: + fp.write(cmd) + fp.flush() self.command_results.append(cmd.strip()) except IOError: self.module.fail_json(msg='unable to write to nagios command file', @@ -1288,14 +1255,9 @@ class Nagios(object): elif self.action == 'unsilence_nagios': self.unsilence_nagios() - elif self.action == 'command': + else: # self.action == 'command' self.nagios_cmd(self.command) - # wtf? 
- else: - self.module.fail_json(msg="unknown action specified: '%s'" % - self.action) - self.module.exit_json(nagios_commands=self.command_results, changed=True) diff --git a/plugins/modules/net_tools/dnsimple_info.py b/plugins/modules/net_tools/dnsimple_info.py deleted file mode 100644 index 4ac22be0cb..0000000000 --- a/plugins/modules/net_tools/dnsimple_info.py +++ /dev/null @@ -1,335 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Copyright: Edward Hilgendorf, -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - - -DOCUMENTATION = r''' ---- -module: dnsimple_info - -short_description: Pull basic info from DNSimple API - -version_added: "4.2.0" - -description: Retrieve existing records and domains from DNSimple API. - -options: - name: - description: - - The domain name to retrieve info from. - - Will return all associated records for this domain if specified. - - If not specified, will return all domains associated with the account ID. - type: str - - account_id: - description: The account ID to query. - required: true - type: str - - api_key: - description: The API key to use. - required: true - type: str - - record: - description: - - The record to find. - - If specified, only this record will be returned instead of all records. - required: false - type: str - - sandbox: - description: Whether or not to use sandbox environment. 
- required: false - default: false - type: bool - -author: - - Edward Hilgendorf (@edhilgendorf) -''' - -EXAMPLES = r''' -- name: Get all domains from an account - community.general.dnsimple_info: - account_id: "1234" - api_key: "1234" - -- name: Get all records from a domain - community.general.dnsimple_info: - name: "example.com" - account_id: "1234" - api_key: "1234" - -- name: Get all info from a matching record - community.general.dnsimple_info: - name: "example.com" - record: "subdomain" - account_id: "1234" - api_key: "1234" -''' - -RETURN = r''' -dnsimple_domain_info: - description: Returns a list of dictionaries of all domains associated with the supplied account ID. - type: list - elements: dict - returned: success when I(name) is not specified - sample: - - account_id: 1234 - created_at: '2021-10-16T21:25:42Z' - id: 123456 - last_transferred_at: - name: example.com - reverse: false - secondary: false - updated_at: '2021-11-10T20:22:50Z' - contains: - account_id: - description: The account ID. - type: int - created_at: - description: When the domain entry was created. - type: str - id: - description: ID of the entry. - type: int - last_transferred_at: - description: Date the domain was transferred, or empty if not. - type: str - name: - description: Name of the record. - type: str - reverse: - description: Whether or not it is a reverse zone record. - type: bool - updated_at: - description: When the domain entry was updated. - type: str - -dnsimple_records_info: - description: Returns a list of dictionaries with all records for the domain supplied. 
- type: list - elements: dict - returned: success when I(name) is specified, but I(record) is not - sample: - - content: ns1.dnsimple.com admin.dnsimple.com - created_at: '2021-10-16T19:07:34Z' - id: 12345 - name: 'catheadbiscuit' - parent_id: null - priority: null - regions: - - global - system_record: true - ttl: 3600 - type: SOA - updated_at: '2021-11-15T23:55:51Z' - zone_id: example.com - contains: - content: - description: Content of the returned record. - type: str - created_at: - description: When the domain entry was created. - type: str - id: - description: ID of the entry. - type: int - name: - description: Name of the record. - type: str - parent_id: - description: Parent record or null. - type: int - priority: - description: Priority setting of the record. - type: str - regions: - description: List of regions where the record is available. - type: list - system_record: - description: Whether or not it is a system record. - type: bool - ttl: - description: Record TTL. - type: int - type: - description: Record type. - type: str - updated_at: - description: When the domain entry was updated. - type: str - zone_id: - description: ID of the zone that the record is associated with. - type: str -dnsimple_record_info: - description: Returns a list of dictionaries that match the record supplied. - returned: success when I(name) and I(record) are specified - type: list - elements: dict - sample: - - content: 1.2.3.4 - created_at: '2021-11-15T23:55:51Z' - id: 123456 - name: catheadbiscuit - parent_id: null - priority: null - regions: - - global - system_record: false - ttl: 3600 - type: A - updated_at: '2021-11-15T23:55:51Z' - zone_id: example.com - contains: - content: - description: Content of the returned record. - type: str - created_at: - description: When the domain entry was created. - type: str - id: - description: ID of the entry. - type: int - name: - description: Name of the record. - type: str - parent_id: - description: Parent record or null. 
- type: int - priority: - description: Priority setting of the record. - type: str - regions: - description: List of regions where the record is available. - type: list - system_record: - description: Whether or not it is a system record. - type: bool - ttl: - description: Record TTL. - type: int - type: - description: Record type. - type: str - updated_at: - description: When the domain entry was updated. - type: str - zone_id: - description: ID of the zone that the record is associated with. - type: str -''' - -import traceback -from ansible.module_utils.basic import AnsibleModule -from ansible.module_utils.basic import missing_required_lib -import json - -try: - from requests import Request, Session -except ImportError: - HAS_ANOTHER_LIBRARY = False - ANOTHER_LIBRARY_IMPORT_ERROR = traceback.format_exc() -else: - HAS_ANOTHER_LIBRARY = True - - -def build_url(account, key, is_sandbox): - headers = {'Accept': 'application/json', - 'Authorization': 'Bearer ' + key} - url = 'https://api{sandbox}.dnsimple.com/'.format( - sandbox=".sandbox" if is_sandbox else "") + 'v2/' + account - req = Request(url=url, headers=headers) - prepped_request = req.prepare() - return prepped_request - - -def iterate_data(module, request_object): - base_url = request_object.url - response = Session().send(request_object) - if 'pagination' in response.json(): - data = response.json()["data"] - pages = response.json()["pagination"]["total_pages"] - if int(pages) > 1: - for page in range(1, pages): - page = page + 1 - request_object.url = base_url + '&page=' + str(page) - new_results = Session().send(request_object) - data = data + new_results.json()["data"] - return(data) - else: - module.fail_json('API Call failed, check ID, key and sandbox values') - - -def record_info(dnsimple_mod, req_obj): - req_obj.url, req_obj.method = req_obj.url + '/zones/' + dnsimple_mod.params["name"] + '/records?name=' + dnsimple_mod.params["record"], 'GET' - return iterate_data(dnsimple_mod, req_obj) - - -def 
domain_info(dnsimple_mod, req_obj): - req_obj.url, req_obj.method = req_obj.url + '/zones/' + dnsimple_mod.params["name"] + '/records?per_page=100', 'GET' - return iterate_data(dnsimple_mod, req_obj) - - -def account_info(dnsimple_mod, req_obj): - req_obj.url, req_obj.method = req_obj.url + '/zones/?per_page=100', 'GET' - return iterate_data(dnsimple_mod, req_obj) - - -def main(): - # define available arguments/parameters a user can pass to the module - fields = { - "account_id": {"required": True, "type": "str"}, - "api_key": {"required": True, "type": "str", "no_log": True}, - "name": {"required": False, "type": "str"}, - "record": {"required": False, "type": "str"}, - "sandbox": {"required": False, "type": "bool", "default": False} - } - - result = { - 'changed': False - } - - module = AnsibleModule( - argument_spec=fields, - supports_check_mode=True - ) - - params = module.params - req = build_url(params['account_id'], - params['api_key'], - params['sandbox']) - - if not HAS_ANOTHER_LIBRARY: - # Needs: from ansible.module_utils.basic import missing_required_lib - module.exit_json( - msg=missing_required_lib('another_library'), - exception=ANOTHER_LIBRARY_IMPORT_ERROR) - - # At minimum we need account and key - if params['account_id'] and params['api_key']: - # If we have a record return info on that record - if params['name'] and params['record']: - result['dnsimple_record_info'] = record_info(module, req) - module.exit_json(**result) - - # If we have the account only and domain, return records for the domain - elif params['name']: - result['dnsimple_records_info'] = domain_info(module, req) - module.exit_json(**result) - - # If we have the account only, return domains - else: - result['dnsimple_domain_info'] = account_info(module, req) - module.exit_json(**result) - else: - module.fail_json(msg="Need at least account_id and api_key") - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/net_tools/ldap/ldap_search.py 
b/plugins/modules/net_tools/ldap/ldap_search.py deleted file mode 100644 index d3378646ac..0000000000 --- a/plugins/modules/net_tools/ldap/ldap_search.py +++ /dev/null @@ -1,183 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Copyright: (c) 2016, Peter Sagerson -# Copyright: (c) 2020, Sebastian Pfahl -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - -DOCUMENTATION = r""" ---- -module: ldap_search -version_added: '0.2.0' -short_description: Search for entries in a LDAP server -description: - - Return the results of an LDAP search. -notes: - - The default authentication settings will attempt to use a SASL EXTERNAL - bind over a UNIX domain socket. This works well with the default Ubuntu - install for example, which includes a C(cn=peercred,cn=external,cn=auth) ACL - rule allowing root to modify the server configuration. If you need to use - a simple bind to access your server, pass the credentials in I(bind_dn) - and I(bind_pw). -author: - - Sebastian Pfahl (@eryx12o45) -requirements: - - python-ldap -options: - dn: - required: true - type: str - description: - - The LDAP DN to search in. - scope: - choices: [base, onelevel, subordinate, children] - default: base - type: str - description: - - The LDAP scope to use. - filter: - default: '(objectClass=*)' - type: str - description: - - Used for filtering the LDAP search result. - attrs: - type: list - elements: str - description: - - A list of attributes for limiting the result. Use an - actual list or a comma-separated string. - schema: - default: false - type: bool - description: - - Set to C(true) to return the full attribute schema of entries, not - their attribute values. Overrides I(attrs) when provided. -extends_documentation_fragment: - - community.general.ldap.documentation -""" - -EXAMPLES = r""" -- name: Return all entries within the 'groups' organizational unit. 
- community.general.ldap_search: - dn: "ou=groups,dc=example,dc=com" - register: ldap_groups - -- name: Return GIDs for all groups - community.general.ldap_search: - dn: "ou=groups,dc=example,dc=com" - scope: "onelevel" - attrs: - - "gidNumber" - register: ldap_group_gids -""" - -import traceback - -from ansible.module_utils.basic import AnsibleModule, missing_required_lib -from ansible.module_utils.common.text.converters import to_native -from ansible_collections.community.general.plugins.module_utils.ldap import LdapGeneric, gen_specs - -LDAP_IMP_ERR = None -try: - import ldap - - HAS_LDAP = True -except ImportError: - LDAP_IMP_ERR = traceback.format_exc() - HAS_LDAP = False - - -def main(): - module = AnsibleModule( - argument_spec=gen_specs( - dn=dict(type='str', required=True), - scope=dict(type='str', default='base', choices=['base', 'onelevel', 'subordinate', 'children']), - filter=dict(type='str', default='(objectClass=*)'), - attrs=dict(type='list', elements='str'), - schema=dict(type='bool', default=False), - ), - supports_check_mode=True, - ) - - if not HAS_LDAP: - module.fail_json(msg=missing_required_lib('python-ldap'), - exception=LDAP_IMP_ERR) - - try: - LdapSearch(module).main() - except Exception as exception: - module.fail_json(msg="Attribute action failed.", details=to_native(exception)) - - module.exit_json(changed=False) - - -def _extract_entry(dn, attrs): - extracted = {'dn': dn} - for attr, val in list(attrs.items()): - if len(val) == 1: - extracted[attr] = val[0] - else: - extracted[attr] = val - return extracted - - -class LdapSearch(LdapGeneric): - def __init__(self, module): - LdapGeneric.__init__(self, module) - - self.dn = self.module.params['dn'] - self.filterstr = self.module.params['filter'] - self.attrlist = [] - self._load_scope() - self._load_attrs() - self._load_schema() - - def _load_schema(self): - self.schema = self.module.boolean(self.module.params['schema']) - if self.schema: - self.attrsonly = 1 - else: - self.attrsonly = 0 
- - def _load_scope(self): - spec = dict( - base=ldap.SCOPE_BASE, - onelevel=ldap.SCOPE_ONELEVEL, - subordinate=ldap.SCOPE_SUBORDINATE, - children=ldap.SCOPE_SUBTREE, - ) - self.scope = spec[self.module.params['scope']] - - def _load_attrs(self): - self.attrlist = self.module.params['attrs'] or None - - def main(self): - results = self.perform_search() - self.module.exit_json(changed=False, results=results) - - def perform_search(self): - try: - results = self.connection.search_s( - self.dn, - self.scope, - filterstr=self.filterstr, - attrlist=self.attrlist, - attrsonly=self.attrsonly - ) - ldap_entries = [] - for result in results: - if isinstance(result[1], dict): - if self.schema: - ldap_entries.append(dict(dn=result[0], attrs=list(result[1].keys()))) - else: - ldap_entries.append(_extract_entry(result[0], result[1])) - return ldap_entries - except ldap.NO_SUCH_OBJECT: - self.module.fail_json(msg="Base not found: {0}".format(self.dn)) - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/net_tools/lldp.py b/plugins/modules/net_tools/lldp.py deleted file mode 100644 index 1b8fa9eb06..0000000000 --- a/plugins/modules/net_tools/lldp.py +++ /dev/null @@ -1,80 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# -# Copyright: Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' ---- -module: lldp -requirements: [ lldpctl ] -short_description: get details reported by lldp -description: - - Reads data out of lldpctl -options: {} -author: "Andy Hill (@andyhky)" -notes: - - Requires lldpd running and lldp enabled on switches -''' - -EXAMPLES = ''' -# Retrieve switch/port information - - name: Gather information from lldp - community.general.lldp: - - - name: Print each switch/port - ansible.builtin.debug: - msg: "{{ lldp[item]['chassis']['name'] }} / {{ lldp[item]['port']['ifname'] 
}}" - with_items: "{{ lldp.keys() }}" - -# TASK: [Print each switch/port] *********************************************************** -# ok: [10.13.0.22] => (item=eth2) => {"item": "eth2", "msg": "switch1.example.com / Gi0/24"} -# ok: [10.13.0.22] => (item=eth1) => {"item": "eth1", "msg": "switch2.example.com / Gi0/3"} -# ok: [10.13.0.22] => (item=eth0) => {"item": "eth0", "msg": "switch3.example.com / Gi0/3"} - -''' - -from ansible.module_utils.basic import AnsibleModule - - -def gather_lldp(module): - cmd = [module.get_bin_path('lldpctl'), '-f', 'keyvalue'] - rc, output, err = module.run_command(cmd) - if output: - output_dict = {} - current_dict = {} - lldp_entries = output.split("\n") - - for entry in lldp_entries: - if entry.startswith('lldp'): - path, value = entry.strip().split("=", 1) - path = path.split(".") - path_components, final = path[:-1], path[-1] - else: - value = current_dict[final] + '\n' + entry - - current_dict = output_dict - for path_component in path_components: - current_dict[path_component] = current_dict.get(path_component, {}) - current_dict = current_dict[path_component] - current_dict[final] = value - return output_dict - - -def main(): - module = AnsibleModule({}) - - lldp_output = gather_lldp(module) - try: - data = {'lldp': lldp_output['lldp']} - module.exit_json(ansible_facts=data) - except TypeError: - module.fail_json(msg="lldpctl command failed. 
is lldpd running?") - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/net_tools/nmcli.py b/plugins/modules/net_tools/nmcli.py deleted file mode 100644 index 3c59b7efea..0000000000 --- a/plugins/modules/net_tools/nmcli.py +++ /dev/null @@ -1,2257 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Copyright: (c) 2015, Chris Long -# Copyright: (c) 2017, Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - -DOCUMENTATION = r''' ---- -module: nmcli -author: -- Chris Long (@alcamie101) -short_description: Manage Networking -requirements: -- nmcli -description: - - 'Manage the network devices. Create, modify and manage various connection and device type e.g., ethernet, teams, bonds, vlans etc.' - - 'On CentOS 8 and Fedora >=29 like systems, the requirements can be met by installing the following packages: NetworkManager.' - - 'On CentOS 7 and Fedora <=28 like systems, the requirements can be met by installing the following packages: NetworkManager-tui.' - - 'On Ubuntu and Debian like systems, the requirements can be met by installing the following packages: network-manager' - - 'On openSUSE, the requirements can be met by installing the following packages: NetworkManager.' -options: - state: - description: - - Whether the device should exist or not, taking action if the state is different from what is stated. - type: str - required: true - choices: [ absent, present ] - autoconnect: - description: - - Whether the connection should start on boot. - - Whether the connection profile can be automatically activated - type: bool - default: yes - conn_name: - description: - - The name used to call the connection. Pattern is [-][-]. - type: str - required: true - ifname: - description: - - The interface to bind the connection to. - - The connection will only be applicable to this interface name. 
- - A special value of C('*') can be used for interface-independent connections. - - The ifname argument is mandatory for all connection types except bond, team, bridge and vlan. - - This parameter defaults to C(conn_name) when left unset. - type: str - type: - description: - - This is the type of device or network connection that you wish to create or modify. - - Type C(dummy) is added in community.general 3.5.0. - - Type C(generic) is added in Ansible 2.5. - - Type C(infiniband) is added in community.general 2.0.0. - - Type C(gsm) is added in community.general 3.7.0. - - Type C(wireguard) is added in community.general 4.3.0 - type: str - choices: [ bond, bond-slave, bridge, bridge-slave, dummy, ethernet, generic, gre, infiniband, ipip, sit, team, team-slave, vlan, vxlan, wifi, gsm, - wireguard ] - mode: - description: - - This is the type of device or network connection that you wish to create for a bond or bridge. - type: str - choices: [ 802.3ad, active-backup, balance-alb, balance-rr, balance-tlb, balance-xor, broadcast ] - default: balance-rr - master: - description: - - Master ] STP forwarding delay, in seconds. - type: int - default: 15 - hellotime: - description: - - This is only used with bridge - [hello-time <1-10>] STP hello time, in seconds. - type: int - default: 2 - maxage: - description: - - This is only used with bridge - [max-age <6-42>] STP maximum message age, in seconds. - type: int - default: 20 - ageingtime: - description: - - This is only used with bridge - [ageing-time <0-1000000>] the Ethernet MAC address aging time, in seconds. - type: int - default: 300 - mac: - description: - - MAC address of the connection. - - Note this requires a recent kernel feature, originally introduced in 3.15 upstream kernel. - type: str - slavepriority: - description: - - This is only used with 'bridge-slave' - [<0-63>] - STP priority of this slave. 
- type: int - default: 32 - path_cost: - description: - - This is only used with 'bridge-slave' - [<1-65535>] - STP port cost for destinations via this slave. - type: int - default: 100 - hairpin: - description: - - This is only used with 'bridge-slave' - 'hairpin mode' for the slave, which allows frames to be sent back out through the slave the - frame was received on. - - The default value is C(true), but that is being deprecated - and it will be changed to C(false) in community.general 7.0.0. - type: bool - runner: - description: - - This is the type of device or network connection that you wish to create for a team. - type: str - choices: [ broadcast, roundrobin, activebackup, loadbalance, lacp ] - default: roundrobin - version_added: 3.4.0 - runner_hwaddr_policy: - description: - - This defines the policy of how hardware addresses of team device and port devices - should be set during the team lifetime. - type: str - choices: [ same_all, by_active, only_active ] - version_added: 3.4.0 - vlanid: - description: - - This is only used with VLAN - VLAN ID in range <0-4095>. - type: int - vlandev: - description: - - This is only used with VLAN - parent device this VLAN is on, can use ifname. - type: str - flags: - description: - - This is only used with VLAN - flags. - type: str - ingress: - description: - - This is only used with VLAN - VLAN ingress priority mapping. - type: str - egress: - description: - - This is only used with VLAN - VLAN egress priority mapping. - type: str - vxlan_id: - description: - - This is only used with VXLAN - VXLAN ID. - type: int - vxlan_remote: - description: - - This is only used with VXLAN - VXLAN destination IP address. - type: str - vxlan_local: - description: - - This is only used with VXLAN - VXLAN local IP address. - type: str - ip_tunnel_dev: - description: - - This is used with GRE/IPIP/SIT - parent device this GRE/IPIP/SIT tunnel, can use ifname. 
- type: str - ip_tunnel_remote: - description: - - This is used with GRE/IPIP/SIT - GRE/IPIP/SIT destination IP address. - type: str - ip_tunnel_local: - description: - - This is used with GRE/IPIP/SIT - GRE/IPIP/SIT local IP address. - type: str - ip_tunnel_input_key: - description: - - The key used for tunnel input packets. - - Only used when I(type=gre). - type: str - version_added: 3.6.0 - ip_tunnel_output_key: - description: - - The key used for tunnel output packets. - - Only used when I(type=gre). - type: str - version_added: 3.6.0 - zone: - description: - - The trust level of the connection. - - When updating this property on a currently activated connection, the change takes effect immediately. - type: str - version_added: 2.0.0 - wifi_sec: - description: - - The security configuration of the WiFi connection. - - Note the list of suboption attributes may vary depending on which version of NetworkManager/nmcli is installed on the host. - - 'An up-to-date list of supported attributes can be found here: - U(https://networkmanager.dev/docs/api/latest/settings-802-11-wireless-security.html).' - - 'For instance to use common WPA-PSK auth with a password: - C({key-mgmt: wpa-psk, psk: my_password}).' - type: dict - suboptions: - auth-alg: - description: - - When WEP is used (that is, if I(key-mgmt) = C(none) or C(ieee8021x)) indicate the 802.11 authentication algorithm required by the AP here. - - One of C(open) for Open System, C(shared) for Shared Key, or C(leap) for Cisco LEAP. - - When using Cisco LEAP (that is, if I(key-mgmt=ieee8021x) and I(auth-alg=leap)) the I(leap-username) and I(leap-password) properties - must be specified. - type: str - choices: [ open, shared, leap ] - fils: - description: - - Indicates whether Fast Initial Link Setup (802.11ai) must be enabled for the connection. 
- - One of C(0) (use global default value), C(1) (disable FILS), C(2) (enable FILS if the supplicant and the access point support it) or C(3) - (enable FILS and fail if not supported). - - When set to C(0) and no global default is set, FILS will be optionally enabled. - type: int - choices: [ 0, 1, 2, 3 ] - default: 0 - group: - description: - - A list of group/broadcast encryption algorithms which prevents connections to Wi-Fi networks that do not utilize one of the algorithms in - the list. - - For maximum compatibility leave this property empty. - type: list - elements: str - choices: [ wep40, wep104, tkip, ccmp ] - key-mgmt: - description: - - Key management used for the connection. - - One of C(none) (WEP or no password protection), C(ieee8021x) (Dynamic WEP), C(owe) (Opportunistic Wireless Encryption), C(wpa-psk) (WPA2 - + WPA3 personal), C(sae) (WPA3 personal only), C(wpa-eap) (WPA2 + WPA3 enterprise) or C(wpa-eap-suite-b-192) (WPA3 enterprise only). - - This property must be set for any Wi-Fi connection that uses security. - type: str - choices: [ none, ieee8021x, owe, wpa-psk, sae, wpa-eap, wpa-eap-suite-b-192 ] - leap-password-flags: - description: Flags indicating how to handle the I(leap-password) property. - type: list - elements: int - leap-password: - description: The login password for legacy LEAP connections (that is, if I(key-mgmt=ieee8021x) and I(auth-alg=leap)). - type: str - leap-username: - description: The login username for legacy LEAP connections (that is, if I(key-mgmt=ieee8021x) and I(auth-alg=leap)). - type: str - pairwise: - description: - - A list of pairwise encryption algorithms which prevents connections to Wi-Fi networks that do not utilize one of the algorithms in the - list. - - For maximum compatibility leave this property empty. - type: list - elements: str - choices: [ tkip, ccmp ] - pmf: - description: - - Indicates whether Protected Management Frames (802.11w) must be enabled for the connection. 
- - One of C(0) (use global default value), C(1) (disable PMF), C(2) (enable PMF if the supplicant and the access point support it) or C(3) - (enable PMF and fail if not supported). - - When set to C(0) and no global default is set, PMF will be optionally enabled. - type: int - choices: [ 0, 1, 2, 3 ] - default: 0 - proto: - description: - - List of strings specifying the allowed WPA protocol versions to use. - - Each element may be C(wpa) (allow WPA) or C(rsn) (allow WPA2/RSN). - - If not specified, both WPA and RSN connections are allowed. - type: list - elements: str - choices: [ wpa, rsn ] - psk-flags: - description: Flags indicating how to handle the I(psk) property. - type: list - elements: int - psk: - description: - - Pre-Shared-Key for WPA networks. - - For WPA-PSK, it is either an ASCII passphrase of 8 to 63 characters that is (as specified in the 802.11i standard) hashed to derive the - actual key, or the key in form of 64 hexadecimal character. - - The WPA3-Personal networks use a passphrase of any length for SAE authentication. - type: str - wep-key-flags: - description: Flags indicating how to handle the I(wep-key0), I(wep-key1), I(wep-key2), and I(wep-key3) properties. - type: list - elements: int - wep-key-type: - description: - - Controls the interpretation of WEP keys. - - Allowed values are C(1), in which case the key is either a 10- or 26-character hexadecimal string, or a 5- or 13-character ASCII - password; or C(2), in which case the passphrase is provided as a string and will be hashed using the de-facto MD5 method to derive the - actual WEP key. - type: int - choices: [ 1, 2 ] - wep-key0: - description: - - Index 0 WEP key. This is the WEP key used in most networks. - - See the I(wep-key-type) property for a description of how this key is interpreted. - type: str - wep-key1: - description: - - Index 1 WEP key. This WEP index is not used by most networks. - - See the I(wep-key-type) property for a description of how this key is interpreted. 
- type: str - wep-key2: - description: - - Index 2 WEP key. This WEP index is not used by most networks. - - See the I(wep-key-type) property for a description of how this key is interpreted. - type: str - wep-key3: - description: - - Index 3 WEP key. This WEP index is not used by most networks. - - See the I(wep-key-type) property for a description of how this key is interpreted. - type: str - wep-tx-keyidx: - description: - - When static WEP is used (that is, if I(key-mgmt=none)) and a non-default WEP key index is used by the AP, put that WEP key index here. - - Valid values are C(0) (default key) through C(3). - - Note that some consumer access points (like the Linksys WRT54G) number the keys C(1) - C(4). - type: int - choices: [ 0, 1, 2, 3 ] - default: 0 - wps-method: - description: - - Flags indicating which mode of WPS is to be used if any. - - There is little point in changing the default setting as NetworkManager will automatically determine whether it is feasible to start WPS - enrollment from the Access Point capabilities. - - WPS can be disabled by setting this property to a value of C(1). - type: int - default: 0 - version_added: 3.0.0 - ssid: - description: - - Name of the Wireless router or the access point. - type: str - version_added: 3.0.0 - wifi: - description: - - The configuration of the WiFi connection. - - Note the list of suboption attributes may vary depending on which version of NetworkManager/nmcli is installed on the host. - - 'An up-to-date list of supported attributes can be found here: - U(https://networkmanager.dev/docs/api/latest/settings-802-11-wireless.html).' - - 'For instance to create a hidden AP mode WiFi connection: - C({hidden: true, mode: ap}).' - type: dict - suboptions: - ap-isolation: - description: - - Configures AP isolation, which prevents communication between wireless devices connected to this AP. - - This property can be set to a value different from C(-1) only when the interface is configured in AP mode. 
- - If set to C(1), devices are not able to communicate with each other. This increases security because it protects devices against attacks - from other clients in the network. At the same time, it prevents devices to access resources on the same wireless networks as file - shares, printers, etc. - - If set to C(0), devices can talk to each other. - - When set to C(-1), the global default is used; in case the global default is unspecified it is assumed to be C(0). - type: int - choices: [ -1, 0, 1 ] - default: -1 - assigned-mac-address: - description: - - The new field for the cloned MAC address. - - It can be either a hardware address in ASCII representation, or one of the special values C(preserve), C(permanent), C(random) or - C(stable). - - This field replaces the deprecated I(cloned-mac-address) on D-Bus, which can only contain explicit hardware addresses. - - Note that this property only exists in D-Bus API. libnm and nmcli continue to call this property I(cloned-mac-address). - type: str - band: - description: - - 802.11 frequency band of the network. - - One of C(a) for 5GHz 802.11a or C(bg) for 2.4GHz 802.11. - - This will lock associations to the Wi-Fi network to the specific band, so for example, if C(a) is specified, the device will not - associate with the same network in the 2.4GHz band even if the network's settings are compatible. - - This setting depends on specific driver capability and may not work with all drivers. - type: str - choices: [ a, bg ] - bssid: - description: - - If specified, directs the device to only associate with the given access point. - - This capability is highly driver dependent and not supported by all devices. - - Note this property does not control the BSSID used when creating an Ad-Hoc network and is unlikely to in the future. - type: str - channel: - description: - - Wireless channel to use for the Wi-Fi connection. - - The device will only join (or create for Ad-Hoc networks) a Wi-Fi network on the specified channel. 
- - Because channel numbers overlap between bands, this property also requires the I(band) property to be set. - type: int - default: 0 - cloned-mac-address: - description: - - This D-Bus field is deprecated in favor of I(assigned-mac-address) which is more flexible and allows specifying special variants like - C(random). - - For libnm and nmcli, this field is called I(cloned-mac-address). - type: str - generate-mac-address-mask: - description: - - With I(cloned-mac-address) setting C(random) or C(stable), by default all bits of the MAC address are scrambled and a - locally-administered, unicast MAC address is created. This property allows to specify that certain bits are fixed. - - Note that the least significant bit of the first MAC address will always be unset to create a unicast MAC address. - - If the property is C(null), it is eligible to be overwritten by a default connection setting. - - If the value is still c(null) or an empty string, the default is to create a locally-administered, unicast MAC address. - - If the value contains one MAC address, this address is used as mask. The set bits of the mask are to be filled with the current MAC - address of the device, while the unset bits are subject to randomization. - - Setting C(FE:FF:FF:00:00:00) means to preserve the OUI of the current MAC address and only randomize the lower 3 bytes using the - C(random) or C(stable) algorithm. - - If the value contains one additional MAC address after the mask, this address is used instead of the current MAC address to fill the bits - that shall not be randomized. - - For example, a value of C(FE:FF:FF:00:00:00 68:F7:28:00:00:00) will set the OUI of the MAC address to 68:F7:28, while the lower bits are - randomized. - - A value of C(02:00:00:00:00:00 00:00:00:00:00:00) will create a fully scrambled globally-administered, burned-in MAC address. - - If the value contains more than one additional MAC addresses, one of them is chosen randomly. 
For example, - C(02:00:00:00:00:00 00:00:00:00:00:00 02:00:00:00:00:00) will create a fully scrambled MAC address, randomly locally or globally - administered. - type: str - hidden: - description: - - If C(true), indicates that the network is a non-broadcasting network that hides its SSID. This works both in infrastructure and AP mode. - - In infrastructure mode, various workarounds are used for a more reliable discovery of hidden networks, such as probe-scanning the SSID. - However, these workarounds expose inherent insecurities with hidden SSID networks, and thus hidden SSID networks should be used with - caution. - - In AP mode, the created network does not broadcast its SSID. - - Note that marking the network as hidden may be a privacy issue for you (in infrastructure mode) or client stations (in AP mode), as the - explicit probe-scans are distinctly recognizable on the air. - type: bool - default: false - mac-address-blacklist: - description: - - A list of permanent MAC addresses of Wi-Fi devices to which this connection should never apply. - - Each MAC address should be given in the standard hex-digits-and-colons notation (for example, C(00:11:22:33:44:55)). - type: list - elements: str - mac-address-randomization: - description: - - One of C(0) (never randomize unless the user has set a global default to randomize and the supplicant supports randomization), C(1) - (never randomize the MAC address), or C(2) (always randomize the MAC address). - - This property is deprecated for I(cloned-mac-address). - type: int - default: 0 - choices: [ 0, 1, 2 ] - mac-address: - description: - - If specified, this connection will only apply to the Wi-Fi device whose permanent MAC address matches. - - This property does not change the MAC address of the device (for example for MAC spoofing). - type: str - mode: - description: Wi-Fi network mode. If blank, C(infrastructure) is assumed. 
- type: str - choices: [ infrastructure, mesh, adhoc, ap ] - default: infrastructure - mtu: - description: If non-zero, only transmit packets of the specified size or smaller, breaking larger packets up into multiple Ethernet frames. - type: int - default: 0 - powersave: - description: - - One of C(2) (disable Wi-Fi power saving), C(3) (enable Wi-Fi power saving), C(1) (don't touch currently configure setting) or C(0) (use - the globally configured value). - - All other values are reserved. - type: int - default: 0 - choices: [ 0, 1, 2, 3 ] - rate: - description: - - If non-zero, directs the device to only use the specified bitrate for communication with the access point. - - Units are in Kb/s, so for example C(5500) = 5.5 Mbit/s. - - This property is highly driver dependent and not all devices support setting a static bitrate. - type: int - default: 0 - tx-power: - description: - - If non-zero, directs the device to use the specified transmit power. - - Units are dBm. - - This property is highly driver dependent and not all devices support setting a static transmit power. - type: int - default: 0 - wake-on-wlan: - description: - - The NMSettingWirelessWakeOnWLan options to enable. Not all devices support all options. - - May be any combination of C(NM_SETTING_WIRELESS_WAKE_ON_WLAN_ANY) (C(0x2)), C(NM_SETTING_WIRELESS_WAKE_ON_WLAN_DISCONNECT) (C(0x4)), - C(NM_SETTING_WIRELESS_WAKE_ON_WLAN_MAGIC) (C(0x8)), C(NM_SETTING_WIRELESS_WAKE_ON_WLAN_GTK_REKEY_FAILURE) (C(0x10)), - C(NM_SETTING_WIRELESS_WAKE_ON_WLAN_EAP_IDENTITY_REQUEST) (C(0x20)), C(NM_SETTING_WIRELESS_WAKE_ON_WLAN_4WAY_HANDSHAKE) (C(0x40)), - C(NM_SETTING_WIRELESS_WAKE_ON_WLAN_RFKILL_RELEASE) (C(0x80)), C(NM_SETTING_WIRELESS_WAKE_ON_WLAN_TCP) (C(0x100)) or the special values - C(0x1) (to use global settings) and C(0x8000) (to disable management of Wake-on-LAN in NetworkManager). - - Note the option values' sum must be specified in order to combine multiple options. 
- type: int - default: 1 - version_added: 3.5.0 - ignore_unsupported_suboptions: - description: - - Ignore suboptions which are invalid or unsupported by the version of NetworkManager/nmcli installed on the host. - - Only I(wifi) and I(wifi_sec) options are currently affected. - type: bool - default: false - version_added: 3.6.0 - gsm: - description: - - The configuration of the GSM connection. - - Note the list of suboption attributes may vary depending on which version of NetworkManager/nmcli is installed on the host. - - 'An up-to-date list of supported attributes can be found here: - U(https://networkmanager.dev/docs/api/latest/settings-gsm.html).' - - 'For instance to use apn, pin, username and password: - C({apn: provider.apn, pin: 1234, username: apn.username, password: apn.password}).' - type: dict - version_added: 3.7.0 - suboptions: - apn: - description: - - The GPRS Access Point Name specifying the APN used when establishing a data session with the GSM-based network. - - The APN often determines how the user will be billed for their network usage and whether the user has access to the Internet or - just a provider-specific walled-garden, so it is important to use the correct APN for the user's mobile broadband plan. - - The APN may only be composed of the characters a-z, 0-9, ., and - per GSM 03.60 Section 14.9. - type: str - auto-config: - description: When C(true), the settings such as I(gsm.apn), I(gsm.username), or I(gsm.password) will default to values that match the network - the modem will register to in the Mobile Broadband Provider database. - type: bool - default: false - device-id: - description: - - The device unique identifier (as given by the C(WWAN) management service) which this connection applies to. - - If given, the connection will only apply to the specified device. - type: str - home-only: - description: - - When C(true), only connections to the home network will be allowed. - - Connections to roaming networks will not be made. 
- type: bool - default: false - mtu: - description: If non-zero, only transmit packets of the specified size or smaller, breaking larger packets up into multiple Ethernet frames. - type: int - default: 0 - network-id: - description: - - The Network ID (GSM LAI format, ie MCC-MNC) to force specific network registration. - - If the Network ID is specified, NetworkManager will attempt to force the device to register only on the specified network. - - This can be used to ensure that the device does not roam when direct roaming control of the device is not otherwise possible. - type: str - number: - description: Legacy setting that used to help establishing PPP data sessions for GSM-based modems. - type: str - password: - description: - - The password used to authenticate with the network, if required. - - Many providers do not require a password, or accept any password. - - But if a password is required, it is specified here. - type: str - password-flags: - description: - - NMSettingSecretFlags indicating how to handle the I(password) property. - - 'Following choices are allowed: - C(0) B(NONE): The system is responsible for providing and storing this secret (default), - C(1) B(AGENT_OWNED): A user secret agent is responsible for providing and storing this secret; when it is required agents will be - asked to retrieve it - C(2) B(NOT_SAVED): This secret should not be saved, but should be requested from the user each time it is needed - C(4) B(NOT_REQUIRED): In situations where it cannot be automatically determined that the secret is required - (some VPNs and PPP providers do not require all secrets) this flag indicates that the specific secret is not required.' - type: int - choices: [ 0, 1, 2 , 4 ] - default: 0 - pin: - description: - - If the SIM is locked with a PIN it must be unlocked before any other operations are requested. - - Specify the PIN here to allow operation of the device. 
- type: str - pin-flags: - description: - - NMSettingSecretFlags indicating how to handle the I(gsm.pin) property. - - See I(gsm.password-flags) for NMSettingSecretFlags choices. - type: int - choices: [ 0, 1, 2 , 4 ] - default: 0 - sim-id: - description: - - The SIM card unique identifier (as given by the C(WWAN) management service) which this connection applies to. - - 'If given, the connection will apply to any device also allowed by I(gsm.device-id) which contains a SIM card matching - the given identifier.' - type: str - sim-operator-id: - description: - - A MCC/MNC string like C(310260) or C(21601I) identifying the specific mobile network operator which this connection applies to. - - 'If given, the connection will apply to any device also allowed by I(gsm.device-id) and I(gsm.sim-id) which contains a SIM card - provisioned by the given operator.' - type: str - username: - description: - - The username used to authenticate with the network, if required. - - Many providers do not require a username, or accept any username. - - But if a username is required, it is specified here. - wireguard: - description: - - The configuration of the Wireguard connection. - - Note the list of suboption attributes may vary depending on which version of NetworkManager/nmcli is installed on the host. - - 'An up-to-date list of supported attributes can be found here: - U(https://networkmanager.dev/docs/api/latest/settings-wireguard.html).' - - 'For instance to configure a listen port: - C({listen-port: 12345}).' - type: dict - version_added: 4.3.0 - suboptions: - fwmark: - description: - - The 32-bit fwmark for outgoing packets. - - The use of fwmark is optional and is by default off. Setting it to 0 disables it. - - Note that I(wireguard.ip4-auto-default-route) or I(wireguard.ip6-auto-default-route) enabled, implies to automatically choose a fwmark. - type: int - ip4-auto-default-route: - description: - - Whether to enable special handling of the IPv4 default route. 
- - If enabled, the IPv4 default route from I(wireguard.peer-routes) will be placed to a dedicated routing-table and two policy - routing rules will be added. - - The fwmark number is also used as routing-table for the default-route, and if fwmark is zero, an unused fwmark/table is chosen - automatically. This corresponds to what wg-quick does with Table=auto and what WireGuard calls "Improved Rule-based Routing" - type: bool - ip6-auto-default-route: - description: - - Like I(wireguard.ip4-auto-default-route), but for the IPv6 default route. - type: bool - listen-port: - description: The WireGuard connection listen-port. If not specified, the port will be chosen randomly when the - interface comes up. - type: int - mtu: - description: - - If non-zero, only transmit packets of the specified size or smaller, breaking larger packets up into multiple fragments. - - If zero a default MTU is used. Note that contrary to wg-quick's MTU setting, this does not take into account the current routes - at the time of activation. - type: int - peer-routes: - description: - - Whether to automatically add routes for the AllowedIPs ranges of the peers. - - If C(true) (the default), NetworkManager will automatically add routes in the routing tables according to C(ipv4.route-table) and - C(ipv6.route-table). Usually you want this automatism enabled. - - If C(false), no such routes are added automatically. In this case, the user may want to configure static routes in C(ipv4.routes) - and C(ipv6.routes), respectively. - - Note that if the peer's AllowedIPs is C(0.0.0.0/0) or C(::/0) and the profile's C(ipv4.never-default) or C(ipv6.never-default) - setting is enabled, the peer route for this peer won't be added automatically. - type: bool - private-key: - description: The 256 bit private-key in base64 encoding. - type: str - private-key-flags: - description: C(NMSettingSecretFlags) indicating how to handle the I(wireguard.private-key) property. 
- type: int - choices: [ 0, 1, 2 ] -''' - -EXAMPLES = r''' -# These examples are using the following inventory: -# -# ## Directory layout: -# -# |_/inventory/cloud-hosts -# | /group_vars/openstack-stage.yml -# | /host_vars/controller-01.openstack.host.com -# | /host_vars/controller-02.openstack.host.com -# |_/playbook/library/nmcli.py -# | /playbook-add.yml -# | /playbook-del.yml -# ``` -# -# ## inventory examples -# ### groups_vars -# ```yml -# --- -# #devops_os_define_network -# storage_gw: "192.0.2.254" -# external_gw: "198.51.100.254" -# tenant_gw: "203.0.113.254" -# -# #Team vars -# nmcli_team: -# - conn_name: tenant -# ip4: '{{ tenant_ip }}' -# gw4: '{{ tenant_gw }}' -# - conn_name: external -# ip4: '{{ external_ip }}' -# gw4: '{{ external_gw }}' -# - conn_name: storage -# ip4: '{{ storage_ip }}' -# gw4: '{{ storage_gw }}' -# nmcli_team_slave: -# - conn_name: em1 -# ifname: em1 -# master: tenant -# - conn_name: em2 -# ifname: em2 -# master: tenant -# - conn_name: p2p1 -# ifname: p2p1 -# master: storage -# - conn_name: p2p2 -# ifname: p2p2 -# master: external -# -# #bond vars -# nmcli_bond: -# - conn_name: tenant -# ip4: '{{ tenant_ip }}' -# gw4: '' -# mode: balance-rr -# - conn_name: external -# ip4: '{{ external_ip }}' -# gw4: '' -# mode: balance-rr -# - conn_name: storage -# ip4: '{{ storage_ip }}' -# gw4: '{{ storage_gw }}' -# mode: balance-rr -# nmcli_bond_slave: -# - conn_name: em1 -# ifname: em1 -# master: tenant -# - conn_name: em2 -# ifname: em2 -# master: tenant -# - conn_name: p2p1 -# ifname: p2p1 -# master: storage -# - conn_name: p2p2 -# ifname: p2p2 -# master: external -# -# #ethernet vars -# nmcli_ethernet: -# - conn_name: em1 -# ifname: em1 -# ip4: -# - '{{ tenant_ip }}' -# - '{{ second_tenant_ip }}' -# gw4: '{{ tenant_gw }}' -# - conn_name: em2 -# ifname: em2 -# ip4: '{{ tenant_ip1 }}' -# gw4: '{{ tenant_gw }}' -# - conn_name: p2p1 -# ifname: p2p1 -# ip4: '{{ storage_ip }}' -# gw4: '{{ storage_gw }}' -# - conn_name: p2p2 -# ifname: p2p2 -# 
ip4: '{{ external_ip }}' -# gw4: '{{ external_gw }}' -# ``` -# -# ### host_vars -# ```yml -# --- -# storage_ip: "192.0.2.91/23" -# external_ip: "198.51.100.23/21" -# tenant_ip: "203.0.113.77/23" -# second_tenant_ip: "204.0.113.77/23" -# ``` - - - -## playbook-add.yml example - ---- -- hosts: openstack-stage - remote_user: root - tasks: - - - name: Install needed network manager libs - ansible.builtin.package: - name: - - NetworkManager-libnm - - nm-connection-editor - - libsemanage-python - - policycoreutils-python - state: present - -##### Working with all cloud nodes - Teaming - - name: Try nmcli add team - conn_name only & ip4 gw4 - community.general.nmcli: - type: team - conn_name: '{{ item.conn_name }}' - ip4: '{{ item.ip4 }}' - gw4: '{{ item.gw4 }}' - state: present - with_items: - - '{{ nmcli_team }}' - - - name: Try nmcli add teams-slave - community.general.nmcli: - type: team-slave - conn_name: '{{ item.conn_name }}' - ifname: '{{ item.ifname }}' - master: '{{ item.master }}' - state: present - with_items: - - '{{ nmcli_team_slave }}' - -###### Working with all cloud nodes - Bonding - - name: Try nmcli add bond - conn_name only & ip4 gw4 mode - community.general.nmcli: - type: bond - conn_name: '{{ item.conn_name }}' - ip4: '{{ item.ip4 }}' - gw4: '{{ item.gw4 }}' - mode: '{{ item.mode }}' - state: present - with_items: - - '{{ nmcli_bond }}' - - - name: Try nmcli add bond-slave - community.general.nmcli: - type: bond-slave - conn_name: '{{ item.conn_name }}' - ifname: '{{ item.ifname }}' - master: '{{ item.master }}' - state: present - with_items: - - '{{ nmcli_bond_slave }}' - -##### Working with all cloud nodes - Ethernet - - name: Try nmcli add Ethernet - conn_name only & ip4 gw4 - community.general.nmcli: - type: ethernet - conn_name: '{{ item.conn_name }}' - ip4: '{{ item.ip4 }}' - gw4: '{{ item.gw4 }}' - state: present - with_items: - - '{{ nmcli_ethernet }}' - -## playbook-del.yml example -- hosts: openstack-stage - remote_user: root - tasks: - - - 
name: Try nmcli del team - multiple - community.general.nmcli: - conn_name: '{{ item.conn_name }}' - state: absent - with_items: - - conn_name: em1 - - conn_name: em2 - - conn_name: p1p1 - - conn_name: p1p2 - - conn_name: p2p1 - - conn_name: p2p2 - - conn_name: tenant - - conn_name: storage - - conn_name: external - - conn_name: team-em1 - - conn_name: team-em2 - - conn_name: team-p1p1 - - conn_name: team-p1p2 - - conn_name: team-p2p1 - - conn_name: team-p2p2 - - - name: Add an Ethernet connection with static IP configuration - community.general.nmcli: - conn_name: my-eth1 - ifname: eth1 - type: ethernet - ip4: 192.0.2.100/24 - gw4: 192.0.2.1 - state: present - - - name: Add an Team connection with static IP configuration - community.general.nmcli: - conn_name: my-team1 - ifname: my-team1 - type: team - ip4: 192.0.2.100/24 - gw4: 192.0.2.1 - state: present - autoconnect: yes - - - name: Optionally, at the same time specify IPv6 addresses for the device - community.general.nmcli: - conn_name: my-eth1 - ifname: eth1 - type: ethernet - ip4: 192.0.2.100/24 - gw4: 192.0.2.1 - ip6: 2001:db8::cafe - gw6: 2001:db8::1 - state: present - - - name: Add two IPv4 DNS server addresses - community.general.nmcli: - conn_name: my-eth1 - type: ethernet - dns4: - - 192.0.2.53 - - 198.51.100.53 - state: present - - - name: Make a profile usable for all compatible Ethernet interfaces - community.general.nmcli: - ctype: ethernet - name: my-eth1 - ifname: '*' - state: present - - - name: Change the property of a setting e.g. 
MTU - community.general.nmcli: - conn_name: my-eth1 - mtu: 9000 - type: ethernet - state: present - - - name: Add second ip4 address - community.general.nmcli: - conn_name: my-eth1 - ifname: eth1 - type: ethernet - ip4: - - 192.0.2.100/24 - - 192.0.3.100/24 - state: present - - - name: Add second ip6 address - community.general.nmcli: - conn_name: my-eth1 - ifname: eth1 - type: ethernet - ip6: - - 2001:db8::cafe - - 2002:db8::cafe - state: present - - - name: Add VxLan - community.general.nmcli: - type: vxlan - conn_name: vxlan_test1 - vxlan_id: 16 - vxlan_local: 192.168.1.2 - vxlan_remote: 192.168.1.5 - - - name: Add gre - community.general.nmcli: - type: gre - conn_name: gre_test1 - ip_tunnel_dev: eth0 - ip_tunnel_local: 192.168.1.2 - ip_tunnel_remote: 192.168.1.5 - - - name: Add ipip - community.general.nmcli: - type: ipip - conn_name: ipip_test1 - ip_tunnel_dev: eth0 - ip_tunnel_local: 192.168.1.2 - ip_tunnel_remote: 192.168.1.5 - - - name: Add sit - community.general.nmcli: - type: sit - conn_name: sit_test1 - ip_tunnel_dev: eth0 - ip_tunnel_local: 192.168.1.2 - ip_tunnel_remote: 192.168.1.5 - - - name: Add zone - community.general.nmcli: - type: ethernet - conn_name: my-eth1 - zone: external - state: present - -# nmcli exits with status 0 if it succeeds and exits with a status greater -# than zero when there is a failure. The following list of status codes may be -# returned: -# -# - 0 Success - indicates the operation succeeded -# - 1 Unknown or unspecified error -# - 2 Invalid user input, wrong nmcli invocation -# - 3 Timeout expired (see --wait option) -# - 4 Connection activation failed -# - 5 Connection deactivation failed -# - 6 Disconnecting device failed -# - 7 Connection deletion failed -# - 8 NetworkManager is not running -# - 9 nmcli and NetworkManager versions mismatch -# - 10 Connection, device, or access point does not exist. 
- -- name: Create the wifi connection - community.general.nmcli: - type: wifi - conn_name: Brittany - ifname: wlp4s0 - ssid: Brittany - wifi_sec: - key-mgmt: wpa-psk - psk: my_password - autoconnect: true - state: present - -- name: Create a hidden AP mode wifi connection - community.general.nmcli: - type: wifi - conn_name: ChocoMaster - ifname: wlo1 - ssid: ChocoMaster - wifi: - hidden: true - mode: ap - autoconnect: true - state: present - -- name: Create a gsm connection - community.general.nmcli: - type: gsm - conn_name: my-gsm-provider - ifname: cdc-wdm0 - gsm: - apn: my.provider.apn - username: my-provider-username - password: my-provider-password - pin: my-sim-pin - autoconnect: true - state: present - -- name: Create a wireguard connection - community.general.nmcli: - type: wireguard - conn_name: my-wg-provider - ifname: mywg0 - wireguard: - listen-port: 51820 - private-key: my-private-key - autoconnect: true - state: present - -''' - -RETURN = r"""# -""" - -from ansible.module_utils.basic import AnsibleModule -from ansible.module_utils.common.text.converters import to_text -import re - - -class NmcliModuleError(Exception): - pass - - -class Nmcli(object): - """ - This is the generic nmcli manipulation class that is subclassed based on platform. - A subclass may wish to override the following action methods:- - - create_connection() - - delete_connection() - - edit_connection() - - modify_connection() - - show_connection() - - up_connection() - - down_connection() - All subclasses MUST define platform and distribution (which may be None). 
- """ - - platform = 'Generic' - distribution = None - - SECRET_OPTIONS = ( - '802-11-wireless-security.leap-password', - '802-11-wireless-security.psk', - '802-11-wireless-security.wep-key0', - '802-11-wireless-security.wep-key1', - '802-11-wireless-security.wep-key2', - '802-11-wireless-security.wep-key3' - ) - - def __init__(self, module): - self.module = module - self.state = module.params['state'] - self.ignore_unsupported_suboptions = module.params['ignore_unsupported_suboptions'] - self.autoconnect = module.params['autoconnect'] - self.conn_name = module.params['conn_name'] - self.master = module.params['master'] - self.ifname = module.params['ifname'] - self.type = module.params['type'] - self.ip4 = module.params['ip4'] - self.gw4 = module.params['gw4'] - self.gw4_ignore_auto = module.params['gw4_ignore_auto'] - self.routes4 = module.params['routes4'] - self.routes4_extended = module.params['routes4_extended'] - self.route_metric4 = module.params['route_metric4'] - self.routing_rules4 = module.params['routing_rules4'] - self.never_default4 = module.params['never_default4'] - self.dns4 = module.params['dns4'] - self.dns4_search = module.params['dns4_search'] - self.dns4_ignore_auto = module.params['dns4_ignore_auto'] - self.method4 = module.params['method4'] - self.may_fail4 = module.params['may_fail4'] - self.ip6 = module.params['ip6'] - self.gw6 = module.params['gw6'] - self.gw6_ignore_auto = module.params['gw6_ignore_auto'] - self.routes6 = module.params['routes6'] - self.routes6_extended = module.params['routes6_extended'] - self.route_metric6 = module.params['route_metric6'] - self.dns6 = module.params['dns6'] - self.dns6_search = module.params['dns6_search'] - self.dns6_ignore_auto = module.params['dns6_ignore_auto'] - self.method6 = module.params['method6'] - self.ip_privacy6 = module.params['ip_privacy6'] - self.addr_gen_mode6 = module.params['addr_gen_mode6'] - self.mtu = module.params['mtu'] - self.stp = module.params['stp'] - self.priority = 
module.params['priority'] - self.mode = module.params['mode'] - self.miimon = module.params['miimon'] - self.primary = module.params['primary'] - self.downdelay = module.params['downdelay'] - self.updelay = module.params['updelay'] - self.arp_interval = module.params['arp_interval'] - self.arp_ip_target = module.params['arp_ip_target'] - self.slavepriority = module.params['slavepriority'] - self.forwarddelay = module.params['forwarddelay'] - self.hellotime = module.params['hellotime'] - self.maxage = module.params['maxage'] - self.ageingtime = module.params['ageingtime'] - # hairpin should be back to normal in 7.0.0 - self._hairpin = module.params['hairpin'] - self.path_cost = module.params['path_cost'] - self.mac = module.params['mac'] - self.runner = module.params['runner'] - self.runner_hwaddr_policy = module.params['runner_hwaddr_policy'] - self.vlanid = module.params['vlanid'] - self.vlandev = module.params['vlandev'] - self.flags = module.params['flags'] - self.ingress = module.params['ingress'] - self.egress = module.params['egress'] - self.vxlan_id = module.params['vxlan_id'] - self.vxlan_local = module.params['vxlan_local'] - self.vxlan_remote = module.params['vxlan_remote'] - self.ip_tunnel_dev = module.params['ip_tunnel_dev'] - self.ip_tunnel_local = module.params['ip_tunnel_local'] - self.ip_tunnel_remote = module.params['ip_tunnel_remote'] - self.ip_tunnel_input_key = module.params['ip_tunnel_input_key'] - self.ip_tunnel_output_key = module.params['ip_tunnel_output_key'] - self.nmcli_bin = self.module.get_bin_path('nmcli', True) - self.dhcp_client_id = module.params['dhcp_client_id'] - self.zone = module.params['zone'] - self.ssid = module.params['ssid'] - self.wifi = module.params['wifi'] - self.wifi_sec = module.params['wifi_sec'] - self.gsm = module.params['gsm'] - self.wireguard = module.params['wireguard'] - - if self.method4: - self.ipv4_method = self.method4 - elif self.type in ('dummy', 'wireguard') and not self.ip4: - self.ipv4_method = 
'disabled' - elif self.ip4: - self.ipv4_method = 'manual' - else: - self.ipv4_method = None - - if self.method6: - self.ipv6_method = self.method6 - elif self.type in ('dummy', 'wireguard') and not self.ip6: - self.ipv6_method = 'disabled' - elif self.ip6: - self.ipv6_method = 'manual' - else: - self.ipv6_method = None - - self.edit_commands = [] - - @property - def hairpin(self): - if self._hairpin is None: - self.module.deprecate( - "Parameter 'hairpin' default value will change from true to false in community.general 7.0.0. " - "Set the value explicitly to supress this warning.", - version='7.0.0', collection_name='community.general', - ) - # Should be False in 7.0.0 but then that should be in argument_specs - self._hairpin = True - return self._hairpin - - def execute_command(self, cmd, use_unsafe_shell=False, data=None): - if isinstance(cmd, list): - cmd = [to_text(item) for item in cmd] - else: - cmd = to_text(cmd) - return self.module.run_command(cmd, use_unsafe_shell=use_unsafe_shell, data=data) - - def execute_edit_commands(self, commands, arguments): - arguments = arguments or [] - cmd = [self.nmcli_bin, 'con', 'edit'] + arguments - data = "\n".join(commands) - return self.execute_command(cmd, data=data) - - def connection_options(self, detect_change=False): - # Options common to multiple connection types. - options = { - 'connection.autoconnect': self.autoconnect, - 'connection.zone': self.zone, - } - - # IP address options. 
- if self.ip_conn_type and not self.master: - options.update({ - 'ipv4.addresses': self.enforce_ipv4_cidr_notation(self.ip4), - 'ipv4.dhcp-client-id': self.dhcp_client_id, - 'ipv4.dns': self.dns4, - 'ipv4.dns-search': self.dns4_search, - 'ipv4.ignore-auto-dns': self.dns4_ignore_auto, - 'ipv4.gateway': self.gw4, - 'ipv4.ignore-auto-routes': self.gw4_ignore_auto, - 'ipv4.routes': self.enforce_routes_format(self.routes4, self.routes4_extended), - 'ipv4.route-metric': self.route_metric4, - 'ipv4.routing-rules': self.routing_rules4, - 'ipv4.never-default': self.never_default4, - 'ipv4.method': self.ipv4_method, - 'ipv4.may-fail': self.may_fail4, - 'ipv6.addresses': self.enforce_ipv6_cidr_notation(self.ip6), - 'ipv6.dns': self.dns6, - 'ipv6.dns-search': self.dns6_search, - 'ipv6.ignore-auto-dns': self.dns6_ignore_auto, - 'ipv6.gateway': self.gw6, - 'ipv6.ignore-auto-routes': self.gw6_ignore_auto, - 'ipv6.routes': self.enforce_routes_format(self.routes6, self.routes6_extended), - 'ipv6.route-metric': self.route_metric6, - 'ipv6.method': self.ipv6_method, - 'ipv6.ip6-privacy': self.ip_privacy6, - 'ipv6.addr-gen-mode': self.addr_gen_mode6 - }) - - # Layer 2 options. - if self.mac: - options.update({self.mac_setting: self.mac}) - - if self.mtu_conn_type: - options.update({self.mtu_setting: self.mtu}) - - # Connections that can have a master. - if self.slave_conn_type: - options.update({ - 'connection.master': self.master, - }) - - # Options specific to a connection type. 
- if self.type == 'bond': - options.update({ - 'arp-interval': self.arp_interval, - 'arp-ip-target': self.arp_ip_target, - 'downdelay': self.downdelay, - 'miimon': self.miimon, - 'mode': self.mode, - 'primary': self.primary, - 'updelay': self.updelay, - }) - elif self.type == 'bond-slave': - options.update({ - 'connection.slave-type': 'bond', - }) - elif self.type == 'bridge': - options.update({ - 'bridge.ageing-time': self.ageingtime, - 'bridge.forward-delay': self.forwarddelay, - 'bridge.hello-time': self.hellotime, - 'bridge.max-age': self.maxage, - 'bridge.priority': self.priority, - 'bridge.stp': self.stp, - }) - elif self.type == 'team': - options.update({ - 'team.runner': self.runner, - 'team.runner-hwaddr-policy': self.runner_hwaddr_policy, - }) - elif self.type == 'bridge-slave': - options.update({ - 'connection.slave-type': 'bridge', - 'bridge-port.path-cost': self.path_cost, - 'bridge-port.hairpin-mode': self.hairpin, - 'bridge-port.priority': self.slavepriority, - }) - elif self.type == 'team-slave': - options.update({ - 'connection.slave-type': 'team', - }) - elif self.tunnel_conn_type: - options.update({ - 'ip-tunnel.local': self.ip_tunnel_local, - 'ip-tunnel.mode': self.type, - 'ip-tunnel.parent': self.ip_tunnel_dev, - 'ip-tunnel.remote': self.ip_tunnel_remote, - }) - if self.type == 'gre': - options.update({ - 'ip-tunnel.input-key': self.ip_tunnel_input_key, - 'ip-tunnel.output-key': self.ip_tunnel_output_key - }) - elif self.type == 'vlan': - options.update({ - 'vlan.id': self.vlanid, - 'vlan.parent': self.vlandev, - 'vlan.flags': self.flags, - 'vlan.ingress': self.ingress, - 'vlan.egress': self.egress, - }) - elif self.type == 'vxlan': - options.update({ - 'vxlan.id': self.vxlan_id, - 'vxlan.local': self.vxlan_local, - 'vxlan.remote': self.vxlan_remote, - }) - elif self.type == 'wifi': - options.update({ - '802-11-wireless.ssid': self.ssid, - 'connection.slave-type': 'bond' if self.master else None, - }) - if self.wifi: - for name, value in 
self.wifi.items(): - options.update({ - '802-11-wireless.%s' % name: value - }) - if self.wifi_sec: - for name, value in self.wifi_sec.items(): - options.update({ - '802-11-wireless-security.%s' % name: value - }) - elif self.type == 'gsm': - if self.gsm: - for name, value in self.gsm.items(): - options.update({ - 'gsm.%s' % name: value, - }) - elif self.type == 'wireguard': - if self.wireguard: - for name, value in self.wireguard.items(): - options.update({ - 'wireguard.%s' % name: value, - }) - # Convert settings values based on the situation. - for setting, value in options.items(): - setting_type = self.settings_type(setting) - convert_func = None - if setting_type is bool: - # Convert all bool options to yes/no. - convert_func = self.bool_to_string - if detect_change: - if setting in ('vlan.id', 'vxlan.id'): - # Convert VLAN/VXLAN IDs to text when detecting changes. - convert_func = to_text - elif setting == self.mtu_setting: - # MTU is 'auto' by default when detecting changes. - convert_func = self.mtu_to_string - elif setting == 'ipv6.ip6-privacy': - convert_func = self.ip6_privacy_to_num - elif setting_type is list: - # Convert lists to strings for nmcli create/modify commands. 
- convert_func = self.list_to_string - - if callable(convert_func): - options[setting] = convert_func(options[setting]) - - return options - - @property - def ip_conn_type(self): - return self.type in ( - 'bond', - 'bridge', - 'dummy', - 'ethernet', - '802-3-ethernet', - 'generic', - 'gre', - 'infiniband', - 'ipip', - 'sit', - 'team', - 'vlan', - 'wifi', - '802-11-wireless', - 'gsm', - 'wireguard', - ) - - @property - def mac_setting(self): - if self.type == 'bridge': - return 'bridge.mac-address' - else: - return '802-3-ethernet.cloned-mac-address' - - @property - def mtu_conn_type(self): - return self.type in ( - 'dummy', - 'ethernet', - 'team-slave', - ) - - @property - def mtu_setting(self): - return '802-3-ethernet.mtu' - - @staticmethod - def mtu_to_string(mtu): - if not mtu: - return 'auto' - else: - return to_text(mtu) - - @staticmethod - def ip6_privacy_to_num(privacy): - ip6_privacy_values = { - 'disabled': '0', - 'prefer-public-addr': '1 (enabled, prefer public IP)', - 'prefer-temp-addr': '2 (enabled, prefer temporary IP)', - 'unknown': '-1', - } - - if privacy is None: - return None - - if privacy not in ip6_privacy_values: - raise AssertionError('{privacy} is invalid ip_privacy6 option'.format(privacy=privacy)) - - return ip6_privacy_values[privacy] - - @property - def slave_conn_type(self): - return self.type in ( - 'bond-slave', - 'bridge-slave', - 'team-slave', - 'wifi', - ) - - @property - def tunnel_conn_type(self): - return self.type in ( - 'gre', - 'ipip', - 'sit', - ) - - @staticmethod - def enforce_ipv4_cidr_notation(ip4_addresses): - if ip4_addresses is None: - return None - return [address if '/' in address else address + '/32' for address in ip4_addresses] - - @staticmethod - def enforce_ipv6_cidr_notation(ip6_addresses): - if ip6_addresses is None: - return None - return [address if '/' in address else address + '/128' for address in ip6_addresses] - - def enforce_routes_format(self, routes, routes_extended): - if routes is not None: - 
return routes - elif routes_extended is not None: - return [self.route_to_string(route) for route in routes_extended] - else: - return None - - @staticmethod - def route_to_string(route): - result_str = '' - result_str += route['ip'] - if route.get('next_hop') is not None: - result_str += ' ' + route['next_hop'] - if route.get('metric') is not None: - result_str += ' ' + str(route['metric']) - - for attribute, value in sorted(route.items()): - if attribute not in ('ip', 'next_hop', 'metric') and value is not None: - result_str += ' {0}={1}'.format(attribute, str(value).lower()) - - return result_str - - @staticmethod - def bool_to_string(boolean): - if boolean: - return "yes" - else: - return "no" - - @staticmethod - def list_to_string(lst): - return ",".join(lst or [""]) - - @staticmethod - def settings_type(setting): - if setting in ('bridge.stp', - 'bridge-port.hairpin-mode', - 'connection.autoconnect', - 'ipv4.never-default', - 'ipv4.ignore-auto-dns', - 'ipv4.ignore-auto-routes', - 'ipv4.may-fail', - 'ipv6.ignore-auto-dns', - 'ipv6.ignore-auto-routes', - '802-11-wireless.hidden'): - return bool - elif setting in ('ipv4.addresses', - 'ipv6.addresses', - 'ipv4.dns', - 'ipv4.dns-search', - 'ipv4.routes', - 'ipv4.routing-rules', - 'ipv6.dns', - 'ipv6.dns-search', - 'ipv6.routes', - '802-11-wireless-security.group', - '802-11-wireless-security.leap-password-flags', - '802-11-wireless-security.pairwise', - '802-11-wireless-security.proto', - '802-11-wireless-security.psk-flags', - '802-11-wireless-security.wep-key-flags', - '802-11-wireless.mac-address-blacklist'): - return list - return str - - def get_route_params(self, raw_values): - routes_params = [] - for raw_value in raw_values: - route_params = {} - for parameter, value in re.findall(r'([\w-]*)\s?=\s?([^\s,}]*)', raw_value): - if parameter == 'nh': - route_params['next_hop'] = value - elif parameter == 'mt': - route_params['metric'] = value - else: - route_params[parameter] = value - 
routes_params.append(route_params) - return [self.route_to_string(route_params) for route_params in routes_params] - - def list_connection_info(self): - cmd = [self.nmcli_bin, '--fields', 'name', '--terse', 'con', 'show'] - (rc, out, err) = self.execute_command(cmd) - if rc != 0: - raise NmcliModuleError(err) - return out.splitlines() - - def connection_exists(self): - return self.conn_name in self.list_connection_info() - - def down_connection(self): - cmd = [self.nmcli_bin, 'con', 'down', self.conn_name] - return self.execute_command(cmd) - - def up_connection(self): - cmd = [self.nmcli_bin, 'con', 'up', self.conn_name] - return self.execute_command(cmd) - - def connection_update(self, nmcli_command): - if nmcli_command == 'create': - cmd = [self.nmcli_bin, 'con', 'add', 'type'] - if self.tunnel_conn_type: - cmd.append('ip-tunnel') - else: - cmd.append(self.type) - cmd.append('con-name') - elif nmcli_command == 'modify': - cmd = [self.nmcli_bin, 'con', 'modify'] - else: - self.module.fail_json(msg="Invalid nmcli command.") - cmd.append(self.conn_name) - - # Use connection name as default for interface name on creation. - if nmcli_command == 'create' and self.ifname is None: - ifname = self.conn_name - else: - ifname = self.ifname - - options = { - 'connection.interface-name': ifname, - } - - options.update(self.connection_options()) - - # Constructing the command. 
- for key, value in options.items(): - if value is not None: - if key in self.SECRET_OPTIONS: - self.edit_commands += ['set %s %s' % (key, value)] - continue - cmd.extend([key, value]) - - return self.execute_command(cmd) - - def create_connection(self): - status = self.connection_update('create') - if status[0] == 0 and self.edit_commands: - status = self.edit_connection() - if self.create_connection_up: - status = self.up_connection() - return status - - @property - def create_connection_up(self): - if self.type in ('bond', 'dummy', 'ethernet', 'infiniband', 'wifi'): - if (self.mtu is not None) or (self.dns4 is not None) or (self.dns6 is not None): - return True - elif self.type == 'team': - if (self.dns4 is not None) or (self.dns6 is not None): - return True - return False - - def remove_connection(self): - # self.down_connection() - cmd = [self.nmcli_bin, 'con', 'del', self.conn_name] - return self.execute_command(cmd) - - def modify_connection(self): - status = self.connection_update('modify') - if status[0] == 0 and self.edit_commands: - status = self.edit_connection() - return status - - def edit_connection(self): - commands = self.edit_commands + ['save', 'quit'] - return self.execute_edit_commands(commands, arguments=[self.conn_name]) - - def show_connection(self): - cmd = [self.nmcli_bin, '--show-secrets', 'con', 'show', self.conn_name] - - (rc, out, err) = self.execute_command(cmd) - - if rc != 0: - raise NmcliModuleError(err) - - p_enum_value = re.compile(r'^([-]?\d+) \((\w+)\)$') - - conn_info = dict() - for line in out.splitlines(): - pair = line.split(':', 1) - key = pair[0].strip() - key_type = self.settings_type(key) - if key and len(pair) > 1: - raw_value = pair[1].lstrip() - if raw_value == '--': - conn_info[key] = None - elif key == 'bond.options': - # Aliases such as 'miimon', 'downdelay' are equivalent to the +bond.options 'option=value' syntax. 
- opts = raw_value.split(',') - for opt in opts: - alias_pair = opt.split('=', 1) - if len(alias_pair) > 1: - alias_key = alias_pair[0] - alias_value = alias_pair[1] - conn_info[alias_key] = alias_value - elif key in ('ipv4.routes', 'ipv6.routes'): - conn_info[key] = [s.strip() for s in raw_value.split(';')] - elif key_type == list: - conn_info[key] = [s.strip() for s in raw_value.split(',')] - else: - m_enum = p_enum_value.match(raw_value) - if m_enum is not None: - value = m_enum.group(1) - else: - value = raw_value - conn_info[key] = value - - return conn_info - - def get_supported_properties(self, setting): - properties = [] - - if setting == '802-11-wireless-security': - set_property = 'psk' - set_value = 'FAKEVALUE' - commands = ['set %s.%s %s' % (setting, set_property, set_value)] - else: - commands = [] - - commands += ['print %s' % setting, 'quit', 'yes'] - - (rc, out, err) = self.execute_edit_commands(commands, arguments=['type', self.type]) - - if rc != 0: - raise NmcliModuleError(err) - - for line in out.splitlines(): - prefix = '%s.' 
% setting - if (line.startswith(prefix)): - pair = line.split(':', 1) - property = pair[0].strip().replace(prefix, '') - properties.append(property) - - return properties - - def check_for_unsupported_properties(self, setting): - if setting == '802-11-wireless': - setting_key = 'wifi' - elif setting == '802-11-wireless-security': - setting_key = 'wifi_sec' - else: - setting_key = setting - - supported_properties = self.get_supported_properties(setting) - unsupported_properties = [] - - for property, value in getattr(self, setting_key).items(): - if property not in supported_properties: - unsupported_properties.append(property) - - if unsupported_properties: - msg_options = [] - for property in unsupported_properties: - msg_options.append('%s.%s' % (setting_key, property)) - - msg = 'Invalid or unsupported option(s): "%s"' % '", "'.join(msg_options) - if self.ignore_unsupported_suboptions: - self.module.warn(msg) - else: - self.module.fail_json(msg=msg) - - return unsupported_properties - - def _compare_conn_params(self, conn_info, options): - changed = False - diff_before = dict() - diff_after = dict() - - for key, value in options.items(): - if not value: - continue - - if key in conn_info: - current_value = conn_info[key] - if key in ('ipv4.routes', 'ipv6.routes') and current_value is not None: - current_value = self.get_route_params(current_value) - if key == self.mac_setting: - # MAC addresses are case insensitive, nmcli always reports them in uppercase - value = value.upper() - # ensure current_value is also converted to uppercase in case nmcli changes behaviour - current_value = current_value.upper() - if key == 'gsm.apn': - # Depending on version nmcli adds double-qoutes to gsm.apn - # Need to strip them in order to compare both - current_value = current_value.strip('"') - if key == self.mtu_setting and self.mtu is None: - self.mtu = 0 - else: - # parameter does not exist - current_value = None - - if isinstance(current_value, list) and isinstance(value, 
list): - # compare values between two lists - if sorted(current_value) != sorted(value): - changed = True - elif all([key == self.mtu_setting, self.type == 'dummy', current_value is None, value == 'auto', self.mtu is None]): - value = None - else: - if current_value != to_text(value): - changed = True - - diff_before[key] = current_value - diff_after[key] = value - - diff = { - 'before': diff_before, - 'after': diff_after, - } - return (changed, diff) - - def is_connection_changed(self): - options = { - 'connection.interface-name': self.ifname, - } - - if not self.type: - current_con_type = self.show_connection().get('connection.type') - if current_con_type: - self.type = current_con_type - - options.update(self.connection_options(detect_change=True)) - return self._compare_conn_params(self.show_connection(), options) - - -def main(): - # Parsing argument file - module = AnsibleModule( - argument_spec=dict( - ignore_unsupported_suboptions=dict(type='bool', default=False), - autoconnect=dict(type='bool', default=True), - state=dict(type='str', required=True, choices=['absent', 'present']), - conn_name=dict(type='str', required=True), - master=dict(type='str'), - ifname=dict(type='str'), - type=dict(type='str', - choices=[ - 'bond', - 'bond-slave', - 'bridge', - 'bridge-slave', - 'dummy', - 'ethernet', - 'generic', - 'gre', - 'infiniband', - 'ipip', - 'sit', - 'team', - 'team-slave', - 'vlan', - 'vxlan', - 'wifi', - 'gsm', - 'wireguard', - ]), - ip4=dict(type='list', elements='str'), - gw4=dict(type='str'), - gw4_ignore_auto=dict(type='bool', default=False), - routes4=dict(type='list', elements='str'), - routes4_extended=dict(type='list', - elements='dict', - options=dict( - ip=dict(type='str', required=True), - next_hop=dict(type='str'), - metric=dict(type='int'), - table=dict(type='int'), - tos=dict(type='int'), - cwnd=dict(type='int'), - mtu=dict(type='int'), - onlink=dict(type='bool') - )), - route_metric4=dict(type='int'), - routing_rules4=dict(type='list', 
elements='str'), - never_default4=dict(type='bool', default=False), - dns4=dict(type='list', elements='str'), - dns4_search=dict(type='list', elements='str'), - dns4_ignore_auto=dict(type='bool', default=False), - method4=dict(type='str', choices=['auto', 'link-local', 'manual', 'shared', 'disabled']), - may_fail4=dict(type='bool', default=True), - dhcp_client_id=dict(type='str'), - ip6=dict(type='list', elements='str'), - gw6=dict(type='str'), - gw6_ignore_auto=dict(type='bool', default=False), - dns6=dict(type='list', elements='str'), - dns6_search=dict(type='list', elements='str'), - dns6_ignore_auto=dict(type='bool', default=False), - routes6=dict(type='list', elements='str'), - routes6_extended=dict(type='list', - elements='dict', - options=dict( - ip=dict(type='str', required=True), - next_hop=dict(type='str'), - metric=dict(type='int'), - table=dict(type='int'), - cwnd=dict(type='int'), - mtu=dict(type='int'), - onlink=dict(type='bool') - )), - route_metric6=dict(type='int'), - method6=dict(type='str', choices=['ignore', 'auto', 'dhcp', 'link-local', 'manual', 'shared', 'disabled']), - ip_privacy6=dict(type='str', choices=['disabled', 'prefer-public-addr', 'prefer-temp-addr', 'unknown']), - addr_gen_mode6=dict(type='str', choices=['eui64', 'stable-privacy']), - # Bond Specific vars - mode=dict(type='str', default='balance-rr', - choices=['802.3ad', 'active-backup', 'balance-alb', 'balance-rr', 'balance-tlb', 'balance-xor', 'broadcast']), - miimon=dict(type='int'), - downdelay=dict(type='int'), - updelay=dict(type='int'), - arp_interval=dict(type='int'), - arp_ip_target=dict(type='str'), - primary=dict(type='str'), - # general usage - mtu=dict(type='int'), - mac=dict(type='str'), - zone=dict(type='str'), - # bridge specific vars - stp=dict(type='bool', default=True), - priority=dict(type='int', default=128), - slavepriority=dict(type='int', default=32), - forwarddelay=dict(type='int', default=15), - hellotime=dict(type='int', default=2), - 
maxage=dict(type='int', default=20), - ageingtime=dict(type='int', default=300), - hairpin=dict(type='bool'), - path_cost=dict(type='int', default=100), - # team specific vars - runner=dict(type='str', default='roundrobin', - choices=['broadcast', 'roundrobin', 'activebackup', 'loadbalance', 'lacp']), - # team active-backup runner specific options - runner_hwaddr_policy=dict(type='str', choices=['same_all', 'by_active', 'only_active']), - # vlan specific vars - vlanid=dict(type='int'), - vlandev=dict(type='str'), - flags=dict(type='str'), - ingress=dict(type='str'), - egress=dict(type='str'), - # vxlan specific vars - vxlan_id=dict(type='int'), - vxlan_local=dict(type='str'), - vxlan_remote=dict(type='str'), - # ip-tunnel specific vars - ip_tunnel_dev=dict(type='str'), - ip_tunnel_local=dict(type='str'), - ip_tunnel_remote=dict(type='str'), - # ip-tunnel type gre specific vars - ip_tunnel_input_key=dict(type='str', no_log=True), - ip_tunnel_output_key=dict(type='str', no_log=True), - # 802-11-wireless* specific vars - ssid=dict(type='str'), - wifi=dict(type='dict'), - wifi_sec=dict(type='dict', no_log=True), - gsm=dict(type='dict'), - wireguard=dict(type='dict'), - ), - mutually_exclusive=[['never_default4', 'gw4'], - ['routes4_extended', 'routes4'], - ['routes6_extended', 'routes6']], - required_if=[("type", "wifi", [("ssid")])], - supports_check_mode=True, - ) - module.run_command_environ_update = dict(LANG='C', LC_ALL='C', LC_MESSAGES='C', LC_CTYPE='C') - - nmcli = Nmcli(module) - - (rc, out, err) = (None, '', '') - result = {'conn_name': nmcli.conn_name, 'state': nmcli.state} - - # check for issues - if nmcli.conn_name is None: - nmcli.module.fail_json(msg="Please specify a name for the connection") - # team checks - if nmcli.type == "team": - if nmcli.runner_hwaddr_policy and not nmcli.runner == "activebackup": - nmcli.module.fail_json(msg="Runner-hwaddr-policy is only allowed for runner activebackup") - # team-slave checks - if nmcli.type == 'team-slave': - 
if nmcli.master is None: - nmcli.module.fail_json(msg="Please specify a name for the master when type is %s" % nmcli.type) - if nmcli.ifname is None: - nmcli.module.fail_json(msg="Please specify an interface name for the connection when type is %s" % nmcli.type) - if nmcli.type == 'wifi': - unsupported_properties = {} - if nmcli.wifi: - if 'ssid' in nmcli.wifi: - module.warn("Ignoring option 'wifi.ssid', it must be specified with option 'ssid'") - del nmcli.wifi['ssid'] - unsupported_properties['wifi'] = nmcli.check_for_unsupported_properties('802-11-wireless') - if nmcli.wifi_sec: - unsupported_properties['wifi_sec'] = nmcli.check_for_unsupported_properties('802-11-wireless-security') - if nmcli.ignore_unsupported_suboptions and unsupported_properties: - for setting_key, properties in unsupported_properties.items(): - for property in properties: - del getattr(nmcli, setting_key)[property] - - try: - if nmcli.state == 'absent': - if nmcli.connection_exists(): - if module.check_mode: - module.exit_json(changed=True) - (rc, out, err) = nmcli.down_connection() - (rc, out, err) = nmcli.remove_connection() - if rc != 0: - module.fail_json(name=('No Connection named %s exists' % nmcli.conn_name), msg=err, rc=rc) - - elif nmcli.state == 'present': - if nmcli.connection_exists(): - changed, diff = nmcli.is_connection_changed() - if module._diff: - result['diff'] = diff - - if changed: - # modify connection (note: this function is check mode aware) - # result['Connection']=('Connection %s of Type %s is not being added' % (nmcli.conn_name, nmcli.type)) - result['Exists'] = 'Connections do exist so we are modifying them' - if module.check_mode: - module.exit_json(changed=True, **result) - (rc, out, err) = nmcli.modify_connection() - else: - result['Exists'] = 'Connections already exist and no changes made' - if module.check_mode: - module.exit_json(changed=False, **result) - if not nmcli.connection_exists(): - result['Connection'] = ('Connection %s of Type %s is being added' 
% (nmcli.conn_name, nmcli.type)) - if module.check_mode: - module.exit_json(changed=True, **result) - (rc, out, err) = nmcli.create_connection() - if rc is not None and rc != 0: - module.fail_json(name=nmcli.conn_name, msg=err, rc=rc) - except NmcliModuleError as e: - module.fail_json(name=nmcli.conn_name, msg=str(e)) - - if rc is None: - result['changed'] = False - else: - result['changed'] = True - if out: - result['stdout'] = out - if err: - result['stderr'] = err - - module.exit_json(**result) - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/net_tools/netcup_dns.py b/plugins/modules/netcup_dns.py similarity index 59% rename from plugins/modules/net_tools/netcup_dns.py rename to plugins/modules/netcup_dns.py index 5ec5cbb246..52ec6c1915 100644 --- a/plugins/modules/net_tools/netcup_dns.py +++ b/plugins/modules/netcup_dns.py @@ -1,83 +1,96 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- -# (c) 2018 Nicolai Buchwitz -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# Copyright (c) 2018 Nicolai Buchwitz +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import absolute_import, division, print_function +from __future__ import annotations -__metaclass__ = type -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: netcup_dns notes: [] -short_description: manage Netcup DNS records +short_description: Manage Netcup DNS records description: - - "Manages DNS records via the Netcup API, see the docs U(https://ccp.netcup.net/run/webservice/servers/endpoint.php)" + - Manages DNS records using the Netcup API, see the docs U(https://ccp.netcup.net/run/webservice/servers/endpoint.php). 
+extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: none options: api_key: description: - - API key for authentication, must be obtained via the netcup CCP (U(https://ccp.netcup.net)) - required: True + - API key for authentication, must be obtained using the netcup CCP (U(https://ccp.netcup.net)). + required: true type: str api_password: description: - - API password for authentication, must be obtained via the netcup CCP (https://ccp.netcup.net) - required: True + - API password for authentication, must be obtained using the netcup CCP (U(https://ccp.netcup.net)). + required: true type: str customer_id: description: - - Netcup customer id - required: True + - Netcup customer ID. + required: true type: int domain: description: - - Domainname the records should be added / removed - required: True + - Domainname the records should be added / removed. + required: true type: str record: description: - - Record to add or delete, supports wildcard (*). Default is C(@) (e.g. the zone name) + - Record to add or delete, supports wildcard (V(*)). Default is V(@) (that is, the zone name). default: "@" - aliases: [ name ] + aliases: [name] type: str type: description: - - Record type - choices: ['A', 'AAAA', 'MX', 'CNAME', 'CAA', 'SRV', 'TXT', 'TLSA', 'NS', 'DS'] - required: True + - Record type. + - Support for V(OPENPGPKEY), V(SMIMEA) and V(SSHFP) was added in community.general 8.1.0. + - Record types V(OPENPGPKEY) and V(SMIMEA) require nc-dnsapi >= 0.1.5. + - Record type V(SSHFP) requires nc-dnsapi >= 0.1.6. + choices: ['A', 'AAAA', 'MX', 'CNAME', 'CAA', 'SRV', 'TXT', 'TLSA', 'NS', 'DS', 'OPENPGPKEY', 'SMIMEA', 'SSHFP'] + required: true type: str value: description: - - Record value + - Record value. required: true type: str solo: type: bool - default: False + default: false description: - - Whether the record should be the only one for that record type and record name. 
Only use with C(state=present) - - This will delete all other records with the same record name and type. + - Whether the record should be the only one for that record type and record name. Only use with O(state=present). + - This deletes all other records with the same record name and type. priority: description: - - Record priority. Required for C(type=MX) - required: False + - Record priority. Required for O(type=MX). + required: false type: int state: description: - - Whether the record should exist or not - required: False + - Whether the record should exist or not. + required: false default: present - choices: [ 'present', 'absent' ] + choices: ['present', 'absent'] type: str + timeout: + description: + - HTTP(S) connection timeout in seconds. + default: 5 + type: int + version_added: 5.7.0 requirements: - "nc-dnsapi >= 0.1.3" author: "Nicolai Buchwitz (@nbuchwitz)" +""" -''' - -EXAMPLES = ''' +EXAMPLES = r""" - name: Create a record of type A community.general.netcup_dns: api_key: "..." @@ -128,40 +141,52 @@ EXAMPLES = ''' type: "AAAA" value: "::1" solo: true -''' -RETURN = ''' +- name: Increase the connection timeout to avoid problems with an unstable connection + community.general.netcup_dns: + api_key: "..." + api_password: "..." + customer_id: "..." 
+ domain: "example.com" + name: "mail" + type: "A" + value: "127.0.0.1" + timeout: 30 +""" + +RETURN = r""" records: - description: list containing all records - returned: success - type: complex - contains: - name: - description: the record name - returned: success - type: str - sample: fancy-hostname - type: - description: the record type - returned: succcess - type: str - sample: A - value: - description: the record destination - returned: success - type: str - sample: 127.0.0.1 - priority: - description: the record priority (only relevant if type=MX) - returned: success - type: int - sample: 0 - id: - description: internal id of the record - returned: success - type: int - sample: 12345 -''' + description: List containing all records. + returned: success + type: list + elements: dict + contains: + name: + description: The record name. + returned: success + type: str + sample: fancy-hostname + type: + description: The record type. + returned: success + type: str + sample: A + value: + description: The record destination. + returned: success + type: str + sample: 127.0.0.1 + priority: + description: The record priority (only relevant if RV(records[].type=MX)). + returned: success + type: int + sample: 0 + id: + description: Internal ID of the record. 
+ returned: success + type: int + sample: 12345 +""" import traceback @@ -186,12 +211,15 @@ def main(): customer_id=dict(required=True, type='int'), domain=dict(required=True), - record=dict(required=False, default='@', aliases=['name']), - type=dict(required=True, choices=['A', 'AAAA', 'MX', 'CNAME', 'CAA', 'SRV', 'TXT', 'TLSA', 'NS', 'DS']), + record=dict(default='@', aliases=['name']), + type=dict(required=True, choices=['A', 'AAAA', 'MX', 'CNAME', 'CAA', 'SRV', 'TXT', + 'TLSA', 'NS', 'DS', 'OPENPGPKEY', 'SMIMEA', + 'SSHFP']), value=dict(required=True), - priority=dict(required=False, type='int'), - solo=dict(required=False, type='bool', default=False), - state=dict(required=False, choices=['present', 'absent'], default='present'), + priority=dict(type='int'), + solo=dict(type='bool', default=False), + state=dict(choices=['present', 'absent'], default='present'), + timeout=dict(type='int', default=5), ), supports_check_mode=True @@ -210,6 +238,7 @@ def main(): priority = module.params.get('priority') solo = module.params.get('solo') state = module.params.get('state') + timeout = module.params.get('timeout') if record_type == 'MX' and not priority: module.fail_json(msg="record type MX required the 'priority' argument") @@ -217,7 +246,7 @@ def main(): has_changed = False all_records = [] try: - with nc_dnsapi.Client(customer_id, api_key, api_password) as api: + with nc_dnsapi.Client(customer_id, api_key, api_password, timeout) as api: all_records = api.dns_records(domain) record = DNSRecord(record, record_type, value, priority=priority) diff --git a/plugins/modules/newrelic_deployment.py b/plugins/modules/newrelic_deployment.py new file mode 100644 index 0000000000..63495d9e7f --- /dev/null +++ b/plugins/modules/newrelic_deployment.py @@ -0,0 +1,185 @@ +#!/usr/bin/python + +# Copyright 2013 Matt Coddington +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later 
+ +from __future__ import annotations + + +DOCUMENTATION = r""" +module: newrelic_deployment +author: "Matt Coddington (@mcodd)" +short_description: Notify New Relic about app deployments +description: + - Notify New Relic about app deployments (see U(https://docs.newrelic.com/docs/apm/new-relic-apm/maintenance/record-monitor-deployments/)). +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: none +options: + token: + type: str + description: + - API token to place in the Api-Key header. + required: true + app_name: + type: str + description: + - The value of C(app_name) in the C(newrelic.yml) file used by the application. + - One of O(app_name) or O(application_id) is required. + required: false + application_id: + type: str + description: + - The application ID found in the metadata of the application in APM. + - One of O(app_name) or O(application_id) is required. + required: false + changelog: + type: str + description: + - A list of changes for this deployment. + required: false + description: + type: str + description: + - Text annotation for the deployment - notes for you. + required: false + revision: + type: str + description: + - A revision number (for example, git commit SHA). + required: true + user: + type: str + description: + - The name of the user/process that triggered this deployment. + required: false + validate_certs: + description: + - If V(false), SSL certificates are not validated. This should only be used on personally controlled sites using self-signed + certificates. + required: false + default: true + type: bool + app_name_exact_match: + type: bool + description: + - If this flag is set to V(true) then the application ID lookup by name would only work for an exact match. If set to + V(false) it returns the first result. 
+ required: false + default: false + version_added: 7.5.0 +requirements: [] +""" + +EXAMPLES = r""" +- name: Notify New Relic about an app deployment + community.general.newrelic_deployment: + token: AAAAAA + app_name: myapp + user: ansible deployment + revision: '1.0' +""" + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.urls import fetch_url +from urllib.parse import quote +import json + +# =========================================== +# Module execution. +# + + +def main(): + + module = AnsibleModule( + argument_spec=dict( + token=dict(required=True, no_log=True), + app_name=dict(), + application_id=dict(), + changelog=dict(), + description=dict(), + revision=dict(required=True), + user=dict(), + validate_certs=dict(default=True, type='bool'), + app_name_exact_match=dict(type='bool', default=False), + ), + required_one_of=[['app_name', 'application_id']], + required_if=[('app_name_exact_match', True, ['app_name'])], + supports_check_mode=True + ) + + # build list of params + params = {} + if module.params["app_name"] and module.params["application_id"]: + module.fail_json(msg="only one of 'app_name' or 'application_id' can be set") + app_id = None + if module.params["app_name"]: + app_id = get_application_id(module) + elif module.params["application_id"]: + app_id = module.params["application_id"] + else: + module.fail_json(msg="you must set one of 'app_name' or 'application_id'") + + if app_id is None: + module.fail_json(msg="No application with name %s is found in NewRelic" % module.params["app_name"]) + + for item in ["changelog", "description", "revision", "user"]: + if module.params[item]: + params[item] = module.params[item] + + # If we're in check mode, just exit pretending like we succeeded + if module.check_mode: + module.exit_json(changed=True) + + # Send the data to New Relic + url = "https://api.newrelic.com/v2/applications/%s/deployments.json" % quote(str(app_id), safe='') + data = { + 'deployment': params + } + 
headers = { + 'Api-Key': module.params["token"], + 'Content-Type': 'application/json', + } + response, info = fetch_url(module, url, data=module.jsonify(data), headers=headers, method="POST") + if info['status'] in (200, 201): + module.exit_json(changed=True) + else: + module.fail_json(msg="Unable to insert deployment marker: %s" % info['msg']) + + +def get_application_id(module): + url = "https://api.newrelic.com/v2/applications.json" + data = "filter[name]=%s" % module.params["app_name"] + application_id = None + headers = { + 'Api-Key': module.params["token"], + } + response, info = fetch_url(module, url, data=data, headers=headers) + if info['status'] not in (200, 201): + module.fail_json(msg="Unable to get application: %s" % info['msg']) + + result = json.loads(response.read()) + if result is None or len(result.get("applications", "")) == 0: + module.fail_json(msg='No application found with name "%s"' % module.params["app_name"]) + + if module.params["app_name_exact_match"]: + for item in result["applications"]: + if item["name"] == module.params["app_name"]: + application_id = item["id"] + break + if application_id is None: + module.fail_json(msg='No application found with exact name "%s"' % module.params["app_name"]) + else: + application_id = result["applications"][0]["id"] + + return application_id + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/notification/nexmo.py b/plugins/modules/nexmo.py similarity index 74% rename from plugins/modules/notification/nexmo.py rename to plugins/modules/nexmo.py index d239bb4456..ee65bdda57 100644 --- a/plugins/modules/notification/nexmo.py +++ b/plugins/modules/nexmo.py @@ -1,58 +1,62 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- -# (c) 2014, Matt Martz -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# Copyright (c) 2014, Matt Martz +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# 
SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = ''' +DOCUMENTATION = r""" module: nexmo -short_description: Send a SMS via nexmo +short_description: Send a SMS using nexmo description: - - Send a SMS message via nexmo + - Send a SMS message using nexmo. author: "Matt Martz (@sivel)" +attributes: + check_mode: + support: none + diff_mode: + support: none options: api_key: type: str description: - - Nexmo API Key + - Nexmo API Key. required: true api_secret: type: str description: - - Nexmo API Secret + - Nexmo API Secret. required: true src: type: int description: - - Nexmo Number to send from + - Nexmo Number to send from. required: true dest: type: list elements: int description: - - Phone number(s) to send SMS message to + - Phone number(s) to send SMS message to. required: true msg: type: str description: - - Message to text to send. Messages longer than 160 characters will be - split into multiple messages + - Message text to send. Messages longer than 160 characters are split into multiple messages. required: true validate_certs: description: - - If C(no), SSL certificates will not be validated. This should only be used - on personally controlled sites using self-signed certificates. + - If V(false), SSL certificates are not validated. This should only be used on personally controlled sites using self-signed + certificates. 
type: bool - default: 'yes' + default: true extends_documentation_fragment: - - url -''' + - ansible.builtin.url + - community.general.attributes +""" -EXAMPLES = """ +EXAMPLES = r""" - name: Send notification message via Nexmo community.general.nexmo: api_key: 640c8a53 @@ -65,8 +69,8 @@ EXAMPLES = """ delegate_to: localhost """ import json +from urllib.parse import urlencode -from ansible.module_utils.six.moves.urllib.parse import urlencode from ansible.module_utils.basic import AnsibleModule from ansible.module_utils.urls import fetch_url, url_argument_spec diff --git a/plugins/modules/web_infrastructure/nginx_status_info.py b/plugins/modules/nginx_status_info.py similarity index 87% rename from plugins/modules/web_infrastructure/nginx_status_info.py rename to plugins/modules/nginx_status_info.py index ada6881714..31707e0688 100644 --- a/plugins/modules/web_infrastructure/nginx_status_info.py +++ b/plugins/modules/nginx_status_info.py @@ -1,20 +1,21 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- # -# (c) 2016, René Moser -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# Copyright (c) 2016, René Moser +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = r''' ---- +DOCUMENTATION = r""" module: nginx_status_info -short_description: Retrieve information on nginx status. +short_description: Retrieve information on nginx status description: - Gathers information from nginx from an URL having C(stub_status) enabled. 
author: "René Moser (@resmo)" +extends_documentation_fragment: + - community.general.attributes + - community.general.attributes.info_module options: url: type: str @@ -30,9 +31,9 @@ options: notes: - See U(http://nginx.org/en/docs/http/ngx_http_stub_status_module.html) for more information. -''' +""" -EXAMPLES = r''' +EXAMPLES = r""" # Gather status info from nginx on localhost - name: Get current http stats community.general.nginx_status_info: @@ -45,10 +46,9 @@ EXAMPLES = r''' url: http://localhost/nginx_status timeout: 20 register: result -''' +""" -RETURN = r''' ---- +RETURN = r""" active_connections: description: Active connections. returned: success @@ -60,7 +60,8 @@ accepts: type: int sample: 81769947 handled: - description: The total number of handled connections. Generally, the parameter value is the same as accepts unless some resource limits have been reached. + description: The total number of handled connections. Generally, the parameter value is the same as accepts unless some + resource limits have been reached. 
returned: success type: int sample: 81769947 @@ -89,7 +90,7 @@ data: returned: success type: str sample: "Active connections: 2340 \nserver accepts handled requests\n 81769947 81769947 144332345 \nReading: 0 Writing: 241 Waiting: 2092 \n" -''' +""" import re from ansible.module_utils.basic import AnsibleModule diff --git a/plugins/modules/cloud/smartos/nictagadm.py b/plugins/modules/nictagadm.py similarity index 75% rename from plugins/modules/cloud/smartos/nictagadm.py rename to plugins/modules/nictagadm.py index 05aba6f188..bd4f646bcf 100644 --- a/plugins/modules/cloud/smartos/nictagadm.py +++ b/plugins/modules/nictagadm.py @@ -1,56 +1,61 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- -# Copyright: (c) 2018, Bruce Smith -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# Copyright (c) 2018, Bruce Smith +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = r''' ---- +DOCUMENTATION = r""" module: nictagadm short_description: Manage nic tags on SmartOS systems description: - Create or delete nic tags on SmartOS systems. author: -- Bruce Smith (@SmithX10) + - Bruce Smith (@SmithX10) +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: none options: name: description: - - Name of the nic tag. + - Name of the nic tag. required: true type: str mac: description: - - Specifies the I(mac) address to attach the nic tag to when not creating an I(etherstub). - - Parameters I(mac) and I(etherstub) are mutually exclusive. + - Specifies the O(mac) address to attach the nic tag to when not creating an O(etherstub). + - Parameters O(mac) and O(etherstub) are mutually exclusive. 
type: str etherstub: description: - - Specifies that the nic tag will be attached to a created I(etherstub). - - Parameter I(etherstub) is mutually exclusive with both I(mtu), and I(mac). + - Specifies that the nic tag is attached to a created O(etherstub). + - Parameter O(etherstub) is mutually exclusive with both O(mtu), and O(mac). type: bool - default: no + default: false mtu: description: - - Specifies the size of the I(mtu) of the desired nic tag. - - Parameters I(mtu) and I(etherstub) are mutually exclusive. + - Specifies the size of the O(mtu) of the desired nic tag. + - Parameters O(mtu) and O(etherstub) are mutually exclusive. type: int force: description: - - When I(state) is absent set this switch will use the C(-f) parameter and delete the nic tag regardless of existing VMs. + - When O(state=absent) this switch uses the C(-f) parameter and delete the nic tag regardless of existing VMs. type: bool - default: no + default: false state: description: - - Create or delete a SmartOS nic tag. + - Create or delete a SmartOS nic tag. type: str - choices: [ absent, present ] + choices: [absent, present] default: present -''' +""" -EXAMPLES = r''' +EXAMPLES = r""" - name: Create 'storage0' on '00:1b:21:a3:f5:4d' community.general.nictagadm: name: storage0 @@ -62,11 +67,11 @@ EXAMPLES = r''' community.general.nictagadm: name: storage0 state: absent -''' +""" -RETURN = r''' +RETURN = r""" name: - description: nic tag name + description: Nic tag name. returned: always type: str sample: storage0 @@ -76,26 +81,26 @@ mac: type: str sample: 00:1b:21:a3:f5:4d etherstub: - description: specifies if the nic tag will create and attach to an etherstub. + description: Specifies if the nic tag was created and attached to an etherstub. returned: always type: bool - sample: False + sample: false mtu: - description: specifies which MTU size was passed during the nictagadm add command. mtu and etherstub are mutually exclusive. 
+ description: Specifies which MTU size was passed during the nictagadm add command. mtu and etherstub are mutually exclusive. returned: always type: int sample: 1500 force: - description: Shows if -f was used during the deletion of a nic tag + description: Shows if C(-f) was used during the deletion of a nic tag. returned: always type: bool - sample: False + sample: false state: - description: state of the target + description: State of the target. returned: always type: str sample: present -''' +""" from ansible.module_utils.basic import AnsibleModule from ansible.module_utils.common.network import is_mac diff --git a/plugins/modules/nmcli.py b/plugins/modules/nmcli.py new file mode 100644 index 0000000000..e6edbbf1d2 --- /dev/null +++ b/plugins/modules/nmcli.py @@ -0,0 +1,2874 @@ +#!/usr/bin/python + +# Copyright (c) 2015, Chris Long +# Copyright (c) 2017, Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + +DOCUMENTATION = r""" +module: nmcli +author: + - Chris Long (@alcamie101) +short_description: Manage Networking +requirements: + - nmcli +extends_documentation_fragment: + - community.general.attributes +description: + - Manage the network devices. Create, modify and manage various connection and device type, for example V(ethernet), V(team), + V(bond), V(vlan) and so on. + - 'On CentOS 8 and Fedora >=29 like systems, the requirements can be met by installing the following packages: C(NetworkManager).' + - 'On CentOS 7 and Fedora <=28 like systems, the requirements can be met by installing the following packages: C(NetworkManager-tui).' + - 'On Ubuntu and Debian like systems, the requirements can be met by installing the following packages: C(network-manager).' + - 'On openSUSE, the requirements can be met by installing the following packages: C(NetworkManager).' 
+attributes: + check_mode: + support: full + diff_mode: + support: full +options: + state: + description: + - Whether the device should exist or not, taking action if the state is different from what is stated. + - Using O(state=present) creates connection set to be brought up automatically. + - Using O(state=up) and O(state=down) does not modify connection with other parameters. These states have been added + in community.general 9.5.0. + type: str + required: true + choices: [absent, present, up, down] + autoconnect: + description: + - Whether the connection should start on boot. + - Whether the connection profile can be automatically activated. + type: bool + default: true + autoconnect_priority: + description: + - The priority of the connection profile for autoconnect. If set, connection profiles with higher priority are preferred. + type: int + version_added: 11.0.0 + autoconnect_retries: + description: + - The number of times to retry autoconnecting. + type: int + version_added: 11.0.0 + conn_name: + description: + - The name used to call the connection. Pattern is V([-][-]). + type: str + required: true + conn_reload: + description: + - Whether the connection should be reloaded if it was modified. + type: bool + required: false + default: false + version_added: 9.5.0 + ifname: + description: + - The interface to bind the connection to. + - The connection is only applicable to this interface name. + - A special value of V(*) can be used for interface-independent connections. + - The O(ifname) argument is mandatory for all connection types except bond, team, bridge, vlan and vpn. + - This parameter defaults to O(conn_name) when left unset for all connection types except vpn that removes it. + type: str + type: + description: + - This is the type of device or network connection that you wish to create or modify. + - Type V(dummy) is added in community.general 3.5.0. + - Type V(gsm) is added in community.general 3.7.0. 
+ - Type V(infiniband) is added in community.general 2.0.0. + - Type V(loopback) is added in community.general 8.1.0. + - Type V(macvlan) is added in community.general 6.6.0. + - Type V(ovs-bridge) is added in community.general 8.6.0. + - Type V(ovs-interface) is added in community.general 8.6.0. + - Type V(ovs-port) is added in community.general 8.6.0. + - Type V(wireguard) is added in community.general 4.3.0. + - Type V(vpn) is added in community.general 5.1.0. + - Type V(vrf) is added in community.general 10.4.0. + - Using V(bond-slave), V(bridge-slave), or V(team-slave) implies V(ethernet) connection type with corresponding O(slave_type) + option. + - If you want to control non-ethernet connection attached to V(bond), V(bridge), or V(team) consider using O(slave_type) + option. + type: str + choices: + - bond + - bond-slave + - bridge + - bridge-slave + - dummy + - ethernet + - generic + - gre + - infiniband + - ipip + - macvlan + - sit + - team + - team-slave + - vlan + - vxlan + - wifi + - gsm + - wireguard + - ovs-bridge + - ovs-port + - ovs-interface + - vpn + - vrf + - loopback + mode: + description: + - This is the type of device or network connection that you wish to create for a bond or bridge. + type: str + choices: [802.3ad, active-backup, balance-alb, balance-rr, balance-tlb, balance-xor, broadcast] + default: balance-rr + transport_mode: + description: + - This option sets the connection type of Infiniband IPoIB devices. + type: str + choices: [datagram, connected] + version_added: 5.8.0 + infiniband_mac: + description: + - MAC address of the Infiniband IPoIB devices. + type: str + version_added: 10.6.0 + slave_type: + description: + - Type of the device of this slave's master connection (for example V(bond)). + - Type V(ovs-port) is added in community.general 8.6.0. + type: str + choices: ['bond', 'bridge', 'team', 'ovs-port', 'vrf'] + version_added: 7.0.0 + master: + description: + - Master ] STP forwarding delay, in seconds. 
+ type: int + default: 15 + hellotime: + description: + - This is only used with bridge - [hello-time <1-10>] STP hello time, in seconds. + type: int + default: 2 + maxage: + description: + - This is only used with bridge - [max-age <6-42>] STP maximum message age, in seconds. + type: int + default: 20 + ageingtime: + description: + - This is only used with bridge - [ageing-time <0-1000000>] the Ethernet MAC address aging time, in seconds. + type: int + default: 300 + mac: + description: + - MAC address of the connection. + - Note this requires a recent kernel feature, originally introduced in 3.15 upstream kernel. + type: str + slavepriority: + description: + - This is only used with 'bridge-slave' - [<0-63>] - STP priority of this slave. + type: int + default: 32 + path_cost: + description: + - This is only used with 'bridge-slave' - [<1-65535>] - STP port cost for destinations using this slave. + type: int + default: 100 + hairpin: + description: + - This is only used with 'bridge-slave' - 'hairpin mode' for the slave, which allows frames to be sent back out through + the slave the frame was received on. + - The default change to V(false) in community.general 7.0.0. It used to be V(true) before. + type: bool + default: false + runner: + description: + - This is the type of device or network connection that you wish to create for a team. + type: str + choices: [broadcast, roundrobin, activebackup, loadbalance, lacp] + default: roundrobin + version_added: 3.4.0 + runner_hwaddr_policy: + description: + - This defines the policy of how hardware addresses of team device and port devices should be set during the team lifetime. + type: str + choices: [same_all, by_active, only_active] + version_added: 3.4.0 + runner_fast_rate: + description: + - Option specifies the rate at which our link partner is asked to transmit LACPDU packets. If this is V(true) then packets + are sent once per second. Otherwise they are sent every 30 seconds. + - Only allowed for O(runner=lacp). 
+ type: bool + version_added: 6.5.0 + vlanid: + description: + - This is only used with VLAN - VLAN ID in range <0-4095>. + type: int + vlandev: + description: + - This is only used with VLAN - parent device this VLAN is on, can use ifname. + type: str + flags: + description: + - This is only used with VLAN - flags. + type: str + ingress: + description: + - This is only used with VLAN - VLAN ingress priority mapping. + type: str + egress: + description: + - This is only used with VLAN - VLAN egress priority mapping. + type: str + vxlan_id: + description: + - This is only used with VXLAN - VXLAN ID. + type: int + vxlan_remote: + description: + - This is only used with VXLAN - VXLAN destination IP address. + type: str + vxlan_local: + description: + - This is only used with VXLAN - VXLAN local IP address. + type: str + ip_tunnel_dev: + description: + - This is used with GRE/IPIP/SIT - parent device this GRE/IPIP/SIT tunnel, can use ifname. + type: str + ip_tunnel_remote: + description: + - This is used with GRE/IPIP/SIT - GRE/IPIP/SIT destination IP address. + type: str + ip_tunnel_local: + description: + - This is used with GRE/IPIP/SIT - GRE/IPIP/SIT local IP address. + type: str + ip_tunnel_input_key: + description: + - The key used for tunnel input packets. + - Only used when O(type=gre). + type: str + version_added: 3.6.0 + ip_tunnel_output_key: + description: + - The key used for tunnel output packets. + - Only used when O(type=gre). + type: str + version_added: 3.6.0 + table: + description: + - This is only used with VRF - VRF table number. + type: int + version_added: 10.4.0 + zone: + description: + - The trust level of the connection. + - When updating this property on a currently activated connection, the change takes effect immediately. + type: str + version_added: 2.0.0 + wifi_sec: + description: + - The security configuration of the WiFi connection. 
+ - Note the list of suboption attributes may vary depending on which version of NetworkManager/nmcli is installed on + the host. + - 'An up-to-date list of supported attributes can be found here: U(https://networkmanager.dev/docs/api/latest/settings-802-11-wireless-security.html).' + - 'For instance to use common WPA-PSK auth with a password: V({key-mgmt: wpa-psk, psk: my_password}).' + type: dict + suboptions: + auth-alg: + description: + - When WEP is used (that is, if O(wifi_sec.key-mgmt) is V(none) or V(ieee8021x)) indicate the 802.11 authentication + algorithm required by the AP here. + - One of V(open) for Open System, V(shared) for Shared Key, or V(leap) for Cisco LEAP. + - When using Cisco LEAP (that is, if O(wifi_sec.key-mgmt=ieee8021x) and O(wifi_sec.auth-alg=leap)) the O(wifi_sec.leap-username) + and O(wifi_sec.leap-password) properties must be specified. + type: str + choices: [open, shared, leap] + fils: + description: + - Indicates whether Fast Initial Link Setup (802.11ai) must be enabled for the connection. + - One of V(0) (use global default value), V(1) (disable FILS), V(2) (enable FILS if the supplicant and the access + point support it) or V(3) (enable FILS and fail if not supported). + - When set to V(0) and no global default is set, FILS is optionally enabled. + type: int + choices: [0, 1, 2, 3] + default: 0 + group: + description: + - A list of group/broadcast encryption algorithms which prevents connections to Wi-Fi networks that do not utilize + one of the algorithms in the list. + - For maximum compatibility leave this property empty. + type: list + elements: str + choices: [wep40, wep104, tkip, ccmp] + key-mgmt: + description: + - Key management used for the connection. 
+ - One of V(none) (WEP or no password protection), V(ieee8021x) (Dynamic WEP), V(owe) (Opportunistic Wireless Encryption), + V(wpa-psk) (WPA2 + WPA3 personal), V(sae) (WPA3 personal only), V(wpa-eap) (WPA2 + WPA3 enterprise) or V(wpa-eap-suite-b-192) + (WPA3 enterprise only). + - This property must be set for any Wi-Fi connection that uses security. + type: str + choices: [none, ieee8021x, owe, wpa-psk, sae, wpa-eap, wpa-eap-suite-b-192] + leap-password-flags: + description: Flags indicating how to handle the O(wifi_sec.leap-password) property. + type: list + elements: int + leap-password: + description: The login password for legacy LEAP connections (that is, if O(wifi_sec.key-mgmt=ieee8021x) and O(wifi_sec.auth-alg=leap)). + type: str + leap-username: + description: The login username for legacy LEAP connections (that is, if O(wifi_sec.key-mgmt=ieee8021x) and O(wifi_sec.auth-alg=leap)). + type: str + pairwise: + description: + - A list of pairwise encryption algorithms which prevents connections to Wi-Fi networks that do not utilize one + of the algorithms in the list. + - For maximum compatibility leave this property empty. + type: list + elements: str + choices: [tkip, ccmp] + pmf: + description: + - Indicates whether Protected Management Frames (802.11w) must be enabled for the connection. + - One of V(0) (use global default value), V(1) (disable PMF), V(2) (enable PMF if the supplicant and the access + point support it) or V(3) (enable PMF and fail if not supported). + - When set to V(0) and no global default is set, PMF is optionally enabled. + type: int + choices: [0, 1, 2, 3] + default: 0 + proto: + description: + - List of strings specifying the allowed WPA protocol versions to use. + - Each element may be V(wpa) (allow WPA) or V(rsn) (allow WPA2/RSN). + - If not specified, both WPA and RSN connections are allowed. + type: list + elements: str + choices: [wpa, rsn] + psk-flags: + description: Flags indicating how to handle the O(wifi_sec.psk) property. 
+ type: list + elements: int + psk: + description: + - Pre-Shared-Key for WPA networks. + - For WPA-PSK, it is either an ASCII passphrase of 8 to 63 characters that is (as specified in the 802.11i standard) + hashed to derive the actual key, or the key in form of 64 hexadecimal character. + - The WPA3-Personal networks use a passphrase of any length for SAE authentication. + type: str + wep-key-flags: + description: + - Flags indicating how to handle the O(wifi_sec.wep-key0), O(wifi_sec.wep-key1), O(wifi_sec.wep-key2), and O(wifi_sec.wep-key3) + properties. + type: list + elements: int + wep-key-type: + description: + - Controls the interpretation of WEP keys. + - Allowed values are V(1), in which case the key is either a 10- or 26-character hexadecimal string, or a 5- or + 13-character ASCII password; or V(2), in which case the passphrase is provided as a string and it is hashed using + the de-facto MD5 method to derive the actual WEP key. + type: int + choices: [1, 2] + wep-key0: + description: + - Index 0 WEP key. This is the WEP key used in most networks. + - See the O(wifi_sec.wep-key-type) property for a description of how this key is interpreted. + type: str + wep-key1: + description: + - Index 1 WEP key. This WEP index is not used by most networks. + - See the O(wifi_sec.wep-key-type) property for a description of how this key is interpreted. + type: str + wep-key2: + description: + - Index 2 WEP key. This WEP index is not used by most networks. + - See the O(wifi_sec.wep-key-type) property for a description of how this key is interpreted. + type: str + wep-key3: + description: + - Index 3 WEP key. This WEP index is not used by most networks. + - See the O(wifi_sec.wep-key-type) property for a description of how this key is interpreted. + type: str + wep-tx-keyidx: + description: + - When static WEP is used (that is, if O(wifi_sec.key-mgmt=none)) and a non-default WEP key index is used by the + AP, put that WEP key index here. 
+ - Valid values are V(0) (default key) through V(3). + - Note that some consumer access points (like the Linksys WRT54G) number the keys V(1) to V(4). + type: int + choices: [0, 1, 2, 3] + default: 0 + wps-method: + description: + - Flags indicating which mode of WPS is to be used if any. + - There is little point in changing the default setting as NetworkManager automatically determines whether it is + feasible to start WPS enrollment from the Access Point capabilities. + - WPS can be disabled by setting this property to a value of V(1). + type: int + default: 0 + version_added: 3.0.0 + ssid: + description: + - Name of the Wireless router or the access point. + type: str + version_added: 3.0.0 + wifi: + description: + - The configuration of the WiFi connection. + - Note the list of suboption attributes may vary depending on which version of NetworkManager/nmcli is installed on + the host. + - 'An up-to-date list of supported attributes can be found here: U(https://networkmanager.dev/docs/api/latest/settings-802-11-wireless.html).' + - 'For instance to create a hidden AP mode WiFi connection: V({hidden: true, mode: ap}).' + type: dict + suboptions: + ap-isolation: + description: + - Configures AP isolation, which prevents communication between wireless devices connected to this AP. + - This property can be set to a value different from V(-1) only when the interface is configured in AP mode. + - If set to V(1), devices are not able to communicate with each other. This increases security because it protects + devices against attacks from other clients in the network. At the same time, it prevents devices to access resources + on the same wireless networks as file shares, printers, and so on. + - If set to V(0), devices can talk to each other. + - When set to V(-1), the global default is used; in case the global default is unspecified it is assumed to be V(0). 
+ type: int + choices: [-1, 0, 1] + default: -1 + assigned-mac-address: + description: + - The new field for the cloned MAC address. + - It can be either a hardware address in ASCII representation, or one of the special values V(preserve), V(permanent), + V(random) or V(stable). + - This field replaces the deprecated O(wifi.cloned-mac-address) on D-Bus, which can only contain explicit hardware + addresses. + - Note that this property only exists in D-Bus API. libnm and nmcli continue to call this property C(cloned-mac-address). + type: str + band: + description: + - 802.11 frequency band of the network. + - One of V(a) for 5GHz 802.11a or V(bg) for 2.4GHz 802.11. + - This locks associations to the Wi-Fi network to the specific band, so for example, if V(a) is specified, the device + does not associate with the same network in the 2.4GHz band even if the network's settings are compatible. + - This setting depends on specific driver capability and may not work with all drivers. + type: str + choices: [a, bg] + bssid: + description: + - If specified, directs the device to only associate with the given access point. + - This capability is highly driver dependent and not supported by all devices. + - Note this property does not control the BSSID used when creating an Ad-Hoc network and is unlikely to in the future. + type: str + channel: + description: + - Wireless channel to use for the Wi-Fi connection. + - The device only joins (or creates for Ad-Hoc networks) a Wi-Fi network on the specified channel. + - Because channel numbers overlap between bands, this property also requires the O(wifi.band) property to be set. + type: int + default: 0 + cloned-mac-address: + description: + - This D-Bus field is deprecated in favor of O(wifi.assigned-mac-address) which is more flexible and allows specifying + special variants like V(random). + - For libnm and nmcli, this field is called C(cloned-mac-address). 
+ type: str + generate-mac-address-mask: + description: + - With O(wifi.cloned-mac-address) setting V(random) or V(stable), by default all bits of the MAC address are scrambled + and a locally-administered, unicast MAC address is created. This property allows to specify that certain bits + are fixed. + - Note that the least significant bit of the first MAC address is always unset to create a unicast MAC address. + - If the property is V(null), it is eligible to be overwritten by a default connection setting. + - If the value is still V(null) or an empty string, the default is to create a locally-administered, unicast MAC + address. + - If the value contains one MAC address, this address is used as mask. The set bits of the mask are to be filled + with the current MAC address of the device, while the unset bits are subject to randomization. + - Setting V(FE:FF:FF:00:00:00) means to preserve the OUI of the current MAC address and only randomize the lower + 3 bytes using the V(random) or V(stable) algorithm. + - If the value contains one additional MAC address after the mask, this address is used instead of the current MAC + address to fill the bits that shall not be randomized. + - For example, a value of V(FE:FF:FF:00:00:00 68:F7:28:00:00:00) sets the OUI of the MAC address to 68:F7:28, while + the lower bits are randomized. + - A value of V(02:00:00:00:00:00 00:00:00:00:00:00) creates a fully scrambled globally-administered, burned-in MAC + address. + - If the value contains more than one additional MAC addresses, one of them is chosen randomly. For example, V(02:00:00:00:00:00 + 00:00:00:00:00:00 02:00:00:00:00:00) creates a fully scrambled MAC address, randomly locally or globally administered. + type: str + hidden: + description: + - If V(true), indicates that the network is a non-broadcasting network that hides its SSID. This works both in infrastructure + and AP mode. 
+ - In infrastructure mode, various workarounds are used for a more reliable discovery of hidden networks, such as + probe-scanning the SSID. However, these workarounds expose inherent insecurities with hidden SSID networks, and + thus hidden SSID networks should be used with caution. + - In AP mode, the created network does not broadcast its SSID. + - Note that marking the network as hidden may be a privacy issue for you (in infrastructure mode) or client stations + (in AP mode), as the explicit probe-scans are distinctly recognizable on the air. + type: bool + default: false + mac-address-blacklist: + description: + - A list of permanent MAC addresses of Wi-Fi devices to which this connection should never apply. + - Each MAC address should be given in the standard hex-digits-and-colons notation (for example, V(00:11:22:33:44:55)). + type: list + elements: str + mac-address-randomization: + description: + - One of V(0) (never randomize unless the user has set a global default to randomize and the supplicant supports + randomization), V(1) (never randomize the MAC address), or V(2) (always randomize the MAC address). + - This property is deprecated for O(wifi.cloned-mac-address). + type: int + default: 0 + choices: [0, 1, 2] + mac-address: + description: + - If specified, this connection only applies to the Wi-Fi device whose permanent MAC address matches. + - This property does not change the MAC address of the device (for example for MAC spoofing). + type: str + mode: + description: Wi-Fi network mode. If blank, V(infrastructure) is assumed. + type: str + choices: [infrastructure, mesh, adhoc, ap] + default: infrastructure + mtu: + description: If non-zero, only transmit packets of the specified size or smaller, breaking larger packets up into + multiple Ethernet frames. 
+ type: int + default: 0 + powersave: + description: + - One of V(2) (disable Wi-Fi power saving), V(3) (enable Wi-Fi power saving), V(1) (do not touch currently configure + setting) or V(0) (use the globally configured value). + - All other values are reserved. + type: int + default: 0 + choices: [0, 1, 2, 3] + rate: + description: + - If non-zero, directs the device to only use the specified bitrate for communication with the access point. + - Units are in Kb/s, so for example V(5500) = 5.5 Mbit/s. + - This property is highly driver dependent and not all devices support setting a static bitrate. + type: int + default: 0 + tx-power: + description: + - If non-zero, directs the device to use the specified transmit power. + - Units are dBm. + - This property is highly driver dependent and not all devices support setting a static transmit power. + type: int + default: 0 + wake-on-wlan: + description: + - The NMSettingWirelessWakeOnWLan options to enable. Not all devices support all options. + - May be any combination of C(NM_SETTING_WIRELESS_WAKE_ON_WLAN_ANY) (V(0x2)), C(NM_SETTING_WIRELESS_WAKE_ON_WLAN_DISCONNECT) + (V(0x4)), C(NM_SETTING_WIRELESS_WAKE_ON_WLAN_MAGIC) (V(0x8)), C(NM_SETTING_WIRELESS_WAKE_ON_WLAN_GTK_REKEY_FAILURE) + (V(0x10)), C(NM_SETTING_WIRELESS_WAKE_ON_WLAN_EAP_IDENTITY_REQUEST) (V(0x20)), C(NM_SETTING_WIRELESS_WAKE_ON_WLAN_4WAY_HANDSHAKE) + (V(0x40)), C(NM_SETTING_WIRELESS_WAKE_ON_WLAN_RFKILL_RELEASE) (V(0x80)), C(NM_SETTING_WIRELESS_WAKE_ON_WLAN_TCP) + (V(0x100)) or the special values V(0x1) (to use global settings) and V(0x8000) (to disable management of Wake-on-LAN + in NetworkManager). + - Note the option values' sum must be specified in order to combine multiple options. + type: int + default: 1 + version_added: 3.5.0 + ignore_unsupported_suboptions: + description: + - Ignore suboptions which are invalid or unsupported by the version of NetworkManager/nmcli installed on the host. 
+ - Only O(wifi) and O(wifi_sec) options are currently affected. + type: bool + default: false + version_added: 3.6.0 + gsm: + description: + - The configuration of the GSM connection. + - Note the list of suboption attributes may vary depending on which version of NetworkManager/nmcli is installed on + the host. + - 'An up-to-date list of supported attributes can be found here: U(https://networkmanager.dev/docs/api/latest/settings-gsm.html).' + - 'For instance to use apn, pin, username and password: V({apn: provider.apn, pin: 1234, username: apn.username, password: + apn.password}).' + type: dict + version_added: 3.7.0 + suboptions: + apn: + description: + - The GPRS Access Point Name specifying the APN used when establishing a data session with the GSM-based network. + - The APN often determines how the user is billed for their network usage and whether the user has access to the + Internet or just a provider-specific walled-garden, so it is important to use the correct APN for the user's mobile + broadband plan. + - The APN may only be composed of the characters a-z, 0-9, ., and - per GSM 03.60 Section 14.9. + type: str + auto-config: + description: When V(true), the settings such as O(gsm.apn), O(gsm.username), or O(gsm.password) default to values + that match the network the modem registers to in the Mobile Broadband Provider database. + type: bool + default: false + device-id: + description: + - The device unique identifier (as given by the V(WWAN) management service) which this connection applies to. + - If given, the connection only applies to the specified device. + type: str + home-only: + description: + - When V(true), only connections to the home network are allowed. + - Connections to roaming networks are not made. + type: bool + default: false + mtu: + description: If non-zero, only transmit packets of the specified size or smaller, breaking larger packets up into + multiple Ethernet frames. 
+ type: int + default: 0 + network-id: + description: + - The Network ID (GSM LAI format, ie MCC-MNC) to force specific network registration. + - If the Network ID is specified, NetworkManager attempts to force the device to register only on the specified + network. + - This can be used to ensure that the device does not roam when direct roaming control of the device is not otherwise + possible. + type: str + number: + description: Legacy setting that used to help establishing PPP data sessions for GSM-based modems. + type: str + password: + description: + - The password used to authenticate with the network, if required. + - Many providers do not require a password, or accept any password. + - But if a password is required, it is specified here. + type: str + password-flags: + description: + - NMSettingSecretFlags indicating how to handle the O(gsm.password) property. + - 'Following choices are allowed: V(0) B(NONE): The system is responsible for providing and storing this secret + (default), V(1) B(AGENT_OWNED): A user secret agent is responsible for providing and storing this secret; when + it is required agents are asked to retrieve it V(2) B(NOT_SAVED): This secret should not be saved, but should + be requested from the user each time it is needed V(4) B(NOT_REQUIRED): In situations where it cannot be automatically + determined that the secret is required (some VPNs and PPP providers do not require all secrets) this flag indicates + that the specific secret is not required.' + type: int + choices: [0, 1, 2, 4] + default: 0 + pin: + description: + - If the SIM is locked with a PIN it must be unlocked before any other operations are requested. + - Specify the PIN here to allow operation of the device. + type: str + pin-flags: + description: + - NMSettingSecretFlags indicating how to handle the O(gsm.pin) property. + - See O(gsm.password-flags) for NMSettingSecretFlags choices. 
+ type: int + choices: [0, 1, 2, 4] + default: 0 + sim-id: + description: + - The SIM card unique identifier (as given by the C(WWAN) management service) which this connection applies to. + - If given, the connection applies to any device also allowed by O(gsm.device-id) which contains a SIM card matching + the given identifier. + type: str + sim-operator-id: + description: + - A MCC/MNC string like V(310260) or V(21601) identifying the specific mobile network operator which this connection + applies to. + - If given, the connection applies to any device also allowed by O(gsm.device-id) and O(gsm.sim-id) which contains + a SIM card provisioned by the given operator. + type: str + username: + description: + - The username used to authenticate with the network, if required. + - Many providers do not require a username, or accept any username. + - But if a username is required, it is specified here. + type: str + macvlan: + description: + - The configuration of the MAC VLAN connection. + - Note the list of suboption attributes may vary depending on which version of NetworkManager/nmcli is installed on + the host. + - 'An up-to-date list of supported attributes can be found here: U(https://networkmanager.dev/docs/api/latest/settings-macvlan.html).' + type: dict + version_added: 6.6.0 + suboptions: + mode: + description: + - The macvlan mode, which specifies the communication mechanism between multiple macvlans on the same lower device. + - 'Following choices are allowed: V(1) B(vepa), V(2) B(bridge), V(3) B(private), V(4) B(passthru) and V(5) B(source).' + type: int + choices: [1, 2, 3, 4, 5] + required: true + parent: + description: + - If given, specifies the parent interface name or parent connection UUID from which this MAC-VLAN interface should + be created. If this property is not specified, the connection must contain an "802-3-ethernet" setting with a + "mac-address" property. 
+ type: str + required: true + promiscuous: + description: + - Whether the interface should be put in promiscuous mode. + type: bool + tap: + description: + - Whether the interface should be a MACVTAP. + type: bool + wireguard: + description: + - The configuration of the Wireguard connection. + - Note the list of suboption attributes may vary depending on which version of NetworkManager/nmcli is installed on + the host. + - 'An up-to-date list of supported attributes can be found here: U(https://networkmanager.dev/docs/api/latest/settings-wireguard.html).' + - 'For instance to configure a listen port: V({listen-port: 12345}).' + type: dict + version_added: 4.3.0 + suboptions: + fwmark: + description: + - The 32-bit fwmark for outgoing packets. + - The use of fwmark is optional and is by default off. Setting it to 0 disables it. + - Note that O(wireguard.ip4-auto-default-route) or O(wireguard.ip6-auto-default-route) enabled, implies to automatically + choose a fwmark. + type: int + ip4-auto-default-route: + description: + - Whether to enable special handling of the IPv4 default route. + - If enabled, the IPv4 default route from O(wireguard.peer-routes) is placed to a dedicated routing-table and two + policy routing rules are added. + - The fwmark number is also used as routing-table for the default-route, and if fwmark is zero, an unused fwmark/table + is chosen automatically. This corresponds to what wg-quick does with Table=auto and what WireGuard calls "Improved + Rule-based Routing". + type: bool + ip6-auto-default-route: + description: + - Like O(wireguard.ip4-auto-default-route), but for the IPv6 default route. + type: bool + listen-port: + description: The WireGuard connection listen-port. If not specified, the port is chosen randomly when the interface + comes up. + type: int + mtu: + description: + - If non-zero, only transmit packets of the specified size or smaller, breaking larger packets up into multiple + fragments. + - If zero a default MTU is used. 
Note that contrary to wg-quick's MTU setting, this does not take into account the + current routes at the time of activation. + type: int + peer-routes: + description: + - Whether to automatically add routes for the AllowedIPs ranges of the peers. + - If V(true) (the default), NetworkManager automatically adds routes in the routing tables according to C(ipv4.route-table) + and C(ipv6.route-table). Usually you want this automatism enabled. + - If V(false), no such routes are added automatically. In this case, the user may want to configure static routes + in C(ipv4.routes) and C(ipv6.routes), respectively. + - Note that if the peer's AllowedIPs is V(0.0.0.0/0) or V(::/0) and the profile's C(ipv4.never-default) or C(ipv6.never-default) + setting is enabled, the peer route for this peer is not added automatically. + type: bool + private-key: + description: The 256 bit private-key in base64 encoding. + type: str + private-key-flags: + description: C(NMSettingSecretFlags) indicating how to handle the O(wireguard.private-key) property. + type: int + choices: [0, 1, 2] + vpn: + description: + - Configuration of a VPN connection (PPTP and L2TP). + - In order to use L2TP you need to be sure that C(network-manager-l2tp) - and C(network-manager-l2tp-gnome) if host + has UI - are installed on the host. + type: dict + version_added: 5.1.0 + suboptions: + permissions: + description: User that has permission to use the connection. + type: str + required: true + service-type: + description: This defines the service type of connection. + type: str + required: true + gateway: + description: The gateway to connection. It can be an IP address (for example V(192.0.2.1)) or a FQDN address (for + example V(vpn.example.com)). + type: str + required: true + password-flags: + description: + - NMSettingSecretFlags indicating how to handle the C(vpn.password) property. 
+ - 'Following choices are allowed: V(0) B(NONE): The system is responsible for providing and storing this secret + (default); V(1) B(AGENT_OWNED): A user secret agent is responsible for providing and storing this secret; when + it is required agents are asked to retrieve it; V(2) B(NOT_SAVED): This secret should not be saved, but should + be requested from the user each time it is needed; V(4) B(NOT_REQUIRED): In situations where it cannot be automatically + determined that the secret is required (some VPNs and PPP providers do not require all secrets) this flag indicates + that the specific secret is not required.' + type: int + choices: [0, 1, 2, 4] + default: 0 + user: + description: Username provided by VPN administrator. + type: str + required: true + ipsec-enabled: + description: + - Enable or disable IPSec tunnel to L2TP host. + - This option is needed when O(vpn.service-type) is V(org.freedesktop.NetworkManager.l2tp). + type: bool + ipsec-psk: + description: + - The pre-shared key in base64 encoding. + - > + You can encode using this Ansible Jinja2 expression: V("0s{{ '[YOUR PRE-SHARED KEY]' | ansible.builtin.b64encode }}"). + - This is only used when O(vpn.ipsec-enabled=true). + type: str + sriov: + description: + - Allow to configure SR-IOV settings. + - 'An up-to-date list of supported attributes can be found here: + U(https://networkmanager.pages.freedesktop.org/NetworkManager/NetworkManager/settings-sriov.html).' + type: dict + version_added: 10.1.0 + suboptions: + autoprobe-drivers: + description: + - Whether to autoprobe virtual functions by a compatible driver. + type: int + eswitch-encap-mode: + description: + - Select the eswitch encapsulation support. + type: int + eswitch-inline-mode: + description: + - Select the eswitch inline-mode of the device. + type: int + eswitch-mode: + description: + - Select the eswitch mode of the device. + type: int + total-vfs: + description: Number of virtual functions to create. 
Consult your NIC documentation for the maximum number of VFs supported. + type: int + vfs: + description: + - 'Virtual function descriptors in the form: V(INDEX [ATTR=VALUE[ ATTR=VALUE]...]).' + - Multiple VFs can be specified using a comma as separator, for example V(2 mac=00:11:22:33:44:55 spoof-check=true,3 + vlans=100). + type: str +""" + +EXAMPLES = r""" +# These examples are using the following inventory: +# +# ## Directory layout: +# +# |_/inventory/cloud-hosts +# | /group_vars/openstack-stage.yml +# | /host_vars/controller-01.openstack.host.com +# | /host_vars/controller-02.openstack.host.com +# |_/playbook/library/nmcli.py +# | /playbook-add.yml +# | /playbook-del.yml +# ``` +# +# ## inventory examples +# ### groups_vars +# ```yml +# --- +# #devops_os_define_network +# storage_gw: "192.0.2.254" +# external_gw: "198.51.100.254" +# tenant_gw: "203.0.113.254" +# +# #Team vars +# nmcli_team: +# - conn_name: tenant +# ip4: '{{ tenant_ip }}' +# gw4: '{{ tenant_gw }}' +# - conn_name: external +# ip4: '{{ external_ip }}' +# gw4: '{{ external_gw }}' +# - conn_name: storage +# ip4: '{{ storage_ip }}' +# gw4: '{{ storage_gw }}' +# nmcli_team_slave: +# - conn_name: em1 +# ifname: em1 +# master: tenant +# - conn_name: em2 +# ifname: em2 +# master: tenant +# - conn_name: p2p1 +# ifname: p2p1 +# master: storage +# - conn_name: p2p2 +# ifname: p2p2 +# master: external +# +# #bond vars +# nmcli_bond: +# - conn_name: tenant +# ip4: '{{ tenant_ip }}' +# gw4: '' +# mode: balance-rr +# - conn_name: external +# ip4: '{{ external_ip }}' +# gw4: '' +# mode: balance-rr +# - conn_name: storage +# ip4: '{{ storage_ip }}' +# gw4: '{{ storage_gw }}' +# mode: balance-rr +# nmcli_bond_slave: +# - conn_name: em1 +# ifname: em1 +# master: tenant +# - conn_name: em2 +# ifname: em2 +# master: tenant +# - conn_name: p2p1 +# ifname: p2p1 +# master: storage +# - conn_name: p2p2 +# ifname: p2p2 +# master: external +# +# #ethernet vars +# nmcli_ethernet: +# - conn_name: em1 +# ifname: em1 +# 
ip4: +# - '{{ tenant_ip }}' +# - '{{ second_tenant_ip }}' +# gw4: '{{ tenant_gw }}' +# - conn_name: em2 +# ifname: em2 +# ip4: '{{ tenant_ip1 }}' +# gw4: '{{ tenant_gw }}' +# - conn_name: p2p1 +# ifname: p2p1 +# ip4: '{{ storage_ip }}' +# gw4: '{{ storage_gw }}' +# - conn_name: p2p2 +# ifname: p2p2 +# ip4: '{{ external_ip }}' +# gw4: '{{ external_gw }}' +# ``` +# +# ### host_vars +# ```yml +# --- +# storage_ip: "192.0.2.91/23" +# external_ip: "198.51.100.23/21" +# tenant_ip: "203.0.113.77/23" +# second_tenant_ip: "204.0.113.77/23" +# ``` + + +## playbook-add.yml example + +- hosts: openstack-stage + remote_user: root + tasks: + + - name: Install needed network manager libs + ansible.builtin.package: + name: + - NetworkManager-libnm + - nm-connection-editor + - libsemanage-python + - policycoreutils-python + state: present + +##### Working with all cloud nodes - Teaming + - name: Try nmcli add team - conn_name only & ip4 gw4 + community.general.nmcli: + type: team + conn_name: '{{ item.conn_name }}' + ip4: '{{ item.ip4 }}' + gw4: '{{ item.gw4 }}' + state: present + with_items: + - '{{ nmcli_team }}' + + - name: Try nmcli add teams-slave + community.general.nmcli: + type: team-slave + conn_name: '{{ item.conn_name }}' + ifname: '{{ item.ifname }}' + master: '{{ item.master }}' + state: present + with_items: + - '{{ nmcli_team_slave }}' + +##### Working with all cloud nodes - Bonding + - name: Try nmcli add bond - conn_name only & ip4 gw4 mode + community.general.nmcli: + type: bond + conn_name: '{{ item.conn_name }}' + ip4: '{{ item.ip4 }}' + gw4: '{{ item.gw4 }}' + mode: '{{ item.mode }}' + state: present + with_items: + - '{{ nmcli_bond }}' + + - name: Try nmcli add bond-slave + community.general.nmcli: + type: bond-slave + conn_name: '{{ item.conn_name }}' + ifname: '{{ item.ifname }}' + master: '{{ item.master }}' + state: present + with_items: + - '{{ nmcli_bond_slave }}' + +##### Working with all cloud nodes - Ethernet + - name: Try nmcli add Ethernet - 
conn_name only & ip4 gw4 + community.general.nmcli: + type: ethernet + conn_name: '{{ item.conn_name }}' + ip4: '{{ item.ip4 }}' + gw4: '{{ item.gw4 }}' + state: present + with_items: + - '{{ nmcli_ethernet }}' + +## playbook-del.yml example +- hosts: openstack-stage + remote_user: root + tasks: + + - name: Try nmcli del team - multiple + community.general.nmcli: + conn_name: '{{ item.conn_name }}' + state: absent + with_items: + - conn_name: em1 + - conn_name: em2 + - conn_name: p1p1 + - conn_name: p1p2 + - conn_name: p2p1 + - conn_name: p2p2 + - conn_name: tenant + - conn_name: storage + - conn_name: external + - conn_name: team-em1 + - conn_name: team-em2 + - conn_name: team-p1p1 + - conn_name: team-p1p2 + - conn_name: team-p2p1 + - conn_name: team-p2p2 + + - name: Add an Ethernet connection with static IP configuration + community.general.nmcli: + conn_name: my-eth1 + ifname: eth1 + type: ethernet + ip4: 192.0.2.100/24 + gw4: 192.0.2.1 + state: present + + - name: Add a Team connection with static IP configuration + community.general.nmcli: + conn_name: my-team1 + ifname: my-team1 + type: team + ip4: 192.0.2.100/24 + gw4: 192.0.2.1 + state: present + autoconnect: true + + - name: Optionally, at the same time specify IPv6 addresses for the device + community.general.nmcli: + conn_name: my-eth1 + ifname: eth1 + type: ethernet + ip4: 192.0.2.100/24 + gw4: 192.0.2.1 + ip6: 2001:db8::cafe + gw6: 2001:db8::1 + state: present + + - name: Add two IPv4 DNS server addresses + community.general.nmcli: + conn_name: my-eth1 + type: ethernet + dns4: + - 192.0.2.53 + - 198.51.100.53 + state: present + + - name: Make a profile usable for all compatible Ethernet interfaces + community.general.nmcli: + ctype: ethernet + name: my-eth1 + ifname: '*' + state: present + + - name: Change the property of a setting e.g. MTU + community.general.nmcli: + conn_name: my-eth1 + mtu: 9000 + type: ethernet + state: present + + - name: Change the property of a setting e.g. 
MTU and reload connection + community.general.nmcli: + conn_name: my-eth1 + mtu: 1500 + type: ethernet + state: present + conn_reload: true + + - name: Disable connection + community.general.nmcli: + conn_name: my-eth1 + state: down + + - name: Reload and enable connection + community.general.nmcli: + conn_name: my-eth1 + state: up + conn_reload: true + + - name: Add second ip4 address + community.general.nmcli: + conn_name: my-eth1 + ifname: eth1 + type: ethernet + ip4: + - 192.0.2.100/24 + - 192.0.3.100/24 + state: present + + - name: Add second ip6 address + community.general.nmcli: + conn_name: my-eth1 + ifname: eth1 + type: ethernet + ip6: + - 2001:db8::cafe + - 2002:db8::cafe + state: present + + - name: Add VxLan + community.general.nmcli: + type: vxlan + conn_name: vxlan_test1 + vxlan_id: 16 + vxlan_local: 192.168.1.2 + vxlan_remote: 192.168.1.5 + + - name: Add gre + community.general.nmcli: + type: gre + conn_name: gre_test1 + ip_tunnel_dev: eth0 + ip_tunnel_local: 192.168.1.2 + ip_tunnel_remote: 192.168.1.5 + + - name: Add ipip + community.general.nmcli: + type: ipip + conn_name: ipip_test1 + ip_tunnel_dev: eth0 + ip_tunnel_local: 192.168.1.2 + ip_tunnel_remote: 192.168.1.5 + + - name: Add sit + community.general.nmcli: + type: sit + conn_name: sit_test1 + ip_tunnel_dev: eth0 + ip_tunnel_local: 192.168.1.2 + ip_tunnel_remote: 192.168.1.5 + + - name: Add zone + community.general.nmcli: + type: ethernet + conn_name: my-eth1 + zone: external + state: present + +# nmcli exits with status 0 if it succeeds and exits with a status greater +# than zero when there is a failure. 
The following list of status codes may be +# returned: +# +# - 0 Success - indicates the operation succeeded +# - 1 Unknown or unspecified error +# - 2 Invalid user input, wrong nmcli invocation +# - 3 Timeout expired (see --wait option) +# - 4 Connection activation failed +# - 5 Connection deactivation failed +# - 6 Disconnecting device failed +# - 7 Connection deletion failed +# - 8 NetworkManager is not running +# - 9 nmcli and NetworkManager versions mismatch +# - 10 Connection, device, or access point does not exist. + +- name: Create the wifi connection + community.general.nmcli: + type: wifi + conn_name: Brittany + ifname: wlp4s0 + ssid: Brittany + wifi_sec: + key-mgmt: wpa-psk + psk: my_password + autoconnect: true + state: present + +- name: Create a hidden AP mode wifi connection + community.general.nmcli: + type: wifi + conn_name: ChocoMaster + ifname: wlo1 + ssid: ChocoMaster + wifi: + hidden: true + mode: ap + autoconnect: true + state: present + +- name: Create a gsm connection + community.general.nmcli: + type: gsm + conn_name: my-gsm-provider + ifname: cdc-wdm0 + gsm: + apn: my.provider.apn + username: my-provider-username + password: my-provider-password + pin: my-sim-pin + autoconnect: true + state: present + +- name: Create a macvlan connection + community.general.nmcli: + type: macvlan + conn_name: my-macvlan-connection + ifname: mymacvlan0 + macvlan: + mode: 2 + parent: eth1 + autoconnect: true + state: present + +- name: Create a wireguard connection + community.general.nmcli: + type: wireguard + conn_name: my-wg-provider + ifname: mywg0 + wireguard: + listen-port: 51820 + private-key: my-private-key + autoconnect: true + state: present + +- name: >- + Create a VPN L2TP connection for ansible_user to connect on vpn.example.com + authenticating with user 'brittany' and pre-shared key as 'Brittany123' + community.general.nmcli: + type: vpn + conn_name: my-vpn-connection + vpn: + permissions: "{{ ansible_user }}" + service-type: 
org.freedesktop.NetworkManager.l2tp + gateway: vpn.example.com + password-flags: 2 + user: brittany + ipsec-enabled: true + ipsec-psk: "0s{{ 'Brittany123' | ansible.builtin.b64encode }}" + autoconnect: false + state: present + +## Creating bond attached to bridge example +- name: Create bond attached to bridge + community.general.nmcli: + type: bond + conn_name: bond0 + slave_type: bridge + master: br0 + state: present + +- name: Create master bridge + community.general.nmcli: + type: bridge + conn_name: br0 + method4: disabled + method6: disabled + state: present + +## Creating vlan connection attached to bridge +- name: Create master bridge + community.general.nmcli: + type: bridge + conn_name: br0 + state: present + +- name: Create VLAN 5 + community.general.nmcli: + type: vlan + conn_name: eth0.5 + slave_type: bridge + master: br0 + vlandev: eth0 + vlanid: 5 + state: present + +## Creating VRF and adding VLAN interface to it +- name: Create VRF + community.general.nmcli: + type: vrf + ifname: vrf10 + table: 10 + state: present + conn_name: vrf10 + method4: disabled + method6: disabled + +- name: Create VLAN interface inside VRF + community.general.nmcli: + conn_name: "eth0.124" + type: vlan + vlanid: "124" + vlandev: "eth0" + master: "vrf10" + slave_type: vrf + state: "present" + ip4: '192.168.124.50' + gw4: '192.168.124.1' + +## Defining ip rules while setting a static IP +## table 'production' is set with id 200 in this example. 
class NmcliModuleError(Exception):
    """Raised when an nmcli invocation fails (non-zero exit status)."""
+ """ + + platform = 'Generic' + distribution = None + + SECRET_OPTIONS = ( + '802-11-wireless-security.leap-password', + '802-11-wireless-security.psk', + '802-11-wireless-security.wep-key0', + '802-11-wireless-security.wep-key1', + '802-11-wireless-security.wep-key2', + '802-11-wireless-security.wep-key3' + ) + + def __init__(self, module): + self.module = module + self.state = module.params['state'] + self.ignore_unsupported_suboptions = module.params['ignore_unsupported_suboptions'] + self.autoconnect = module.params['autoconnect'] + self.autoconnect_priority = module.params['autoconnect_priority'] + self.autoconnect_retries = module.params['autoconnect_retries'] + self.conn_name = module.params['conn_name'] + self.conn_reload = module.params['conn_reload'] + self.slave_type = module.params['slave_type'] + self.master = module.params['master'] + self.ifname = module.params['ifname'] + self.type = module.params['type'] + self.ip4 = module.params['ip4'] + self.gw4 = module.params['gw4'] + self.gw4_ignore_auto = module.params['gw4_ignore_auto'] + self.routes4 = module.params['routes4'] + self.routes4_extended = module.params['routes4_extended'] + self.route_metric4 = module.params['route_metric4'] + self.routing_rules4 = module.params['routing_rules4'] + self.never_default4 = module.params['never_default4'] + self.dns4 = module.params['dns4'] + self.dns4_search = module.params['dns4_search'] + self.dns4_options = module.params['dns4_options'] + self.dns4_ignore_auto = module.params['dns4_ignore_auto'] + self.method4 = module.params['method4'] + self.may_fail4 = module.params['may_fail4'] + self.ip6 = module.params['ip6'] + self.gw6 = module.params['gw6'] + self.gw6_ignore_auto = module.params['gw6_ignore_auto'] + self.routes6 = module.params['routes6'] + self.routes6_extended = module.params['routes6_extended'] + self.route_metric6 = module.params['route_metric6'] + self.dns6 = module.params['dns6'] + self.dns6_search = module.params['dns6_search'] + 
self.dns6_options = module.params['dns6_options'] + self.dns6_ignore_auto = module.params['dns6_ignore_auto'] + self.method6 = module.params['method6'] + self.ip_privacy6 = module.params['ip_privacy6'] + self.addr_gen_mode6 = module.params['addr_gen_mode6'] + self.mtu = module.params['mtu'] + self.stp = module.params['stp'] + self.priority = module.params['priority'] + self.mode = module.params['mode'] + self.miimon = module.params['miimon'] + self.primary = module.params['primary'] + self.downdelay = module.params['downdelay'] + self.updelay = module.params['updelay'] + self.xmit_hash_policy = module.params['xmit_hash_policy'] + self.fail_over_mac = module.params['fail_over_mac'] + self.arp_interval = module.params['arp_interval'] + self.arp_ip_target = module.params['arp_ip_target'] + self.slavepriority = module.params['slavepriority'] + self.forwarddelay = module.params['forwarddelay'] + self.hellotime = module.params['hellotime'] + self.maxage = module.params['maxage'] + self.ageingtime = module.params['ageingtime'] + self.hairpin = module.params['hairpin'] + self.path_cost = module.params['path_cost'] + self.mac = module.params['mac'] + self.runner = module.params['runner'] + self.runner_hwaddr_policy = module.params['runner_hwaddr_policy'] + self.runner_fast_rate = module.params['runner_fast_rate'] + self.vlanid = module.params['vlanid'] + self.vlandev = module.params['vlandev'] + self.flags = module.params['flags'] + self.ingress = module.params['ingress'] + self.egress = module.params['egress'] + self.vxlan_id = module.params['vxlan_id'] + self.vxlan_local = module.params['vxlan_local'] + self.vxlan_remote = module.params['vxlan_remote'] + self.ip_tunnel_dev = module.params['ip_tunnel_dev'] + self.ip_tunnel_local = module.params['ip_tunnel_local'] + self.ip_tunnel_remote = module.params['ip_tunnel_remote'] + self.ip_tunnel_input_key = module.params['ip_tunnel_input_key'] + self.ip_tunnel_output_key = module.params['ip_tunnel_output_key'] + self.nmcli_bin = 
self.module.get_bin_path('nmcli', True) + self.dhcp_client_id = module.params['dhcp_client_id'] + self.zone = module.params['zone'] + self.ssid = module.params['ssid'] + self.wifi = module.params['wifi'] + self.wifi_sec = module.params['wifi_sec'] + self.gsm = module.params['gsm'] + self.macvlan = module.params['macvlan'] + self.wireguard = module.params['wireguard'] + self.vpn = module.params['vpn'] + self.transport_mode = module.params['transport_mode'] + self.infiniband_mac = module.params['infiniband_mac'] + self.sriov = module.params['sriov'] + + if self.method4: + self.ipv4_method = self.method4 + elif self.type in ('dummy', 'macvlan', 'wireguard') and not self.ip4: + self.ipv4_method = 'disabled' + elif self.ip4: + self.ipv4_method = 'manual' + else: + self.ipv4_method = None + + if self.method6: + self.ipv6_method = self.method6 + elif self.type in ('dummy', 'macvlan', 'wireguard') and not self.ip6: + self.ipv6_method = 'disabled' + elif self.ip6: + self.ipv6_method = 'manual' + else: + self.ipv6_method = None + + if self.type == "vrf": + self.table = module.params['table'] + + self.edit_commands = [] + + self.extra_options_validation() + + def extra_options_validation(self): + """ Additional validation of options set passed to module that cannot be implemented in module's argspecs. 
""" + if self.type not in ("bridge-slave", "team-slave", "bond-slave"): + if self.master is None and self.slave_type is not None: + self.module.fail_json(msg="'master' option is required when 'slave_type' is specified.") + + def execute_command(self, cmd, use_unsafe_shell=False, data=None): + cmd = [to_text(item) for item in cmd] + return self.module.run_command(cmd, use_unsafe_shell=use_unsafe_shell, data=data) + + def execute_edit_commands(self, commands, arguments): + arguments = arguments or [] + cmd = [self.nmcli_bin, 'con', 'edit'] + arguments + data = "\n".join(commands) + return self.execute_command(cmd, data=data) + + def connection_options(self, detect_change=False): + # Options common to multiple connection types. + options = { + 'connection.autoconnect': self.autoconnect, + 'connection.autoconnect-priority': self.autoconnect_priority, + 'connection.autoconnect-retries': self.autoconnect_retries, + 'connection.zone': self.zone, + } + + # IP address options. + # The ovs-interface type can be both ip_conn_type and have a master + # An interface that has a master but is of slave type vrf can have an IP address + if (self.ip_conn_type and (not self.master or self.slave_type == "vrf")) or self.type == "ovs-interface": + options.update({ + 'ipv4.addresses': self.enforce_ipv4_cidr_notation(self.ip4), + 'ipv4.dhcp-client-id': self.dhcp_client_id, + 'ipv4.dns': self.dns4, + 'ipv4.dns-search': self.dns4_search, + 'ipv4.dns-options': self.dns4_options, + 'ipv4.ignore-auto-dns': self.dns4_ignore_auto, + 'ipv4.gateway': self.gw4, + 'ipv4.ignore-auto-routes': self.gw4_ignore_auto, + 'ipv4.routes': self.enforce_routes_format(self.routes4, self.routes4_extended), + 'ipv4.route-metric': self.route_metric4, + 'ipv4.routing-rules': self.routing_rules4, + 'ipv4.never-default': self.never_default4, + 'ipv4.method': self.ipv4_method, + 'ipv4.may-fail': self.may_fail4, + 'ipv6.addresses': self.enforce_ipv6_cidr_notation(self.ip6), + 'ipv6.dns': self.dns6, + 'ipv6.dns-search': 
self.dns6_search, + 'ipv6.dns-options': self.dns6_options, + 'ipv6.ignore-auto-dns': self.dns6_ignore_auto, + 'ipv6.gateway': self.gw6, + 'ipv6.ignore-auto-routes': self.gw6_ignore_auto, + 'ipv6.routes': self.enforce_routes_format(self.routes6, self.routes6_extended), + 'ipv6.route-metric': self.route_metric6, + 'ipv6.method': self.ipv6_method, + 'ipv6.ip6-privacy': self.ip_privacy6, + 'ipv6.addr-gen-mode': self.addr_gen_mode6 + }) + # when 'method' is disabled the 'may_fail' no make sense but accepted by nmcli with keeping 'yes' + # force ignoring to save idempotency + if self.ipv4_method and self.ipv4_method != 'disabled': + options.update({'ipv4.may-fail': self.may_fail4}) + + # Layer 2 options. + if self.mac: + options.update({self.mac_setting: self.mac}) + + if self.mtu_conn_type: + options.update({self.mtu_setting: self.mtu}) + + # Connections that can have a master. + if self.slave_conn_type: + options.update({ + 'connection.master': self.master, + 'connection.slave-type': self.slave_type, + }) + + # Options specific to a connection type. + if self.type == 'bond': + options.update({ + 'arp-interval': self.arp_interval, + 'arp-ip-target': self.arp_ip_target, + 'downdelay': self.downdelay, + 'miimon': self.miimon, + 'mode': self.mode, + 'primary': self.primary, + 'updelay': self.updelay, + 'xmit_hash_policy': self.xmit_hash_policy, + 'fail_over_mac': self.fail_over_mac, + }) + elif self.type == 'bond-slave': + if self.slave_type and self.slave_type != 'bond': + self.module.fail_json(msg="Connection type '%s' cannot be combined with '%s' slave-type. " + "Allowed slave-type for '%s' is 'bond'." 
+ % (self.type, self.slave_type, self.type) + ) + if not self.slave_type: + self.module.warn("Connection 'slave-type' property automatically set to 'bond' " + "because of using 'bond-slave' connection type.") + options.update({ + 'connection.slave-type': 'bond', + }) + elif self.type == 'bridge': + options.update({ + 'bridge.ageing-time': self.ageingtime, + 'bridge.forward-delay': self.forwarddelay, + 'bridge.hello-time': self.hellotime, + 'bridge.max-age': self.maxage, + 'bridge.priority': self.priority, + 'bridge.stp': self.stp, + }) + # priority make sense when stp enabled, otherwise nmcli keeps bridge-priority to 32768 regrdless of input. + # force ignoring to save idempotency + if self.stp: + options.update({'bridge.priority': self.priority}) + elif self.type == 'team': + options.update({ + 'team.runner': self.runner, + 'team.runner-hwaddr-policy': self.runner_hwaddr_policy, + }) + if self.runner_fast_rate is not None: + options.update({ + 'team.runner-fast-rate': self.runner_fast_rate, + }) + elif self.type == 'bridge-slave': + if self.slave_type and self.slave_type != 'bridge': + self.module.fail_json(msg="Connection type '%s' cannot be combined with '%s' slave-type. " + "Allowed slave-type for '%s' is 'bridge'." + % (self.type, self.slave_type, self.type) + ) + if not self.slave_type: + self.module.warn("Connection 'slave-type' property automatically set to 'bridge' " + "because of using 'bridge-slave' connection type.") + options.update({'connection.slave-type': 'bridge'}) + self.module.warn( + "Connection type as 'bridge-slave' implies 'ethernet' connection with 'bridge' slave-type. " + "Consider using slave_type='bridge' with necessary type." 
+ ) + options.update({ + 'bridge-port.path-cost': self.path_cost, + 'bridge-port.hairpin-mode': self.hairpin, + 'bridge-port.priority': self.slavepriority, + }) + elif self.type == 'team-slave': + if self.slave_type and self.slave_type != 'team': + self.module.fail_json(msg="Connection type '%s' cannot be combined with '%s' slave-type. " + "Allowed slave-type for '%s' is 'team'." + % (self.type, self.slave_type, self.type) + ) + if not self.slave_type: + self.module.warn("Connection 'slave-type' property automatically set to 'team' " + "because of using 'team-slave' connection type.") + options.update({ + 'connection.slave-type': 'team', + }) + elif self.tunnel_conn_type: + options.update({ + 'ip-tunnel.local': self.ip_tunnel_local, + 'ip-tunnel.mode': self.type, + 'ip-tunnel.parent': self.ip_tunnel_dev, + 'ip-tunnel.remote': self.ip_tunnel_remote, + }) + if self.type == 'gre': + options.update({ + 'ip-tunnel.input-key': self.ip_tunnel_input_key, + 'ip-tunnel.output-key': self.ip_tunnel_output_key + }) + elif self.type == 'vlan': + options.update({ + 'vlan.id': self.vlanid, + 'vlan.parent': self.vlandev, + 'vlan.flags': self.flags, + 'vlan.ingress': self.ingress, + 'vlan.egress': self.egress, + }) + elif self.type == 'vxlan': + options.update({ + 'vxlan.id': self.vxlan_id, + 'vxlan.local': self.vxlan_local, + 'vxlan.remote': self.vxlan_remote, + }) + elif self.type == 'wifi': + options.update({ + '802-11-wireless.ssid': self.ssid, + 'connection.slave-type': ('bond' if self.slave_type is None else self.slave_type) if self.master else None, + }) + if self.wifi: + for name, value in self.wifi.items(): + options.update({ + '802-11-wireless.%s' % name: value + }) + if self.wifi_sec: + for name, value in self.wifi_sec.items(): + options.update({ + '802-11-wireless-security.%s' % name: value + }) + elif self.type == 'gsm': + if self.gsm: + for name, value in self.gsm.items(): + options.update({ + 'gsm.%s' % name: value, + }) + elif self.type == 'macvlan': + if 
self.macvlan: + for name, value in self.macvlan.items(): + options.update({ + 'macvlan.%s' % name: value, + }) + elif self.state == 'present': + raise NmcliModuleError('type is macvlan but all of the following are missing: macvlan') + elif self.type == 'wireguard': + if self.wireguard: + for name, value in self.wireguard.items(): + options.update({ + 'wireguard.%s' % name: value, + }) + elif self.type == 'vpn': + if self.vpn: + vpn_data_values = '' + for name, value in self.vpn.items(): + if name == 'service-type': + options.update({ + 'vpn.service-type': value, + }) + elif name == 'permissions': + options.update({ + 'connection.permissions': value, + }) + else: + if vpn_data_values != '': + vpn_data_values += ', ' + + if isinstance(value, bool): + value = self.bool_to_string(value) + + vpn_data_values += '%s=%s' % (name, value) + options.update({ + 'vpn.data': vpn_data_values, + }) + elif self.type == 'infiniband': + options.update({ + 'infiniband.transport-mode': self.transport_mode, + }) + if self.infiniband_mac: + options['infiniband.mac-address'] = self.infiniband_mac + elif self.type == 'vrf': + options.update({ + 'table': self.table, + }) + + if self.type == 'ethernet': + if self.sriov: + for name, value in self.sriov.items(): + options.update({ + 'sriov.%s' % name: value, + }) + + # Convert settings values based on the situation. + for setting, value in options.items(): + setting_type = self.settings_type(setting) + convert_func = None + if setting_type is bool: + # Convert all bool options to yes/no. + convert_func = self.bool_to_string + if detect_change: + if setting in ('vlan.id', 'vxlan.id'): + # Convert VLAN/VXLAN IDs to text when detecting changes. + convert_func = to_text + elif setting == self.mtu_setting: + # MTU is 'auto' by default when detecting changes. 
+ convert_func = self.mtu_to_string + elif setting == 'ipv6.ip6-privacy': + convert_func = self.ip6_privacy_to_num + elif setting_type is list: + # Convert lists to strings for nmcli create/modify commands. + convert_func = self.list_to_string + + if callable(convert_func): + options[setting] = convert_func(value) + + return options + + @property + def ip_conn_type(self): + return self.type in ( + 'bond', + 'bridge', + 'dummy', + 'ethernet', + '802-3-ethernet', + 'generic', + 'gre', + 'infiniband', + 'ipip', + 'sit', + 'team', + 'vlan', + 'wifi', + '802-11-wireless', + 'gsm', + 'macvlan', + 'wireguard', + 'vpn', + 'loopback', + 'ovs-interface', + 'vrf' + ) + + @property + def mac_setting(self): + if self.type == 'bridge': + return 'bridge.mac-address' + else: + return '802-3-ethernet.cloned-mac-address' + + @property + def mtu_conn_type(self): + return self.type in ( + 'bond', + 'bond-slave', + 'dummy', + 'ethernet', + 'infiniband', + 'team-slave', + 'vlan', + ) + + @property + def mtu_setting(self): + if self.type == 'infiniband': + return 'infiniband.mtu' + else: + return '802-3-ethernet.mtu' + + @staticmethod + def mtu_to_string(mtu): + if not mtu: + return 'auto' + else: + return to_text(mtu) + + @staticmethod + def ip6_privacy_to_num(privacy): + ip6_privacy_values = { + 'disabled': '0', + 'prefer-public-addr': '1 (enabled, prefer public IP)', + 'prefer-temp-addr': '2 (enabled, prefer temporary IP)', + 'unknown': '-1', + } + + if privacy is None: + return None + + if privacy not in ip6_privacy_values: + raise AssertionError('{privacy} is invalid ip_privacy6 option'.format(privacy=privacy)) + + return ip6_privacy_values[privacy] + + @property + def slave_conn_type(self): + return self.type in ( + 'ethernet', + 'bridge', + 'bond', + 'vlan', + 'team', + 'wifi', + 'bond-slave', + 'bridge-slave', + 'team-slave', + 'wifi', + 'infiniband', + 'ovs-port', + 'ovs-interface', + ) + + @property + def tunnel_conn_type(self): + return self.type in ( + 'gre', + 'ipip', + 
'sit', + ) + + @staticmethod + def enforce_ipv4_cidr_notation(ip4_addresses): + if ip4_addresses is None: + return None + return [address if '/' in address else address + '/32' for address in ip4_addresses] + + @staticmethod + def enforce_ipv6_cidr_notation(ip6_addresses): + if ip6_addresses is None: + return None + return [address if '/' in address else address + '/128' for address in ip6_addresses] + + def enforce_routes_format(self, routes, routes_extended): + if routes is not None: + return routes + elif routes_extended is not None: + return [self.route_to_string(route) for route in routes_extended] + else: + return None + + @staticmethod + def route_to_string(route): + result_str = '' + result_str += route['ip'] + if route.get('next_hop') is not None: + result_str += ' ' + route['next_hop'] + if route.get('metric') is not None: + result_str += ' ' + str(route['metric']) + + for attribute, value in sorted(route.items()): + if attribute not in ('ip', 'next_hop', 'metric') and value is not None: + result_str += ' {0}={1}'.format(attribute, str(value).lower()) + + return result_str + + @staticmethod + def bool_to_string(boolean): + if boolean: + return "yes" + else: + return "no" + + @staticmethod + def list_to_string(lst): + if lst is None: + return None + else: + return ",".join(lst) + + @staticmethod + def settings_type(setting): + if setting in {'bridge.stp', + 'bridge-port.hairpin-mode', + 'connection.autoconnect', + 'ipv4.never-default', + 'ipv4.ignore-auto-dns', + 'ipv4.ignore-auto-routes', + 'ipv4.may-fail', + 'ipv6.ignore-auto-dns', + 'ipv6.ignore-auto-routes', + '802-11-wireless.hidden', + 'team.runner-fast-rate'}: + return bool + elif setting in {'ipv4.addresses', + 'ipv6.addresses', + 'ipv4.dns', + 'ipv4.dns-search', + 'ipv4.dns-options', + 'ipv4.routes', + 'ipv4.routing-rules', + 'ipv6.dns', + 'ipv6.dns-search', + 'ipv6.dns-options', + 'ipv6.routes', + '802-11-wireless-security.group', + '802-11-wireless-security.leap-password-flags', + 
'802-11-wireless-security.pairwise', + '802-11-wireless-security.proto', + '802-11-wireless-security.psk-flags', + '802-11-wireless-security.wep-key-flags', + '802-11-wireless.mac-address-blacklist'}: + return list + elif setting in {'connection.autoconnect-priority', + 'connection.autoconnect-retries'}: + return int + return str + + def get_route_params(self, raw_values): + routes_params = [] + for raw_value in raw_values: + route_params = {} + for parameter, value in re.findall(r'([\w-]*)\s?=\s?([^\s,}]*)', raw_value): + if parameter == 'nh': + route_params['next_hop'] = value + elif parameter == 'mt': + route_params['metric'] = value + else: + route_params[parameter] = value + routes_params.append(route_params) + return [self.route_to_string(route_params) for route_params in routes_params] + + def list_connection_info(self): + cmd = [self.nmcli_bin, '--fields', 'name', '--terse', 'con', 'show'] + (rc, out, err) = self.execute_command(cmd) + if rc != 0: + raise NmcliModuleError(err) + return out.splitlines() + + def connection_exists(self): + return self.conn_name in self.list_connection_info() + + def down_connection(self): + cmd = [self.nmcli_bin, 'con', 'down', self.conn_name] + return self.execute_command(cmd) + + def up_connection(self): + cmd = [self.nmcli_bin, 'con', 'up', self.conn_name] + return self.execute_command(cmd) + + def reload_connection(self): + cmd = [self.nmcli_bin, 'con', 'reload'] + return self.execute_command(cmd) + + def connection_update(self, nmcli_command): + if nmcli_command == 'create': + cmd = [self.nmcli_bin, 'con', 'add', 'type'] + if self.tunnel_conn_type: + cmd.append('ip-tunnel') + else: + cmd.append(self.type) + cmd.append('con-name') + elif nmcli_command == 'modify': + cmd = [self.nmcli_bin, 'con', 'modify'] + else: + self.module.fail_json(msg="Invalid nmcli command.") + cmd.append(self.conn_name) + + # Use connection name as default for interface name on creation. 
+ if nmcli_command == 'create' and self.ifname is None: + ifname = self.conn_name + else: + ifname = self.ifname + + options = { + 'connection.interface-name': ifname, + } + + # VPN doesn't need an interface but if sended it must be a valid interface. + if self.type == 'vpn' and self.ifname is None: + del options['connection.interface-name'] + + options.update(self.connection_options()) + + # Constructing the command. + for key, value in options.items(): + if value is not None: + if key in self.SECRET_OPTIONS: + self.edit_commands += ['set %s %s' % (key, value)] + continue + if key == 'xmit_hash_policy': + cmd.extend(['+bond.options', 'xmit_hash_policy=%s' % value]) + continue + if key == 'fail_over_mac': + cmd.extend(['+bond.options', 'fail_over_mac=%s' % value]) + continue + cmd.extend([key, value]) + + return self.execute_command(cmd) + + def create_connection(self): + status = self.connection_update('create') + if status[0] == 0 and self.edit_commands: + status = self.edit_connection() + if self.create_connection_up: + status = self.up_connection() + return status + + @property + def create_connection_up(self): + if self.type in ('bond', 'dummy', 'ethernet', 'infiniband', 'wifi'): + if (self.mtu is not None) or (self.dns4 is not None) or (self.dns6 is not None): + return True + elif self.type == 'team': + if (self.dns4 is not None) or (self.dns6 is not None): + return True + return False + + def remove_connection(self): + # self.down_connection() + cmd = [self.nmcli_bin, 'con', 'del', self.conn_name] + return self.execute_command(cmd) + + def modify_connection(self): + status = self.connection_update('modify') + if status[0] == 0 and self.edit_commands: + status = self.edit_connection() + return status + + def edit_connection(self): + commands = self.edit_commands + ['save', 'quit'] + return self.execute_edit_commands(commands, arguments=[self.conn_name]) + + def show_connection(self): + cmd = [self.nmcli_bin, '--show-secrets', 'con', 'show', self.conn_name] + 
+ (rc, out, err) = self.execute_command(cmd) + + if rc != 0: + raise NmcliModuleError(err) + + p_enum_value = re.compile(r'^([-]?\d+) \((\w+)\)$') + + conn_info = dict() + for line in out.splitlines(): + pair = line.split(':', 1) + key = pair[0].strip() + key_type = self.settings_type(key) + if key and len(pair) > 1: + raw_value = pair[1].lstrip() + if raw_value == '--': + if key_type == list: + conn_info[key] = [] + else: + conn_info[key] = None + elif key == 'bond.options': + # Aliases such as 'miimon', 'downdelay' are equivalent to the +bond.options 'option=value' syntax. + opts = raw_value.split(',') + for opt in opts: + alias_pair = opt.split('=', 1) + if len(alias_pair) > 1: + alias_key = alias_pair[0] + alias_value = alias_pair[1] + conn_info[alias_key] = alias_value + elif key in ('ipv4.routes', 'ipv6.routes'): + conn_info[key] = [s.strip() for s in raw_value.split(';')] + elif key_type == list: + conn_info[key] = [s.strip() for s in raw_value.split(',')] + else: + m_enum = p_enum_value.match(raw_value) + if m_enum is not None: + value = m_enum.group(1) + else: + value = raw_value + conn_info[key] = value + + return conn_info + + def get_supported_properties(self, setting): + properties = [] + + if setting == '802-11-wireless-security': + set_property = 'psk' + set_value = 'FAKEVALUE' + commands = ['set %s.%s %s' % (setting, set_property, set_value)] + else: + commands = [] + + commands += ['print %s' % setting, 'quit', 'yes'] + + (rc, out, err) = self.execute_edit_commands(commands, arguments=['type', self.type]) + + if rc != 0: + raise NmcliModuleError(err) + + for line in out.splitlines(): + prefix = '%s.' 
% setting + if line.startswith(prefix): + pair = line.split(':', 1) + property = pair[0].strip().replace(prefix, '') + properties.append(property) + + return properties + + def check_for_unsupported_properties(self, setting): + if setting == '802-11-wireless': + setting_key = 'wifi' + elif setting == '802-11-wireless-security': + setting_key = 'wifi_sec' + else: + setting_key = setting + + supported_properties = self.get_supported_properties(setting) + unsupported_properties = [] + + for property, value in getattr(self, setting_key).items(): + if property not in supported_properties: + unsupported_properties.append(property) + + if unsupported_properties: + msg_options = [] + for property in unsupported_properties: + msg_options.append('%s.%s' % (setting_key, property)) + + msg = 'Invalid or unsupported option(s): "%s"' % '", "'.join(msg_options) + if self.ignore_unsupported_suboptions: + self.module.warn(msg) + else: + self.module.fail_json(msg=msg) + + return unsupported_properties + + def _compare_conn_params(self, conn_info, options): + changed = False + diff_before = dict() + diff_after = dict() + + for key, value in options.items(): + # We can't just do `if not value` because then if there's a value + # of 0 specified as an integer it'll be interpreted as empty when + # it actually isn't. 
+ if value not in (0, []) and not value: + continue + + if key in conn_info: + current_value = conn_info[key] + if key == '802-11-wireless.wake-on-wlan' and current_value is not None: + match = re.match('0x([0-9A-Fa-f]+)', current_value) + if match: + current_value = str(int(match.group(1), 16)) + if key in ('ipv4.routes', 'ipv6.routes') and current_value is not None: + current_value = self.get_route_params(current_value) + if key == self.mac_setting: + # MAC addresses are case insensitive, nmcli always reports them in uppercase + value = value.upper() + # ensure current_value is also converted to uppercase in case nmcli changes behaviour + if current_value: + current_value = current_value.upper() + if key == 'gsm.apn': + # Depending on version nmcli adds double-qoutes to gsm.apn + # Need to strip them in order to compare both + if current_value: + current_value = current_value.strip('"') + if key == self.mtu_setting and self.mtu is None: + self.mtu = 0 + if key == 'vpn.data': + if current_value: + current_value = sorted(re.sub(r'\s*=\s*', '=', part.strip(), count=1) for part in current_value.split(',')) + value = sorted(part.strip() for part in value.split(',')) + else: + # parameter does not exist + current_value = None + + if isinstance(current_value, list) and isinstance(value, list): + # compare values between two lists + if key in ('ipv4.addresses', 'ipv6.addresses', 'ipv4.dns', 'ipv6.dns', 'ipv4.dns-search', 'ipv6.dns-search'): + # The order of IP addresses matters because the first one + # is the default source address for outbound connections. + # Similarly, the order of DNS nameservers and search + # suffixes is important. 
+ changed |= current_value != value + else: + changed |= sorted(current_value) != sorted(value) + elif all([key == self.mtu_setting, self.type == 'dummy', current_value is None, value == 'auto', self.mtu is None]): + value = None + else: + value = to_text(value) + if current_value != value: + changed = True + + diff_before[key] = current_value + diff_after[key] = value + + diff = { + 'before': diff_before, + 'after': diff_after, + } + return (changed, diff) + + def is_connection_changed(self): + options = { + 'connection.interface-name': self.ifname, + } + + # VPN doesn't need an interface but if sended it must be a valid interface. + if self.type == 'vpn' and self.ifname is None: + del options['connection.interface-name'] + + if not self.type: + current_con_type = self.show_connection().get('connection.type') + if current_con_type: + if current_con_type == '802-11-wireless': + current_con_type = 'wifi' + self.type = current_con_type + + options.update(self.connection_options(detect_change=True)) + return self._compare_conn_params(self.show_connection(), options) + + +def main(): + # Parsing argument file + module = AnsibleModule( + argument_spec=dict( + ignore_unsupported_suboptions=dict(type='bool', default=False), + autoconnect=dict(type='bool', default=True), + autoconnect_priority=dict(type='int'), + autoconnect_retries=dict(type='int'), + state=dict(type='str', required=True, choices=['absent', 'present', 'up', 'down']), + conn_name=dict(type='str', required=True), + conn_reload=dict(type='bool', default=False), + master=dict(type='str'), + slave_type=dict(type='str', choices=['bond', 'bridge', 'team', 'ovs-port', 'vrf']), + ifname=dict(type='str'), + type=dict(type='str', + choices=[ + 'bond', + 'bond-slave', + 'bridge', + 'bridge-slave', + 'dummy', + 'ethernet', + 'generic', + 'gre', + 'infiniband', + 'ipip', + 'sit', + 'team', + 'team-slave', + 'vlan', + 'vxlan', + 'wifi', + 'gsm', + 'macvlan', + 'wireguard', + 'vpn', + 'loopback', + 'ovs-interface', + 
'ovs-bridge', + 'ovs-port', + 'vrf', + ]), + ip4=dict(type='list', elements='str'), + gw4=dict(type='str'), + gw4_ignore_auto=dict(type='bool', default=False), + routes4=dict(type='list', elements='str'), + routes4_extended=dict(type='list', + elements='dict', + options=dict( + ip=dict(type='str', required=True), + next_hop=dict(type='str'), + metric=dict(type='int'), + table=dict(type='int'), + tos=dict(type='int'), + cwnd=dict(type='int'), + mtu=dict(type='int'), + onlink=dict(type='bool') + )), + route_metric4=dict(type='int'), + routing_rules4=dict(type='list', elements='str'), + never_default4=dict(type='bool', default=False), + dns4=dict(type='list', elements='str'), + dns4_search=dict(type='list', elements='str'), + dns4_options=dict(type='list', elements='str'), + dns4_ignore_auto=dict(type='bool', default=False), + method4=dict(type='str', choices=['auto', 'link-local', 'manual', 'shared', 'disabled']), + may_fail4=dict(type='bool', default=True), + dhcp_client_id=dict(type='str'), + ip6=dict(type='list', elements='str'), + gw6=dict(type='str'), + gw6_ignore_auto=dict(type='bool', default=False), + dns6=dict(type='list', elements='str'), + dns6_search=dict(type='list', elements='str'), + dns6_options=dict(type='list', elements='str'), + dns6_ignore_auto=dict(type='bool', default=False), + routes6=dict(type='list', elements='str'), + routes6_extended=dict(type='list', + elements='dict', + options=dict( + ip=dict(type='str', required=True), + next_hop=dict(type='str'), + metric=dict(type='int'), + table=dict(type='int'), + cwnd=dict(type='int'), + mtu=dict(type='int'), + onlink=dict(type='bool') + )), + route_metric6=dict(type='int'), + method6=dict(type='str', choices=['ignore', 'auto', 'dhcp', 'link-local', 'manual', 'shared', 'disabled']), + ip_privacy6=dict(type='str', choices=['disabled', 'prefer-public-addr', 'prefer-temp-addr', 'unknown']), + addr_gen_mode6=dict(type='str', choices=['default', 'default-or-eui64', 'eui64', 'stable-privacy']), + # Bond 
Specific vars + mode=dict(type='str', default='balance-rr', + choices=['802.3ad', 'active-backup', 'balance-alb', 'balance-rr', 'balance-tlb', 'balance-xor', 'broadcast']), + miimon=dict(type='int'), + downdelay=dict(type='int'), + updelay=dict(type='int'), + xmit_hash_policy=dict(type='str'), + fail_over_mac=dict(type='str', choices=['none', 'active', 'follow']), + arp_interval=dict(type='int'), + arp_ip_target=dict(type='str'), + primary=dict(type='str'), + # general usage + mtu=dict(type='int'), + mac=dict(type='str'), + zone=dict(type='str'), + # bridge specific vars + stp=dict(type='bool', default=True), + priority=dict(type='int', default=128), + slavepriority=dict(type='int', default=32), + forwarddelay=dict(type='int', default=15), + hellotime=dict(type='int', default=2), + maxage=dict(type='int', default=20), + ageingtime=dict(type='int', default=300), + hairpin=dict(type='bool', default=False), + path_cost=dict(type='int', default=100), + # team specific vars + runner=dict(type='str', default='roundrobin', + choices=['broadcast', 'roundrobin', 'activebackup', 'loadbalance', 'lacp']), + # team active-backup runner specific options + runner_hwaddr_policy=dict(type='str', choices=['same_all', 'by_active', 'only_active']), + # team lacp runner specific options + runner_fast_rate=dict(type='bool'), + # vlan specific vars + vlanid=dict(type='int'), + vlandev=dict(type='str'), + flags=dict(type='str'), + ingress=dict(type='str'), + egress=dict(type='str'), + # vxlan specific vars + vxlan_id=dict(type='int'), + vxlan_local=dict(type='str'), + vxlan_remote=dict(type='str'), + # ip-tunnel specific vars + ip_tunnel_dev=dict(type='str'), + ip_tunnel_local=dict(type='str'), + ip_tunnel_remote=dict(type='str'), + # ip-tunnel type gre specific vars + ip_tunnel_input_key=dict(type='str', no_log=True), + ip_tunnel_output_key=dict(type='str', no_log=True), + # 802-11-wireless* specific vars + ssid=dict(type='str'), + wifi=dict(type='dict'), + wifi_sec=dict(type='dict', 
no_log=True), + gsm=dict(type='dict'), + macvlan=dict(type='dict', options=dict( + mode=dict(type='int', choices=[1, 2, 3, 4, 5], required=True), + parent=dict(type='str', required=True), + promiscuous=dict(type='bool'), + tap=dict(type='bool'))), + wireguard=dict(type='dict'), + vpn=dict(type='dict'), + sriov=dict(type='dict'), + table=dict(type='int'), + # infiniband specific vars + transport_mode=dict(type='str', choices=['datagram', 'connected']), + infiniband_mac=dict(type='str'), + + ), + mutually_exclusive=[['never_default4', 'gw4'], + ['routes4_extended', 'routes4'], + ['routes6_extended', 'routes6']], + required_if=[ + ("type", "wifi", ["ssid"]), + ("type", "team-slave", ["master", "ifname"]), + ("slave_type", "team", ["master", "ifname"]), + ], + supports_check_mode=True, + ) + module.run_command_environ_update = dict(LANG='C', LC_ALL='C', LC_MESSAGES='C', LC_CTYPE='C') + + nmcli = Nmcli(module) + + (rc, out, err) = (None, '', '') + result = {'conn_name': nmcli.conn_name, 'state': nmcli.state} + + # team checks + if nmcli.type == "team": + if nmcli.runner_hwaddr_policy and not nmcli.runner == "activebackup": + nmcli.module.fail_json(msg="Runner-hwaddr-policy is only allowed for runner activebackup") + if nmcli.runner_fast_rate is not None and nmcli.runner != "lacp": + nmcli.module.fail_json(msg="runner-fast-rate is only allowed for runner lacp") + if nmcli.type == 'wifi': + unsupported_properties = {} + if nmcli.wifi: + if 'ssid' in nmcli.wifi: + module.warn("Ignoring option 'wifi.ssid', it must be specified with option 'ssid'") + del nmcli.wifi['ssid'] + unsupported_properties['wifi'] = nmcli.check_for_unsupported_properties('802-11-wireless') + if nmcli.wifi_sec: + unsupported_properties['wifi_sec'] = nmcli.check_for_unsupported_properties('802-11-wireless-security') + if nmcli.ignore_unsupported_suboptions and unsupported_properties: + for setting_key, properties in unsupported_properties.items(): + for property in properties: + del getattr(nmcli, 
setting_key)[property] + + try: + if nmcli.state == 'absent': + if nmcli.connection_exists(): + if module.check_mode: + module.exit_json(changed=True) + (rc, out, err) = nmcli.down_connection() + (rc, out, err) = nmcli.remove_connection() + if rc != 0: + module.fail_json(name=('Error removing connection named %s' % nmcli.conn_name), msg=err, rc=rc) + + elif nmcli.state == 'present': + if nmcli.connection_exists(): + changed, diff = nmcli.is_connection_changed() + if module._diff: + result['diff'] = diff + + if changed: + # modify connection (note: this function is check mode aware) + # result['Connection']=('Connection %s of Type %s is not being added' % (nmcli.conn_name, nmcli.type)) + result['Exists'] = 'Connections do exist so we are modifying them' + if module.check_mode: + module.exit_json(changed=True, **result) + (rc, out, err) = nmcli.modify_connection() + if nmcli.conn_reload: + (rc, out, err) = nmcli.reload_connection() + else: + result['Exists'] = 'Connections already exist and no changes made' + if module.check_mode: + module.exit_json(changed=False, **result) + if not nmcli.connection_exists(): + result['Connection'] = ('Connection %s of Type %s is being added' % (nmcli.conn_name, nmcli.type)) + if module.check_mode: + module.exit_json(changed=True, **result) + (rc, out, err) = nmcli.create_connection() + if rc is not None and rc != 0: + module.fail_json(name=nmcli.conn_name, msg=err, rc=rc) + + elif nmcli.state == 'up': + if nmcli.connection_exists(): + if module.check_mode: + module.exit_json(changed=True) + if nmcli.conn_reload: + (rc, out, err) = nmcli.reload_connection() + (rc, out, err) = nmcli.up_connection() + if rc != 0: + module.fail_json(name=('Error bringing up connection named %s' % nmcli.conn_name), msg=err, rc=rc) + + elif nmcli.state == 'down': + if nmcli.connection_exists(): + if module.check_mode: + module.exit_json(changed=True) + if nmcli.conn_reload: + (rc, out, err) = nmcli.reload_connection() + (rc, out, err) = 
nmcli.down_connection() + if rc != 0: + module.fail_json(name=('Error bringing down connection named %s' % nmcli.conn_name), msg=err, rc=rc) + + except NmcliModuleError as e: + module.fail_json(name=nmcli.conn_name, msg=str(e)) + + if rc is None: + result['changed'] = False + else: + result['changed'] = True + if out: + result['stdout'] = out + if err: + result['stderr'] = err + + module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/clustering/nomad/nomad_job.py b/plugins/modules/nomad_job.py similarity index 82% rename from plugins/modules/clustering/nomad/nomad_job.py rename to plugins/modules/nomad_job.py index 92081dfabd..d5ecec3107 100644 --- a/plugins/modules/clustering/nomad/nomad_job.py +++ b/plugins/modules/nomad_job.py @@ -1,64 +1,66 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- -# (c) 2020, FERREIRA Christophe -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# Copyright (c) 2020, FERREIRA Christophe +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: nomad_job author: FERREIRA Christophe (@chris93111) version_added: "1.3.0" short_description: Launch a Nomad Job description: - - Launch a Nomad job. - - Stop a Nomad job. - - Force start a Nomad job + - Launch a Nomad job. + - Stop a Nomad job. + - Force start a Nomad job. requirements: - python-nomad extends_documentation_fragment: - community.general.nomad + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: none options: - name: - description: - - Name of job for delete, stop and start job without source. - - Name of job for delete, stop and start job without source. 
- - Either this or I(content) must be specified. - type: str - state: - description: - - Deploy or remove job. - choices: ["present", "absent"] - required: true - type: str - force_start: - description: - - Force job to started. - type: bool - default: false - content: - description: - - Content of Nomad job. - - Either this or I(name) must be specified. - type: str - content_format: - description: - - Type of content of Nomad job. - choices: ["hcl", "json"] - default: hcl - type: str -notes: - - C(check_mode) is supported. + name: + description: + - Name of job for delete, stop and start job without source. + - Name of job for delete, stop and start job without source. + - Either this or O(content) must be specified. + type: str + state: + description: + - Deploy or remove job. + choices: ["present", "absent"] + required: true + type: str + force_start: + description: + - Force job to started. + type: bool + default: false + content: + description: + - Content of Nomad job. + - Either this or O(name) must be specified. + type: str + content_format: + description: + - Type of content of Nomad job. + choices: ["hcl", "json"] + default: hcl + type: str seealso: - name: Nomad jobs documentation description: Complete documentation for Nomad API jobs. 
link: https://www.nomadproject.io/api-docs/jobs/ -''' +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: Create job community.general.nomad_job: host: localhost @@ -66,6 +68,14 @@ EXAMPLES = ''' content: "{{ lookup('ansible.builtin.file', 'job.hcl') }}" timeout: 120 +- name: Connect with port to create job + community.general.nomad_job: + host: localhost + port: 4645 + state: present + content: "{{ lookup('ansible.builtin.file', 'job.hcl') }}" + timeout: 120 + - name: Stop job community.general.nomad_job: host: localhost @@ -79,7 +89,7 @@ EXAMPLES = ''' name: api timeout: 120 force_start: true -''' +""" import json @@ -98,6 +108,7 @@ def run(): module = AnsibleModule( argument_spec=dict( host=dict(required=True, type='str'), + port=dict(type='int', default=4646), state=dict(required=True, choices=['present', 'absent']), use_ssl=dict(type='bool', default=True), timeout=dict(type='int', default=5), @@ -127,6 +138,7 @@ def run(): nomad_client = nomad.Nomad( host=module.params.get('host'), + port=module.params.get('port'), secure=module.params.get('use_ssl'), timeout=module.params.get('timeout'), verify=module.params.get('validate_certs'), diff --git a/plugins/modules/nomad_job_info.py b/plugins/modules/nomad_job_info.py new file mode 100644 index 0000000000..98cec59746 --- /dev/null +++ b/plugins/modules/nomad_job_info.py @@ -0,0 +1,339 @@ +#!/usr/bin/python + +# Copyright (c) 2020, FERREIRA Christophe +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + +DOCUMENTATION = r""" +module: nomad_job_info +author: FERREIRA Christophe (@chris93111) +version_added: "1.3.0" +short_description: Get Nomad Jobs info +description: + - Get info for one Nomad job. + - List Nomad jobs. 
+requirements: + - python-nomad +extends_documentation_fragment: + - community.general.nomad + - community.general.attributes + - community.general.attributes.info_module +options: + name: + description: + - Name of job for Get info. + - If not specified, lists all jobs. + type: str +seealso: + - name: Nomad jobs documentation + description: Complete documentation for Nomad API jobs. + link: https://www.nomadproject.io/api-docs/jobs/ +""" + +EXAMPLES = r""" +- name: Get info for job awx + community.general.nomad_job_info: + host: localhost + name: awx + register: result + +- name: List Nomad jobs + community.general.nomad_job_info: + host: localhost + register: result +""" + +RETURN = r""" +result: + description: List with dictionary contains jobs info. + returned: success + type: list + sample: + [ + { + "Affinities": null, + "AllAtOnce": false, + "Constraints": null, + "ConsulToken": "", + "CreateIndex": 13, + "Datacenters": [ + "dc1" + ], + "Dispatched": false, + "ID": "example", + "JobModifyIndex": 13, + "Meta": null, + "ModifyIndex": 13, + "Multiregion": null, + "Name": "example", + "Namespace": "default", + "NomadTokenID": "", + "ParameterizedJob": null, + "ParentID": "", + "Payload": null, + "Periodic": null, + "Priority": 50, + "Region": "global", + "Spreads": null, + "Stable": false, + "Status": "pending", + "StatusDescription": "", + "Stop": false, + "SubmitTime": 1602244370615307000, + "TaskGroups": [ + { + "Affinities": null, + "Constraints": null, + "Count": 1, + "EphemeralDisk": { + "Migrate": false, + "SizeMB": 300, + "Sticky": false + }, + "Meta": null, + "Migrate": { + "HealthCheck": "checks", + "HealthyDeadline": 300000000000, + "MaxParallel": 1, + "MinHealthyTime": 10000000000 + }, + "Name": "cache", + "Networks": null, + "ReschedulePolicy": { + "Attempts": 0, + "Delay": 30000000000, + "DelayFunction": "exponential", + "Interval": 0, + "MaxDelay": 3600000000000, + "Unlimited": true + }, + "RestartPolicy": { + "Attempts": 3, + "Delay": 
15000000000, + "Interval": 1800000000000, + "Mode": "fail" + }, + "Scaling": null, + "Services": null, + "ShutdownDelay": null, + "Spreads": null, + "StopAfterClientDisconnect": null, + "Tasks": [ + { + "Affinities": null, + "Artifacts": null, + "CSIPluginConfig": null, + "Config": { + "image": "redis:3.2", + "port_map": [ + { + "db": 6379.0 + } + ] + }, + "Constraints": null, + "DispatchPayload": null, + "Driver": "docker", + "Env": null, + "KillSignal": "", + "KillTimeout": 5000000000, + "Kind": "", + "Leader": false, + "Lifecycle": null, + "LogConfig": { + "MaxFileSizeMB": 10, + "MaxFiles": 10 + }, + "Meta": null, + "Name": "redis", + "Resources": { + "CPU": 500, + "Devices": null, + "DiskMB": 0, + "IOPS": 0, + "MemoryMB": 256, + "Networks": [ + { + "CIDR": "", + "DNS": null, + "Device": "", + "DynamicPorts": [ + { + "HostNetwork": "default", + "Label": "db", + "To": 0, + "Value": 0 + } + ], + "IP": "", + "MBits": 10, + "Mode": "", + "ReservedPorts": null + } + ] + }, + "RestartPolicy": { + "Attempts": 3, + "Delay": 15000000000, + "Interval": 1800000000000, + "Mode": "fail" + }, + "Services": [ + { + "AddressMode": "auto", + "CanaryMeta": null, + "CanaryTags": null, + "Checks": [ + { + "AddressMode": "", + "Args": null, + "CheckRestart": null, + "Command": "", + "Expose": false, + "FailuresBeforeCritical": 0, + "GRPCService": "", + "GRPCUseTLS": false, + "Header": null, + "InitialStatus": "", + "Interval": 10000000000, + "Method": "", + "Name": "alive", + "Path": "", + "PortLabel": "", + "Protocol": "", + "SuccessBeforePassing": 0, + "TLSSkipVerify": false, + "TaskName": "", + "Timeout": 2000000000, + "Type": "tcp" + } + ], + "Connect": null, + "EnableTagOverride": false, + "Meta": null, + "Name": "redis-cache", + "PortLabel": "db", + "Tags": [ + "global", + "cache" + ], + "TaskName": "" + } + ], + "ShutdownDelay": 0, + "Templates": null, + "User": "", + "Vault": null, + "VolumeMounts": null + } + ], + "Update": { + "AutoPromote": false, + "AutoRevert": false, + 
"Canary": 0, + "HealthCheck": "checks", + "HealthyDeadline": 180000000000, + "MaxParallel": 1, + "MinHealthyTime": 10000000000, + "ProgressDeadline": 600000000000, + "Stagger": 30000000000 + }, + "Volumes": null + } + ], + "Type": "service", + "Update": { + "AutoPromote": false, + "AutoRevert": false, + "Canary": 0, + "HealthCheck": "", + "HealthyDeadline": 0, + "MaxParallel": 1, + "MinHealthyTime": 0, + "ProgressDeadline": 0, + "Stagger": 30000000000 + }, + "VaultNamespace": "", + "VaultToken": "", + "Version": 0 + } + ] +""" + +from ansible.module_utils.basic import AnsibleModule, missing_required_lib +from ansible.module_utils.common.text.converters import to_native + +import_nomad = None +try: + import nomad + import_nomad = True +except ImportError: + import_nomad = False + + +def run(): + module = AnsibleModule( + argument_spec=dict( + host=dict(required=True, type='str'), + port=dict(type='int', default=4646), + use_ssl=dict(type='bool', default=True), + timeout=dict(type='int', default=5), + validate_certs=dict(type='bool', default=True), + client_cert=dict(type='path'), + client_key=dict(type='path'), + namespace=dict(type='str'), + name=dict(type='str'), + token=dict(type='str', no_log=True) + ), + supports_check_mode=True + ) + + if not import_nomad: + module.fail_json(msg=missing_required_lib("python-nomad")) + + certificate_ssl = (module.params.get('client_cert'), module.params.get('client_key')) + + nomad_client = nomad.Nomad( + host=module.params.get('host'), + port=module.params.get('port'), + secure=module.params.get('use_ssl'), + timeout=module.params.get('timeout'), + verify=module.params.get('validate_certs'), + cert=certificate_ssl, + namespace=module.params.get('namespace'), + token=module.params.get('token') + ) + + changed = False + result = list() + try: + job_list = nomad_client.jobs.get_jobs() + for job in job_list: + result.append(nomad_client.job.get_job(job.get('ID'))) + except Exception as e: + module.fail_json(msg=to_native(e)) + + 
if module.params.get('name'): + filter = list() + try: + for job in result: + if job.get('ID') == module.params.get('name'): + filter.append(job) + result = filter + if not filter: + module.fail_json(msg="Couldn't find Job with id " + str(module.params.get('name'))) + except Exception as e: + module.fail_json(msg=to_native(e)) + + module.exit_json(changed=changed, result=result) + + +def main(): + + run() + + +if __name__ == "__main__": + main() diff --git a/plugins/modules/nomad_token.py b/plugins/modules/nomad_token.py new file mode 100644 index 0000000000..8484334f91 --- /dev/null +++ b/plugins/modules/nomad_token.py @@ -0,0 +1,299 @@ +#!/usr/bin/python + +# Copyright (c) 2023, Pedro Nascimento +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + + +DOCUMENTATION = r""" +module: nomad_token +author: Pedro Nascimento (@apecnascimento) +version_added: "8.1.0" +short_description: Manage Nomad ACL tokens +description: + - This module allows to create Bootstrap tokens, create ACL tokens, update ACL tokens, and delete ACL tokens. +requirements: + - python-nomad +extends_documentation_fragment: + - community.general.nomad + - community.general.attributes +attributes: + check_mode: + support: none + diff_mode: + support: none +options: + name: + description: + - Name of ACL token to create. + type: str + token_type: + description: + - The type of the token can be V(client), V(management), or V(bootstrap). + choices: ["client", "management", "bootstrap"] + type: str + default: "client" + policies: + description: + - A list of the policies assigned to the token. + type: list + elements: str + default: [] + global_replicated: + description: + - Indicates whether or not the token was created with the C(--global). + type: bool + default: false + state: + description: + - Create or remove ACL token. 
+ choices: ["present", "absent"] + required: true + type: str + +seealso: + - name: Nomad ACL documentation + description: Complete documentation for Nomad API ACL. + link: https://developer.hashicorp.com/nomad/api-docs/acl/tokens +""" + +EXAMPLES = r""" +- name: Create boostrap token + community.general.nomad_token: + host: localhost + token_type: bootstrap + state: present + +- name: Create ACL token + community.general.nomad_token: + host: localhost + name: "Dev token" + token_type: client + policies: + - readonly + global_replicated: false + state: absent + +- name: Update ACL token Dev token + community.general.nomad_token: + host: localhost + name: "Dev token" + token_type: client + policies: + - readonly + - devpolicy + global_replicated: false + state: absent + +- name: Delete ACL token + community.general.nomad_token: + host: localhost + name: "Dev token" + state: absent +""" + +RETURN = r""" +result: + description: Result returned by nomad. + returned: always + type: dict + sample: + { + "accessor_id": "0d01c55f-8d63-f832-04ff-1866d4eb594e", + "create_index": 14, + "create_time": "2023-11-12T18:48:34.248857001Z", + "expiration_time": null, + "expiration_ttl": "", + "global": true, + "hash": "eSn8H8RVqh8As8WQNnC2vlBRqXy6DECogc5umzX0P30=", + "modify_index": 836, + "name": "devs", + "policies": [ + "readonly" + ], + "roles": null, + "secret_id": "12e878ab-e1f6-e103-b4c4-3b5173bb4cea", + "type": "client" + } +""" + +from ansible.module_utils.basic import AnsibleModule, missing_required_lib +from ansible.module_utils.common.text.converters import to_native + +import_nomad = None + +try: + import nomad + + import_nomad = True +except ImportError: + import_nomad = False + + +def get_token(name, nomad_client): + tokens = nomad_client.acl.get_tokens() + token = next((token for token in tokens + if token.get('Name') == name), None) + return token + + +def transform_response(nomad_response): + transformed_response = { + "accessor_id": nomad_response['AccessorID'], + 
"create_index": nomad_response['CreateIndex'], + "create_time": nomad_response['CreateTime'], + "expiration_ttl": nomad_response['ExpirationTTL'], + "expiration_time": nomad_response['ExpirationTime'], + "global": nomad_response['Global'], + "hash": nomad_response['Hash'], + "modify_index": nomad_response['ModifyIndex'], + "name": nomad_response['Name'], + "policies": nomad_response['Policies'], + "roles": nomad_response['Roles'], + "secret_id": nomad_response['SecretID'], + "type": nomad_response['Type'] + } + + return transformed_response + + +argument_spec = dict( + host=dict(required=True, type='str'), + port=dict(type='int', default=4646), + state=dict(required=True, choices=['present', 'absent']), + use_ssl=dict(type='bool', default=True), + timeout=dict(type='int', default=5), + validate_certs=dict(type='bool', default=True), + client_cert=dict(type='path'), + client_key=dict(type='path'), + namespace=dict(type='str'), + token=dict(type='str', no_log=True), + name=dict(type='str'), + token_type=dict(choices=['client', 'management', 'bootstrap'], default='client'), + policies=dict(type='list', elements='str', default=[]), + global_replicated=dict(type='bool', default=False), +) + + +def setup_module_object(): + module = AnsibleModule( + argument_spec=argument_spec, + supports_check_mode=False, + required_one_of=[ + ['name', 'token_type'] + ], + required_if=[ + ('token_type', 'client', ('name',)), + ('token_type', 'management', ('name',)), + ], + ) + return module + + +def setup_nomad_client(module): + if not import_nomad: + module.fail_json(msg=missing_required_lib("python-nomad")) + + certificate_ssl = (module.params.get('client_cert'), module.params.get('client_key')) + + nomad_client = nomad.Nomad( + host=module.params.get('host'), + port=module.params.get('port'), + secure=module.params.get('use_ssl'), + timeout=module.params.get('timeout'), + verify=module.params.get('validate_certs'), + cert=certificate_ssl, + namespace=module.params.get('namespace'), + 
token=module.params.get('token') + ) + + return nomad_client + + +def run(module): + nomad_client = setup_nomad_client(module) + + msg = "" + result = {} + changed = False + if module.params.get('state') == "present": + + if module.params.get('token_type') == 'bootstrap': + try: + current_token = get_token('Bootstrap Token', nomad_client) + if current_token: + msg = "ACL bootstrap already exist." + else: + nomad_result = nomad_client.acl.generate_bootstrap() + msg = "Boostrap token created." + result = transform_response(nomad_result) + changed = True + + except nomad.api.exceptions.URLNotAuthorizedNomadException: + try: + nomad_result = nomad_client.acl.generate_bootstrap() + msg = "Boostrap token created." + result = transform_response(nomad_result) + changed = True + + except Exception as e: + module.fail_json(msg=to_native(e)) + else: + try: + token_info = { + "Name": module.params.get('name'), + "Type": module.params.get('token_type'), + "Policies": module.params.get('policies'), + "Global": module.params.get('global_replicated') + } + + current_token = get_token(token_info['Name'], nomad_client) + + if current_token: + token_info['AccessorID'] = current_token['AccessorID'] + nomad_result = nomad_client.acl.update_token(current_token['AccessorID'], token_info) + msg = "ACL token updated." + result = transform_response(nomad_result) + changed = True + + else: + nomad_result = nomad_client.acl.create_token(token_info) + msg = "ACL token Created." 
+ result = transform_response(nomad_result) + changed = True + + except Exception as e: + module.fail_json(msg=to_native(e)) + + if module.params.get('state') == "absent": + + if not module.params.get('name'): + module.fail_json(msg="name is needed to delete token.") + + if module.params.get('token_type') == 'bootstrap' or module.params.get('name') == 'Bootstrap Token': + module.fail_json(msg="Delete ACL bootstrap token is not allowed.") + + try: + token = get_token(module.params.get('name'), nomad_client) + if token: + nomad_client.acl.delete_token(token.get('AccessorID')) + msg = 'ACL token deleted.' + changed = True + else: + msg = "No token with name '{0}' found".format(module.params.get('name')) + + except Exception as e: + module.fail_json(msg=to_native(e)) + + module.exit_json(changed=changed, msg=msg, result=result) + + +def main(): + module = setup_module_object() + run(module) + + +if __name__ == "__main__": + main() diff --git a/plugins/modules/system/nosh.py b/plugins/modules/nosh.py similarity index 50% rename from plugins/modules/system/nosh.py rename to plugins/modules/nosh.py index 1f9f2806c6..1befdad369 100644 --- a/plugins/modules/system/nosh.py +++ b/plugins/modules/nosh.py @@ -1,72 +1,76 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- -# (c) 2017, Thomas Caravia -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# Copyright (c) 2017, Thomas Caravia +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: nosh author: - - "Thomas Caravia (@tacatac)" -short_description: Manage services with nosh + - "Thomas Caravia (@tacatac)" +short_description: Manage services with nosh description: - - Control running and enabled state for 
system-wide or user services. - - BSD and Linux systems are supported. + - Control running and enabled state for system-wide or user services. + - BSD and Linux systems are supported. +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: none options: - name: - type: str - required: true - description: - - Name of the service to manage. - state: - type: str - required: false - choices: [ started, stopped, reset, restarted, reloaded ] - description: - - C(started)/C(stopped) are idempotent actions that will not run - commands unless necessary. - C(restarted) will always bounce the service. - C(reloaded) will send a SIGHUP or start the service. - C(reset) will start or stop the service according to whether it is - enabled or not. - enabled: - required: false - type: bool - description: - - Enable or disable the service, independently of C(*.preset) file - preference or running state. Mutually exclusive with I(preset). Will take - effect prior to I(state=reset). - preset: - required: false - type: bool - description: - - Enable or disable the service according to local preferences in C(*.preset) files. - Mutually exclusive with I(enabled). Only has an effect if set to true. Will take - effect prior to I(state=reset). - user: - required: false - default: 'no' - type: bool - description: - - Run system-control talking to the calling user's service manager, rather than - the system-wide service manager. + name: + type: str + required: true + description: + - Name of the service to manage. + state: + type: str + required: false + choices: [started, stopped, reset, restarted, reloaded] + description: + - V(started)/V(stopped) are idempotent actions that do not run commands unless necessary. + - V(restarted) always bounces the service. + - V(reloaded) sends a SIGHUP or starts the service. + - V(reset) starts or stops the service according to whether it is enabled or not. 
+ enabled: + required: false + type: bool + description: + - Enable or disable the service, independently of C(*.preset) file preference or running state. Mutually exclusive with + O(preset). It takes effect prior to O(state=reset). + preset: + required: false + type: bool + description: + - Enable or disable the service according to local preferences in C(*.preset) files. Mutually exclusive with O(enabled). + Only has an effect if set to true. It takes effect prior to O(state=reset). + user: + required: false + default: false + type: bool + description: + - Run system-control talking to the calling user's service manager, rather than the system-wide service manager. requirements: - - A system with an active nosh service manager, see Notes for further information. + - A system with an active nosh service manager, see Notes for further information. notes: - - Information on the nosh utilities suite may be found at U(https://jdebp.eu/Softwares/nosh/). -''' + - Information on the nosh utilities suite may be found at U(https://jdebp.eu/Softwares/nosh/). 
+""" -EXAMPLES = ''' +EXAMPLES = r""" - name: Start dnscache if not running - community.general.nosh: name=dnscache state=started + community.general.nosh: + name: dnscache + state: started - name: Stop mpd, if running - community.general.nosh: name=mpd state=stopped + community.general.nosh: + name: mpd + state: stopped - name: Restart unbound or start it if not already running community.general.nosh: @@ -79,236 +83,247 @@ EXAMPLES = ''' state: reloaded - name: Disable nsd - community.general.nosh: name=nsd enabled=no + community.general.nosh: + name: nsd + enabled: false - name: For package installers, set nginx running state according to local enable settings, preset and reset - community.general.nosh: name=nginx preset=True state=reset + community.general.nosh: + name: nginx + preset: true + state: reset - name: Reboot the host if nosh is the system manager, would need a "wait_for*" task at least, not recommended as-is - community.general.nosh: name=reboot state=started + community.general.nosh: + name: reboot + state: started - name: Using conditionals with the module facts tasks: - name: Obtain information on tinydns service - community.general.nosh: name=tinydns + community.general.nosh: + name: tinydns register: result - name: Fail if service not loaded - ansible.builtin.fail: msg="The {{ result.name }} service is not loaded" + ansible.builtin.fail: + msg: "The {{ result.name }} service is not loaded" when: not result.status - name: Fail if service is running - ansible.builtin.fail: msg="The {{ result.name }} service is running" + ansible.builtin.fail: + msg: "The {{ result.name }} service is running" when: result.status and result.status['DaemontoolsEncoreState'] == "running" -''' +""" -RETURN = ''' +RETURN = r""" name: - description: name used to find the service - returned: success - type: str - sample: "sshd" + description: Name used to find the service. 
+ returned: success + type: str + sample: "sshd" service_path: - description: resolved path for the service - returned: success - type: str - sample: "/var/sv/sshd" + description: Resolved path for the service. + returned: success + type: str + sample: "/var/sv/sshd" enabled: - description: whether the service is enabled at system bootstrap - returned: success - type: bool - sample: True + description: Whether the service is enabled at system bootstrap. + returned: success + type: bool + sample: true preset: - description: whether the enabled status reflects the one set in the relevant C(*.preset) file - returned: success - type: bool - sample: 'False' + description: Whether the enabled status reflects the one set in the relevant C(*.preset) file. + returned: success + type: bool + sample: 'False' state: - description: service process run state, C(None) if the service is not loaded and will not be started - returned: if state option is used - type: str - sample: "reloaded" + description: Service process run state, V(none) if the service is not loaded and will not be started. 
+ returned: if state option is used + type: str + sample: "reloaded" status: - description: a dictionary with the key=value pairs returned by `system-control show-json` or C(None) if the service is not loaded - returned: success - type: complex - contains: - After: - description: [] # FIXME - returned: success - type: list - sample: ["/etc/service-bundles/targets/basic","../sshdgenkeys", "log"] - Before: - description: [] # FIXME - returned: success - type: list - sample: ["/etc/service-bundles/targets/shutdown"] - Conflicts: - description: [] # FIXME - returned: success - type: list - sample: '[]' - DaemontoolsEncoreState: - description: [] # FIXME - returned: success - type: str - sample: "running" - DaemontoolsState: - description: [] # FIXME - returned: success - type: str - sample: "up" - Enabled: - description: [] # FIXME - returned: success - type: bool - sample: True - LogService: - description: [] # FIXME - returned: success - type: str - sample: "../cyclog@sshd" - MainPID: - description: [] # FIXME - returned: success - type: int - sample: 661 - Paused: - description: [] # FIXME - returned: success - type: bool - sample: 'False' - ReadyAfterRun: - description: [] # FIXME - returned: success - type: bool - sample: 'False' - RemainAfterExit: - description: [] # FIXME - returned: success - type: bool - sample: 'False' - Required-By: - description: [] # FIXME - returned: success - type: list - sample: '[]' - RestartExitStatusCode: - description: [] # FIXME - returned: success - type: int - sample: '0' - RestartExitStatusNumber: - description: [] # FIXME - returned: success - type: int - sample: '0' - RestartTimestamp: - description: [] # FIXME - returned: success - type: int - sample: 4611686019935648081 - RestartUTCTimestamp: - description: [] # FIXME - returned: success - type: int - sample: 1508260140 - RunExitStatusCode: - description: [] # FIXME - returned: success - type: int - sample: '0' - RunExitStatusNumber: - description: [] # FIXME - returned: 
success - type: int - sample: '0' - RunTimestamp: - description: [] # FIXME - returned: success - type: int - sample: 4611686019935648081 - RunUTCTimestamp: - description: [] # FIXME - returned: success - type: int - sample: 1508260140 - StartExitStatusCode: - description: [] # FIXME - returned: success - type: int - sample: 1 - StartExitStatusNumber: - description: [] # FIXME - returned: success - type: int - sample: '0' - StartTimestamp: - description: [] # FIXME - returned: success - type: int - sample: 4611686019935648081 - StartUTCTimestamp: - description: [] # FIXME - returned: success - type: int - sample: 1508260140 - StopExitStatusCode: - description: [] # FIXME - returned: success - type: int - sample: '0' - StopExitStatusNumber: - description: [] # FIXME - returned: success - type: int - sample: '0' - StopTimestamp: - description: [] # FIXME - returned: success - type: int - sample: 4611686019935648081 - StopUTCTimestamp: - description: [] # FIXME - returned: success - type: int - sample: 1508260140 - Stopped-By: - description: [] # FIXME - returned: success - type: list - sample: ["/etc/service-bundles/targets/shutdown"] - Timestamp: - description: [] # FIXME - returned: success - type: int - sample: 4611686019935648081 - UTCTimestamp: - description: [] # FIXME - returned: success - type: int - sample: 1508260140 - Want: - description: [] # FIXME - returned: success - type: str - sample: "nothing" - Wanted-By: - description: [] # FIXME - returned: success - type: list - sample: ["/etc/service-bundles/targets/server","/etc/service-bundles/targets/sockets"] - Wants: - description: [] # FIXME - returned: success - type: list - sample: ["/etc/service-bundles/targets/basic","../sshdgenkeys"] + description: A dictionary with the key=value pairs returned by C(system-control show-json) or V(none) if the service is + not loaded. 
+ returned: success + type: complex + contains: + After: + description: [] # FIXME + returned: success + type: list + sample: ["/etc/service-bundles/targets/basic", "../sshdgenkeys", "log"] + Before: + description: [] # FIXME + returned: success + type: list + sample: ["/etc/service-bundles/targets/shutdown"] + Conflicts: + description: [] # FIXME + returned: success + type: list + sample: [] + DaemontoolsEncoreState: + description: [] # FIXME + returned: success + type: str + sample: "running" + DaemontoolsState: + description: [] # FIXME + returned: success + type: str + sample: "up" + Enabled: + description: [] # FIXME + returned: success + type: bool + sample: true + LogService: + description: [] # FIXME + returned: success + type: str + sample: "../cyclog@sshd" + MainPID: + description: [] # FIXME + returned: success + type: int + sample: 661 + Paused: + description: [] # FIXME + returned: success + type: bool + sample: 'False' + ReadyAfterRun: + description: [] # FIXME + returned: success + type: bool + sample: 'False' + RemainAfterExit: + description: [] # FIXME + returned: success + type: bool + sample: 'False' + Required-By: + description: [] # FIXME + returned: success + type: list + sample: [] + RestartExitStatusCode: + description: [] # FIXME + returned: success + type: int + sample: '0' + RestartExitStatusNumber: + description: [] # FIXME + returned: success + type: int + sample: '0' + RestartTimestamp: + description: [] # FIXME + returned: success + type: int + sample: 4611686019935648081 + RestartUTCTimestamp: + description: [] # FIXME + returned: success + type: int + sample: 1508260140 + RunExitStatusCode: + description: [] # FIXME + returned: success + type: int + sample: '0' + RunExitStatusNumber: + description: [] # FIXME + returned: success + type: int + sample: '0' + RunTimestamp: + description: [] # FIXME + returned: success + type: int + sample: 4611686019935648081 + RunUTCTimestamp: + description: [] # FIXME + returned: success + type: int 
+ sample: 1508260140 + StartExitStatusCode: + description: [] # FIXME + returned: success + type: int + sample: 1 + StartExitStatusNumber: + description: [] # FIXME + returned: success + type: int + sample: '0' + StartTimestamp: + description: [] # FIXME + returned: success + type: int + sample: 4611686019935648081 + StartUTCTimestamp: + description: [] # FIXME + returned: success + type: int + sample: 1508260140 + StopExitStatusCode: + description: [] # FIXME + returned: success + type: int + sample: '0' + StopExitStatusNumber: + description: [] # FIXME + returned: success + type: int + sample: '0' + StopTimestamp: + description: [] # FIXME + returned: success + type: int + sample: 4611686019935648081 + StopUTCTimestamp: + description: [] # FIXME + returned: success + type: int + sample: 1508260140 + Stopped-By: + description: [] # FIXME + returned: success + type: list + sample: ["/etc/service-bundles/targets/shutdown"] + Timestamp: + description: [] # FIXME + returned: success + type: int + sample: 4611686019935648081 + UTCTimestamp: + description: [] # FIXME + returned: success + type: int + sample: 1508260140 + Want: + description: [] # FIXME + returned: success + type: str + sample: "nothing" + Wanted-By: + description: [] # FIXME + returned: success + type: list + sample: ["/etc/service-bundles/targets/server", "/etc/service-bundles/targets/sockets"] + Wants: + description: [] # FIXME + returned: success + type: list + sample: ["/etc/service-bundles/targets/basic", "../sshdgenkeys"] user: - description: whether the user-level service manager is called - returned: success - type: bool - sample: False -''' + description: Whether the user-level service manager is called. 
+ returned: success + type: bool + sample: false +""" import json diff --git a/plugins/modules/notification/bearychat.py b/plugins/modules/notification/bearychat.py deleted file mode 100644 index 4c907ea6b7..0000000000 --- a/plugins/modules/notification/bearychat.py +++ /dev/null @@ -1,182 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# (c) 2016, Jiangge Zhang -# -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - -DOCUMENTATION = ''' -module: bearychat -short_description: Send BearyChat notifications -description: - - The M(community.general.bearychat) module sends notifications to U(https://bearychat.com) - via the Incoming Robot integration. -author: "Jiangge Zhang (@tonyseek)" -options: - url: - type: str - description: - - BearyChat WebHook URL. This authenticates you to the bearychat - service. It looks like - C(https://hook.bearychat.com/=ae2CF/incoming/e61bd5c57b164e04b11ac02e66f47f60). - required: true - text: - type: str - description: - - Message to send. - markdown: - description: - - If C(yes), text will be parsed as markdown. - default: 'yes' - type: bool - channel: - type: str - description: - - Channel to send the message to. If absent, the message goes to the - default channel selected by the I(url). 
- attachments: - type: list - elements: dict - description: - - Define a list of attachments. For more information, see - https://github.com/bearyinnovative/bearychat-tutorial/blob/master/robots/incoming.md#attachments -''' - -EXAMPLES = """ -- name: Send notification message via BearyChat - local_action: - module: bearychat - url: | - https://hook.bearychat.com/=ae2CF/incoming/e61bd5c57b164e04b11ac02e66f47f60 - text: "{{ inventory_hostname }} completed" - -- name: Send notification message via BearyChat all options - local_action: - module: bearychat - url: | - https://hook.bearychat.com/=ae2CF/incoming/e61bd5c57b164e04b11ac02e66f47f60 - text: "{{ inventory_hostname }} completed" - markdown: no - channel: "#ansible" - attachments: - - title: "Ansible on {{ inventory_hostname }}" - text: "May the Force be with you." - color: "#ffffff" - images: - - http://example.com/index.png -""" - -RETURN = """ -msg: - description: execution result - returned: success - type: str - sample: "OK" -""" - -try: - from ansible.module_utils.six.moves.urllib.parse import urlparse, urlunparse - HAS_URLPARSE = True -except Exception: - HAS_URLPARSE = False -from ansible.module_utils.basic import AnsibleModule -from ansible.module_utils.urls import fetch_url - - -def build_payload_for_bearychat(module, text, markdown, channel, attachments): - payload = {} - if text is not None: - payload['text'] = text - if markdown is not None: - payload['markdown'] = markdown - if channel is not None: - payload['channel'] = channel - if attachments is not None: - payload.setdefault('attachments', []).extend( - build_payload_for_bearychat_attachment( - module, item.get('title'), item.get('text'), item.get('color'), - item.get('images')) - for item in attachments) - payload = 'payload=%s' % module.jsonify(payload) - return payload - - -def build_payload_for_bearychat_attachment(module, title, text, color, images): - attachment = {} - if title is not None: - attachment['title'] = title - if text is not 
None: - attachment['text'] = text - if color is not None: - attachment['color'] = color - if images is not None: - target_images = attachment.setdefault('images', []) - if not isinstance(images, (list, tuple)): - images = [images] - for image in images: - if isinstance(image, dict) and 'url' in image: - image = {'url': image['url']} - elif hasattr(image, 'startswith') and image.startswith('http'): - image = {'url': image} - else: - module.fail_json( - msg="BearyChat doesn't have support for this kind of " - "attachment image") - target_images.append(image) - return attachment - - -def do_notify_bearychat(module, url, payload): - response, info = fetch_url(module, url, data=payload) - if info['status'] != 200: - url_info = urlparse(url) - obscured_incoming_webhook = urlunparse( - (url_info.scheme, url_info.netloc, '[obscured]', '', '', '')) - module.fail_json( - msg=" failed to send %s to %s: %s" % ( - payload, obscured_incoming_webhook, info['msg'])) - - -def main(): - module = AnsibleModule(argument_spec={ - 'url': dict(type='str', required=True, no_log=True), - 'text': dict(type='str'), - 'markdown': dict(default=True, type='bool'), - 'channel': dict(type='str'), - 'attachments': dict(type='list', elements='dict'), - }) - - if not HAS_URLPARSE: - module.fail_json(msg='urlparse is not installed') - - url = module.params['url'] - text = module.params['text'] - markdown = module.params['markdown'] - channel = module.params['channel'] - attachments = module.params['attachments'] - - payload = build_payload_for_bearychat( - module, text, markdown, channel, attachments) - do_notify_bearychat(module, url, payload) - - module.exit_json(msg="OK") - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/notification/flowdock.py b/plugins/modules/notification/flowdock.py deleted file mode 100644 index a1842c5d16..0000000000 --- a/plugins/modules/notification/flowdock.py +++ /dev/null @@ -1,197 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Copyright 
2013 Matt Coddington -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' ---- -module: flowdock -author: "Matt Coddington (@mcodd)" -short_description: Send a message to a flowdock -description: - - Send a message to a flowdock team inbox or chat using the push API (see https://www.flowdock.com/api/team-inbox and https://www.flowdock.com/api/chat) -options: - token: - type: str - description: - - API token. - required: true - type: - type: str - description: - - Whether to post to 'inbox' or 'chat' - required: true - choices: [ "inbox", "chat" ] - msg: - type: str - description: - - Content of the message - required: true - tags: - type: str - description: - - tags of the message, separated by commas - required: false - external_user_name: - type: str - description: - - (chat only - required) Name of the "user" sending the message - required: false - from_address: - type: str - description: - - (inbox only - required) Email address of the message sender - required: false - source: - type: str - description: - - (inbox only - required) Human readable identifier of the application that uses the Flowdock API - required: false - subject: - type: str - description: - - (inbox only - required) Subject line of the message - required: false - from_name: - type: str - description: - - (inbox only) Name of the message sender - required: false - reply_to: - type: str - description: - - (inbox only) Email address for replies - required: false - project: - type: str - description: - - (inbox only) Human readable identifier for more detailed message categorization - required: false - link: - type: str - description: - - (inbox only) Link associated with the message. This will be used to link the message subject in Team Inbox. 
- required: false - validate_certs: - description: - - If C(no), SSL certificates will not be validated. This should only be used - on personally controlled sites using self-signed certificates. - required: false - default: 'yes' - type: bool - -requirements: [ ] -''' - -EXAMPLES = ''' -- name: Send a message to a flowdock - community.general.flowdock: - type: inbox - token: AAAAAA - from_address: user@example.com - source: my cool app - msg: test from ansible - subject: test subject - -- name: Send a message to a flowdock - community.general.flowdock: - type: chat - token: AAAAAA - external_user_name: testuser - msg: test from ansible - tags: tag1,tag2,tag3 -''' - -from ansible.module_utils.basic import AnsibleModule -from ansible.module_utils.six.moves.urllib.parse import urlencode -from ansible.module_utils.urls import fetch_url - - -# =========================================== -# Module execution. -# - -def main(): - - module = AnsibleModule( - argument_spec=dict( - token=dict(required=True, no_log=True), - msg=dict(required=True), - type=dict(required=True, choices=["inbox", "chat"]), - external_user_name=dict(required=False), - from_address=dict(required=False), - source=dict(required=False), - subject=dict(required=False), - from_name=dict(required=False), - reply_to=dict(required=False), - project=dict(required=False), - tags=dict(required=False), - link=dict(required=False), - validate_certs=dict(default=True, type='bool'), - ), - supports_check_mode=True - ) - - type = module.params["type"] - token = module.params["token"] - if type == 'inbox': - url = "https://api.flowdock.com/v1/messages/team_inbox/%s" % (token) - else: - url = "https://api.flowdock.com/v1/messages/chat/%s" % (token) - - params = {} - - # required params - params['content'] = module.params["msg"] - - # required params for the 'chat' type - if module.params['external_user_name']: - if type == 'inbox': - module.fail_json(msg="external_user_name is not valid for the 'inbox' type") - else: 
- params['external_user_name'] = module.params["external_user_name"] - elif type == 'chat': - module.fail_json(msg="external_user_name is required for the 'chat' type") - - # required params for the 'inbox' type - for item in ['from_address', 'source', 'subject']: - if module.params[item]: - if type == 'chat': - module.fail_json(msg="%s is not valid for the 'chat' type" % item) - else: - params[item] = module.params[item] - elif type == 'inbox': - module.fail_json(msg="%s is required for the 'inbox' type" % item) - - # optional params - if module.params["tags"]: - params['tags'] = module.params["tags"] - - # optional params for the 'inbox' type - for item in ['from_name', 'reply_to', 'project', 'link']: - if module.params[item]: - if type == 'chat': - module.fail_json(msg="%s is not valid for the 'chat' type" % item) - else: - params[item] = module.params[item] - - # If we're in check mode, just exit pretending like we succeeded - if module.check_mode: - module.exit_json(changed=False) - - # Send the data to Flowdock - data = urlencode(params) - response, info = fetch_url(module, url, data=data) - if info['status'] != 200: - module.fail_json(msg="unable to send msg: %s" % info['msg']) - - module.exit_json(changed=True, msg=module.params["msg"]) - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/notification/hipchat.py b/plugins/modules/notification/hipchat.py deleted file mode 100644 index 76c1227af4..0000000000 --- a/plugins/modules/notification/hipchat.py +++ /dev/null @@ -1,212 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# -# Copyright: Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' ---- -module: hipchat -short_description: Send a message to Hipchat. -description: - - Send a message to a Hipchat room, with options to control the formatting. 
-options: - token: - type: str - description: - - API token. - required: true - room: - type: str - description: - - ID or name of the room. - required: true - msg_from: - type: str - description: - - Name the message will appear to be sent from. Max length is 15 - characters - above this it will be truncated. - default: Ansible - aliases: [from] - msg: - type: str - description: - - The message body. - required: true - color: - type: str - description: - - Background color for the message. - default: yellow - choices: [ "yellow", "red", "green", "purple", "gray", "random" ] - msg_format: - type: str - description: - - Message format. - default: text - choices: [ "text", "html" ] - notify: - description: - - If true, a notification will be triggered for users in the room. - type: bool - default: 'yes' - validate_certs: - description: - - If C(no), SSL certificates will not be validated. This should only be used - on personally controlled sites using self-signed certificates. - type: bool - default: 'yes' - api: - type: str - description: - - API url if using a self-hosted hipchat server. For Hipchat API version - 2 use the default URI with C(/v2) instead of C(/v1). - default: 'https://api.hipchat.com/v1' - -author: -- Shirou Wakayama (@shirou) -- Paul Bourdel (@pb8226) -''' - -EXAMPLES = ''' -- name: Send a message to a Hipchat room - community.general.hipchat: - room: notif - msg: Ansible task finished - -- name: Send a message to a Hipchat room using Hipchat API version 2 - community.general.hipchat: - api: https://api.hipchat.com/v2/ - token: OAUTH2_TOKEN - room: notify - msg: Ansible task finished -''' - -# =========================================== -# HipChat module specific support methods. 
-# - -import json -import traceback - -from ansible.module_utils.basic import AnsibleModule -from ansible.module_utils.six.moves.urllib.parse import urlencode -from ansible.module_utils.six.moves.urllib.request import pathname2url -from ansible.module_utils.common.text.converters import to_native -from ansible.module_utils.urls import fetch_url - - -DEFAULT_URI = "https://api.hipchat.com/v1" - -MSG_URI_V1 = "/rooms/message" - -NOTIFY_URI_V2 = "/room/{id_or_name}/notification" - - -def send_msg_v1(module, token, room, msg_from, msg, msg_format='text', - color='yellow', notify=False, api=MSG_URI_V1): - '''sending message to hipchat v1 server''' - - params = {} - params['room_id'] = room - params['from'] = msg_from[:15] # max length is 15 - params['message'] = msg - params['message_format'] = msg_format - params['color'] = color - params['api'] = api - params['notify'] = int(notify) - - url = api + MSG_URI_V1 + "?auth_token=%s" % (token) - data = urlencode(params) - - if module.check_mode: - # In check mode, exit before actually sending the message - module.exit_json(changed=False) - - response, info = fetch_url(module, url, data=data) - if info['status'] == 200: - return response.read() - else: - module.fail_json(msg="failed to send message, return status=%s" % str(info['status'])) - - -def send_msg_v2(module, token, room, msg_from, msg, msg_format='text', - color='yellow', notify=False, api=NOTIFY_URI_V2): - '''sending message to hipchat v2 server''' - - headers = {'Authorization': 'Bearer %s' % token, 'Content-Type': 'application/json'} - - body = dict() - body['message'] = msg - body['color'] = color - body['message_format'] = msg_format - body['notify'] = notify - - POST_URL = api + NOTIFY_URI_V2 - - url = POST_URL.replace('{id_or_name}', pathname2url(room)) - data = json.dumps(body) - - if module.check_mode: - # In check mode, exit before actually sending the message - module.exit_json(changed=False) - - response, info = fetch_url(module, url, data=data, 
headers=headers, method='POST') - - # https://www.hipchat.com/docs/apiv2/method/send_room_notification shows - # 204 to be the expected result code. - if info['status'] in [200, 204]: - return response.read() - else: - module.fail_json(msg="failed to send message, return status=%s" % str(info['status'])) - - -# =========================================== -# Module execution. -# - -def main(): - - module = AnsibleModule( - argument_spec=dict( - token=dict(required=True, no_log=True), - room=dict(required=True), - msg=dict(required=True), - msg_from=dict(default="Ansible", aliases=['from']), - color=dict(default="yellow", choices=["yellow", "red", "green", - "purple", "gray", "random"]), - msg_format=dict(default="text", choices=["text", "html"]), - notify=dict(default=True, type='bool'), - validate_certs=dict(default=True, type='bool'), - api=dict(default=DEFAULT_URI), - ), - supports_check_mode=True - ) - - token = module.params["token"] - room = str(module.params["room"]) - msg = module.params["msg"] - msg_from = module.params["msg_from"] - color = module.params["color"] - msg_format = module.params["msg_format"] - notify = module.params["notify"] - api = module.params["api"] - - try: - if api.find('/v2') != -1: - send_msg_v2(module, token, room, msg_from, msg, msg_format, color, notify, api) - else: - send_msg_v1(module, token, room, msg_from, msg, msg_format, color, notify, api) - except Exception as e: - module.fail_json(msg="unable to send msg: %s" % to_native(e), exception=traceback.format_exc()) - - changed = True - module.exit_json(changed=changed, room=room, msg_from=msg_from, msg=msg) - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/packaging/language/npm.py b/plugins/modules/npm.py similarity index 70% rename from plugins/modules/packaging/language/npm.py rename to plugins/modules/npm.py index 1c97035e67..7779f326aa 100644 --- a/plugins/modules/packaging/language/npm.py +++ b/plugins/modules/npm.py @@ -1,19 +1,24 @@ #!/usr/bin/python 
-# -*- coding: utf-8 -*- # Copyright (c) 2017 Chris Hoffman -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = r''' ---- +DOCUMENTATION = r""" module: npm short_description: Manage node.js packages with npm description: - Manage node.js packages with Node Package Manager (npm). author: "Chris Hoffman (@chrishoffman)" +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: none options: name: description: @@ -34,7 +39,7 @@ options: description: - Install the node.js library globally. required: false - default: no + default: false type: bool executable: description: @@ -47,23 +52,23 @@ options: - Use the C(--ignore-scripts) flag when installing. required: false type: bool - default: no + default: false unsafe_perm: description: - Use the C(--unsafe-perm) flag when installing. type: bool - default: no + default: false ci: description: - Install packages based on package-lock file, same as running C(npm ci). type: bool - default: no + default: false production: description: - Install dependencies in production mode, excluding devDependencies. required: false type: bool - default: no + default: false registry: description: - The registry to install modules from. @@ -75,24 +80,30 @@ options: required: false type: str default: present - choices: [ "present", "absent", "latest" ] + choices: ["present", "absent", "latest"] no_optional: description: - Use the C(--no-optional) flag when installing. type: bool - default: no + default: false version_added: 2.0.0 no_bin_links: description: - Use the C(--no-bin-links) flag when installing. 
type: bool - default: no + default: false version_added: 2.5.0 + force: + description: + - Use the C(--force) flag when installing. + type: bool + default: false + version_added: 9.5.0 requirements: - - npm installed in bin path (recommended /usr/local/bin) -''' + - npm installed in bin path (recommended /usr/local/bin) +""" -EXAMPLES = r''' +EXAMPLES = r""" - name: Install "coffee-script" node.js package. community.general.npm: name: coffee-script @@ -107,12 +118,17 @@ EXAMPLES = r''' - name: Install "coffee-script" node.js package globally. community.general.npm: name: coffee-script - global: yes + global: true + +- name: Force Install "coffee-script" node.js package. + community.general.npm: + name: coffee-script + force: true - name: Remove the globally package "coffee-script". community.general.npm: name: coffee-script - global: yes + global: true state: absent - name: Install "coffee-script" node.js package from custom registry. @@ -134,7 +150,7 @@ EXAMPLES = r''' path: /app/location executable: /opt/nvm/v0.10.1/bin/npm state: present -''' +""" import json import os @@ -142,6 +158,7 @@ import re from ansible.module_utils.basic import AnsibleModule from ansible.module_utils.common.text.converters import to_native +from ansible_collections.community.general.plugins.module_utils.cmd_runner import CmdRunner, cmd_runner_fmt class Npm(object): @@ -158,39 +175,37 @@ class Npm(object): self.state = kwargs['state'] self.no_optional = kwargs['no_optional'] self.no_bin_links = kwargs['no_bin_links'] + self.force = kwargs['force'] if kwargs['executable']: self.executable = kwargs['executable'].split(' ') else: self.executable = [module.get_bin_path('npm', True)] - if kwargs['version'] and self.state != 'absent': - self.name_version = self.name + '@' + str(self.version) + if kwargs['version'] and kwargs['state'] != 'absent': + self.name_version = self.name + '@' + str(kwargs['version']) else: self.name_version = self.name + self.runner = CmdRunner( + module, + 
command=self.executable, + arg_formats=dict( + exec_args=cmd_runner_fmt.as_list(), + global_=cmd_runner_fmt.as_bool('--global'), + production=cmd_runner_fmt.as_bool('--production'), + ignore_scripts=cmd_runner_fmt.as_bool('--ignore-scripts'), + unsafe_perm=cmd_runner_fmt.as_bool('--unsafe-perm'), + name_version=cmd_runner_fmt.as_list(), + registry=cmd_runner_fmt.as_opt_val('--registry'), + no_optional=cmd_runner_fmt.as_bool('--no-optional'), + no_bin_links=cmd_runner_fmt.as_bool('--no-bin-links'), + force=cmd_runner_fmt.as_bool('--force'), + ) + ) + def _exec(self, args, run_in_check_mode=False, check_rc=True, add_package_name=True): if not self.module.check_mode or (self.module.check_mode and run_in_check_mode): - cmd = self.executable + args - - if self.glbl: - cmd.append('--global') - if self.production and ('install' in cmd or 'update' in cmd or 'ci' in cmd): - cmd.append('--production') - if self.ignore_scripts: - cmd.append('--ignore-scripts') - if self.unsafe_perm: - cmd.append('--unsafe-perm') - if self.name_version and add_package_name: - cmd.append(self.name_version) - if self.registry: - cmd.append('--registry') - cmd.append(self.registry) - if self.no_optional: - cmd.append('--no-optional') - if self.no_bin_links: - cmd.append('--no-bin-links') - # If path is specified, cd into that path and run the command. 
cwd = None if self.path: @@ -200,8 +215,19 @@ class Npm(object): self.module.fail_json(msg="path %s is not a directory" % self.path) cwd = self.path - rc, out, err = self.module.run_command(cmd, check_rc=check_rc, cwd=cwd) + params = dict(self.module.params) + params['exec_args'] = args + params['global_'] = self.glbl + params['production'] = self.production and ('install' in args or 'update' in args or 'ci' in args) + params['name_version'] = self.name_version if add_package_name else None + + with self.runner( + "exec_args global_ production ignore_scripts unsafe_perm name_version registry no_optional no_bin_links force", + check_rc=check_rc, cwd=cwd + ) as ctx: + rc, out, err = ctx.run(**params) return out + return '' def list(self): @@ -261,50 +287,53 @@ class Npm(object): def main(): arg_spec = dict( - name=dict(default=None, type='str'), - path=dict(default=None, type='path'), - version=dict(default=None, type='str'), + name=dict(type='str'), + path=dict(type='path'), + version=dict(type='str'), production=dict(default=False, type='bool'), - executable=dict(default=None, type='path'), - registry=dict(default=None, type='str'), + executable=dict(type='path'), + registry=dict(type='str'), state=dict(default='present', choices=['present', 'absent', 'latest']), ignore_scripts=dict(default=False, type='bool'), unsafe_perm=dict(default=False, type='bool'), ci=dict(default=False, type='bool'), no_optional=dict(default=False, type='bool'), no_bin_links=dict(default=False, type='bool'), + force=dict(default=False, type='bool'), ) arg_spec['global'] = dict(default=False, type='bool') module = AnsibleModule( argument_spec=arg_spec, - supports_check_mode=True + required_if=[('state', 'absent', ['name'])], + supports_check_mode=True, ) name = module.params['name'] path = module.params['path'] version = module.params['version'] glbl = module.params['global'] - production = module.params['production'] - executable = module.params['executable'] - registry = 
module.params['registry'] state = module.params['state'] - ignore_scripts = module.params['ignore_scripts'] - unsafe_perm = module.params['unsafe_perm'] - ci = module.params['ci'] - no_optional = module.params['no_optional'] - no_bin_links = module.params['no_bin_links'] if not path and not glbl: module.fail_json(msg='path must be specified when not using global') - if state == 'absent' and not name: - module.fail_json(msg='uninstalling a package is only available for named packages') - npm = Npm(module, name=name, path=path, version=version, glbl=glbl, production=production, - executable=executable, registry=registry, ignore_scripts=ignore_scripts, - unsafe_perm=unsafe_perm, state=state, no_optional=no_optional, no_bin_links=no_bin_links) + npm = Npm(module, + name=name, + path=path, + version=version, + glbl=glbl, + production=module.params['production'], + executable=module.params['executable'], + registry=module.params['registry'], + ignore_scripts=module.params['ignore_scripts'], + unsafe_perm=module.params['unsafe_perm'], + state=state, + no_optional=module.params['no_optional'], + no_bin_links=module.params['no_bin_links'], + force=module.params['force']) changed = False - if ci: + if module.params['ci']: npm.ci_install() changed = True elif state == 'present': diff --git a/plugins/modules/net_tools/nsupdate.py b/plugins/modules/nsupdate.py similarity index 68% rename from plugins/modules/net_tools/nsupdate.py rename to plugins/modules/nsupdate.py index fc0d5e1c46..7d56924112 100644 --- a/plugins/modules/net_tools/nsupdate.py +++ b/plugins/modules/nsupdate.py @@ -1,95 +1,98 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- -# (c) 2016, Marcin Skarbek -# (c) 2016, Andreas Olsson -# (c) 2017, Loic Blot +# Copyright (c) 2016, Marcin Skarbek +# Copyright (c) 2016, Andreas Olsson +# Copyright (c) 2017, Loic Blot # # This module was ported from https://github.com/mskarbek/ansible-nsupdate # -# GNU General Public License v3.0+ (see COPYING or 
https://www.gnu.org/licenses/gpl-3.0.txt) +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: nsupdate -short_description: Manage DNS records. +short_description: Manage DNS records description: - - Create, update and remove DNS records using DDNS updates + - Create, update and remove DNS records using DDNS updates. requirements: - dnspython author: "Loic Blot (@nerzhul)" +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: none options: - state: - description: - - Manage DNS record. - choices: ['present', 'absent'] - default: 'present' - type: str - server: - description: - - Apply DNS modification on this server, specified by IPv4 or IPv6 address. - required: true - type: str - port: - description: - - Use this TCP port when connecting to C(server). - default: 53 - type: int - key_name: - description: - - Use TSIG key name to authenticate against DNS C(server) - type: str - key_secret: - description: - - Use TSIG key secret, associated with C(key_name), to authenticate against C(server) - type: str - key_algorithm: - description: - - Specify key algorithm used by C(key_secret). - choices: ['HMAC-MD5.SIG-ALG.REG.INT', 'hmac-md5', 'hmac-sha1', 'hmac-sha224', 'hmac-sha256', 'hmac-sha384', - 'hmac-sha512'] - default: 'hmac-md5' - type: str - zone: - description: - - DNS record will be modified on this C(zone). - - When omitted DNS will be queried to attempt finding the correct zone. - - Starting with Ansible 2.7 this parameter is optional. - type: str - record: - description: - - Sets the DNS record to modify. When zone is omitted this has to be absolute (ending with a dot). 
- required: true - type: str - type: - description: - - Sets the record type. - default: 'A' - type: str - ttl: - description: - - Sets the record TTL. - default: 3600 - type: int - value: - description: - - Sets the record value. - type: list - elements: str - protocol: - description: - - Sets the transport protocol (TCP or UDP). TCP is the recommended and a more robust option. - default: 'tcp' - choices: ['tcp', 'udp'] - type: str -''' + state: + description: + - Manage DNS record. + choices: ['present', 'absent'] + default: 'present' + type: str + server: + description: + - Apply DNS modification on this server, specified by IPv4 or IPv6 address. + required: true + type: str + port: + description: + - Use this TCP port when connecting to O(server). + default: 53 + type: int + key_name: + description: + - Use TSIG key name to authenticate against DNS O(server). + type: str + key_secret: + description: + - Use TSIG key secret, associated with O(key_name), to authenticate against O(server). + type: str + key_algorithm: + description: + - Specify key algorithm used by O(key_secret). + choices: ['HMAC-MD5.SIG-ALG.REG.INT', 'hmac-md5', 'hmac-sha1', 'hmac-sha224', 'hmac-sha256', 'hmac-sha384', 'hmac-sha512'] + default: 'hmac-md5' + type: str + zone: + description: + - DNS record is modified on this O(zone). + - When omitted, DNS is queried to attempt finding the correct zone. + type: str + record: + description: + - Sets the DNS record to modify. When zone is omitted this has to be absolute (ending with a dot). + required: true + type: str + type: + description: + - Sets the record type. + default: 'A' + type: str + ttl: + description: + - Sets the record TTL. + default: 3600 + type: int + value: + description: + - Sets the record value. + type: list + elements: str + protocol: + description: + - Sets the transport protocol (TCP or UDP). TCP is the recommended and a more robust option. 
+ default: 'tcp' + choices: ['tcp', 'udp'] + type: str +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: Add or modify ansible.example.org A to 192.168.1.1" community.general.nsupdate: key_name: "nsupdate" @@ -136,49 +139,45 @@ EXAMPLES = ''' record: "1.1.168.192.in-addr.arpa." type: "PTR" state: absent -''' +""" -RETURN = ''' -changed: - description: If module has modified record - returned: success - type: str +RETURN = r""" record: - description: DNS record - returned: success - type: str - sample: 'ansible' + description: DNS record. + returned: success + type: str + sample: 'ansible' ttl: - description: DNS record TTL - returned: success - type: int - sample: 86400 + description: DNS record TTL. + returned: success + type: int + sample: 86400 type: - description: DNS record type - returned: success - type: str - sample: 'CNAME' + description: DNS record type. + returned: success + type: str + sample: 'CNAME' value: - description: DNS record value(s) - returned: success - type: list - sample: '192.168.1.1' + description: DNS record value(s). + returned: success + type: list + sample: '192.168.1.1' zone: - description: DNS record zone - returned: success - type: str - sample: 'example.org.' + description: DNS record zone. + returned: success + type: str + sample: 'example.org.' dns_rc: - description: dnspython return code - returned: always - type: int - sample: 4 + description: C(dnspython) return code. + returned: always + type: int + sample: 4 dns_rc_str: - description: dnspython return code (string representation) - returned: always - type: str - sample: 'REFUSED' -''' + description: C(dnspython) return code (string representation). + returned: always + type: str + sample: 'REFUSED' +""" import traceback @@ -268,12 +267,16 @@ class RecordManager(object): if lookup.rcode() in [dns.rcode.SERVFAIL, dns.rcode.REFUSED]: self.module.fail_json(msg='Zone lookup failure: \'%s\' will not respond to queries regarding \'%s\'.' 
% ( self.module.params['server'], self.module.params['record'])) - try: - zone = lookup.authority[0].name - if zone == name: - return zone.to_text() - except IndexError: - pass + # If the response contains an Answer SOA RR whose name matches the queried name, + # this is the name of the zone in which the record needs to be inserted. + for rr in lookup.answer: + if rr.rdtype == dns.rdatatype.SOA and rr.name == name: + return rr.name.to_text() + # If the response contains an Authority SOA RR whose name is a subdomain of the queried name, + # this SOA name is the zone in which the record needs to be inserted. + for rr in lookup.authority: + if rr.rdtype == dns.rdatatype.SOA and name.fullcompare(rr.name)[0] == dns.name.NAMERELN_SUBDOMAIN: + return rr.name.to_text() try: name = name.parent() except dns.name.NoParent: @@ -338,7 +341,32 @@ class RecordManager(object): def modify_record(self): update = dns.update.Update(self.zone, keyring=self.keyring, keyalgorithm=self.algorithm) - update.delete(self.module.params['record'], self.module.params['type']) + + if self.module.params['type'].upper() == 'NS': + # When modifying a NS record, Bind9 silently refuses to delete all the NS entries for a zone: + # > 09-May-2022 18:00:50.352 client @0x7fe7dd1f9568 192.168.1.3#45458/key rndc_ddns_ansible: + # > updating zone 'lab/IN': attempt to delete all SOA or NS records ignored + # https://gitlab.isc.org/isc-projects/bind9/-/blob/v9_18/lib/ns/update.c#L3304 + # Let's perform dns inserts and updates first, deletes after. 
+ query = dns.message.make_query(self.module.params['record'], self.module.params['type']) + if self.keyring: + query.use_tsig(keyring=self.keyring, algorithm=self.algorithm) + + try: + if self.module.params['protocol'] == 'tcp': + lookup = dns.query.tcp(query, self.module.params['server'], timeout=10, port=self.module.params['port']) + else: + lookup = dns.query.udp(query, self.module.params['server'], timeout=10, port=self.module.params['port']) + except (dns.tsig.PeerBadKey, dns.tsig.PeerBadSignature) as e: + self.module.fail_json(msg='TSIG update error (%s): %s' % (e.__class__.__name__, to_native(e))) + except (socket_error, dns.exception.Timeout) as e: + self.module.fail_json(msg='DNS server error: (%s): %s' % (e.__class__.__name__, to_native(e))) + + lookup_result = lookup.answer[0] if lookup.answer else lookup.authority[0] + entries_to_remove = [n.to_text() for n in lookup_result.items if n.to_text() not in self.value] + else: + update.delete(self.module.params['record'], self.module.params['type']) + for entry in self.value: try: update.add(self.module.params['record'], @@ -349,6 +377,11 @@ class RecordManager(object): self.module.fail_json(msg='value needed when state=present') except dns.exception.SyntaxError: self.module.fail_json(msg='Invalid/malformed value') + + if self.module.params['type'].upper() == 'NS': + for entry in entries_to_remove: + update.delete(self.module.params['record'], self.module.params['type'], entry) + response = self.__do_update(update) return dns.message.Message.rcode(response) @@ -426,7 +459,8 @@ class RecordManager(object): if lookup.rcode() != dns.rcode.NOERROR: self.module.fail_json(msg='Failed to lookup TTL of existing matching record.') - current_ttl = lookup.answer[0].ttl + current_ttl = lookup.answer[0].ttl if lookup.answer else lookup.authority[0].ttl + return current_ttl != self.module.params['ttl'] @@ -436,18 +470,18 @@ def main(): module = AnsibleModule( argument_spec=dict( - state=dict(required=False, 
default='present', choices=['present', 'absent'], type='str'), + state=dict(default='present', choices=['present', 'absent'], type='str'), server=dict(required=True, type='str'), - port=dict(required=False, default=53, type='int'), - key_name=dict(required=False, type='str'), - key_secret=dict(required=False, type='str', no_log=True), - key_algorithm=dict(required=False, default='hmac-md5', choices=tsig_algs, type='str'), - zone=dict(required=False, default=None, type='str'), + port=dict(default=53, type='int'), + key_name=dict(type='str'), + key_secret=dict(type='str', no_log=True), + key_algorithm=dict(default='hmac-md5', choices=tsig_algs, type='str'), + zone=dict(type='str'), record=dict(required=True, type='str'), - type=dict(required=False, default='A', type='str'), - ttl=dict(required=False, default=3600, type='int'), - value=dict(required=False, default=None, type='list', elements='str'), - protocol=dict(required=False, default='tcp', choices=['tcp', 'udp'], type='str') + type=dict(default='A', type='str'), + ttl=dict(default=3600, type='int'), + value=dict(type='list', elements='str'), + protocol=dict(default='tcp', choices=['tcp', 'udp'], type='str') ), supports_check_mode=True ) diff --git a/plugins/modules/ocapi_command.py b/plugins/modules/ocapi_command.py new file mode 100644 index 0000000000..91fb7ab5e6 --- /dev/null +++ b/plugins/modules/ocapi_command.py @@ -0,0 +1,270 @@ +#!/usr/bin/python + +# Copyright (c) 2022 Western Digital Corporation +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + +DOCUMENTATION = r""" +module: ocapi_command +version_added: 6.3.0 +short_description: Manages Out-Of-Band controllers using Open Composable API (OCAPI) +description: + - Builds OCAPI URIs locally and sends them to remote OOB controllers to perform an action. 
+ - Manages OOB controller such as Indicator LED, Reboot, Power Mode, Firmware Update. +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: none +options: + category: + required: true + description: + - Category to execute on OOB controller. + type: str + command: + required: true + description: + - Command to execute on OOB controller. + type: str + baseuri: + required: true + description: + - Base URI of OOB controller. + type: str + proxy_slot_number: + description: For proxied inband requests, the slot number of the IOM. Only applies if O(baseuri) is a proxy server. + type: int + update_image_path: + required: false + description: + - For O(command=FWUpload), the path on the local filesystem of the firmware update image. + type: str + job_name: + required: false + description: + - For O(command=DeleteJob) command, the name of the job to delete. + type: str + username: + required: true + description: + - Username for authenticating to OOB controller. + type: str + password: + required: true + description: + - Password for authenticating to OOB controller. + type: str + timeout: + description: + - Timeout in seconds for URL requests to OOB controller. 
+ default: 10 + type: int + +author: "Mike Moerk (@mikemoerk)" +""" + +EXAMPLES = r""" +- name: Set the power state to low + community.general.ocapi_command: + category: Chassis + command: PowerModeLow + baseuri: "{{ baseuri }}" + username: "{{ username }}" + password: "{{ password }}" + +- name: Set the power state to normal + community.general.ocapi_command: + category: Chassis + command: PowerModeNormal + baseuri: "{{ baseuri }}" + username: "{{ username }}" + password: "{{ password }}" +- name: Set chassis indicator LED to on + community.general.ocapi_command: + category: Chassis + command: IndicatorLedOn + baseuri: "{{ baseuri }}" + proxy_slot_number: 2 + username: "{{ username }}" + password: "{{ password }}" +- name: Set chassis indicator LED to off + community.general.ocapi_command: + category: Chassis + command: IndicatorLedOff + baseuri: "{{ baseuri }}" + proxy_slot_number: 2 + username: "{{ username }}" + password: "{{ password }}" +- name: Reset Enclosure + community.general.ocapi_command: + category: Systems + command: PowerGracefulRestart + baseuri: "{{ baseuri }}" + proxy_slot_number: 2 + username: "{{ username }}" + password: "{{ password }}" +- name: Firmware Upload + community.general.ocapi_command: + category: Update + command: FWUpload + baseuri: "iom1.wdc.com" + proxy_slot_number: 2 + username: "{{ username }}" + password: "{{ password }}" + update_image_path: "/path/to/firmware.tar.gz" +- name: Firmware Update + community.general.ocapi_command: + category: Update + command: FWUpdate + baseuri: "iom1.wdc.com" + proxy_slot_number: 2 + username: "{{ username }}" + password: "{{ password }}" +- name: Firmware Activate + community.general.ocapi_command: + category: Update + command: FWActivate + baseuri: "iom1.wdc.com" + proxy_slot_number: 2 + username: "{{ username }}" + password: "{{ password }}" +- name: Delete Job + community.general.ocapi_command: + category: Jobs + command: DeleteJob + job_name: FirmwareUpdate + baseuri: "{{ baseuri }}" + 
proxy_slot_number: 2 + username: "{{ username }}" + password: "{{ password }}" +""" + +RETURN = r""" +msg: + description: Message with action result or error description. + returned: always + type: str + sample: "Action was successful" + +jobUri: + description: URI to use to monitor status of the operation. Returned for async commands such as Firmware Update, Firmware + Activate. + returned: when supported + type: str + sample: "https://ioma.wdc.com/Storage/Devices/openflex-data24-usalp03020qb0003/Jobs/FirmwareUpdate/" + +operationStatusId: + description: OCAPI State ID (see OCAPI documentation for possible values). + returned: when supported + type: int + sample: 2 +""" + +from urllib.parse import urljoin +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.ocapi_utils import OcapiUtils +from ansible.module_utils.common.text.converters import to_native + +# More will be added as module features are expanded +CATEGORY_COMMANDS_ALL = { + "Chassis": ["IndicatorLedOn", "IndicatorLedOff", "PowerModeLow", "PowerModeNormal"], + "Systems": ["PowerGracefulRestart"], + "Update": ["FWUpload", "FWUpdate", "FWActivate"], + "Jobs": ["DeleteJob"] +} + + +def main(): + result = {} + module = AnsibleModule( + argument_spec=dict( + category=dict(required=True), + command=dict(required=True, type='str'), + job_name=dict(type='str'), + baseuri=dict(required=True, type='str'), + proxy_slot_number=dict(type='int'), + update_image_path=dict(type='str'), + username=dict(required=True), + password=dict(required=True, no_log=True), + timeout=dict(type='int', default=10) + ), + supports_check_mode=True + ) + + category = module.params['category'] + command = module.params['command'] + + # admin credentials used for authentication + creds = { + 'user': module.params['username'], + 'pswd': module.params['password'] + } + + # timeout + timeout = module.params['timeout'] + + base_uri = "https://" + module.params["baseuri"] + 
proxy_slot_number = module.params.get("proxy_slot_number") + ocapi_utils = OcapiUtils(creds, base_uri, proxy_slot_number, timeout, module) + + # Check that Category is valid + if category not in CATEGORY_COMMANDS_ALL: + module.fail_json(msg=to_native("Invalid Category '%s'. Valid Categories = %s" % (category, list(CATEGORY_COMMANDS_ALL.keys())))) + + # Check that the command is valid + if command not in CATEGORY_COMMANDS_ALL[category]: + module.fail_json(msg=to_native("Invalid Command '%s'. Valid Commands = %s" % (command, CATEGORY_COMMANDS_ALL[category]))) + + # Organize by Categories / Commands + if category == "Chassis": + if command.startswith("IndicatorLed"): + result = ocapi_utils.manage_chassis_indicator_led(command) + elif command.startswith("PowerMode"): + result = ocapi_utils.manage_system_power(command) + elif category == "Systems": + if command.startswith("Power"): + result = ocapi_utils.manage_system_power(command) + elif category == "Update": + if command == "FWUpload": + update_image_path = module.params.get("update_image_path") + if update_image_path is None: + module.fail_json(msg=to_native("Missing update_image_path.")) + result = ocapi_utils.upload_firmware_image(update_image_path) + elif command == "FWUpdate": + result = ocapi_utils.update_firmware_image() + elif command == "FWActivate": + result = ocapi_utils.activate_firmware_image() + elif category == "Jobs": + if command == "DeleteJob": + job_name = module.params.get("job_name") + if job_name is None: + module.fail_json("Missing job_name") + job_uri = urljoin(base_uri, "Jobs/" + job_name) + result = ocapi_utils.delete_job(job_uri) + + if result['ret'] is False: + module.fail_json(msg=to_native(result['msg'])) + else: + del result['ret'] + changed = result.get('changed', True) + session = result.get('session', dict()) + kwargs = { + "changed": changed, + "session": session, + "msg": "Action was successful." 
if not module.check_mode else result.get( + "msg", "No action performed in check mode." + ) + } + result_keys = [result_key for result_key in result if result_key not in kwargs] + for result_key in result_keys: + kwargs[result_key] = result[result_key] + module.exit_json(**kwargs) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/ocapi_info.py b/plugins/modules/ocapi_info.py new file mode 100644 index 0000000000..3eb0422054 --- /dev/null +++ b/plugins/modules/ocapi_info.py @@ -0,0 +1,221 @@ +#!/usr/bin/python + +# Copyright (c) 2022 Western Digital Corporation +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + +DOCUMENTATION = r""" +module: ocapi_info +version_added: 6.3.0 +short_description: Manages Out-Of-Band controllers using Open Composable API (OCAPI) +description: + - Builds OCAPI URIs locally and sends them to remote OOB controllers to get information back. +extends_documentation_fragment: + - community.general.attributes + - community.general.attributes.info_module +options: + category: + required: true + description: + - Category to execute on OOB controller. + type: str + command: + required: true + description: + - Command to execute on OOB controller. + type: str + baseuri: + required: true + description: + - Base URI of OOB controller. + type: str + proxy_slot_number: + description: For proxied inband requests, the slot number of the IOM. Only applies if O(baseuri) is a proxy server. + type: int + username: + required: true + description: + - Username for authenticating to OOB controller. + type: str + password: + required: true + description: + - Password for authenticating to OOB controller. + type: str + timeout: + description: + - Timeout in seconds for URL requests to OOB controller. + default: 10 + type: int + job_name: + description: + - Name of job for fetching status. 
+ type: str + + +author: "Mike Moerk (@mikemoerk)" +""" + +EXAMPLES = r""" +- name: Get job status + community.general.ocapi_info: + category: Status + command: JobStatus + baseuri: "http://iom1.wdc.com" + jobName: FirmwareUpdate + username: "{{ username }}" + password: "{{ password }}" +""" + +RETURN = r""" +msg: + description: Message with action result or error description. + returned: always + type: str + sample: "Action was successful" + +percentComplete: + description: Percent complete of the relevant operation. Applies to O(command=JobStatus). + returned: when supported + type: int + sample: 99 + +operationStatus: + description: Status of the relevant operation. Applies to O(command=JobStatus). See OCAPI documentation for details. + returned: when supported + type: str + sample: "Activate needed" + +operationStatusId: + description: Integer value of status (corresponds to operationStatus). Applies to O(command=JobStatus). See OCAPI documentation + for details. + returned: when supported + type: int + sample: 65540 + +operationHealth: + description: Health of the operation. Applies to O(command=JobStatus). See OCAPI documentation for details. + returned: when supported + type: str + sample: "OK" + +operationHealthId: + description: >- + Integer value for health of the operation (corresponds to RV(operationHealth)). Applies to O(command=JobStatus). See OCAPI + documentation for details. + returned: when supported + type: str + sample: "OK" + +details: + description: Details of the relevant operation. Applies to O(command=JobStatus). + returned: when supported + type: list + elements: str + +status: + description: Dictionary containing status information. See OCAPI documentation for details. 
+ returned: when supported + type: dict + sample: + { + "Details": [ + "None" + ], + "Health": [ + { + "ID": 5, + "Name": "OK" + } + ], + "State": { + "ID": 16, + "Name": "In service" + } + } +""" + +from urllib.parse import urljoin + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.ocapi_utils import OcapiUtils +from ansible.module_utils.common.text.converters import to_native + +# More will be added as module features are expanded +CATEGORY_COMMANDS_ALL = { + "Jobs": ["JobStatus"] +} + + +def main(): + result = {} + module = AnsibleModule( + argument_spec=dict( + category=dict(required=True), + command=dict(required=True, type='str'), + job_name=dict(type='str'), + baseuri=dict(required=True, type='str'), + proxy_slot_number=dict(type='int'), + username=dict(required=True), + password=dict(required=True, no_log=True), + timeout=dict(type='int', default=10) + ), + supports_check_mode=True + ) + + category = module.params['category'] + command = module.params['command'] + + # admin credentials used for authentication + creds = { + 'user': module.params['username'], + 'pswd': module.params['password'] + } + + # timeout + timeout = module.params['timeout'] + + base_uri = "https://" + module.params["baseuri"] + proxy_slot_number = module.params.get("proxy_slot_number") + ocapi_utils = OcapiUtils(creds, base_uri, proxy_slot_number, timeout, module) + + # Check that Category is valid + if category not in CATEGORY_COMMANDS_ALL: + module.fail_json(msg=to_native("Invalid Category '%s'. Valid Categories = %s" % (category, list(CATEGORY_COMMANDS_ALL.keys())))) + + # Check that the command is valid + if command not in CATEGORY_COMMANDS_ALL[category]: + module.fail_json(msg=to_native("Invalid Command '%s'. 
Valid Commands = %s" % (command, CATEGORY_COMMANDS_ALL[category]))) + + # Organize by Categories / Commands + if category == "Jobs": + if command == "JobStatus": + if module.params.get("job_name") is None: + module.fail_json(msg=to_native( + "job_name required for JobStatus command.")) + job_uri = urljoin(base_uri, 'Jobs/' + module.params["job_name"]) + result = ocapi_utils.get_job_status(job_uri) + + if result['ret'] is False: + module.fail_json(msg=to_native(result['msg'])) + else: + del result['ret'] + changed = False + session = result.get('session', dict()) + kwargs = { + "changed": changed, + "session": session, + "msg": "Action was successful." if not module.check_mode else result.get( + "msg", "No action performed in check mode." + ) + } + result_keys = [result_key for result_key in result if result_key not in kwargs] + for result_key in result_keys: + kwargs[result_key] = result[result_key] + module.exit_json(**kwargs) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/cloud/oracle/oci_vcn.py b/plugins/modules/oci_vcn.py similarity index 55% rename from plugins/modules/cloud/oracle/oci_vcn.py rename to plugins/modules/oci_vcn.py index a82914bdea..ef7d7c4994 100644 --- a/plugins/modules/cloud/oracle/oci_vcn.py +++ b/plugins/modules/oci_vcn.py @@ -1,60 +1,65 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- # Copyright (c) 2017, 2018, Oracle and/or its affiliates. -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import absolute_import, division, print_function +from __future__ import annotations -__metaclass__ = type -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: oci_vcn short_description: Manage Virtual Cloud Networks(VCN) in OCI +deprecated: + removed_in: 13.0.0 + why: Superseded by official Oracle collection. 
+ alternative: Use module C(oci_network_vcn) from the C(oracle.oci) collection. description: - - This module allows the user to create, delete and update virtual cloud networks(VCNs) in OCI. - The complete Oracle Cloud Infrastructure Ansible Modules can be downloaded from - U(https://github.com/oracle/oci-ansible-modules/releases). + - This module allows the user to create, delete and update virtual cloud networks(VCNs) in OCI. The complete Oracle Cloud + Infrastructure Ansible Modules can be downloaded from U(https://github.com/oracle/oci-ansible-modules/releases). +attributes: + check_mode: + support: none + diff_mode: + support: none options: - cidr_block: - description: The CIDR IP address block of the VCN. Required when creating a VCN with I(state=present). - type: str - required: false - compartment_id: - description: The OCID of the compartment to contain the VCN. Required when creating a VCN with I(state=present). - This option is mutually exclusive with I(vcn_id). - type: str - display_name: - description: A user-friendly name. Does not have to be unique, and it's changeable. - type: str - aliases: [ 'name' ] - dns_label: - description: A DNS label for the VCN, used in conjunction with the VNIC's hostname and subnet's DNS label to - form a fully qualified domain name (FQDN) for each VNIC within this subnet (for example, - bminstance-1.subnet123.vcn1.oraclevcn.com). Not required to be unique, but it's a best practice - to set unique DNS labels for VCNs in your tenancy. Must be an alphanumeric string that begins - with a letter. The value cannot be changed. - type: str - state: - description: Create or update a VCN with I(state=present). Use I(state=absent) to delete a VCN. - type: str - default: present - choices: ['present', 'absent'] - vcn_id: - description: The OCID of the VCN. Required when deleting a VCN with I(state=absent) or updating a VCN - with I(state=present). This option is mutually exclusive with I(compartment_id). 
- type: str - aliases: [ 'id' ] + cidr_block: + description: The CIDR IP address block of the VCN. Required when creating a VCN with O(state=present). + type: str + required: false + compartment_id: + description: The OCID of the compartment to contain the VCN. Required when creating a VCN with O(state=present). This + option is mutually exclusive with O(vcn_id). + type: str + display_name: + description: A user-friendly name. Does not have to be unique, and it is changeable. + type: str + aliases: ['name'] + dns_label: + description: A DNS label for the VCN, used in conjunction with the VNIC's hostname and subnet's DNS label to form a fully + qualified domain name (FQDN) for each VNIC within this subnet (for example, V(bminstance-1.subnet123.vcn1.oraclevcn.com)). + Not required to be unique, but it is a best practice to set unique DNS labels for VCNs in your tenancy. Must be an alphanumeric + string that begins with a letter. The value cannot be changed. + type: str + state: + description: Create or update a VCN with O(state=present). Use O(state=absent) to delete a VCN. + type: str + default: present + choices: ['present', 'absent'] + vcn_id: + description: The OCID of the VCN. Required when deleting a VCN with O(state=absent) or updating a VCN with O(state=present). + This option is mutually exclusive with O(compartment_id). 
+ type: str + aliases: ['id'] author: "Rohit Chaware (@rohitChaware)" extends_documentation_fragment: -- community.general.oracle -- community.general.oracle_creatable_resource -- community.general.oracle_wait_options -- community.general.oracle_tags + - community.general.oracle + - community.general.oracle_creatable_resource + - community.general.oracle_wait_options + - community.general.oracle_tags + - community.general.attributes +""" -''' - -EXAMPLES = """ +EXAMPLES = r""" - name: Create a VCN community.general.oci_vcn: cidr_block: '10.0.0.0/16' @@ -73,24 +78,25 @@ EXAMPLES = """ state: absent """ -RETURN = """ +RETURN = r""" vcn: - description: Information about the VCN - returned: On successful create and update operation - type: dict - sample: { - "cidr_block": "10.0.0.0/16", - compartment_id": "ocid1.compartment.oc1..xxxxxEXAMPLExxxxx", - "default_dhcp_options_id": "ocid1.dhcpoptions.oc1.phx.xxxxxEXAMPLExxxxx", - "default_route_table_id": "ocid1.routetable.oc1.phx.xxxxxEXAMPLExxxxx", - "default_security_list_id": "ocid1.securitylist.oc1.phx.xxxxxEXAMPLExxxxx", - "display_name": "ansible_vcn", - "dns_label": "ansiblevcn", - "id": "ocid1.vcn.oc1.phx.xxxxxEXAMPLExxxxx", - "lifecycle_state": "AVAILABLE", - "time_created": "2017-11-13T20:22:40.626000+00:00", - "vcn_domain_name": "ansiblevcn.oraclevcn.com" - } + description: Information about the VCN. 
+ returned: On successful create and update operation + type: dict + sample: + { + "cidr_block": "10.0.0.0/16", + "compartment_id": "ocid1.compartment.oc1..xxxxxEXAMPLExxxxx", + "default_dhcp_options_id": "ocid1.dhcpoptions.oc1.phx.xxxxxEXAMPLExxxxx", + "default_route_table_id": "ocid1.routetable.oc1.phx.xxxxxEXAMPLExxxxx", + "default_security_list_id": "ocid1.securitylist.oc1.phx.xxxxxEXAMPLExxxxx", + "display_name": "ansible_vcn", + "dns_label": "ansiblevcn", + "id": "ocid1.vcn.oc1.phx.xxxxxEXAMPLExxxxx", + "lifecycle_state": "AVAILABLE", + "time_created": "2017-11-13T20:22:40.626000+00:00", + "vcn_domain_name": "ansiblevcn.oraclevcn.com" + } """ from ansible.module_utils.basic import AnsibleModule, missing_required_lib @@ -158,17 +164,12 @@ def main(): ) module_args.update( dict( - cidr_block=dict(type="str", required=False), - compartment_id=dict(type="str", required=False), - display_name=dict(type="str", required=False, aliases=["name"]), - dns_label=dict(type="str", required=False), - state=dict( - type="str", - required=False, - default="present", - choices=["absent", "present"], - ), - vcn_id=dict(type="str", required=False, aliases=["id"]), + cidr_block=dict(type="str"), + compartment_id=dict(type="str"), + display_name=dict(type="str", aliases=["name"]), + dns_label=dict(type="str"), + state=dict(type="str", default="present", choices=["absent", "present"]), + vcn_id=dict(type="str", aliases=["id"]), ) ) diff --git a/plugins/modules/database/misc/odbc.py b/plugins/modules/odbc.py similarity index 62% rename from plugins/modules/database/misc/odbc.py rename to plugins/modules/odbc.py index 5d1cdf884b..5fc2e8b18d 100644 --- a/plugins/modules/database/misc/odbc.py +++ b/plugins/modules/odbc.py @@ -1,55 +1,59 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- -# Copyright: (c) 2019, John Westcott -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# Copyright (c) 2019, John Westcott +# GNU General Public License v3.0+ 
(see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: odbc author: "John Westcott IV (@john-westcott-iv)" version_added: "1.0.0" -short_description: Execute SQL via ODBC +short_description: Execute SQL using ODBC description: - - Read/Write info via ODBC drivers. + - Read/Write info using ODBC drivers. +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: none + diff_mode: + support: none options: - dsn: - description: - - The connection string passed into ODBC. - required: yes - type: str - query: - description: - - The SQL query to perform. - required: yes - type: str - params: - description: - - Parameters to pass to the SQL query. - type: list - elements: str - commit: - description: - - Perform a commit after the execution of the SQL query. - - Some databases allow a commit after a select whereas others raise an exception. - - Default is C(true) to support legacy module behavior. - type: bool - default: yes - version_added: 1.3.0 + dsn: + description: + - The connection string passed into ODBC. + required: true + type: str + query: + description: + - The SQL query to perform. + required: true + type: str + params: + description: + - Parameters to pass to the SQL query. + type: list + elements: str + commit: + description: + - Perform a commit after the execution of the SQL query. + - Some databases allow a commit after a select whereas others raise an exception. + - Default is V(true) to support legacy module behavior. + type: bool + default: true + version_added: 1.3.0 requirements: - - "python >= 2.6" - "pyodbc" notes: - - "Like the command module, this module always returns changed = yes whether or not the query would change the database." 
- - "To alter this behavior you can use C(changed_when): [yes or no]." - - "For details about return values (description and row_count) see U(https://github.com/mkleehammer/pyodbc/wiki/Cursor)." -''' + - Like the command module, this module always returns V(changed=true) whether or not the query would change the database. + - 'To alter this behavior you can use C(changed_when): [true or false].' + - For details about return values (RV(description) and RV(row_count)) see U(https://github.com/mkleehammer/pyodbc/wiki/Cursor). +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: Set some values in the test db community.general.odbc: dsn: "DRIVER={ODBC Driver 13 for SQL Server};Server=db.ansible.com;Database=my_db;UID=admin;PWD=password;" @@ -57,25 +61,26 @@ EXAMPLES = ''' params: - "value1" commit: false - changed_when: no -''' + changed_when: false +""" -RETURN = ''' +# @FIXME RV 'results' is meant to be used when 'loop:' was used with the module. +RETURN = r""" results: - description: List of lists of strings containing selected rows, likely empty for DDL statements. - returned: success - type: list - elements: list + description: List of lists of strings containing selected rows, likely empty for DDL statements. + returned: success + type: list + elements: list description: - description: "List of dicts about the columns selected from the cursors, likely empty for DDL statements. See notes." - returned: success - type: list - elements: dict + description: "List of dicts about the columns selected from the cursors, likely empty for DDL statements. See notes." + returned: success + type: list + elements: dict row_count: - description: "The number of rows selected or modified according to the cursor defaults to -1. See notes." - returned: success - type: str -''' + description: "The number of rows selected or modified according to the cursor defaults to V(-1). See notes." 
+ returned: success + type: str +""" from ansible.module_utils.basic import AnsibleModule, missing_required_lib from ansible.module_utils.common.text.converters import to_native diff --git a/plugins/modules/notification/office_365_connector_card.py b/plugins/modules/office_365_connector_card.py similarity index 62% rename from plugins/modules/notification/office_365_connector_card.py rename to plugins/modules/office_365_connector_card.py index 04d5e385d4..abfdf93cce 100644 --- a/plugins/modules/notification/office_365_connector_card.py +++ b/plugins/modules/office_365_connector_card.py @@ -1,22 +1,28 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- # Copyright (c) 2017 Marc Sensenich # Copyright (c) 2017 Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = ''' +DOCUMENTATION = r""" module: office_365_connector_card short_description: Use webhooks to create Connector Card messages within an Office 365 group description: - - Creates Connector Card messages through - - Office 365 Connectors U(https://dev.outlook.com/Connectors) + - Creates Connector Card messages through Office 365 Connectors. + - See + U(https://learn.microsoft.com/en-us/microsoftteams/platform/task-modules-and-cards/cards/cards-reference#connector-card-for-microsoft-365-groups). author: "Marc Sensenich (@marc-sensenich)" notes: - - This module is not idempotent, therefore if the same task is run twice - there will be two Connector Cards created + - This module is not idempotent, therefore if you run the same task twice then you create two Connector Cards. 
+extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: none options: webhook: type: str @@ -27,8 +33,8 @@ options: type: str description: - A string used for summarizing card content. - - This will be shown as the message subject. - - This is required if the text parameter isn't populated. + - This is the message subject. + - This is required if the text parameter is not populated. color: type: str description: @@ -41,23 +47,22 @@ options: type: str description: - The main text of the card. - - This will be rendered below the sender information and optional title, - - and above any sections or actions present. + - This is rendered below the sender information and optional title, + - And above any sections or actions present. actions: type: list elements: dict description: - - This array of objects will power the action links - - found at the bottom of the card. + - This array of objects is used to power the action links found at the bottom of the card. sections: type: list elements: dict description: - Contains a list of sections to display in the card. - - For more information see https://dev.outlook.com/Connectors/reference. -''' + - For more information see U(https://learn.microsoft.com/en-us/outlook/actionable-messages/message-card-reference#section-fields). +""" -EXAMPLES = """ +EXAMPLES = r""" - name: Create a simple Connector Card community.general.office_365_connector_card: webhook: https://outlook.office.com/webhook/GUID/IncomingWebhook/GUID/GUID @@ -68,71 +73,70 @@ EXAMPLES = """ webhook: https://outlook.office.com/webhook/GUID/IncomingWebhook/GUID/GUID summary: This is the summary property title: This is the **card's title** property - text: This is the **card's text** property. Lorem ipsum dolor sit amet, consectetur - adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. + text: This is the **card's text** property. 
Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod + tempor incididunt ut labore et dolore magna aliqua. color: E81123 sections: - - title: This is the **section's title** property - activity_image: http://connectorsdemo.azurewebsites.net/images/MSC12_Oscar_002.jpg - activity_title: This is the section's **activityTitle** property - activity_subtitle: This is the section's **activitySubtitle** property - activity_text: This is the section's **activityText** property. - hero_image: - image: http://connectorsdemo.azurewebsites.net/images/WIN12_Scene_01.jpg - title: This is the image's alternate text - text: This is the section's text property. Lorem ipsum dolor sit amet, consectetur - adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. - facts: - - name: This is a fact name - value: This is a fact value - - name: This is a fact name - value: This is a fact value - - name: This is a fact name - value: This is a fact value - images: - - image: http://connectorsdemo.azurewebsites.net/images/MicrosoftSurface_024_Cafe_OH-06315_VS_R1c.jpg - title: This is the image's alternate text - - image: http://connectorsdemo.azurewebsites.net/images/WIN12_Scene_01.jpg - title: This is the image's alternate text - - image: http://connectorsdemo.azurewebsites.net/images/WIN12_Anthony_02.jpg - title: This is the image's alternate text - actions: - - "@type": ActionCard - name: Comment - inputs: - - "@type": TextInput - id: comment - is_multiline: true - title: Input's title property + - title: This is the **section's title** property + activity_image: http://connectorsdemo.azurewebsites.net/images/MSC12_Oscar_002.jpg + activity_title: This is the section's **activityTitle** property + activity_subtitle: This is the section's **activitySubtitle** property + activity_text: This is the section's **activityText** property. 
+ hero_image: + image: http://connectorsdemo.azurewebsites.net/images/WIN12_Scene_01.jpg + title: This is the image's alternate text + text: This is the section's text property. Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod + tempor incididunt ut labore et dolore magna aliqua. + facts: + - name: This is a fact name + value: This is a fact value + - name: This is a fact name + value: This is a fact value + - name: This is a fact name + value: This is a fact value + images: + - image: http://connectorsdemo.azurewebsites.net/images/MicrosoftSurface_024_Cafe_OH-06315_VS_R1c.jpg + title: This is the image's alternate text + - image: http://connectorsdemo.azurewebsites.net/images/WIN12_Scene_01.jpg + title: This is the image's alternate text + - image: http://connectorsdemo.azurewebsites.net/images/WIN12_Anthony_02.jpg + title: This is the image's alternate text actions: - - "@type": HttpPOST - name: Save - target: http://... - - "@type": ActionCard - name: Due Date - inputs: - - "@type": DateInput - id: dueDate - title: Input's title property - actions: - - "@type": HttpPOST - name: Save - target: http://... - - "@type": HttpPOST - name: Action's name prop. - target: http://... - - "@type": OpenUri - name: Action's name prop - targets: - - os: default - uri: http://... - - start_group: true - title: This is the title of a **second section** - text: This second section is visually separated from the first one by setting its - **startGroup** property to true. + - "@type": ActionCard + name: Comment + inputs: + - "@type": TextInput + id: comment + is_multiline: true + title: Input's title property + actions: + - "@type": HttpPOST + name: Save + target: http://... + - "@type": ActionCard + name: Due Date + inputs: + - "@type": DateInput + id: dueDate + title: Input's title property + actions: + - "@type": HttpPOST + name: Save + target: http://... + - "@type": HttpPOST + name: Action's name prop. + target: http://... 
+ - "@type": OpenUri + name: Action's name prop + targets: + - os: default + uri: http://... + - start_group: true + title: This is the title of a **second section** + text: This second section is visually separated from the first one by setting its **startGroup** property to true. """ -RETURN = """ +RETURN = r""" """ # import module snippets diff --git a/plugins/modules/ohai.py b/plugins/modules/ohai.py new file mode 100644 index 0000000000..6d30a06230 --- /dev/null +++ b/plugins/modules/ohai.py @@ -0,0 +1,50 @@ +#!/usr/bin/python + +# Copyright (c) 2012, Michael DeHaan +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + + +DOCUMENTATION = r""" +module: ohai +short_description: Returns inventory data from I(Ohai) +description: + - Similar to the M(community.general.facter_facts) module, this runs the I(Ohai) discovery program (U(https://docs.chef.io/ohai.html)) + on the remote host and returns JSON inventory data. I(Ohai) data is a bit more verbose and nested than I(facter). +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: none + diff_mode: + support: none +options: {} +notes: [] +requirements: ["ohai"] +author: + - "Ansible Core Team" + - "Michael DeHaan (@mpdehaan)" +""" + +EXAMPLES = r""" +ansible webservers -m ohai --tree=/tmp/ohaidata +... 
+""" +import json + +from ansible.module_utils.basic import AnsibleModule + + +def main(): + module = AnsibleModule( + argument_spec=dict() + ) + cmd = ["/usr/bin/env", "ohai"] + rc, out, err = module.run_command(cmd, check_rc=True) + module.exit_json(**json.loads(out)) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/net_tools/omapi_host.py b/plugins/modules/omapi_host.py similarity index 77% rename from plugins/modules/net_tools/omapi_host.py rename to plugins/modules/omapi_host.py index 4d65fcb95d..5dfa01b19e 100644 --- a/plugins/modules/net_tools/omapi_host.py +++ b/plugins/modules/omapi_host.py @@ -1,78 +1,82 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- -# copyright: (c) 2016, Loic Blot +# Copyright (c) 2016, Loic Blot # Sponsored by Infopro Digital. http://www.infopro-digital.com/ # Sponsored by E.T.A.I. http://www.etai.fr/ -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = r''' ---- +DOCUMENTATION = r""" module: omapi_host -short_description: Setup OMAPI hosts. -description: Manage OMAPI hosts into compatible DHCPd servers +short_description: Setup OMAPI hosts +description: Manage OMAPI hosts into compatible DHCPd servers. requirements: - pypureomapi author: -- Loic Blot (@nerzhul) + - Loic Blot (@nerzhul) +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: none + diff_mode: + support: none options: - state: - description: - - Create or remove OMAPI host. - type: str - required: true - choices: [ absent, present ] - hostname: - description: - - Sets the host lease hostname (mandatory if state=present). 
- type: str - aliases: [ name ] - host: - description: - - Sets OMAPI server host to interact with. - type: str - default: localhost - port: - description: - - Sets the OMAPI server port to interact with. - type: int - default: 7911 - key_name: - description: - - Sets the TSIG key name for authenticating against OMAPI server. - type: str - required: true - key: - description: - - Sets the TSIG key content for authenticating against OMAPI server. - type: str - required: true - macaddr: - description: - - Sets the lease host MAC address. - type: str - required: true - ip: - description: - - Sets the lease host IP address. - type: str - statements: - description: - - Attach a list of OMAPI DHCP statements with host lease (without ending semicolon). - type: list - elements: str - default: [] - ddns: - description: - - Enable dynamic DNS updates for this host. - type: bool - default: no - -''' -EXAMPLES = r''' + state: + description: + - Create or remove OMAPI host. + type: str + required: true + choices: [absent, present] + hostname: + description: + - Sets the host lease hostname (mandatory if O(state=present)). + type: str + aliases: [name] + host: + description: + - Sets OMAPI server host to interact with. + type: str + default: localhost + port: + description: + - Sets the OMAPI server port to interact with. + type: int + default: 7911 + key_name: + description: + - Sets the TSIG key name for authenticating against OMAPI server. + type: str + required: true + key: + description: + - Sets the TSIG key content for authenticating against OMAPI server. + type: str + required: true + macaddr: + description: + - Sets the lease host MAC address. + type: str + required: true + ip: + description: + - Sets the lease host IP address. + type: str + statements: + description: + - Attach a list of OMAPI DHCP statements with host lease (without ending semicolon). + type: list + elements: str + default: [] + ddns: + description: + - Enable dynamic DNS updates for this host. 
+ type: bool + default: false +""" +EXAMPLES = r""" - name: Add a host using OMAPI community.general.omapi_host: key_name: defomapi @@ -81,10 +85,10 @@ EXAMPLES = r''' macaddr: 44:dd:ab:dd:11:44 name: server01 ip: 192.168.88.99 - ddns: yes + ddns: true statements: - - filename "pxelinux.0" - - next-server 1.1.1.1 + - filename "pxelinux.0" + - next-server 1.1.1.1 state: present - name: Remove a host using OMAPI @@ -94,35 +98,35 @@ EXAMPLES = r''' host: 10.1.1.1 macaddr: 00:66:ab:dd:11:44 state: absent -''' +""" -RETURN = r''' +RETURN = r""" lease: - description: dictionary containing host information - returned: success - type: complex - contains: - ip-address: - description: IP address, if there is. - returned: success - type: str - sample: '192.168.1.5' - hardware-address: - description: MAC address - returned: success - type: str - sample: '00:11:22:33:44:55' - hardware-type: - description: hardware type, generally '1' - returned: success - type: int - sample: 1 - name: - description: hostname - returned: success - type: str - sample: 'mydesktop' -''' + description: Dictionary containing host information. + returned: success + type: complex + contains: + ip-address: + description: IP address, if there is. + returned: success + type: str + sample: '192.168.1.5' + hardware-address: + description: MAC address. + returned: success + type: str + sample: '00:11:22:33:44:55' + hardware-type: + description: Hardware type, generally V(1). + returned: success + type: int + sample: 1 + name: + description: Hostname. 
+ returned: success + type: str + sample: 'mydesktop' +""" import binascii import socket diff --git a/plugins/modules/cloud/opennebula/one_host.py b/plugins/modules/one_host.py similarity index 62% rename from plugins/modules/cloud/opennebula/one_host.py rename to plugins/modules/one_host.py index f205a40a2c..e5781fb07f 100644 --- a/plugins/modules/cloud/opennebula/one_host.py +++ b/plugins/modules/one_host.py @@ -1,89 +1,93 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- # # Copyright 2018 www.privaz.io Valletech AB -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later # Make coding more python3-ish -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: one_host short_description: Manages OpenNebula Hosts requirements: - - pyone + - pyone description: - - "Manages OpenNebula Hosts" + - Manages OpenNebula Hosts. +attributes: + check_mode: + support: none + diff_mode: + support: none options: - name: - description: - - Hostname of the machine to manage. - required: true - type: str - state: - description: - - Takes the host to the desired lifecycle state. - - If C(absent) the host will be deleted from the cluster. - - If C(present) the host will be created in the cluster (includes C(enabled), C(disabled) and C(offline) states). - - If C(enabled) the host is fully operational. - - C(disabled), e.g. to perform maintenance operations. - - C(offline), host is totally offline. 
- choices: - - absent - - present - - enabled - - disabled - - offline - default: present - type: str - im_mad_name: - description: - - The name of the information manager, this values are taken from the oned.conf with the tag name IM_MAD (name) - default: kvm - type: str - vmm_mad_name: - description: - - The name of the virtual machine manager mad name, this values are taken from the oned.conf with the tag name VM_MAD (name) - default: kvm - type: str - cluster_id: - description: - - The cluster ID. - default: 0 - type: int - cluster_name: - description: - - The cluster specified by name. - type: str - labels: - description: - - The labels for this host. - type: list - elements: str - template: - description: - - The template or attribute changes to merge into the host template. - aliases: - - attributes - type: dict + name: + description: + - Hostname of the machine to manage. + required: true + type: str + state: + description: + - Takes the host to the desired lifecycle state. + - If V(absent) the host is deleted from the cluster. + - If V(present) the host is created in the cluster (includes V(enabled), V(disabled) and V(offline) states). + - If V(enabled) the host is fully operational. + - V(disabled), for example to perform maintenance operations. + - V(offline), host is totally offline. + choices: + - absent + - present + - enabled + - disabled + - offline + default: present + type: str + im_mad_name: + description: + - The name of the information manager, this values are taken from the oned.conf with the tag name IM_MAD (name). + default: kvm + type: str + vmm_mad_name: + description: + - The name of the virtual machine manager mad name, this values are taken from the oned.conf with the tag name VM_MAD + (name). + default: kvm + type: str + cluster_id: + description: + - The cluster ID. + default: 0 + type: int + cluster_name: + description: + - The cluster specified by name. + type: str + labels: + description: + - The labels for this host. 
+ type: list + elements: str + template: + description: + - The template or attribute changes to merge into the host template. + aliases: + - attributes + type: dict extends_documentation_fragment: -- community.general.opennebula - + - community.general.opennebula + - community.general.attributes author: - - Rafael del Valle (@rvalle) -''' + - Rafael del Valle (@rvalle) +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: Create a new host in OpenNebula community.general.one_host: name: host1 @@ -95,15 +99,15 @@ EXAMPLES = ''' name: host2 cluster_name: default template: - LABELS: - - gold - - ssd - RESERVED_CPU: -100 -''' + LABELS: + - gold + - ssd + RESERVED_CPU: -100 +""" # TODO: pending setting guidelines on returned values -RETURN = ''' -''' +RETURN = r""" +""" # TODO: Documentation on valid state transitions is required to properly implement all valid cases # TODO: To be coherent with CLI this module should also provide "flush" functionality @@ -145,16 +149,19 @@ class HostModule(OpenNebulaModule): def allocate_host(self): """ Creates a host entry in OpenNebula + self.one.host.allocate returns ID of a host Returns: True on success, fails otherwise. 
""" - if not self.one.host.allocate(self.get_parameter('name'), - self.get_parameter('vmm_mad_name'), - self.get_parameter('im_mad_name'), - self.get_parameter('cluster_id')): - self.fail(msg="could not allocate host") - else: + try: + self.one.host.allocate(self.get_parameter('name'), + self.get_parameter('vmm_mad_name'), + self.get_parameter('im_mad_name'), + self.get_parameter('cluster_id')) self.result['changed'] = True + except Exception as e: + self.fail(msg="Could not allocate host, ERROR: " + str(e)) + return True def wait_for_host_state(self, host, target_states): @@ -214,11 +221,13 @@ class HostModule(OpenNebulaModule): if current_state == HOST_ABSENT: self.fail(msg='absent host cannot be put in disabled state') elif current_state in [HOST_STATES.MONITORED, HOST_STATES.OFFLINE]: - if one.host.status(host.ID, HOST_STATUS.DISABLED): - self.wait_for_host_state(host, [HOST_STATES.DISABLED]) + # returns host ID integer + try: + one.host.status(host.ID, HOST_STATUS.DISABLED) result['changed'] = True - else: - self.fail(msg="could not disable host") + except Exception as e: + self.fail(msg="Could not disable host, ERROR: " + str(e)) + self.wait_for_host_state(host, [HOST_STATES.DISABLED]) elif current_state in [HOST_STATES.DISABLED]: pass else: @@ -228,11 +237,13 @@ class HostModule(OpenNebulaModule): if current_state == HOST_ABSENT: self.fail(msg='absent host cannot be placed in offline state') elif current_state in [HOST_STATES.MONITORED, HOST_STATES.DISABLED]: - if one.host.status(host.ID, HOST_STATUS.OFFLINE): - self.wait_for_host_state(host, [HOST_STATES.OFFLINE]) + # returns host ID integer + try: + one.host.status(host.ID, HOST_STATUS.OFFLINE) result['changed'] = True - else: - self.fail(msg="could not set host offline") + except Exception as e: + self.fail(msg="Could not set host offline, ERROR: " + str(e)) + self.wait_for_host_state(host, [HOST_STATES.OFFLINE]) elif current_state in [HOST_STATES.OFFLINE]: pass else: @@ -240,10 +251,12 @@ class 
HostModule(OpenNebulaModule): elif desired_state == 'absent': if current_state != HOST_ABSENT: - if one.host.delete(host.ID): + # returns host ID integer + try: + one.host.delete(host.ID) result['changed'] = True - else: - self.fail(msg="could not delete host from cluster") + except Exception as e: + self.fail(msg="Could not delete host from cluster, ERROR: " + str(e)) # if we reach this point we can assume that the host was taken to the desired state @@ -261,17 +274,21 @@ class HostModule(OpenNebulaModule): if self.requires_template_update(host.TEMPLATE, desired_template_changes): # setup the root element so that pyone will generate XML instead of attribute vector desired_template_changes = {"TEMPLATE": desired_template_changes} - if one.host.update(host.ID, desired_template_changes, 1): # merge the template + # merge the template, returns host ID integer + try: + one.host.update(host.ID, desired_template_changes, 1) result['changed'] = True - else: - self.fail(msg="failed to update the host template") + except Exception as e: + self.fail(msg="Failed to update the host template, ERROR: " + str(e)) # the cluster if host.CLUSTER_ID != self.get_parameter('cluster_id'): - if one.cluster.addhost(self.get_parameter('cluster_id'), host.ID): + # returns cluster id in int + try: + one.cluster.addhost(self.get_parameter('cluster_id'), host.ID) result['changed'] = True - else: - self.fail(msg="failed to update the host cluster") + except Exception as e: + self.fail(msg="Failed to update the host cluster, ERROR: " + str(e)) # return self.exit() diff --git a/plugins/modules/one_image.py b/plugins/modules/one_image.py new file mode 100644 index 0000000000..92786fd91d --- /dev/null +++ b/plugins/modules/one_image.py @@ -0,0 +1,626 @@ +#!/usr/bin/python +# Copyright (c) 2018, Milan Ilic +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +# Make coding more python3-ish 
+from __future__ import annotations + +DOCUMENTATION = r""" +module: one_image +short_description: Manages OpenNebula images +description: + - Manages OpenNebula images. +requirements: + - pyone +extends_documentation_fragment: + - community.general.opennebula + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: none +options: + id: + description: + - A O(id) of the image you would like to manage. + type: int + name: + description: + - A O(name) of the image you would like to manage. + - Required if O(create=true). + type: str + state: + description: + - V(present) - state that is used to manage the image. + - V(absent) - delete the image. + - V(cloned) - clone the image. + - V(renamed) - rename the image to the O(new_name). + choices: ["present", "absent", "cloned", "renamed"] + default: present + type: str + enabled: + description: + - Whether the image should be enabled or disabled. + type: bool + new_name: + description: + - A name that is assigned to the existing or new image. + - In the case of cloning, by default O(new_name) is set to the name of the origin image with the prefix 'Copy of'. + type: str + persistent: + description: + - Whether the image should be persistent or non-persistent. + type: bool + version_added: 9.5.0 + create: + description: + - Whether the image should be created if not present. + - This is ignored if O(state=absent). + type: bool + version_added: 10.0.0 + template: + description: + - Use with O(create=true) to specify image template. + type: str + version_added: 10.0.0 + datastore_id: + description: + - Use with O(create=true) to specify datastore for image. + type: int + version_added: 10.0.0 + wait_timeout: + description: + - Seconds to wait until image is ready, deleted or cloned. 
+ type: int + default: 60 + version_added: 10.0.0 +author: + - "Milan Ilic (@ilicmilan)" +""" + +EXAMPLES = r""" +- name: Fetch the IMAGE by id + community.general.one_image: + id: 45 + register: result + +- name: Print the IMAGE properties + ansible.builtin.debug: + var: result + +- name: Rename existing IMAGE + community.general.one_image: + id: 34 + state: renamed + new_name: bar-image + +- name: Disable the IMAGE by id + community.general.one_image: + id: 37 + enabled: false + +- name: Make the IMAGE persistent + community.general.one_image: + id: 37 + persistent: true + +- name: Enable the IMAGE by name + community.general.one_image: + name: bar-image + enabled: true + +- name: Clone the IMAGE by name + community.general.one_image: + name: bar-image + state: cloned + new_name: bar-image-clone + register: result + +- name: Delete the IMAGE by id + community.general.one_image: + id: '{{ result.id }}' + state: absent + +- name: Make sure IMAGE is present + community.general.one_image: + name: myyy-image + state: present + create: true + datastore_id: 100 + template: | + PATH = "/var/tmp/image" + TYPE = "OS" + SIZE = 20512 + FORMAT = "qcow2" + PERSISTENT = "Yes" + DEV_PREFIX = "vd" + +- name: Make sure IMAGE is present with a longer timeout + community.general.one_image: + name: big-image + state: present + create: true + datastore_id: 100 + wait_timeout: 900 + template: |- + PATH = "https://192.0.2.200/repo/tipa_image.raw" + TYPE = "OS" + SIZE = 82048 + FORMAT = "raw" + PERSISTENT = "Yes" + DEV_PREFIX = "vd" +""" + +RETURN = r""" +id: + description: Image ID. + type: int + returned: when O(state=present), O(state=cloned), or O(state=renamed) + sample: 153 +name: + description: Image name. + type: str + returned: when O(state=present), O(state=cloned), or O(state=renamed) + sample: app1 +group_id: + description: Image's group ID. 
+ type: int + returned: when O(state=present), O(state=cloned), or O(state=renamed) + sample: 1 +group_name: + description: Image's group name. + type: str + returned: when O(state=present), O(state=cloned), or O(state=renamed) + sample: one-users +owner_id: + description: Image's owner ID. + type: int + returned: when O(state=present), O(state=cloned), or O(state=renamed) + sample: 143 +owner_name: + description: Image's owner name. + type: str + returned: when O(state=present), O(state=cloned), or O(state=renamed) + sample: ansible-test +state: + description: State of image instance. + type: str + returned: when O(state=present), O(state=cloned), or O(state=renamed) + sample: READY +used: + description: Is image in use. + type: bool + returned: when O(state=present), O(state=cloned), or O(state=renamed) + sample: true +running_vms: + description: Count of running vms that use this image. + type: int + returned: when O(state=present), O(state=cloned), or O(state=renamed) + sample: 7 +permissions: + description: The image's permissions. + type: dict + returned: when O(state=present), O(state=cloned), or O(state=renamed) + version_added: 9.5.0 + contains: + owner_u: + description: The image's owner USAGE permissions. + type: str + sample: 1 + owner_m: + description: The image's owner MANAGE permissions. + type: str + sample: 0 + owner_a: + description: The image's owner ADMIN permissions. + type: str + sample: 0 + group_u: + description: The image's group USAGE permissions. + type: str + sample: 0 + group_m: + description: The image's group MANAGE permissions. + type: str + sample: 0 + group_a: + description: The image's group ADMIN permissions. + type: str + sample: 0 + other_u: + description: The image's other users USAGE permissions. + type: str + sample: 0 + other_m: + description: The image's other users MANAGE permissions. + type: str + sample: 0 + other_a: + description: The image's other users ADMIN permissions. 
+ type: str + sample: 0 + sample: + owner_u: 1 + owner_m: 0 + owner_a: 0 + group_u: 0 + group_m: 0 + group_a: 0 + other_u: 0 + other_m: 0 + other_a: 0 +type: + description: The image's type. + type: str + sample: 0 + returned: when O(state=present), O(state=cloned), or O(state=renamed) + version_added: 9.5.0 +disk_type: + description: The image's format type. + type: str + sample: 0 + returned: when O(state=present), O(state=cloned), or O(state=renamed) + version_added: 9.5.0 +persistent: + description: The image's persistence status (1 means true, 0 means false). + type: int + sample: 1 + returned: when O(state=present), O(state=cloned), or O(state=renamed) + version_added: 9.5.0 +source: + description: The image's source. + type: str + sample: /var/lib/one//datastores/100/somerandomstringxd + returned: when O(state=present), O(state=cloned), or O(state=renamed) +path: + description: The image's filesystem path. + type: str + sample: /var/tmp/hello.qcow2 + returned: when O(state=present), O(state=cloned), or O(state=renamed) + version_added: 9.5.0 +fstype: + description: The image's filesystem type. + type: str + sample: ext4 + returned: when O(state=present), O(state=cloned), or O(state=renamed) + version_added: 9.5.0 +size: + description: The image's size in MegaBytes. + type: int + sample: 10000 + returned: when O(state=present), O(state=cloned), or O(state=renamed) + version_added: 9.5.0 +cloning_ops: + description: The image's cloning operations per second. + type: int + sample: 0 + returned: when O(state=present), O(state=cloned), or O(state=renamed) + version_added: 9.5.0 +cloning_id: + description: The image's cloning ID. + type: int + sample: -1 + returned: when O(state=present), O(state=cloned), or O(state=renamed) + version_added: 9.5.0 +target_snapshot: + description: The image's target snapshot. 
+ type: int + sample: 1 + returned: when O(state=present), O(state=cloned), or O(state=renamed) + version_added: 9.5.0 +datastore_id: + description: The image's datastore ID. + type: int + sample: 100 + returned: when O(state=present), O(state=cloned), or O(state=renamed) + version_added: 9.5.0 +datastore: + description: The image's datastore name. + type: int + sample: image_datastore + returned: when O(state=present), O(state=cloned), or O(state=renamed) + version_added: 9.5.0 +vms: + description: The image's list of VM ID's. + type: list + elements: int + returned: when O(state=present), O(state=cloned), or O(state=renamed) + sample: + - 1 + - 2 + - 3 + version_added: 9.5.0 +clones: + description: The image's list of clones ID's. + type: list + elements: int + returned: when O(state=present), O(state=cloned), or O(state=renamed) + sample: + - 1 + - 2 + - 3 + version_added: 9.5.0 +app_clones: + description: The image's list of app_clones ID's. + type: list + elements: int + returned: when O(state=present), O(state=cloned), or O(state=renamed) + sample: + - 1 + - 2 + - 3 + version_added: 9.5.0 +snapshots: + description: The image's list of snapshots. 
+ type: list + returned: when O(state=present), O(state=cloned), or O(state=renamed) + version_added: 9.5.0 + sample: + - date: 123123 + parent: 1 + size: 10228 + allow_orphans: 1 + children: 0 + active: 1 + name: SampleName +""" + + +from ansible_collections.community.general.plugins.module_utils.opennebula import OpenNebulaModule + + +IMAGE_STATES = ['INIT', 'READY', 'USED', 'DISABLED', 'LOCKED', 'ERROR', 'CLONE', 'DELETE', 'USED_PERS', 'LOCKED_USED', 'LOCKED_USED_PERS'] + + +class ImageModule(OpenNebulaModule): + def __init__(self): + argument_spec = dict( + id=dict(type='int'), + name=dict(type='str'), + state=dict(type='str', choices=['present', 'absent', 'cloned', 'renamed'], default='present'), + enabled=dict(type='bool'), + new_name=dict(type='str'), + persistent=dict(type='bool'), + create=dict(type='bool'), + template=dict(type='str'), + datastore_id=dict(type='int'), + wait_timeout=dict(type='int', default=60), + ) + required_if = [ + ['state', 'renamed', ['id']], + ['create', True, ['template', 'datastore_id', 'name']], + ] + mutually_exclusive = [ + ['id', 'name'], + ] + + OpenNebulaModule.__init__(self, + argument_spec, + supports_check_mode=True, + mutually_exclusive=mutually_exclusive, + required_if=required_if) + + def run(self, one, module, result): + params = module.params + id = params.get('id') + name = params.get('name') + desired_state = params.get('state') + enabled = params.get('enabled') + new_name = params.get('new_name') + persistent = params.get('persistent') + create = params.get('create') + template = params.get('template') + datastore_id = params.get('datastore_id') + wait_timeout = params.get('wait_timeout') + + self.result = {} + + image = self.get_image_instance(id, name) + if not image and desired_state != 'absent': + if create: + self.result = self.create_image(name, template, datastore_id, wait_timeout) + # Using 'if id:' doesn't work properly when id=0 + elif id is not None: + module.fail_json(msg="There is no image with id=" 
+ str(id)) + elif name is not None: + module.fail_json(msg="There is no image with name=" + name) + + if desired_state == 'absent': + self.result = self.delete_image(image, wait_timeout) + else: + if persistent is not None: + self.result = self.change_persistence(image, persistent) + if enabled is not None: + self.result = self.enable_image(image, enabled) + if desired_state == "cloned": + self.result = self.clone_image(image, new_name, wait_timeout) + elif desired_state == "renamed": + self.result = self.rename_image(image, new_name) + + self.exit() + + def get_image(self, predicate): + # Filter -2 means fetch all images user can Use + pool = self.one.imagepool.info(-2, -1, -1, -1) + + for image in pool.IMAGE: + if predicate(image): + return image + + return None + + def get_image_by_name(self, image_name): + return self.get_image(lambda image: (image.NAME == image_name)) + + def get_image_by_id(self, image_id): + return self.get_image(lambda image: (image.ID == image_id)) + + def get_image_instance(self, requested_id, requested_name): + # Using 'if requested_id:' doesn't work properly when requested_id=0 + if requested_id is not None: + return self.get_image_by_id(requested_id) + else: + return self.get_image_by_name(requested_name) + + def create_image(self, image_name, template, datastore_id, wait_timeout): + if not self.module.check_mode: + image_id = self.one.image.allocate("NAME = \"" + image_name + "\"\n" + template, datastore_id) + self.wait_for_ready(image_id, wait_timeout) + image = self.get_image_by_id(image_id) + result = self.get_image_info(image) + + result['changed'] = True + return result + + def wait_for_ready(self, image_id, wait_timeout=60): + import time + start_time = time.time() + + while (time.time() - start_time) < wait_timeout: + image = self.one.image.info(image_id) + state = image.STATE + + if state in [IMAGE_STATES.index('ERROR')]: + self.module.fail_json(msg="Got an ERROR state: " + image.TEMPLATE['ERROR']) + + if state in 
[IMAGE_STATES.index('READY')]: + return True + + time.sleep(1) + self.module.fail_json(msg="Wait timeout has expired!") + + def wait_for_delete(self, image_id, wait_timeout=60): + import time + start_time = time.time() + + while (time.time() - start_time) < wait_timeout: + # It might be already deleted by the time this function is called + try: + image = self.one.image.info(image_id) + except Exception: + check_image = self.get_image_instance(image_id) + if not check_image: + return True + + state = image.STATE + + if state in [IMAGE_STATES.index('DELETE')]: + return True + + time.sleep(1) + + self.module.fail_json(msg="Wait timeout has expired!") + + def enable_image(self, image, enable): + image = self.one.image.info(image.ID) + changed = False + + state = image.STATE + + if state not in [IMAGE_STATES.index('READY'), IMAGE_STATES.index('DISABLED'), IMAGE_STATES.index('ERROR')]: + if enable: + self.module.fail_json(msg="Cannot enable " + IMAGE_STATES[state] + " image!") + else: + self.module.fail_json(msg="Cannot disable " + IMAGE_STATES[state] + " image!") + + if ((enable and state != IMAGE_STATES.index('READY')) or + (not enable and state != IMAGE_STATES.index('DISABLED'))): + changed = True + + if changed and not self.module.check_mode: + self.one.image.enable(image.ID, enable) + + result = self.get_image_info(image) + result['changed'] = changed + + return result + + def change_persistence(self, image, enable): + image = self.one.image.info(image.ID) + changed = False + + state = image.STATE + + if state not in [IMAGE_STATES.index('READY'), IMAGE_STATES.index('DISABLED'), IMAGE_STATES.index('ERROR')]: + if enable: + self.module.fail_json(msg="Cannot enable persistence for " + IMAGE_STATES[state] + " image!") + else: + self.module.fail_json(msg="Cannot disable persistence for " + IMAGE_STATES[state] + " image!") + + if ((enable and state != IMAGE_STATES.index('READY')) or + (not enable and state != IMAGE_STATES.index('DISABLED'))): + changed = True + + if 
changed and not self.module.check_mode: + self.one.image.persistent(image.ID, enable) + + result = self.get_image_info(image) + result['changed'] = changed + + return result + + def clone_image(self, image, new_name, wait_timeout): + if new_name is None: + new_name = "Copy of " + image.NAME + + tmp_image = self.get_image_by_name(new_name) + if tmp_image: + result = self.get_image_info(image) + result['changed'] = False + return result + + if image.STATE == IMAGE_STATES.index('DISABLED'): + self.module.fail_json(msg="Cannot clone DISABLED image") + + if not self.module.check_mode: + new_id = self.one.image.clone(image.ID, new_name) + self.wait_for_ready(new_id, wait_timeout) + image = self.one.image.info(new_id) + + result = self.get_image_info(image) + result['changed'] = True + + return result + + def rename_image(self, image, new_name): + if new_name is None: + self.module.fail_json(msg="'new_name' option has to be specified when the state is 'renamed'") + + if new_name == image.NAME: + result = self.get_image_info(image) + result['changed'] = False + return result + + tmp_image = self.get_image_by_name(new_name) + if tmp_image: + self.module.fail_json(msg="Name '" + new_name + "' is already taken by IMAGE with id=" + str(tmp_image.ID)) + + if not self.module.check_mode: + self.one.image.rename(image.ID, new_name) + + result = self.get_image_info(image) + result['changed'] = True + return result + + def delete_image(self, image, wait_timeout): + if not image: + return {'changed': False} + + if image.RUNNING_VMS > 0: + self.module.fail_json(msg="Cannot delete image. 
There are " + str(image.RUNNING_VMS) + " VMs using it.") + + if not self.module.check_mode: + self.one.image.delete(image.ID) + self.wait_for_delete(image.ID, wait_timeout) + + return {'changed': True} + + +def main(): + ImageModule().run_module() + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/one_image_info.py b/plugins/modules/one_image_info.py new file mode 100644 index 0000000000..37c70c69f1 --- /dev/null +++ b/plugins/modules/one_image_info.py @@ -0,0 +1,367 @@ +#!/usr/bin/python +# Copyright (c) 2018, Milan Ilic +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +# Make coding more python3-ish +from __future__ import annotations + +DOCUMENTATION = r""" +module: one_image_info +short_description: Gather information on OpenNebula images +description: + - Gather information on OpenNebula images. +requirements: + - pyone +extends_documentation_fragment: + - community.general.opennebula + - community.general.attributes + - community.general.attributes.info_module +options: + ids: + description: + - A list of images IDs whose facts you want to gather. + - Module can use integers too. + aliases: ['id'] + type: list + elements: str + name: + description: + - A O(name) of the image whose facts is gathered. + - If the O(name) begins with V(~) the O(name) is used as regex pattern, which restricts the list of images (whose facts + is returned) whose names match specified regex. + - Also, if the O(name) begins with V(~*) case-insensitive matching is performed. + - See examples for more details. 
+ type: str +author: + - "Milan Ilic (@ilicmilan)" + - "Jan Meerkamp (@meerkampdvv)" +""" + +EXAMPLES = r""" +- name: Gather facts about all images + community.general.one_image_info: + register: result + +- name: Print all images facts + ansible.builtin.debug: + msg: result + +- name: Gather facts about an image using ID + community.general.one_image_info: + ids: 123 + +- name: Gather facts about an image using list of ID + community.general.one_image_info: + ids: + - 123 + - 456 + - 789 + - 0 + +- name: Gather facts about an image using the name + community.general.one_image_info: + name: 'foo-image' + register: foo_image + +- name: Gather facts about all IMAGEs whose name matches regex 'app-image-.*' + community.general.one_image_info: + name: '~app-image-.*' + register: app_images + +- name: Gather facts about all IMAGEs whose name matches regex 'foo-image-.*' ignoring cases + community.general.one_image_info: + name: '~*foo-image-.*' + register: foo_images +""" + +RETURN = r""" +images: + description: A list of images info. + type: complex + returned: success + contains: + id: + description: The image's ID. + type: int + sample: 153 + name: + description: The image's name. + type: str + sample: app1 + group_id: + description: The image's group ID. + type: int + sample: 1 + group_name: + description: The image's group name. + type: str + sample: one-users + owner_id: + description: The image's owner ID. + type: int + sample: 143 + owner_name: + description: The image's owner name. + type: str + sample: ansible-test + state: + description: The image's state. + type: str + sample: READY + used: + description: The image's usage status. + type: bool + sample: true + running_vms: + description: The image's count of running vms that use this image. + type: int + sample: 7 + permissions: + description: The image's permissions. + type: dict + version_added: 9.5.0 + contains: + owner_u: + description: The image's owner USAGE permissions. 
+ type: str + sample: 1 + owner_m: + description: The image's owner MANAGE permissions. + type: str + sample: 0 + owner_a: + description: The image's owner ADMIN permissions. + type: str + sample: 0 + group_u: + description: The image's group USAGE permissions. + type: str + sample: 0 + group_m: + description: The image's group MANAGE permissions. + type: str + sample: 0 + group_a: + description: The image's group ADMIN permissions. + type: str + sample: 0 + other_u: + description: The image's other users USAGE permissions. + type: str + sample: 0 + other_m: + description: The image's other users MANAGE permissions. + type: str + sample: 0 + other_a: + description: The image's other users ADMIN permissions. + type: str + sample: 0 + sample: + owner_u: 1 + owner_m: 0 + owner_a: 0 + group_u: 0 + group_m: 0 + group_a: 0 + other_u: 0 + other_m: 0 + other_a: 0 + type: + description: The image's type. + type: int + sample: 0 + version_added: 9.5.0 + disk_type: + description: The image's format type. + type: int + sample: 0 + version_added: 9.5.0 + persistent: + description: The image's persistence status (1 means true, 0 means false). + type: int + sample: 1 + version_added: 9.5.0 + source: + description: The image's source. + type: str + sample: /var/lib/one//datastores/100/somerandomstringxd + version_added: 9.5.0 + path: + description: The image's filesystem path. + type: str + sample: /var/tmp/hello.qcow2 + version_added: 9.5.0 + fstype: + description: The image's filesystem type. + type: str + sample: ext4 + version_added: 9.5.0 + size: + description: The image's size in MegaBytes. + type: int + sample: 10000 + version_added: 9.5.0 + cloning_ops: + description: The image's cloning operations per second. + type: int + sample: 0 + version_added: 9.5.0 + cloning_id: + description: The image's cloning ID. + type: int + sample: -1 + version_added: 9.5.0 + target_snapshot: + description: The image's target snapshot. 
+ type: int + sample: 1 + version_added: 9.5.0 + datastore_id: + description: The image's datastore ID. + type: int + sample: 100 + version_added: 9.5.0 + datastore: + description: The image's datastore name. + type: int + sample: image_datastore + version_added: 9.5.0 + vms: + description: The image's list of VM ID's. + type: list + elements: int + version_added: 9.5.0 + sample: + - 1 + - 2 + - 3 + clones: + description: The image's list of clones ID's. + type: list + elements: int + version_added: 9.5.0 + sample: + - 1 + - 2 + - 3 + app_clones: + description: The image's list of app_clones ID's. + type: list + elements: int + version_added: 9.5.0 + sample: + - 1 + - 2 + - 3 + snapshots: + description: The image's list of snapshots. + type: list + version_added: 9.5.0 + sample: + - date: 123123 + parent: 1 + size: 10228 + allow_orphans: 1 + children: 0 + active: 1 + name: SampleName +""" + + +from ansible_collections.community.general.plugins.module_utils.opennebula import OpenNebulaModule + + +IMAGE_STATES = ['INIT', 'READY', 'USED', 'DISABLED', 'LOCKED', 'ERROR', 'CLONE', 'DELETE', 'USED_PERS', 'LOCKED_USED', 'LOCKED_USED_PERS'] + + +class ImageInfoModule(OpenNebulaModule): + def __init__(self): + argument_spec = dict( + ids=dict(type='list', aliases=['id'], elements='str'), + name=dict(type='str'), + ) + mutually_exclusive = [ + ['ids', 'name'], + ] + + OpenNebulaModule.__init__(self, + argument_spec, + supports_check_mode=True, + mutually_exclusive=mutually_exclusive) + + def run(self, one, module, result): + params = module.params + ids = params.get('ids') + name = params.get('name') + + if ids: + images = self.get_images_by_ids(ids) + elif name: + images = self.get_images_by_name(name) + else: + images = self.get_all_images().IMAGE + + self.result = { + 'images': [self.get_image_info(image) for image in images] + } + + self.exit() + + def get_all_images(self): + pool = self.one.imagepool.info(-2, -1, -1, -1) + # Filter -2 means fetch all images user can Use 
+ + return pool + + def get_images_by_ids(self, ids): + images = [] + pool = self.get_all_images() + + for image in pool.IMAGE: + if str(image.ID) in ids: + images.append(image) + ids.remove(str(image.ID)) + if len(ids) == 0: + break + + if len(ids) > 0: + self.module.fail_json(msg='There is no IMAGE(s) with id(s)=' + ', '.join('{id}'.format(id=str(image_id)) for image_id in ids)) + + return images + + def get_images_by_name(self, name_pattern): + images = [] + pattern = None + + pool = self.get_all_images() + + if name_pattern.startswith('~'): + import re + if name_pattern[1] == '*': + pattern = re.compile(name_pattern[2:], re.IGNORECASE) + else: + pattern = re.compile(name_pattern[1:]) + + for image in pool.IMAGE: + if pattern is not None: + if pattern.match(image.NAME): + images.append(image) + elif name_pattern == image.NAME: + images.append(image) + break + + # if the specific name is indicated + if pattern is None and len(images) == 0: + self.module.fail_json(msg="There is no IMAGE with name=" + name_pattern) + + return images + + +def main(): + ImageInfoModule().run_module() + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/cloud/opennebula/one_service.py b/plugins/modules/one_service.py similarity index 86% rename from plugins/modules/cloud/opennebula/one_service.py rename to plugins/modules/one_service.py index 68f8398f36..78238fd618 100644 --- a/plugins/modules/cloud/opennebula/one_service.py +++ b/plugins/modules/one_service.py @@ -1,124 +1,115 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- +# Copyright (c) 2017, Milan Ilic +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later # Make coding more python3-ish -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations -""" -(c) 2017, Milan Ilic - -This file is part of Ansible - -Ansible is free software: you can 
redistribute it and/or modify -it under the terms of the GNU General Public License as published by -the Free Software Foundation, either version 3 of the License, or -(at your option) any later version. - -Ansible is distributed in the hope that it will be useful, -but WITHOUT ANY WARRANTY; without even the implied warranty of -MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -GNU General Public License for more details. - -You should have received a copy of the GNU General Public License -along with Ansible. If not, see . -""" - -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: one_service short_description: Deploy and manage OpenNebula services description: - - Manage OpenNebula services + - Manage OpenNebula services. +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: none options: api_url: description: - URL of the OpenNebula OneFlow API server. - It is recommended to use HTTPS so that the username/password are not transferred over the network unencrypted. - - If not set then the value of the ONEFLOW_URL environment variable is used. + - If not set then the value of the E(ONEFLOW_URL) environment variable is used. type: str api_username: description: - - Name of the user to login into the OpenNebula OneFlow API server. If not set then the value of the C(ONEFLOW_USERNAME) environment variable is used. + - Name of the user to login into the OpenNebula OneFlow API server. If not set then the value of the E(ONEFLOW_USERNAME) + environment variable is used. type: str api_password: description: - - Password of the user to login into OpenNebula OneFlow API server. If not set then the value of the C(ONEFLOW_PASSWORD) environment variable is used. + - Password of the user to login into OpenNebula OneFlow API server. If not set then the value of the E(ONEFLOW_PASSWORD) + environment variable is used. 
 type: str template_name: description: - - Name of service template to use to create a new instance of a service + - Name of service template to use to create a new instance of a service. type: str template_id: description: - - ID of a service template to use to create a new instance of a service + - ID of a service template to use to create a new instance of a service. type: int service_id: description: - - ID of a service instance that you would like to manage + - ID of a service instance that you would like to manage. type: int service_name: description: - - Name of a service instance that you would like to manage + - Name of a service instance that you would like to manage. type: str unique: description: - - Setting C(unique=yes) will make sure that there is only one service instance running with a name set with C(service_name) when - - instantiating a service from a template specified with C(template_id)/C(template_name). Check examples below. + - Setting O(unique=true) ensures that there is only one service instance running with a name set with O(service_name) + when instantiating a service from a template specified with O(template_id) or O(template_name). Check examples below. type: bool - default: no + default: false state: description: - - C(present) - instantiate a service from a template specified with C(template_id)/C(template_name). - - C(absent) - terminate an instance of a service specified with C(service_id)/C(service_name). + - V(present) - instantiate a service from a template specified with O(template_id) or O(template_name). + - V(absent) - terminate an instance of a service specified with O(service_id) or O(service_name). choices: ["present", "absent"] default: present type: str mode: description: - - Set permission mode of a service instance in octet format, e.g. C(600) to give owner C(use) and C(manage) and nothing to group and others. 
+ - Set permission mode of a service instance in octet format, for example V(0600) to give owner C(use) and C(manage) + and nothing to group and others. type: str owner_id: description: - - ID of the user which will be set as the owner of the service + - ID of the user which is set as the owner of the service. type: int group_id: description: - - ID of the group which will be set as the group of the service + - ID of the group which is set as the group of the service. type: int wait: description: - - Wait for the instance to reach RUNNING state after DEPLOYING or COOLDOWN state after SCALING + - Wait for the instance to reach RUNNING state after DEPLOYING or COOLDOWN state after SCALING. type: bool - default: no + default: false wait_timeout: description: - - How long before wait gives up, in seconds + - How long before wait gives up, in seconds. default: 300 type: int custom_attrs: description: - - Dictionary of key/value custom attributes which will be used when instantiating a new service. + - Dictionary of key/value custom attributes which is used when instantiating a new service. default: {} type: dict role: description: - - Name of the role whose cardinality should be changed + - Name of the role whose cardinality should be changed. type: str cardinality: description: - - Number of VMs for the specified role + - Number of VMs for the specified role. type: int force: description: - - Force the new cardinality even if it is outside the limits + - Force the new cardinality even if it is outside the limits. 
type: bool - default: no + default: false author: - - "Milan Ilic (@ilicmilan)" -''' + - "Milan Ilic (@ilicmilan)" +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: Instantiate a new service community.general.one_service: template_id: 90 @@ -146,7 +137,7 @@ EXAMPLES = ''' community.general.one_service: template_id: 53 service_name: 'foo' - unique: yes + unique: true - name: Delete a service by ID community.general.one_service: @@ -173,7 +164,7 @@ EXAMPLES = ''' - name: Wait service to become RUNNING community.general.one_service: service_id: 112 - wait: yes + wait: true - name: Change role cardinality community.general.one_service: @@ -186,60 +177,60 @@ EXAMPLES = ''' service_id: 112 role: foo cardinality: 7 - wait: yes -''' + wait: true +""" -RETURN = ''' +RETURN = r""" service_id: - description: service id - type: int - returned: success - sample: 153 + description: Service ID. + type: int + returned: success + sample: 153 service_name: - description: service name - type: str - returned: success - sample: app1 + description: Service name. + type: str + returned: success + sample: app1 group_id: - description: service's group id - type: int - returned: success - sample: 1 + description: Service's group ID. + type: int + returned: success + sample: 1 group_name: - description: service's group name - type: str - returned: success - sample: one-users + description: Service's group name. + type: str + returned: success + sample: one-users owner_id: - description: service's owner id - type: int - returned: success - sample: 143 + description: Service's owner ID. + type: int + returned: success + sample: 143 owner_name: - description: service's owner name - type: str - returned: success - sample: ansible-test + description: Service's owner name. + type: str + returned: success + sample: ansible-test state: - description: state of service instance - type: str - returned: success - sample: RUNNING + description: State of service instance. 
+ type: str + returned: success + sample: RUNNING mode: - description: service's mode - type: int - returned: success - sample: 660 + description: Service's mode. + type: int + returned: success + sample: 660 roles: - description: list of dictionaries of roles, each role is described by name, cardinality, state and nodes ids - type: list - returned: success - sample: '[{"cardinality": 1,"name": "foo","state": "RUNNING","ids": [ 123, 456 ]}, - {"cardinality": 2,"name": "bar","state": "RUNNING", "ids": [ 452, 567, 746 ]}]' -''' + description: List of dictionaries of roles, each role is described by name, cardinality, state and nodes IDs. + type: list + returned: success + sample: + - {"cardinality": 1, "name": "foo", "state": "RUNNING", "ids": [123, 456]} + - {"cardinality": 2, "name": "bar", "state": "RUNNING", "ids": [452, 567, 746]} +""" import os -import sys from ansible.module_utils.basic import AnsibleModule from ansible.module_utils.urls import open_url @@ -348,7 +339,7 @@ def get_service_info(module, auth, service): def create_service(module, auth, template_id, service_name, custom_attrs, unique, wait, wait_timeout): # make sure that the values in custom_attrs dict are strings - custom_attrs_with_str = dict((k, str(v)) for k, v in custom_attrs.items()) + custom_attrs_with_str = {k: str(v) for k, v in custom_attrs.items()} data = { "action": { @@ -531,7 +522,7 @@ def create_service_and_operation(module, auth, template_id, service_name, owner_ if unique: service = get_service_by_name(module, auth, service_name) - if not service: + if not service or service["TEMPLATE"]["BODY"]["state"] == "DONE": if not module.check_mode: service = create_service(module, auth, template_id, service_name, custom_attrs, unique, wait, wait_timeout) changed = True @@ -646,7 +637,6 @@ def get_service_id_by_name(module, auth, service_name): def get_connection_info(module): - url = module.params.get('api_url') username = module.params.get('api_username') password = 
module.params.get('api_password') @@ -660,7 +650,7 @@ def get_connection_info(module): if not password: password = os.environ.get('ONEFLOW_PASSWORD') - if not(url and username and password): + if not (url and username and password): module.fail_json(msg="One or more connection parameters (api_url, api_username, api_password) were not specified") from collections import namedtuple diff --git a/plugins/modules/cloud/opennebula/one_template.py b/plugins/modules/one_template.py similarity index 61% rename from plugins/modules/cloud/opennebula/one_template.py rename to plugins/modules/one_template.py index b1d2c69ccf..a279e3a88c 100644 --- a/plugins/modules/cloud/opennebula/one_template.py +++ b/plugins/modules/one_template.py @@ -1,15 +1,13 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- # -# Copyright: (c) 2021, Georg Gadinger -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# Copyright (c) 2021, Jyrki Gadinger +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later # Make coding more python3-ish -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: one_template short_description: Manages OpenNebula templates @@ -20,19 +18,25 @@ requirements: - pyone description: - - "Manages OpenNebula templates." + - Manages OpenNebula templates. +attributes: + check_mode: + support: partial + details: + - Note that check mode always returns C(changed=true) for existing templates, even if the template would not actually + change. + diff_mode: + support: none options: id: description: - - A I(id) of the template you would like to manage. If not set then a - - new template will be created with the given I(name). + - A O(id) of the template you would like to manage. 
If not set then a new template is created with the given O(name). type: int name: description: - - A I(name) of the template you would like to manage. If a template with - - the given name does not exist it will be created, otherwise it will be - - managed by this module. + - A O(name) of the template you would like to manage. If a template with the given name does not exist it is created, + otherwise it is managed by this module. type: str template: description: @@ -40,23 +44,31 @@ options: type: str state: description: - - C(present) - state that is used to manage the template. - - C(absent) - delete the template. + - V(present) - state that is used to manage the template. + - V(absent) - delete the template. choices: ["present", "absent"] default: present type: str - -notes: - - Supports C(check_mode). Note that check mode always returns C(changed=true) for existing templates, even if the template would not actually change. + filter: + description: + - V(user_primary_group) - Resources belonging to the user's primary group. + - V(user) - Resources belonging to the user. + - V(all) - All resources. + - V(user_groups) - Resources belonging to the user and any of his groups. + choices: [user_primary_group, user, all, user_groups] + default: user + type: str + version_added: 10.3.0 extends_documentation_fragment: - community.general.opennebula + - community.general.attributes author: - - "Georg Gadinger (@nilsding)" -''' + - "Jyrki Gadinger (@nilsding)" +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: Fetch the TEMPLATE by id community.general.one_template: id: 6459 @@ -103,44 +115,44 @@ EXAMPLES = ''' community.general.one_template: id: 6459 state: absent -''' +""" -RETURN = ''' +RETURN = r""" id: - description: template id - type: int - returned: when I(state=present) - sample: 153 + description: Template ID. 
+ type: int + returned: when O(state=present) + sample: 153 name: - description: template name - type: str - returned: when I(state=present) - sample: app1 + description: Template name. + type: str + returned: when O(state=present) + sample: app1 template: - description: the parsed template - type: dict - returned: when I(state=present) + description: The parsed template. + type: dict + returned: when O(state=present) group_id: - description: template's group id - type: int - returned: when I(state=present) - sample: 1 + description: Template's group ID. + type: int + returned: when O(state=present) + sample: 1 group_name: - description: template's group name - type: str - returned: when I(state=present) - sample: one-users + description: Template's group name. + type: str + returned: when O(state=present) + sample: one-users owner_id: - description: template's owner id - type: int - returned: when I(state=present) - sample: 143 + description: Template's owner ID. + type: int + returned: when O(state=present) + sample: 143 owner_name: - description: template's owner name - type: str - returned: when I(state=present) - sample: ansible-test -''' + description: Template's owner name. 
+ type: str + returned: when O(state=present) + sample: ansible-test +""" from ansible_collections.community.general.plugins.module_utils.opennebula import OpenNebulaModule @@ -149,10 +161,11 @@ from ansible_collections.community.general.plugins.module_utils.opennebula impor class TemplateModule(OpenNebulaModule): def __init__(self): argument_spec = dict( - id=dict(type='int', required=False), - name=dict(type='str', required=False), + id=dict(type='int'), + name=dict(type='str'), state=dict(type='str', choices=['present', 'absent'], default='present'), - template=dict(type='str', required=False), + template=dict(type='str'), + filter=dict(type='str', choices=['user_primary_group', 'user', 'all', 'user_groups'], default='user'), ) mutually_exclusive = [ @@ -178,10 +191,11 @@ class TemplateModule(OpenNebulaModule): name = params.get('name') desired_state = params.get('state') template_data = params.get('template') + filter = params.get('filter') self.result = {} - template = self.get_template_instance(id, name) + template = self.get_template_instance(id, name, filter) needs_creation = False if not template and desired_state != 'absent': if id: @@ -193,16 +207,19 @@ class TemplateModule(OpenNebulaModule): self.result = self.delete_template(template) else: if needs_creation: - self.result = self.create_template(name, template_data) + self.result = self.create_template(name, template_data, filter) else: - self.result = self.update_template(template, template_data) + self.result = self.update_template(template, template_data, filter) self.exit() - def get_template(self, predicate): - # -3 means "Resources belonging to the user" + def get_template(self, predicate, filter): + # filter was included, for discussions see: + # Issue: https://github.com/ansible-collections/community.general/issues/9278 + # PR: https://github.com/ansible-collections/community.general/pull/9547 # the other two parameters are used for pagination, -1 for both essentially means "return all" - pool 
= self.one.templatepool.info(-3, -1, -1) + filter_values = {'user_primary_group': -4, 'user': -3, 'all': -2, 'user_groups': -1} + pool = self.one.templatepool.info(filter_values[filter], -1, -1) for template in pool.VMTEMPLATE: if predicate(template): @@ -210,17 +227,17 @@ class TemplateModule(OpenNebulaModule): return None - def get_template_by_id(self, template_id): - return self.get_template(lambda template: (template.ID == template_id)) + def get_template_by_id(self, template_id, filter): + return self.get_template(lambda template: (template.ID == template_id), filter) - def get_template_by_name(self, name): - return self.get_template(lambda template: (template.NAME == name)) + def get_template_by_name(self, name, filter): + return self.get_template(lambda template: (template.NAME == name), filter) - def get_template_instance(self, requested_id, requested_name): + def get_template_instance(self, requested_id, requested_name, filter): if requested_id: - return self.get_template_by_id(requested_id) + return self.get_template_by_id(requested_id, filter) else: - return self.get_template_by_name(requested_name) + return self.get_template_by_name(requested_name, filter) def get_template_info(self, template): info = { @@ -235,21 +252,21 @@ class TemplateModule(OpenNebulaModule): return info - def create_template(self, name, template_data): + def create_template(self, name, template_data, filter): if not self.module.check_mode: self.one.template.allocate("NAME = \"" + name + "\"\n" + template_data) - result = self.get_template_info(self.get_template_by_name(name)) + result = self.get_template_info(self.get_template_by_name(name, filter)) result['changed'] = True return result - def update_template(self, template, template_data): + def update_template(self, template, template_data, filter): if not self.module.check_mode: # 0 = replace the whole template self.one.template.update(template.ID, template_data, 0) - result = 
self.get_template_info(self.get_template_by_id(template.ID)) + result = self.get_template_info(self.get_template_by_id(template.ID, filter)) if self.module.check_mode: # Unfortunately it is not easy to detect if the template would have changed, therefore always report a change here. result['changed'] = True diff --git a/plugins/modules/cloud/opennebula/one_vm.py b/plugins/modules/one_vm.py similarity index 70% rename from plugins/modules/cloud/opennebula/one_vm.py rename to plugins/modules/one_vm.py index 86061f73cb..53806cad9b 100644 --- a/plugins/modules/cloud/opennebula/one_vm.py +++ b/plugins/modules/one_vm.py @@ -1,185 +1,161 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- +# Copyright (c) 2017, Milan Ilic +# Copyright (c) 2019, Jan Meerkamp +# Copyright (c) 2025, Tom Paine +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later # Make coding more python3-ish -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations -""" -(c) 2017, Milan Ilic -(c) 2019, Jan Meerkamp - -This file is part of Ansible - -Ansible is free software: you can redistribute it and/or modify -it under the terms of the GNU General Public License as published by -the Free Software Foundation, either version 3 of the License, or -(at your option) any later version. - -Ansible is distributed in the hope that it will be useful, -but WITHOUT ANY WARRANTY; without even the implied warranty of -MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -GNU General Public License for more details. - -You should have received a copy of the GNU General Public License -along with Ansible. If not, see . -""" - -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: one_vm short_description: Creates or terminates OpenNebula instances description: - - Manages OpenNebula instances + - Manages OpenNebula instances. 
requirements: - pyone +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: none options: api_url: description: - URL of the OpenNebula RPC server. - - It is recommended to use HTTPS so that the username/password are not - - transferred over the network unencrypted. - - If not set then the value of the C(ONE_URL) environment variable is used. + - It is recommended to use HTTPS so that the username/password are not transferred over the network unencrypted. + - If not set then the value of the E(ONE_URL) environment variable is used. type: str api_username: description: - - Name of the user to login into the OpenNebula RPC server. If not set - - then the value of the C(ONE_USERNAME) environment variable is used. + - Name of the user to login into the OpenNebula RPC server. If not set then the value of the E(ONE_USERNAME) environment + variable is used. type: str api_password: description: - - Password of the user to login into OpenNebula RPC server. If not set - - then the value of the C(ONE_PASSWORD) environment variable is used. - - if both I(api_username) or I(api_password) are not set, then it will try - - authenticate with ONE auth file. Default path is "~/.one/one_auth". - - Set environment variable C(ONE_AUTH) to override this path. + - Password of the user to login into OpenNebula RPC server. If not set then the value of the E(ONE_PASSWORD) environment + variable is used. if both O(api_username) or O(api_password) are not set, then it tries to authenticate with ONE auth + file. Default path is C(~/.one/one_auth). + - Set environment variable E(ONE_AUTH) to override this path. type: str template_name: description: - - Name of VM template to use to create a new instace + - Name of VM template to use to create a new instance. type: str template_id: description: - - ID of a VM template to use to create a new instance + - ID of a VM template to use to create a new instance. 
type: int vm_start_on_hold: description: - - Set to true to put vm on hold while creating - default: False + - Set to true to put VM on hold while creating. + default: false type: bool instance_ids: description: - - A list of instance ids used for states':' C(absent), C(running), C(rebooted), C(poweredoff) + - 'A list of instance IDs used for states: V(absent), V(running), V(rebooted), V(poweredoff).' aliases: ['ids'] type: list elements: int state: description: - - C(present) - create instances from a template specified with C(template_id)/C(template_name). - - C(running) - run instances - - C(poweredoff) - power-off instances - - C(rebooted) - reboot instances - - C(absent) - terminate instances + - V(present) - create instances from a template specified with C(template_id)/C(template_name). + - V(running) - run instances. + - V(poweredoff) - power-off instances. + - V(rebooted) - reboot instances. + - V(absent) - terminate instances. choices: ["present", "absent", "running", "rebooted", "poweredoff"] default: present type: str hard: description: - - Reboot, power-off or terminate instances C(hard) - default: no + - Reboot, power-off or terminate instances C(hard). + default: false type: bool wait: description: - - Wait for the instance to reach its desired state before returning. Keep - - in mind if you are waiting for instance to be in running state it - - doesn't mean that you will be able to SSH on that machine only that - - boot process have started on that instance, see 'wait_for' example for - - details. - default: yes + - Wait for the instance to reach its desired state before returning. Keep in mind if you are waiting for instance to + be in running state it does not mean that you are able to SSH on that machine only that boot process have started + on that instance. See the example using the M(ansible.builtin.wait_for) module for details. 
+ default: true type: bool wait_timeout: description: - - How long before wait gives up, in seconds + - How long before wait gives up, in seconds. default: 300 type: int attributes: description: - - A dictionary of key/value attributes to add to new instances, or for - - setting C(state) of instances with these attributes. + - A dictionary of key/value attributes to add to new instances, or for setting C(state) of instances with these attributes. - Keys are case insensitive and OpenNebula automatically converts them to upper case. - - Be aware C(NAME) is a special attribute which sets the name of the VM when it's deployed. - - C(#) character(s) can be appended to the C(NAME) and the module will automatically add - - indexes to the names of VMs. - - For example':' C(NAME':' foo-###) would create VMs with names C(foo-000), C(foo-001),... - - When used with C(count_attributes) and C(exact_count) the module will - - match the base name without the index part. + - Be aware V(NAME) is a special attribute which sets the name of the VM when it is deployed. + - C(#) character(s) can be appended to the C(NAME) and the module automatically adds indexes to the names of VMs. + - 'For example: V(NAME: foo-###) would create VMs with names V(foo-000), V(foo-001),...' + - When used with O(count_attributes) and O(exact_count) the module matches the base name without the index part. default: {} type: dict labels: description: - - A list of labels to associate with new instances, or for setting - - C(state) of instances with these labels. + - A list of labels to associate with new instances, or for setting C(state) of instances with these labels. default: [] type: list elements: str count_attributes: description: - - A dictionary of key/value attributes that can only be used with - - C(exact_count) to determine how many nodes based on a specific - - attributes criteria should be deployed. This can be expressed in - - multiple ways and is shown in the EXAMPLES section. 
+ - A dictionary of key/value attributes that can only be used with O(exact_count) to determine how many nodes based on + a specific attributes criteria should be deployed. This can be expressed in multiple ways and is shown in the EXAMPLES + section. type: dict count_labels: description: - - A list of labels that can only be used with C(exact_count) to determine - - how many nodes based on a specific labels criteria should be deployed. - - This can be expressed in multiple ways and is shown in the EXAMPLES - - section. + - A list of labels that can only be used with O(exact_count) to determine how many nodes based on a specific labels + criteria should be deployed. This can be expressed in multiple ways and is shown in the EXAMPLES section. type: list elements: str count: description: - - Number of instances to launch + - Number of instances to launch. default: 1 type: int exact_count: description: - - Indicates how many instances that match C(count_attributes) and - - C(count_labels) parameters should be deployed. Instances are either - - created or terminated based on this value. - - NOTE':' Instances with the least IDs will be terminated first. + - Indicates how many instances that match O(count_attributes) and O(count_labels) parameters should be deployed. Instances + are either created or terminated based on this value. + - B(NOTE:) Instances with the least IDs are terminated first. type: int mode: description: - - Set permission mode of the instance in octet format, e.g. C(600) to give owner C(use) and C(manage) and nothing to group and others. + - Set permission mode of the instance in octet format, for example V(0600) to give owner C(use) and C(manage) and nothing + to group and others. type: str owner_id: description: - - ID of the user which will be set as the owner of the instance + - ID of the user which is set as the owner of the instance. 
type: int group_id: description: - - ID of the group which will be set as the group of the instance + - ID of the group which is set as the group of the instance. type: int memory: description: - - The size of the memory for new instances (in MB, GB, ...) + - The size of the memory for new instances (in MB, GB, ..). type: str disk_size: description: - The size of the disk created for new instances (in MB, GB, TB,...). - - NOTE':' If The Template hats Multiple Disks the Order of the Sizes is - - matched against the order specified in C(template_id)/C(template_name). + - B(NOTE:) If The Template hats Multiple Disks the Order of the Sizes is matched against the order specified in O(template_id)/O(template_name). type: list elements: str cpu: description: - - Percentage of CPU divided by 100 required for the new instance. Half a - - processor is written 0.5. + - Percentage of CPU divided by 100 required for the new instance. Half a processor is written 0.5. type: float vcpu: description: - - Number of CPUs (cores) new VM will have. + - Number of CPUs (cores) the new VM uses. type: int networks: description: @@ -192,33 +168,52 @@ options: - Creates an image from a VM disk. - It is a dictionary where you have to specify C(name) of the new image. - Optionally you can specify C(disk_id) of the disk you want to save. By default C(disk_id) is 0. - - I(NOTE)':' This operation will only be performed on the first VM (if more than one VM ID is passed) - - and the VM has to be in the C(poweredoff) state. - - Also this operation will fail if an image with specified C(name) already exists. + - B(NOTE:) This operation is only performed on the first VM (if more than one VM ID is passed) and the VM has to be + in the C(poweredoff) state. + - Also this operation fails if an image with specified C(name) already exists. type: dict persistent: description: - Create a private persistent copy of the template plus any image defined in DISK, and instantiate that copy. 
- default: NO + default: false type: bool version_added: '0.2.0' datastore_id: description: - - Name of Datastore to use to create a new instace + - Name of Datastore to use to create a new instance. version_added: '0.2.0' type: int datastore_name: description: - - Name of Datastore to use to create a new instace + - Name of Datastore to use to create a new instance. version_added: '0.2.0' type: str + updateconf: + description: + - When O(instance_ids) is provided, updates running VMs with the C(updateconf) API call. + - When new VMs are being created, emulates the C(updateconf) API call using direct template merge. + - Allows for complete modifications of the C(CONTEXT) attribute. + - 'Supported attributes include:' + - B(BACKUP_CONFIG:) V(BACKUP_VOLATILE), V(FS_FREEZE), V(INCREMENT_MODE), V(KEEP_LAST), V(MODE); + - B(CONTEXT:) (Any value, except V(ETH*). Variable substitutions are made); + - B(CPU_MODEL:) V(FEATURES), V(MODEL); + - B(FEATURES:) V(ACPI), V(APIC), V(GUEST_AGENT), V(HYPERV), V(IOTHREADS), V(LOCALTIME), V(PAE), V(VIRTIO_BLK_QUEUES), + V(VIRTIO_SCSI_QUEUES); + - B(GRAPHICS:) V(COMMAND), V(KEYMAP), V(LISTEN), V(PASSWD), V(PORT), V(TYPE); + - B(INPUT:) V(BUS), V(TYPE); + - B(OS:) V(ARCH), V(BOOT), V(BOOTLOADER), V(FIRMWARE), V(INITRD), V(KERNEL), V(KERNEL_CMD), V(MACHINE), V(ROOT), V(SD_DISK_BUS), + V(UUID); + - B(RAW:) V(DATA), V(DATA_VMX), V(TYPE), V(VALIDATE); + - B(VIDEO:) V(ATS), V(IOMMU), V(RESOLUTION), V(TYPE), V(VRAM). 
+ type: dict + version_added: 6.3.0 author: - - "Milan Ilic (@ilicmilan)" - - "Jan Meerkamp (@meerkampdvv)" -''' + - "Milan Ilic (@ilicmilan)" + - "Jan Meerkamp (@meerkampdvv)" +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: Create a new instance community.general.one_vm: template_id: 90 @@ -248,7 +243,7 @@ EXAMPLES = ''' - name: Deploy a new VM as persistent community.general.one_vm: template_id: 90 - persistent: yes + persistent: true - name: Change VM's permissions to 640 community.general.one_vm: @@ -358,8 +353,9 @@ EXAMPLES = ''' register: vm - name: Wait for SSH to come up - ansible.builtin.wait_for_connection: - delegate_to: '{{ vm.instances[0].networks[0].ip }}' + ansible.builtin.wait_for: + port: 22 + host: '{{ vm.instances[0].networks[0].ip }}' - name: Terminate VMs by ids community.general.one_vm: @@ -419,219 +415,265 @@ EXAMPLES = ''' disk_saveas: name: bar-image disk_id: 1 -''' -RETURN = ''' +- name: "Deploy 2 new instances with a custom 'start script'" + community.general.one_vm: + template_name: app_template + count: 2 + updateconf: + CONTEXT: + START_SCRIPT: ip r r 169.254.16.86/32 dev eth0 + +- name: "Add a custom 'start script' to a running VM" + community.general.one_vm: + instance_ids: 351 + updateconf: + CONTEXT: + START_SCRIPT: ip r r 169.254.16.86/32 dev eth0 + +- name: "Update SSH public keys inside the VM's context" + community.general.one_vm: + instance_ids: 351 + updateconf: + CONTEXT: + SSH_PUBLIC_KEY: |- + ssh-rsa ... + ssh-ed25519 ... +""" + +RETURN = r""" instances_ids: - description: a list of instances ids whose state is changed or which are fetched with C(instance_ids) option. - type: list - returned: success - sample: [ 1234, 1235 ] + description: A list of instances IDs whose state is changed or which are fetched with O(instance_ids) option. + type: list + returned: success + sample: [1234, 1235] instances: - description: a list of instances info whose state is changed or which are fetched with C(instance_ids) option. 
- type: complex - returned: success - contains: - vm_id: - description: vm id - type: int - sample: 153 - vm_name: - description: vm name - type: str - sample: foo - template_id: - description: vm's template id - type: int - sample: 153 - group_id: - description: vm's group id - type: int - sample: 1 - group_name: - description: vm's group name - type: str - sample: one-users - owner_id: - description: vm's owner id - type: int - sample: 143 - owner_name: - description: vm's owner name - type: str - sample: app-user - mode: - description: vm's mode - type: str - returned: success - sample: 660 - state: - description: state of an instance - type: str - sample: ACTIVE - lcm_state: - description: lcm state of an instance that is only relevant when the state is ACTIVE - type: str - sample: RUNNING - cpu: - description: Percentage of CPU divided by 100 - type: float - sample: 0.2 - vcpu: - description: Number of CPUs (cores) - type: int - sample: 2 - memory: - description: The size of the memory in MB - type: str - sample: 4096 MB - disk_size: - description: The size of the disk in MB - type: str - sample: 20480 MB - networks: - description: a list of dictionaries with info about IP, NAME, MAC, SECURITY_GROUPS for each NIC - type: list - sample: [ - { - "ip": "10.120.5.33", - "mac": "02:00:0a:78:05:21", - "name": "default-test-private", - "security_groups": "0,10" - }, - { - "ip": "10.120.5.34", - "mac": "02:00:0a:78:05:22", - "name": "default-test-private", - "security_groups": "0" - } - ] - uptime_h: - description: Uptime of the instance in hours - type: int - sample: 35 - labels: - description: A list of string labels that are associated with the instance - type: list - sample: [ - "foo", - "spec-label" - ] - attributes: - description: A dictionary of key/values attributes that are associated with the instance - type: dict - sample: { - "HYPERVISOR": "kvm", - "LOGO": "images/logos/centos.png", - "TE_GALAXY": "bar", - "USER_INPUTS": null - } + description: A list of 
instances info whose state is changed or which are fetched with O(instance_ids) option. + type: complex + returned: success + contains: + vm_id: + description: VM ID. + type: int + sample: 153 + vm_name: + description: VM name. + type: str + sample: foo + template_id: + description: VM's template ID. + type: int + sample: 153 + group_id: + description: VM's group ID. + type: int + sample: 1 + group_name: + description: VM's group name. + type: str + sample: one-users + owner_id: + description: VM's owner ID. + type: int + sample: 143 + owner_name: + description: VM's owner name. + type: str + sample: app-user + mode: + description: VM's mode. + type: str + returned: success + sample: 660 + state: + description: State of an instance. + type: str + sample: ACTIVE + lcm_state: + description: Lcm state of an instance that is only relevant when the state is ACTIVE. + type: str + sample: RUNNING + cpu: + description: Percentage of CPU divided by 100. + type: float + sample: 0.2 + vcpu: + description: Number of CPUs (cores). + type: int + sample: 2 + memory: + description: The size of the memory in MB. + type: str + sample: 4096 MB + disk_size: + description: The size of the disk in MB. + type: str + sample: 20480 MB + networks: + description: A list of dictionaries with info about IP, NAME, MAC, SECURITY_GROUPS for each NIC. + type: list + sample: + [ + { + "ip": "10.120.5.33", + "mac": "02:00:0a:78:05:21", + "name": "default-test-private", + "security_groups": "0,10" + }, + { + "ip": "10.120.5.34", + "mac": "02:00:0a:78:05:22", + "name": "default-test-private", + "security_groups": "0" + } + ] + uptime_h: + description: Uptime of the instance in hours. + type: int + sample: 35 + labels: + description: A list of string labels that are associated with the instance. + type: list + sample: ["foo", "spec-label"] + attributes: + description: A dictionary of key/values attributes that are associated with the instance. 
+ type: dict + sample: + { + "HYPERVISOR": "kvm", + "LOGO": "images/logos/centos.png", + "TE_GALAXY": "bar", + "USER_INPUTS": null + } + updateconf: + description: A dictionary of key/values attributes that are set with the updateconf API call. + type: dict + version_added: 6.3.0 + sample: + { + "OS": { + "ARCH": "x86_64" + }, + "CONTEXT": { + "START_SCRIPT": "ip r r 169.254.16.86/32 dev eth0", + "SSH_PUBLIC_KEY": "ssh-rsa ...\\nssh-ed25519 ..." + } + } tagged_instances: - description: - - A list of instances info based on a specific attributes and/or - - labels that are specified with C(count_attributes) and C(count_labels) - - options. - type: complex - returned: success - contains: - vm_id: - description: vm id - type: int - sample: 153 - vm_name: - description: vm name - type: str - sample: foo - template_id: - description: vm's template id - type: int - sample: 153 - group_id: - description: vm's group id - type: int - sample: 1 - group_name: - description: vm's group name - type: str - sample: one-users - owner_id: - description: vm's user id - type: int - sample: 143 - owner_name: - description: vm's user name - type: str - sample: app-user - mode: - description: vm's mode - type: str - returned: success - sample: 660 - state: - description: state of an instance - type: str - sample: ACTIVE - lcm_state: - description: lcm state of an instance that is only relevant when the state is ACTIVE - type: str - sample: RUNNING - cpu: - description: Percentage of CPU divided by 100 - type: float - sample: 0.2 - vcpu: - description: Number of CPUs (cores) - type: int - sample: 2 - memory: - description: The size of the memory in MB - type: str - sample: 4096 MB - disk_size: - description: The size of the disk in MB - type: list - sample: [ - "20480 MB", - "10240 MB" - ] - networks: - description: a list of dictionaries with info about IP, NAME, MAC, SECURITY_GROUPS for each NIC - type: list - sample: [ - { - "ip": "10.120.5.33", - "mac": "02:00:0a:78:05:21", - "name": 
"default-test-private", - "security_groups": "0,10" - }, - { - "ip": "10.120.5.34", - "mac": "02:00:0a:78:05:22", - "name": "default-test-private", - "security_groups": "0" - } - ] - uptime_h: - description: Uptime of the instance in hours - type: int - sample: 35 - labels: - description: A list of string labels that are associated with the instance - type: list - sample: [ - "foo", - "spec-label" - ] - attributes: - description: A dictionary of key/values attributes that are associated with the instance - type: dict - sample: { - "HYPERVISOR": "kvm", - "LOGO": "images/logos/centos.png", - "TE_GALAXY": "bar", - "USER_INPUTS": null - } -''' + description: + - A list of instances info based on a specific attributes and/or labels that are specified with O(count_attributes) and + O(count_labels) options. + type: complex + returned: success + contains: + vm_id: + description: VM ID. + type: int + sample: 153 + vm_name: + description: VM name. + type: str + sample: foo + template_id: + description: VM's template ID. + type: int + sample: 153 + group_id: + description: VM's group ID. + type: int + sample: 1 + group_name: + description: VM's group name. + type: str + sample: one-users + owner_id: + description: VM's user ID. + type: int + sample: 143 + owner_name: + description: VM's user name. + type: str + sample: app-user + mode: + description: VM's mode. + type: str + returned: success + sample: 660 + state: + description: State of an instance. + type: str + sample: ACTIVE + lcm_state: + description: Lcm state of an instance that is only relevant when the state is ACTIVE. + type: str + sample: RUNNING + cpu: + description: Percentage of CPU divided by 100. + type: float + sample: 0.2 + vcpu: + description: Number of CPUs (cores). + type: int + sample: 2 + memory: + description: The size of the memory in MB. + type: str + sample: 4096 MB + disk_size: + description: The size of the disk in MB. 
+ type: list + sample: ["20480 MB", "10240 MB"] + networks: + description: A list of dictionaries with info about IP, NAME, MAC, SECURITY_GROUPS for each NIC. + type: list + sample: + [ + { + "ip": "10.120.5.33", + "mac": "02:00:0a:78:05:21", + "name": "default-test-private", + "security_groups": "0,10" + }, + { + "ip": "10.120.5.34", + "mac": "02:00:0a:78:05:22", + "name": "default-test-private", + "security_groups": "0" + } + ] + uptime_h: + description: Uptime of the instance in hours. + type: int + sample: 35 + labels: + description: A list of string labels that are associated with the instance. + type: list + sample: ["foo", "spec-label"] + attributes: + description: A dictionary of key/values attributes that are associated with the instance. + type: dict + sample: + { + "HYPERVISOR": "kvm", + "LOGO": "images/logos/centos.png", + "TE_GALAXY": "bar", + "USER_INPUTS": null + } + updateconf: + description: A dictionary of key/values attributes that are set with the updateconf API call. + type: dict + version_added: 6.3.0 + sample: + { + "OS": { + "ARCH": "x86_64" + }, + "CONTEXT": { + "START_SCRIPT": "ip r r 169.254.16.86/32 dev eth0", + "SSH_PUBLIC_KEY": "ssh-rsa ...\\nssh-ed25519 ..." 
+ } + } +""" try: import pyone @@ -639,9 +681,56 @@ try: except ImportError: HAS_PYONE = False -from ansible.module_utils.basic import AnsibleModule + import os +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.common.dict_transformations import dict_merge + +from ansible_collections.community.general.plugins.module_utils.opennebula import flatten, render + + +# Updateconf attributes documentation: https://docs.opennebula.io/6.10/integration_and_development/system_interfaces/api.html#one-vm-updateconf +UPDATECONF_ATTRIBUTES = { + "OS": ["ARCH", "MACHINE", "KERNEL", "INITRD", "BOOTLOADER", "BOOT", "SD_DISK_BUS", "UUID", "FIRMWARE"], + "CPU_MODEL": ["MODEL", "FEATURES"], + "FEATURES": ["ACPI", "PAE", "APIC", "LOCALTIME", "HYPERV", "GUEST_AGENT", "VIRTIO_BLK_QUEUES", "VIRTIO_SCSI_QUEUES", "IOTHREADS"], + "INPUT": ["TYPE", "BUS"], + "GRAPHICS": ["TYPE", "LISTEN", "PORT", "PASSWD", "KEYMAP", "COMMAND"], + "VIDEO": ["ATS", "IOMMU", "RESOLUTION", "TYPE", "VRAM"], + "RAW": ["DATA", "DATA_VMX", "TYPE", "VALIDATE"], + "CONTEXT": [], + "BACKUP_CONFIG": ["FS_FREEZE", "KEEP_LAST", "BACKUP_VOLATILE", "MODE", "INCREMENT_MODE"], +} + + +def check_updateconf(module, to_check): + '''Checks if attributes are compatible with one.vm.updateconf API call.''' + for attr, subattributes in to_check.items(): + if attr not in UPDATECONF_ATTRIBUTES: + module.fail_json(msg="'{0:}' is not a valid VM attribute.".format(attr)) + if not UPDATECONF_ATTRIBUTES[attr]: + continue + for subattr in subattributes: + if subattr not in UPDATECONF_ATTRIBUTES[attr]: + module.fail_json(msg="'{0:}' is not a valid VM subattribute of '{1:}'".format(subattr, attr)) + + +def parse_updateconf(vm_template): + '''Extracts 'updateconf' attributes from a VM template.''' + updateconf = {} + for attr, subattributes in vm_template.items(): + if attr not in UPDATECONF_ATTRIBUTES: + continue + tmp = {} + for subattr, value in subattributes.items(): + if UPDATECONF_ATTRIBUTES[attr] and subattr 
not in UPDATECONF_ATTRIBUTES[attr]: + continue + tmp[subattr] = value + if tmp: + updateconf[attr] = tmp + return updateconf + def get_template(module, client, predicate): @@ -783,6 +872,8 @@ def get_vm_info(client, vm): vm_labels, vm_attributes = get_vm_labels_and_attributes_dict(client, vm.ID) + updateconf = parse_updateconf(vm.TEMPLATE) + info = { 'template_id': int(vm.TEMPLATE['TEMPLATE_ID']), 'vm_id': vm.ID, @@ -801,7 +892,8 @@ def get_vm_info(client, vm): 'uptime_h': int(vm_uptime), 'attributes': vm_attributes, 'mode': permissions_str, - 'labels': vm_labels + 'labels': vm_labels, + 'updateconf': updateconf, } return info @@ -860,6 +952,28 @@ def set_vm_ownership(module, client, vms, owner_id, group_id): return changed +def update_vm(module, client, vm, updateconf_dict): + changed = False + if not updateconf_dict: + return changed + + before = client.vm.info(vm.ID).TEMPLATE + + client.vm.updateconf(vm.ID, render(updateconf_dict), 1) # 1: Merge new template with the existing one. + + after = client.vm.info(vm.ID).TEMPLATE + + changed = before != after + return changed + + +def update_vms(module, client, vms, *args): + changed = False + for vm in vms: + changed = update_vm(module, client, vm, *args) or changed + return changed + + def get_size_in_MB(module, size_str): SYMBOLS = ['B', 'KB', 'MB', 'GB', 'TB'] @@ -887,81 +1001,46 @@ def get_size_in_MB(module, size_str): return size_in_MB -def create_disk_str(module, client, template_id, disk_size_list): - - if not disk_size_list: - return '' - - template = client.template.info(template_id) - if isinstance(template.TEMPLATE['DISK'], list): - # check if the number of disks is correct - if len(template.TEMPLATE['DISK']) != len(disk_size_list): - module.fail_json(msg='This template has ' + str(len(template.TEMPLATE['DISK'])) + ' disks but you defined ' + str(len(disk_size_list))) - result = '' - index = 0 - for DISKS in template.TEMPLATE['DISK']: - disk = {} - diskresult = '' - # Get all info about existed disk e.g. 
IMAGE_ID,... - for key, value in DISKS.items(): - disk[key] = value - # copy disk attributes if it is not the size attribute - diskresult += 'DISK = [' + ','.join('{key}="{val}"'.format(key=key, val=val) for key, val in disk.items() if key != 'SIZE') - # Set the Disk Size - diskresult += ', SIZE=' + str(int(get_size_in_MB(module, disk_size_list[index]))) + ']\n' - result += diskresult - index += 1 - else: - if len(disk_size_list) > 1: - module.fail_json(msg='This template has one disk but you defined ' + str(len(disk_size_list))) - disk = {} - # Get all info about existed disk e.g. IMAGE_ID,... - for key, value in template.TEMPLATE['DISK'].items(): - disk[key] = value - # copy disk attributes if it is not the size attribute - result = 'DISK = [' + ','.join('{key}="{val}"'.format(key=key, val=val) for key, val in disk.items() if key != 'SIZE') - # Set the Disk Size - result += ', SIZE=' + str(int(get_size_in_MB(module, disk_size_list[0]))) + ']\n' - - return result - - -def create_attributes_str(attributes_dict, labels_list): - - attributes_str = '' - - if labels_list: - attributes_str += 'LABELS="' + ','.join('{label}'.format(label=label) for label in labels_list) + '"\n' - if attributes_dict: - attributes_str += '\n'.join('{key}="{val}"'.format(key=key.upper(), val=val) for key, val in attributes_dict.items()) + '\n' - - return attributes_str - - -def create_nics_str(network_attrs_list): - nics_str = '' - - for network in network_attrs_list: - # Packing key-value dict in string with format key="value", key="value" - network_str = ','.join('{key}="{val}"'.format(key=key, val=val) for key, val in network.items()) - nics_str = nics_str + 'NIC = [' + network_str + ']\n' - - return nics_str - - -def create_vm(module, client, template_id, attributes_dict, labels_list, disk_size, network_attrs_list, vm_start_on_hold, vm_persistent): - +def create_vm(module, client, template_id, attributes_dict, labels_list, disk_size, network_attrs_list, vm_start_on_hold, vm_persistent, 
updateconf_dict): if attributes_dict: vm_name = attributes_dict.get('NAME', '') - disk_str = create_disk_str(module, client, template_id, disk_size) - vm_extra_template_str = create_attributes_str(attributes_dict, labels_list) + create_nics_str(network_attrs_list) + disk_str + template = client.template.info(template_id).TEMPLATE + + disk_count = len(flatten(template.get('DISK', []))) + if disk_size: + size_count = len(flatten(disk_size)) + # check if the number of disks is correct + if disk_count != size_count: + module.fail_json(msg='This template has ' + str(disk_count) + ' disks but you defined ' + str(size_count)) + + vm_extra_template = dict_merge(template or {}, attributes_dict or {}) + vm_extra_template = dict_merge(vm_extra_template, { + 'LABELS': ','.join(labels_list), + 'NIC': flatten(network_attrs_list, extract=True), + 'DISK': flatten([ + disk if not size else dict_merge(disk, { + 'SIZE': str(int(get_size_in_MB(module, size))), + }) + for disk, size in zip( + flatten(template.get('DISK', [])), + flatten(disk_size or [None] * disk_count), + ) + if disk is not None + ], extract=True) + }) + vm_extra_template = dict_merge(vm_extra_template, updateconf_dict or {}) + try: - vm_id = client.template.instantiate(template_id, vm_name, vm_start_on_hold, vm_extra_template_str, vm_persistent) + vm_id = client.template.instantiate(template_id, + vm_name, + vm_start_on_hold, + render(vm_extra_template), + vm_persistent) except pyone.OneException as e: module.fail_json(msg=str(e)) - vm = get_vm_by_id(client, vm_id) + vm = get_vm_by_id(client, vm_id) return get_vm_info(client, vm) @@ -986,7 +1065,7 @@ def get_vm_labels_and_attributes_dict(client, vm_id): if key != 'LABELS': attrs_dict[key] = value else: - if key is not None: + if key is not None and value is not None: labels_list = value.split(',') return labels_list, attrs_dict @@ -1044,8 +1123,10 @@ def get_all_vms_by_attributes(client, attributes_dict, labels_list): return vm_list -def create_count_of_vms( - 
module, client, template_id, count, attributes_dict, labels_list, disk_size, network_attrs_list, wait, wait_timeout, vm_start_on_hold, vm_persistent): +def create_count_of_vms(module, client, + template_id, count, + attributes_dict, labels_list, disk_size, network_attrs_list, + wait, wait_timeout, vm_start_on_hold, vm_persistent, updateconf_dict): new_vms_list = [] vm_name = '' @@ -1063,7 +1144,7 @@ def create_count_of_vms( base_name = vm_name[:len(vm_name) - num_sign_cnt] vm_name = base_name # Make list which contains used indexes in format ['000', '001',...] - vm_filled_indexes_list = list((vm.NAME[len(base_name):].zfill(num_sign_cnt)) for vm in vm_list) + vm_filled_indexes_list = [vm.NAME[len(base_name):].zfill(num_sign_cnt) for vm in vm_list] while count > 0: new_vm_name = vm_name @@ -1074,7 +1155,9 @@ def create_count_of_vms( new_vm_name += next_index # Update NAME value in the attributes in case there is index attributes_dict['NAME'] = new_vm_name - new_vm_dict = create_vm(module, client, template_id, attributes_dict, labels_list, disk_size, network_attrs_list, vm_start_on_hold, vm_persistent) + new_vm_dict = create_vm(module, client, + template_id, attributes_dict, labels_list, disk_size, network_attrs_list, + vm_start_on_hold, vm_persistent, updateconf_dict) new_vm_id = new_vm_dict.get('vm_id') new_vm = get_vm_by_id(client, new_vm_id) new_vms_list.append(new_vm) @@ -1092,9 +1175,10 @@ def create_count_of_vms( return True, new_vms_list, [] -def create_exact_count_of_vms(module, client, template_id, exact_count, attributes_dict, count_attributes_dict, - labels_list, count_labels_list, disk_size, network_attrs_list, hard, wait, wait_timeout, vm_start_on_hold, vm_persistent): - +def create_exact_count_of_vms(module, client, + template_id, exact_count, attributes_dict, count_attributes_dict, + labels_list, count_labels_list, disk_size, network_attrs_list, + hard, wait, wait_timeout, vm_start_on_hold, vm_persistent, updateconf_dict): vm_list = 
get_all_vms_by_attributes(client, count_attributes_dict, count_labels_list) vm_count_diff = exact_count - len(vm_list) @@ -1111,7 +1195,7 @@ def create_exact_count_of_vms(module, client, template_id, exact_count, attribut # Add more VMs changed, instances_list, tagged_instances = create_count_of_vms(module, client, template_id, vm_count_diff, attributes_dict, labels_list, disk_size, network_attrs_list, wait, wait_timeout, - vm_start_on_hold, vm_persistent) + vm_start_on_hold, vm_persistent, updateconf_dict) tagged_instances_list += instances_list elif vm_count_diff < 0: @@ -1308,7 +1392,7 @@ def check_name_attribute(module, attributes): if attributes.get("NAME"): import re if re.match(r'^[^#]+#*$', attributes.get("NAME")) is None: - module.fail_json(msg="Ilegal 'NAME' attribute: '" + attributes.get("NAME") + + module.fail_json(msg="Illegal 'NAME' attribute: '" + attributes.get("NAME") + "' .Signs '#' are allowed only at the end of the name and the name cannot contain only '#'.") @@ -1414,7 +1498,8 @@ def main(): "labels": {"default": [], "type": "list", "elements": "str"}, "count_labels": {"required": False, "type": "list", "elements": "str"}, "disk_saveas": {"type": "dict"}, - "persistent": {"default": False, "type": "bool"} + "persistent": {"default": False, "type": "bool"}, + "updateconf": {"type": "dict"}, } module = AnsibleModule(argument_spec=fields, @@ -1468,6 +1553,7 @@ def main(): count_labels = params.get('count_labels') disk_saveas = params.get('disk_saveas') persistent = params.get('persistent') + updateconf = params.get('updateconf') if not (auth.username and auth.password): module.warn("Credentials missing") @@ -1475,17 +1561,20 @@ def main(): one_client = pyone.OneServer(auth.url, session=auth.username + ':' + auth.password) if attributes: - attributes = dict((key.upper(), value) for key, value in attributes.items()) + attributes = {key.upper(): value for key, value in attributes.items()} check_attributes(module, attributes) if count_attributes: - 
count_attributes = dict((key.upper(), value) for key, value in count_attributes.items()) + count_attributes = {key.upper(): value for key, value in count_attributes.items()} if not attributes: import copy module.warn('When you pass `count_attributes` without `attributes` option when deploying, `attributes` option will have same values implicitly.') attributes = copy.copy(count_attributes) check_attributes(module, count_attributes) + if updateconf: + check_updateconf(module, updateconf) + if count_labels and not labels: module.warn('When you pass `count_labels` without `labels` option when deploying, `labels` option will have same values implicitly.') labels = count_labels @@ -1545,13 +1634,13 @@ def main(): # Deploy an exact count of VMs changed, instances_list, tagged_instances_list = create_exact_count_of_vms(module, one_client, template_id, exact_count, attributes, count_attributes, labels, count_labels, disk_size, - networks, hard, wait, wait_timeout, put_vm_on_hold, persistent) + networks, hard, wait, wait_timeout, put_vm_on_hold, persistent, updateconf) vms = tagged_instances_list elif template_id is not None and state == 'present': # Deploy count VMs changed, instances_list, tagged_instances_list = create_count_of_vms(module, one_client, template_id, count, attributes, labels, disk_size, networks, wait, wait_timeout, - put_vm_on_hold, persistent) + put_vm_on_hold, persistent, updateconf) # instances_list - new instances # tagged_instances_list - all instances with specified `count_attributes` and `count_labels` vms = instances_list @@ -1603,6 +1692,9 @@ def main(): if owner_id is not None or group_id is not None: changed = set_vm_ownership(module, one_client, vms, owner_id, group_id) or changed + if template_id is None and updateconf is not None: + changed = update_vms(module, one_client, vms, updateconf) or changed + if wait and not module.check_mode and state != 'present': wait_for = { 'absent': wait_for_done, diff --git a/plugins/modules/one_vnet.py 
b/plugins/modules/one_vnet.py new file mode 100644 index 0000000000..3038f8cdc2 --- /dev/null +++ b/plugins/modules/one_vnet.py @@ -0,0 +1,434 @@ +#!/usr/bin/python +# +# Copyright (c) 2024, Alexander Bakanovskii  +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +# Make coding more python3-ish +from __future__ import annotations + +DOCUMENTATION = r""" +module: one_vnet +short_description: Manages OpenNebula virtual networks +version_added: 9.4.0 +author: "Alexander Bakanovskii (@abakanovskii)" +requirements: + - pyone +description: + - Manages virtual networks in OpenNebula. +attributes: + check_mode: + support: partial + details: + - Note that check mode always returns C(changed=true) for existing networks, even if the network would not actually + change. + diff_mode: + support: none +options: + id: + description: + - A O(id) of the network you would like to manage. + - If not set then a new network is created with the given O(name). + type: int + name: + description: + - A O(name) of the network you would like to manage. If a network with the given name does not exist, then it is created, + otherwise it is managed by this module. + type: str + template: + description: + - A string containing the network template contents. + type: str + state: + description: + - V(present) - state that is used to manage the network. + - V(absent) - delete the network. 
+ choices: ["present", "absent"] + default: present + type: str + +extends_documentation_fragment: + - community.general.opennebula + - community.general.attributes +""" + +EXAMPLES = r""" +- name: Make sure the network is present by ID + community.general.one_vnet: + id: 0 + state: present + register: result + +- name: Make sure the network is present by name + community.general.one_vnet: + name: opennebula-bridge + state: present + register: result + +- name: Create a new or update an existing network + community.general.one_vnet: + name: bridge-network + template: | + VN_MAD = "bridge" + BRIDGE = "br0" + BRIDGE_TYPE = "linux" + AR=[ + TYPE = "IP4", + IP = 192.0.2.50, + SIZE = "20" + ] + DNS = 192.0.2.1 + GATEWAY = 192.0.2.1 + +- name: Delete the network by ID + community.general.one_vnet: + id: 0 + state: absent +""" + +RETURN = r""" +id: + description: The network ID. + type: int + returned: when O(state=present) + sample: 153 +name: + description: The network name. + type: str + returned: when O(state=present) + sample: app1 +template: + description: The parsed network template. + type: dict + returned: when O(state=present) + sample: + BRIDGE: onebr.1000 + BRIDGE_TYPE: linux + DESCRIPTION: sampletext + PHYDEV: eth0 + SECURITY_GROUPS: 0 + VLAN_ID: 1000 + VN_MAD: 802.1Q +user_id: + description: The network's user ID. + type: int + returned: when O(state=present) + sample: 1 +user_name: + description: The network's user name. + type: str + returned: when O(state=present) + sample: oneadmin +group_id: + description: The network's group ID. + type: int + returned: when O(state=present) + sample: 1 +group_name: + description: The network's group name. + type: str + returned: when O(state=present) + sample: one-users +owner_id: + description: The network's owner ID. + type: int + returned: when O(state=present) + sample: 143 +owner_name: + description: The network's owner name. 
+ type: str + returned: when O(state=present) + sample: ansible-test +permissions: + description: The network's permissions. + type: dict + returned: when O(state=present) + contains: + owner_u: + description: The network's owner USAGE permissions. + type: str + sample: 1 + owner_m: + description: The network's owner MANAGE permissions. + type: str + sample: 0 + owner_a: + description: The network's owner ADMIN permissions. + type: str + sample: 0 + group_u: + description: The network's group USAGE permissions. + type: str + sample: 0 + group_m: + description: The network's group MANAGE permissions. + type: str + sample: 0 + group_a: + description: The network's group ADMIN permissions. + type: str + sample: 0 + other_u: + description: The network's other users USAGE permissions. + type: str + sample: 0 + other_m: + description: The network's other users MANAGE permissions. + type: str + sample: 0 + other_a: + description: The network's other users ADMIN permissions. + type: str + sample: 0 + sample: + owner_u: 1 + owner_m: 0 + owner_a: 0 + group_u: 0 + group_m: 0 + group_a: 0 + other_u: 0 + other_m: 0 + other_a: 0 +clusters: + description: The network's clusters. + type: list + returned: when O(state=present) + sample: [0, 100] +bridge: + description: The network's bridge interface. + type: str + returned: when O(state=present) + sample: br0 +bridge_type: + description: The network's bridge type. + type: str + returned: when O(state=present) + sample: linux +parent_network_id: + description: The network's parent network ID. + type: int + returned: when O(state=present) + sample: 1 +vn_mad: + description: The network's VN_MAD. + type: str + returned: when O(state=present) + sample: bridge +phydev: + description: The network's physical device (NIC). + type: str + returned: when O(state=present) + sample: eth0 +vlan_id: + description: The network's VLAN tag. 
+ type: int + returned: when O(state=present) + sample: 1000 +outer_vlan_id: + description: The network's outer VLAN tag. + type: int + returned: when O(state=present) + sample: 1000 +vrouters: + description: The network's list of virtual routers IDs. + type: list + returned: when O(state=present) + sample: [0, 1] +ar_pool: + description: The network's list of ar_pool. + type: list + returned: when O(state=present) + sample: + - ar_id: 0 + ip: 192.0.2.1 + mac: 6c:1e:46:01:cd:d1 + size: 20 + type: IP4 + - ar_id: 1 + allocated: 0 + ip: 198.51.100.1 + mac: 5d:9b:c0:9e:f6:e5 + size: 20 + type: IP4 +""" + + +from ansible_collections.community.general.plugins.module_utils.opennebula import OpenNebulaModule + + +class NetworksModule(OpenNebulaModule): + + def __init__(self): + argument_spec = dict( + id=dict(type='int'), + name=dict(type='str'), + state=dict(type='str', choices=['present', 'absent'], default='present'), + template=dict(type='str'), + ) + + mutually_exclusive = [ + ['id', 'name'] + ] + + required_one_of = [('id', 'name')] + + required_if = [ + ['state', 'present', ['template']] + ] + + OpenNebulaModule.__init__(self, + argument_spec, + supports_check_mode=True, + mutually_exclusive=mutually_exclusive, + required_one_of=required_one_of, + required_if=required_if) + + def run(self, one, module, result): + params = module.params + id = params.get('id') + name = params.get('name') + desired_state = params.get('state') + template_data = params.get('template') + + self.result = {} + + template = self.get_template_instance(id, name) + needs_creation = False + if not template and desired_state != 'absent': + if id: + module.fail_json(msg="There is no template with id=" + str(id)) + else: + needs_creation = True + + if desired_state == 'absent': + self.result = self.delete_template(template) + else: + if needs_creation: + self.result = self.create_template(name, template_data) + else: + self.result = self.update_template(template, template_data) + + self.exit() + + 
def get_template(self, predicate): + # -2 means "Resources belonging to all users" + # the other two parameters are used for pagination, -1 for both essentially means "return all" + pool = self.one.vnpool.info(-2, -1, -1) + + for template in pool.VNET: + if predicate(template): + return template + + return None + + def get_template_by_id(self, template_id): + return self.get_template(lambda template: (template.ID == template_id)) + + def get_template_by_name(self, name): + return self.get_template(lambda template: (template.NAME == name)) + + def get_template_instance(self, requested_id, requested_name): + if requested_id: + return self.get_template_by_id(requested_id) + else: + return self.get_template_by_name(requested_name) + + def get_networks_ar_pool(self, template): + ar_pool = [] + template_pool = template.AR_POOL.AR + for ar in range(len(template_pool)): + template_param = template_pool[ar] + ar_pool.append({ + # These params will always be present + 'ar_id': template_param.AR_ID, + 'mac': template_param.MAC, + 'size': template_param.SIZE, + 'type': template_param.TYPE, + # These are optional so firstly check for presence + # and if not present set value to Null + 'allocated': getattr(template_param, 'ALLOCATED', 'Null'), + 'ip': getattr(template_param, 'IP', 'Null'), + 'global_prefix': getattr(template_param, 'GLOBAL_PREFIX', 'Null'), + 'parent_network_ar_id': getattr(template_param, 'PARENT_NETWORK_AR_ID', 'Null'), + 'ula_prefix': getattr(template_param, 'ULA_PREFIX', 'Null'), + 'vn_mad': getattr(template_param, 'VN_MAD', 'Null'), + }) + return ar_pool + + def get_template_info(self, template): + info = { + 'id': template.ID, + 'name': template.NAME, + 'template': template.TEMPLATE, + 'user_name': template.UNAME, + 'user_id': template.UID, + 'group_name': template.GNAME, + 'group_id': template.GID, + 'permissions': { + 'owner_u': template.PERMISSIONS.OWNER_U, + 'owner_m': template.PERMISSIONS.OWNER_M, + 'owner_a': template.PERMISSIONS.OWNER_A, + 
'group_u': template.PERMISSIONS.GROUP_U, + 'group_m': template.PERMISSIONS.GROUP_M, + 'group_a': template.PERMISSIONS.GROUP_A, + 'other_u': template.PERMISSIONS.OTHER_U, + 'other_m': template.PERMISSIONS.OTHER_M, + 'other_a': template.PERMISSIONS.OTHER_A + }, + 'clusters': template.CLUSTERS.ID, + 'bridge': template.BRIDGE, + 'bridge_type': template.BRIDGE_TYPE, + 'parent_network_id': template.PARENT_NETWORK_ID, + 'vn_mad': template.VN_MAD, + 'phydev': template.PHYDEV, + 'vlan_id': template.VLAN_ID, + 'outer_vlan_id': template.OUTER_VLAN_ID, + 'used_leases': template.USED_LEASES, + 'vrouters': template.VROUTERS.ID, + 'ar_pool': self.get_networks_ar_pool(template) + } + + return info + + def create_template(self, name, template_data): + if not self.module.check_mode: + # -1 means that network won't be added to any cluster which happens by default + self.one.vn.allocate("NAME = \"" + name + "\"\n" + template_data, -1) + + result = self.get_template_info(self.get_template_by_name(name)) + result['changed'] = True + + return result + + def update_template(self, template, template_data): + if not self.module.check_mode: + # 0 = replace the whole template + self.one.vn.update(template.ID, template_data, 0) + + result = self.get_template_info(self.get_template_by_id(template.ID)) + if self.module.check_mode: + # Unfortunately it is not easy to detect if the template would have changed, therefore always report a change here. 
+ result['changed'] = True + else: + # if the previous parsed template data is not equal to the updated one, this has changed + result['changed'] = template.TEMPLATE != result['template'] + + return result + + def delete_template(self, template): + if not template: + return {'changed': False} + + if not self.module.check_mode: + self.one.vn.delete(template.ID) + + return {'changed': True} + + +def main(): + NetworksModule().run_module() + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/cloud/oneandone/oneandone_firewall_policy.py b/plugins/modules/oneandone_firewall_policy.py similarity index 85% rename from plugins/modules/cloud/oneandone/oneandone_firewall_policy.py rename to plugins/modules/oneandone_firewall_policy.py index d46ce38897..9078075361 100644 --- a/plugins/modules/cloud/oneandone/oneandone_firewall_policy.py +++ b/plugins/modules/oneandone_firewall_policy.py @@ -1,30 +1,22 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . 
+# Copyright (c) Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: oneandone_firewall_policy -short_description: Configure 1&1 firewall policy. +short_description: Configure 1&1 firewall policy description: - - Create, remove, reconfigure, update firewall policies. - This module has a dependency on 1and1 >= 1.0 + - Create, remove, reconfigure, update firewall policies. This module has a dependency on 1and1 >= 1.0. +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: none options: state: description: @@ -32,21 +24,19 @@ options: required: false type: str default: 'present' - choices: [ "present", "absent", "update" ] + choices: ["present", "absent", "update"] auth_token: description: - Authenticating API token provided by 1&1. type: str api_url: description: - - Custom API URL. Overrides the - ONEANDONE_API_URL environment variable. + - Custom API URL. Overrides the E(ONEANDONE_API_URL) environment variable. type: str required: false name: description: - - Firewall policy name used with present state. Used as identifier (id or name) when used with absent state. - maxLength=128 + - Firewall policy name used with present state. Used as identifier (id or name) when used with absent state. maxLength=128. type: str firewall_policy: description: @@ -54,80 +44,81 @@ options: type: str rules: description: - - A list of rules that will be set for the firewall policy. - Each rule must contain protocol parameter, in addition to three optional parameters - (port_from, port_to, and source) + - List of rules that are set for the firewall policy. 
Each rule must contain protocol parameter, in addition to three + optional parameters (port_from, port_to, and source). type: list elements: dict + default: [] add_server_ips: description: - - A list of server identifiers (id or name) to be assigned to a firewall policy. - Used in combination with update state. + - A list of server identifiers (ID or name) to be assigned to a firewall policy. Used in combination with update state. type: list elements: str required: false + default: [] remove_server_ips: description: - - A list of server IP ids to be unassigned from a firewall policy. Used in combination with update state. + - A list of server IP IDs to be unassigned from a firewall policy. Used in combination with update state. type: list elements: str required: false + default: [] add_rules: description: - - A list of rules that will be added to an existing firewall policy. - It is syntax is the same as the one used for rules parameter. Used in combination with update state. + - List of rules that are added to an existing firewall policy. Its syntax is the same as the one used for rules parameter. + Used in combination with update state. type: list elements: dict required: false + default: [] remove_rules: description: - - A list of rule ids that will be removed from an existing firewall policy. Used in combination with update state. + - List of rule IDs that are removed from an existing firewall policy. Used in combination with update state. type: list elements: str required: false + default: [] description: description: - - Firewall policy description. maxLength=256 + - Firewall policy description. maxLength=256. type: str required: false wait: description: - - wait for the instance to be in state 'running' before returning + - Wait for the instance to be in state 'running' before returning. 
required: false - default: "yes" + default: true type: bool wait_timeout: description: - - how long before wait gives up, in seconds + - How long before wait gives up, in seconds. type: int default: 600 wait_interval: description: - - Defines the number of seconds to wait when using the _wait_for methods + - Defines the number of seconds to wait when using the _wait_for methods. type: int default: 5 requirements: - "1and1" - - "python >= 2.6" author: - - "Amel Ajdinovic (@aajdinov)" - - "Ethan Devenport (@edevenport)" -''' + - "Amel Ajdinovic (@aajdinov)" + - "Ethan Devenport (@edevenport)" +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: Create a firewall policy community.general.oneandone_firewall_policy: auth_token: oneandone_private_api_key name: ansible-firewall-policy description: Testing creation of firewall policies with ansible rules: - - - protocol: TCP - port_from: 80 - port_to: 80 - source: 0.0.0.0 + - protocol: TCP + port_from: 80 + port_to: 80 + source: 0.0.0.0 wait: true wait_timeout: 500 @@ -150,8 +141,8 @@ EXAMPLES = ''' auth_token: oneandone_private_api_key firewall_policy: ansible-firewall-policy-updated add_server_ips: - - server_identifier (id or name) - - server_identifier #2 (id or name) + - server_identifier (id or name) + - "server_identifier #2 (id or name)" wait: true wait_timeout: 500 state: update @@ -161,7 +152,7 @@ EXAMPLES = ''' auth_token: oneandone_private_api_key firewall_policy: ansible-firewall-policy-updated remove_server_ips: - - B2504878540DBC5F7634EB00A07C1EBD (server's IP id) + - B2504878540DBC5F7634EB00A07C1EBD (server's IP id) wait: true wait_timeout: 500 state: update @@ -172,16 +163,14 @@ EXAMPLES = ''' firewall_policy: ansible-firewall-policy-updated description: Adding rules to an existing firewall policy add_rules: - - - protocol: TCP - port_from: 70 - port_to: 70 - source: 0.0.0.0 - - - protocol: TCP - port_from: 60 - port_to: 60 - source: 0.0.0.0 + - protocol: TCP + port_from: 70 + port_to: 70 + source: 0.0.0.0 + - 
protocol: TCP + port_from: 60 + port_to: 60 + source: 0.0.0.0 wait: true wait_timeout: 500 state: update @@ -191,21 +180,21 @@ EXAMPLES = ''' auth_token: oneandone_private_api_key firewall_policy: ansible-firewall-policy-updated remove_rules: - - rule_id #1 - - rule_id #2 - - ... + - "rule_id #1" + - "rule_id #2" + - '...' wait: true wait_timeout: 500 state: update -''' +""" -RETURN = ''' +RETURN = r""" firewall_policy: - description: Information about the firewall policy that was processed - type: dict - sample: '{"id": "92B74394A397ECC3359825C1656D67A6", "name": "Default Policy"}' - returned: always -''' + description: Information about the firewall policy that was processed. + type: dict + sample: {"id": "92B74394A397ECC3359825C1656D67A6", "name": "Default Policy"} + returned: always +""" import os from ansible.module_utils.basic import AnsibleModule @@ -297,7 +286,7 @@ def _add_firewall_rules(module, oneandone_conn, firewall_id, rules): if module.check_mode: firewall_policy_id = get_firewall_policy(oneandone_conn, firewall_id) - if (firewall_rules and firewall_policy_id): + if firewall_rules and firewall_policy_id: return True return False diff --git a/plugins/modules/cloud/oneandone/oneandone_load_balancer.py b/plugins/modules/oneandone_load_balancer.py similarity index 86% rename from plugins/modules/cloud/oneandone/oneandone_load_balancer.py rename to plugins/modules/oneandone_load_balancer.py index 5f541a878c..d75127e416 100644 --- a/plugins/modules/cloud/oneandone/oneandone_load_balancer.py +++ b/plugins/modules/oneandone_load_balancer.py @@ -1,30 +1,22 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. 
-# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . +# Copyright (c) Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: oneandone_load_balancer -short_description: Configure 1&1 load balancer. +short_description: Configure 1&1 load balancer description: - - Create, remove, update load balancers. - This module has a dependency on 1and1 >= 1.0 + - Create, remove, update load balancers. This module has a dependency on 1and1 >= 1.0. +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: none options: state: description: @@ -32,7 +24,7 @@ options: type: str required: false default: 'present' - choices: [ "present", "absent", "update" ] + choices: ["present", "absent", "update"] auth_token: description: - Authenticating API token provided by 1&1. @@ -43,32 +35,30 @@ options: type: str api_url: description: - - Custom API URL. Overrides the - ONEANDONE_API_URL environment variable. + - Custom API URL. Overrides the E(ONEANDONE_API_URL) environment variable. type: str required: false name: description: - - Load balancer name used with present state. Used as identifier (id or name) when used with absent state. - maxLength=128 + - Load balancer name used with present state. Used as identifier (ID or name) when used with absent state. maxLength=128. 
type: str health_check_test: description: - Type of the health check. At the moment, HTTP is not allowed. type: str - choices: [ "NONE", "TCP", "HTTP", "ICMP" ] + choices: ["NONE", "TCP", "HTTP", "ICMP"] health_check_interval: description: - - Health check period in seconds. minimum=5, maximum=300, multipleOf=1 + - Health check period in seconds. minimum=5, maximum=300, multipleOf=1. type: str health_check_path: description: - - Url to call for checking. Required for HTTP health check. maxLength=1000 + - URL to call for checking. Required for HTTP health check. maxLength=1000. type: str required: false health_check_parse: description: - - Regular expression to check. Required for HTTP health check. maxLength=64 + - Regular expression to check. Required for HTTP health check. maxLength=64. type: str required: false persistence: @@ -77,84 +67,87 @@ options: type: bool persistence_time: description: - - Persistence time in seconds. Required if persistence is enabled. minimum=30, maximum=1200, multipleOf=1 + - Persistence time in seconds. Required if persistence is enabled. minimum=30, maximum=1200, multipleOf=1. type: str method: description: - Balancing procedure. type: str - choices: [ "ROUND_ROBIN", "LEAST_CONNECTIONS" ] + choices: ["ROUND_ROBIN", "LEAST_CONNECTIONS"] datacenter: description: - - ID or country code of the datacenter where the load balancer will be created. - - If not specified, it defaults to I(US). + - ID or country code of the datacenter where the load balancer is created. + - If not specified, it defaults to V(US). type: str - choices: [ "US", "ES", "DE", "GB" ] + choices: ["US", "ES", "DE", "GB"] required: false rules: description: - - A list of rule objects that will be set for the load balancer. Each rule must contain protocol, - port_balancer, and port_server parameters, in addition to source parameter, which is optional. + - A list of rule objects that are set for the load balancer. 
Each rule must contain protocol, port_balancer, and port_server + parameters, in addition to source parameter, which is optional. type: list elements: dict + default: [] description: description: - - Description of the load balancer. maxLength=256 + - Description of the load balancer. maxLength=256. type: str required: false add_server_ips: description: - - A list of server identifiers (id or name) to be assigned to a load balancer. - Used in combination with update state. + - A list of server identifiers (id or name) to be assigned to a load balancer. Used in combination with O(state=update). type: list elements: str required: false + default: [] remove_server_ips: description: - - A list of server IP ids to be unassigned from a load balancer. Used in combination with update state. + - A list of server IP IDs to be unassigned from a load balancer. Used in combination with O(state=update). type: list elements: str required: false + default: [] add_rules: description: - - A list of rules that will be added to an existing load balancer. - It is syntax is the same as the one used for rules parameter. Used in combination with update state. + - A list of rules that are added to an existing load balancer. Its syntax is the same as the one used for rules parameter. + Used in combination with O(state=update). type: list elements: dict required: false + default: [] remove_rules: description: - - A list of rule ids that will be removed from an existing load balancer. Used in combination with update state. + - A list of rule IDs that are removed from an existing load balancer. Used in combination with O(state=update). type: list elements: str required: false + default: [] wait: description: - - wait for the instance to be in state 'running' before returning + - Wait for the instance to be in state 'running' before returning.
required: false - default: "yes" + default: true type: bool wait_timeout: description: - - how long before wait gives up, in seconds + - How long before wait gives up, in seconds. type: int default: 600 wait_interval: description: - - Defines the number of seconds to wait when using the _wait_for methods + - Defines the number of seconds to wait when using the _wait_for methods. type: int default: 5 requirements: - - "1and1" - - "python >= 2.6" + - "1and1" author: - Amel Ajdinovic (@aajdinov) - Ethan Devenport (@edevenport) -''' +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: Create a load balancer community.general.oneandone_load_balancer: auth_token: oneandone_private_api_key @@ -167,11 +160,10 @@ EXAMPLES = ''' method: ROUND_ROBIN datacenter: US rules: - - - protocol: TCP - port_balancer: 80 - port_server: 80 - source: 0.0.0.0 + - protocol: TCP + port_balancer: 80 + port_server: 80 + source: 0.0.0.0 wait: true wait_timeout: 500 @@ -199,7 +191,7 @@ EXAMPLES = ''' load_balancer: ansible load balancer updated description: Adding server to a load balancer with ansible add_server_ips: - - server identifier (id or name) + - server identifier (id or name) wait: true wait_timeout: 500 state: update @@ -210,7 +202,7 @@ EXAMPLES = ''' load_balancer: ansible load balancer updated description: Removing server from a load balancer with ansible remove_server_ips: - - B2504878540DBC5F7634EB00A07C1EBD (server's ip id) + - B2504878540DBC5F7634EB00A07C1EBD (server's ip id) wait: true wait_timeout: 500 state: update @@ -221,16 +213,14 @@ EXAMPLES = ''' load_balancer: ansible load balancer updated description: Adding rules to a load balancer with ansible add_rules: - - - protocol: TCP - port_balancer: 70 - port_server: 70 - source: 0.0.0.0 - - - protocol: TCP - port_balancer: 60 - port_server: 60 - source: 0.0.0.0 + - protocol: TCP + port_balancer: 70 + port_server: 70 + source: 0.0.0.0 + - protocol: TCP + port_balancer: 60 + port_server: 60 + source: 0.0.0.0 wait: true wait_timeout: 
500 state: update @@ -241,21 +231,21 @@ EXAMPLES = ''' load_balancer: ansible load balancer updated description: Adding rules to a load balancer with ansible remove_rules: - - rule_id #1 - - rule_id #2 - - ... + - "rule_id #1" + - "rule_id #2" + - '...' wait: true wait_timeout: 500 state: update -''' +""" -RETURN = ''' +RETURN = r""" load_balancer: - description: Information about the load balancer that was processed - type: dict - sample: '{"id": "92B74394A397ECC3359825C1656D67A6", "name": "Default Balancer"}' - returned: always -''' + description: Information about the load balancer that was processed. + type: dict + sample: {"id": "92B74394A397ECC3359825C1656D67A6", "name": "Default Balancer"} + returned: always +""" import os from ansible.module_utils.basic import AnsibleModule @@ -352,7 +342,7 @@ def _add_load_balancer_rules(module, oneandone_conn, load_balancer_id, rules): if module.check_mode: lb_id = get_load_balancer(oneandone_conn, load_balancer_id) - if (load_balancer_rules and lb_id): + if load_balancer_rules and lb_id: return True return False diff --git a/plugins/modules/cloud/oneandone/oneandone_monitoring_policy.py b/plugins/modules/oneandone_monitoring_policy.py similarity index 86% rename from plugins/modules/cloud/oneandone/oneandone_monitoring_policy.py rename to plugins/modules/oneandone_monitoring_policy.py index 28dd0d41c5..a33abc8cb2 100644 --- a/plugins/modules/cloud/oneandone/oneandone_monitoring_policy.py +++ b/plugins/modules/oneandone_monitoring_policy.py @@ -1,31 +1,23 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. 
-# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . +# Copyright (c) Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: oneandone_monitoring_policy -short_description: Configure 1&1 monitoring policy. +short_description: Configure 1&1 monitoring policy description: - - Create, remove, update monitoring policies - (and add/remove ports, processes, and servers). - This module has a dependency on 1and1 >= 1.0 + - Create, remove, update monitoring policies (and add/remove ports, processes, and servers). This module has a dependency + on 1and1 >= 1.0. +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: none options: state: description: @@ -33,20 +25,19 @@ options: type: str required: false default: present - choices: [ "present", "absent", "update" ] + choices: ["present", "absent", "update"] auth_token: description: - Authenticating API token provided by 1&1. type: str api_url: description: - - Custom API URL. Overrides the - ONEANDONE_API_URL environment variable. + - Custom API URL. Overrides the E(ONEANDONE_API_URL) environment variable. type: str required: false name: description: - - Monitoring policy name used with present state. Used as identifier (id or name) when used with absent state. maxLength=128 + - Monitoring policy name used with present state. 
Used as identifier (id or name) when used with absent state. maxLength=128. type: str monitoring_policy: description: @@ -58,21 +49,21 @@ options: type: str email: description: - - User's email. maxLength=128 + - User's email. maxLength=128. type: str description: description: - - Monitoring policy description. maxLength=256 + - Monitoring policy description. maxLength=256. type: str required: false thresholds: description: - - Monitoring policy thresholds. Each of the suboptions have warning and critical, - which both have alert and value suboptions. Warning is used to set limits for - warning alerts, critical is used to set critical alerts. alert enables alert, - and value is used to advise when the value is exceeded. + - Monitoring policy thresholds. Each of the suboptions have warning and critical, which both have alert and value suboptions. + Warning is used to set limits for warning alerts, critical is used to set critical alerts. alert enables alert, and + value is used to advise when the value is exceeded. type: list elements: dict + default: [] suboptions: cpu: description: @@ -96,23 +87,24 @@ options: required: true ports: description: - - Array of ports that will be monitoring. + - Array of ports that are to be monitored. type: list elements: dict + default: [] suboptions: protocol: description: - Internet protocol. - choices: [ "TCP", "UDP" ] + choices: ["TCP", "UDP"] required: true port: description: - - Port number. minimum=1, maximum=65535 + - Port number. minimum=1, maximum=65535. required: true alert_if: description: - Case of alert. - choices: [ "RESPONDING", "NOT_RESPONDING" ] + choices: ["RESPONDING", "NOT_RESPONDING"] required: true email_notification: description: @@ -120,18 +112,19 @@ options: required: true processes: description: - - Array of processes that will be monitoring. + - Array of processes that are to be monitored. type: list elements: dict + default: [] suboptions: process: description: - - Name of the process. 
maxLength=50 + - Name of the process. maxLength=50. required: true alert_if: description: - Case of alert. - choices: [ "RUNNING", "NOT_RUNNING" ] + choices: ["RUNNING", "NOT_RUNNING"] required: true add_ports: description: @@ -139,75 +132,82 @@ options: type: list elements: dict required: false + default: [] add_processes: description: - Processes to add to the monitoring policy. type: list elements: dict required: false + default: [] add_servers: description: - Servers to add to the monitoring policy. type: list elements: str required: false + default: [] remove_ports: description: - Ports to remove from the monitoring policy. type: list elements: str required: false + default: [] remove_processes: description: - Processes to remove from the monitoring policy. type: list elements: str required: false + default: [] remove_servers: description: - Servers to remove from the monitoring policy. type: list elements: str required: false + default: [] update_ports: description: - Ports to be updated on the monitoring policy. type: list elements: dict required: false + default: [] update_processes: description: - Processes to be updated on the monitoring policy. type: list elements: dict required: false + default: [] wait: description: - - wait for the instance to be in state 'running' before returning + - Wait for the instance to be in state 'running' before returning. required: false - default: "yes" + default: true type: bool wait_timeout: description: - - how long before wait gives up, in seconds + - How long before wait gives up, in seconds. type: int default: 600 wait_interval: description: - - Defines the number of seconds to wait when using the _wait_for methods + - Defines the number of seconds to wait when using the _wait_for methods. 
type: int default: 5 requirements: - "1and1" - - "python >= 2.6" author: - - "Amel Ajdinovic (@aajdinov)" - - "Ethan Devenport (@edevenport)" -''' + - "Amel Ajdinovic (@aajdinov)" + - "Ethan Devenport (@edevenport)" +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: Create a monitoring policy community.general.oneandone_monitoring_policy: auth_token: oneandone_private_api_key @@ -216,57 +216,50 @@ EXAMPLES = ''' email: your@emailaddress.com agent: true thresholds: - - - cpu: - warning: - value: 80 - alert: false - critical: - value: 92 - alert: false - - - ram: - warning: - value: 80 - alert: false - critical: - value: 90 - alert: false - - - disk: - warning: - value: 80 - alert: false - critical: - value: 90 - alert: false - - - internal_ping: - warning: - value: 50 - alert: false - critical: - value: 100 - alert: false - - - transfer: - warning: - value: 1000 - alert: false - critical: - value: 2000 - alert: false + - cpu: + warning: + value: 80 + alert: false + critical: + value: 92 + alert: false + - ram: + warning: + value: 80 + alert: false + critical: + value: 90 + alert: false + - disk: + warning: + value: 80 + alert: false + critical: + value: 90 + alert: false + - internal_ping: + warning: + value: 50 + alert: false + critical: + value: 100 + alert: false + - transfer: + warning: + value: 1000 + alert: false + critical: + value: 2000 + alert: false ports: - - - protocol: TCP - port: 22 - alert_if: RESPONDING - email_notification: false + - protocol: TCP + port: 22 + alert_if: RESPONDING + email_notification: false processes: - - - process: test - alert_if: NOT_RUNNING - email_notification: false + - process: test + alert_if: NOT_RUNNING + email_notification: false wait: true - name: Destroy a monitoring policy @@ -283,46 +276,41 @@ EXAMPLES = ''' description: Testing creation of a monitoring policy with ansible updated email: another@emailaddress.com thresholds: - - - cpu: - warning: - value: 70 - alert: false - critical: - value: 90 - alert: false - - - ram: 
- warning: - value: 70 - alert: false - critical: - value: 80 - alert: false - - - disk: - warning: - value: 70 - alert: false - critical: - value: 80 - alert: false - - - internal_ping: - warning: - value: 60 - alert: false - critical: - value: 90 - alert: false - - - transfer: - warning: - value: 900 - alert: false - critical: - value: 1900 - alert: false + - cpu: + warning: + value: 70 + alert: false + critical: + value: 90 + alert: false + - ram: + warning: + value: 70 + alert: false + critical: + value: 80 + alert: false + - disk: + warning: + value: 70 + alert: false + critical: + value: 80 + alert: false + - internal_ping: + warning: + value: 60 + alert: false + critical: + value: 90 + alert: false + - transfer: + warning: + value: 900 + alert: false + critical: + value: 1900 + alert: false wait: true state: update @@ -331,11 +319,10 @@ EXAMPLES = ''' auth_token: oneandone_private_api_key monitoring_policy: ansible monitoring policy updated add_ports: - - - protocol: TCP - port: 33 - alert_if: RESPONDING - email_notification: false + - protocol: TCP + port: 33 + alert_if: RESPONDING + email_notification: false wait: true state: update @@ -344,18 +331,16 @@ EXAMPLES = ''' auth_token: oneandone_private_api_key monitoring_policy: ansible monitoring policy updated update_ports: - - - id: existing_port_id - protocol: TCP - port: 34 - alert_if: RESPONDING - email_notification: false - - - id: existing_port_id - protocol: TCP - port: 23 - alert_if: RESPONDING - email_notification: false + - id: existing_port_id + protocol: TCP + port: 34 + alert_if: RESPONDING + email_notification: false + - id: existing_port_id + protocol: TCP + port: 23 + alert_if: RESPONDING + email_notification: false wait: true state: update @@ -364,7 +349,7 @@ EXAMPLES = ''' auth_token: oneandone_private_api_key monitoring_policy: ansible monitoring policy updated remove_ports: - - port_id + - port_id state: update - name: Add a process to a monitoring policy @@ -372,10 +357,9 @@ EXAMPLES = 
''' auth_token: oneandone_private_api_key monitoring_policy: ansible monitoring policy updated add_processes: - - - process: test_2 - alert_if: NOT_RUNNING - email_notification: false + - process: test_2 + alert_if: NOT_RUNNING + email_notification: false wait: true state: update @@ -384,16 +368,14 @@ EXAMPLES = ''' auth_token: oneandone_private_api_key monitoring_policy: ansible monitoring policy updated update_processes: - - - id: process_id - process: test_1 - alert_if: NOT_RUNNING - email_notification: false - - - id: process_id - process: test_3 - alert_if: NOT_RUNNING - email_notification: false + - id: process_id + process: test_1 + alert_if: NOT_RUNNING + email_notification: false + - id: process_id + process: test_3 + alert_if: NOT_RUNNING + email_notification: false wait: true state: update @@ -402,7 +384,7 @@ EXAMPLES = ''' auth_token: oneandone_private_api_key monitoring_policy: ansible monitoring policy updated remove_processes: - - process_id + - process_id wait: true state: update @@ -411,7 +393,7 @@ EXAMPLES = ''' auth_token: oneandone_private_api_key monitoring_policy: ansible monitoring policy updated add_servers: - - server id or name + - server id or name wait: true state: update @@ -420,18 +402,18 @@ EXAMPLES = ''' auth_token: oneandone_private_api_key monitoring_policy: ansible monitoring policy updated remove_servers: - - server01 + - server01 wait: true state: update -''' +""" -RETURN = ''' +RETURN = r""" monitoring_policy: - description: Information about the monitoring policy that was processed - type: dict - sample: '{"id": "92B74394A397ECC3359825C1656D67A6", "name": "Default Policy"}' - returned: always -''' + description: Information about the monitoring policy that was processed. 
+ type: dict + sample: {"id": "92B74394A397ECC3359825C1656D67A6", "name": "Default Policy"} + returned: always +""" import os from ansible.module_utils.basic import AnsibleModule @@ -553,7 +535,7 @@ def _add_processes(module, oneandone_conn, monitoring_policy_id, processes): if module.check_mode: mp_id = get_monitoring_policy(oneandone_conn, monitoring_policy_id) - if (monitoring_policy_processes and mp_id): + if monitoring_policy_processes and mp_id: return True return False diff --git a/plugins/modules/cloud/oneandone/oneandone_private_network.py b/plugins/modules/oneandone_private_network.py similarity index 88% rename from plugins/modules/cloud/oneandone/oneandone_private_network.py rename to plugins/modules/oneandone_private_network.py index 6a16cf683e..2b74dff4f0 100644 --- a/plugins/modules/cloud/oneandone/oneandone_private_network.py +++ b/plugins/modules/oneandone_private_network.py @@ -1,30 +1,22 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . 
+# Copyright (c) Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: oneandone_private_network -short_description: Configure 1&1 private networking. +short_description: Configure 1&1 private networking description: - - Create, remove, reconfigure, update a private network. - This module has a dependency on 1and1 >= 1.0 + - Create, remove, reconfigure, update a private network. This module has a dependency on 1and1 >= 1.0. +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: none options: state: description: @@ -32,7 +24,7 @@ options: type: str required: false default: 'present' - choices: [ "present", "absent", "update" ] + choices: ["present", "absent", "update"] auth_token: description: - Authenticating API token provided by 1&1. @@ -43,8 +35,7 @@ options: type: str api_url: description: - - Custom API URL. Overrides the - ONEANDONE_API_URL environment variable. + - Custom API URL. Overrides the E(ONEANDONE_API_URL) environment variable. type: str required: false name: @@ -57,54 +48,55 @@ options: type: str datacenter: description: - - The identifier of the datacenter where the private network will be created + - The identifier of the datacenter where the private network is created. type: str choices: [US, ES, DE, GB] network_address: description: - - Set a private network space, i.e. 192.168.1.0 + - Set a private network space, for example V(192.168.1.0). type: str subnet_mask: description: - - Set the netmask for the private network, i.e. 255.255.255.0 + - Set the netmask for the private network, for example V(255.255.255.0). 
type: str add_members: description: - List of server identifiers (name or id) to be added to the private network. type: list elements: str + default: [] remove_members: description: - List of server identifiers (name or id) to be removed from the private network. type: list elements: str + default: [] wait: description: - - wait for the instance to be in state 'running' before returning + - Wait for the instance to be in state 'running' before returning. required: false - default: "yes" + default: true type: bool wait_timeout: description: - - how long before wait gives up, in seconds + - How long before wait gives up, in seconds. type: int default: 600 wait_interval: description: - - Defines the number of seconds to wait when using the _wait_for methods + - Defines the number of seconds to wait when using the _wait_for methods. type: int default: 5 requirements: - - "1and1" - - "python >= 2.6" + - "1and1" author: - Amel Ajdinovic (@aajdinov) - Ethan Devenport (@edevenport) -''' +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: Create a private network community.general.oneandone_private_network: auth_token: oneandone_private_api_key @@ -134,7 +126,7 @@ EXAMPLES = ''' state: update private_network: backup_network add_members: - - server identifier (id or name) + - server identifier (id or name) - name: Remove members from the private network community.general.oneandone_private_network: @@ -142,16 +134,16 @@ EXAMPLES = ''' state: update private_network: backup_network remove_members: - - server identifier (id or name) -''' + - server identifier (id or name) +""" -RETURN = ''' +RETURN = r""" private_network: - description: Information about the private network. - type: dict - sample: '{"name": "backup_network", "id": "55726DEDA20C99CF6F2AF8F18CAC9963"}' - returned: always -''' + description: Information about the private network. 
+ type: dict + sample: {"name": "backup_network", "id": "55726DEDA20C99CF6F2AF8F18CAC9963"} + returned: always +""" import os from ansible.module_utils.basic import AnsibleModule diff --git a/plugins/modules/cloud/oneandone/oneandone_public_ip.py b/plugins/modules/oneandone_public_ip.py similarity index 83% rename from plugins/modules/cloud/oneandone/oneandone_public_ip.py rename to plugins/modules/oneandone_public_ip.py index 96b1c9f3a5..4cc622eaa4 100644 --- a/plugins/modules/cloud/oneandone/oneandone_public_ip.py +++ b/plugins/modules/oneandone_public_ip.py @@ -1,56 +1,47 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . +# Copyright (c) Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: oneandone_public_ip -short_description: Configure 1&1 public IPs. +short_description: Configure 1&1 public IPs description: - - Create, update, and remove public IPs. - This module has a dependency on 1and1 >= 1.0 + - Create, update, and remove public IPs. This module has a dependency on 1and1 >= 1.0. 
+extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: none options: state: description: - - Define a public ip state to create, remove, or update. + - Define a public IP state to create, remove, or update. type: str required: false default: 'present' - choices: [ "present", "absent", "update" ] + choices: ["present", "absent", "update"] auth_token: description: - Authenticating API token provided by 1&1. type: str api_url: description: - - Custom API URL. Overrides the - ONEANDONE_API_URL environment variable. + - Custom API URL. Overrides the E(ONEANDONE_API_URL) environment variable. type: str required: false reverse_dns: description: - - Reverse DNS name. maxLength=256 + - Reverse DNS name. maxLength=256. type: str required: false datacenter: description: - - ID of the datacenter where the IP will be created (only for unassigned IPs). + - ID of the datacenter where the IP is created (only for unassigned IPs). type: str choices: [US, ES, DE, GB] default: US @@ -68,31 +59,30 @@ options: type: str wait: description: - - wait for the instance to be in state 'running' before returning + - Wait for the instance to be in state 'running' before returning. required: false - default: "yes" + default: true type: bool wait_timeout: description: - - how long before wait gives up, in seconds + - How long before wait gives up, in seconds. type: int default: 600 wait_interval: description: - - Defines the number of seconds to wait when using the _wait_for methods + - Defines the number of seconds to wait when using the _wait_for methods. 
type: int default: 5 requirements: - - "1and1" - - "python >= 2.6" + - "1and1" author: - Amel Ajdinovic (@aajdinov) - Ethan Devenport (@edevenport) -''' +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: Create a public IP community.general.oneandone_public_ip: auth_token: oneandone_private_api_key @@ -112,15 +102,15 @@ EXAMPLES = ''' auth_token: oneandone_private_api_key public_ip_id: public ip id state: absent -''' +""" -RETURN = ''' +RETURN = r""" public_ip: - description: Information about the public ip that was processed - type: dict - sample: '{"id": "F77CC589EBC120905B4F4719217BFF6D", "ip": "10.5.132.106"}' - returned: always -''' + description: Information about the public IP that was processed. + type: dict + sample: {"id": "F77CC589EBC120905B4F4719217BFF6D", "ip": "10.5.132.106"} + returned: always +""" import os from ansible.module_utils.basic import AnsibleModule diff --git a/plugins/modules/cloud/oneandone/oneandone_server.py b/plugins/modules/oneandone_server.py similarity index 86% rename from plugins/modules/cloud/oneandone/oneandone_server.py rename to plugins/modules/oneandone_server.py index aa651bd75f..23713890fd 100644 --- a/plugins/modules/cloud/oneandone/oneandone_server.py +++ b/plugins/modules/oneandone_server.py @@ -1,53 +1,44 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . 
+# Copyright (c) Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: oneandone_server -short_description: Create, destroy, start, stop, and reboot a 1&1 Host server. +short_description: Create, destroy, start, stop, and reboot a 1&1 Host server description: - - Create, destroy, update, start, stop, and reboot a 1&1 Host server. - When the server is created it can optionally wait for it to be 'running' before returning. + - Create, destroy, update, start, stop, and reboot a 1&1 Host server. When the server is created it can optionally wait + for it to be 'running' before returning. +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: none options: state: description: - Define a server's state to create, remove, start or stop it. type: str default: present - choices: [ "present", "absent", "running", "stopped" ] + choices: ["present", "absent", "running", "stopped"] auth_token: description: - - Authenticating API token provided by 1&1. Overrides the - ONEANDONE_AUTH_TOKEN environment variable. + - Authenticating API token provided by 1&1. Overrides the E(ONEANDONE_AUTH_TOKEN) environment variable. type: str api_url: description: - - Custom API URL. Overrides the - ONEANDONE_API_URL environment variable. + - Custom API URL. Overrides the E(ONEANDONE_API_URL) environment variable. type: str datacenter: description: - The datacenter location. type: str default: US - choices: [ "US", "ES", "DE", "GB" ] + choices: ["US", "ES", "DE", "GB"] hostname: description: - The hostname or ID of the server. Only used when state is 'present'. 
@@ -58,35 +49,30 @@ options: type: str appliance: description: - - The operating system name or ID for the server. - It is required only for 'present' state. + - The operating system name or ID for the server. It is required only for 'present' state. type: str fixed_instance_size: description: - - The instance size name or ID of the server. - It is required only for 'present' state, and it is mutually exclusive with - vcore, cores_per_processor, ram, and hdds parameters. - - 'The available choices are: C(S), C(M), C(L), C(XL), C(XXL), C(3XL), C(4XL), C(5XL)' + - The instance size name or ID of the server. It is required only for 'present' state, and it is mutually exclusive + with vcore, cores_per_processor, ram, and hdds parameters. + - 'The available choices are: V(S), V(M), V(L), V(XL), V(XXL), V(3XL), V(4XL), V(5XL).' type: str vcore: description: - - The total number of processors. - It must be provided with cores_per_processor, ram, and hdds parameters. + - The total number of processors. It must be provided with O(cores_per_processor), O(ram), and O(hdds) parameters. type: int cores_per_processor: description: - - The number of cores per processor. - It must be provided with vcore, ram, and hdds parameters. + - The number of cores per processor. It must be provided with O(vcore), O(ram), and O(hdds) parameters. type: int ram: description: - - The amount of RAM memory. - It must be provided with with vcore, cores_per_processor, and hdds parameters. + - The amount of RAM memory. It must be provided with with O(vcore), O(cores_per_processor), and O(hdds) parameters. type: float hdds: description: - - A list of hard disks with nested "size" and "is_main" properties. - It must be provided with vcore, cores_per_processor, and ram parameters. + - A list of hard disks with nested O(ignore:hdds[].size) and O(ignore:hdds[].is_main) properties. It must be provided + with O(vcore), O(cores_per_processor), and O(ram) parameters. 
type: list elements: dict private_network: @@ -123,44 +109,39 @@ options: - The type of server to be built. type: str default: "cloud" - choices: [ "cloud", "baremetal", "k8s_node" ] + choices: ["cloud", "baremetal", "k8s_node"] wait: description: - - Wait for the server to be in state 'running' before returning. - Also used for delete operation (set to 'false' if you don't want to wait - for each individual server to be deleted before moving on with - other tasks.) + - Wait for the server to be in state 'running' before returning. Also used for delete operation (set to V(false) if + you do not want to wait for each individual server to be deleted before moving on with other tasks). type: bool - default: 'yes' + default: true wait_timeout: description: - - how long before wait gives up, in seconds + - How long before wait gives up, in seconds. type: int default: 600 wait_interval: description: - - Defines the number of seconds to wait when using the wait_for methods + - Defines the number of seconds to wait when using the wait_for methods. type: int default: 5 auto_increment: description: - - When creating multiple servers at once, whether to differentiate - hostnames by appending a count after them or substituting the count - where there is a %02d or %03d in the hostname string. + - When creating multiple servers at once, whether to differentiate hostnames by appending a count after them or substituting + the count where there is a %02d or %03d in the hostname string. 
type: bool - default: 'yes' + default: true requirements: - "1and1" - - "python >= 2.6" author: - - "Amel Ajdinovic (@aajdinov)" - - "Ethan Devenport (@edevenport)" + - "Amel Ajdinovic (@aajdinov)" + - "Ethan Devenport (@edevenport)" +""" -''' - -EXAMPLES = ''' +EXAMPLES = r""" - name: Create three servers and enumerate their names community.general.oneandone_server: auth_token: oneandone_private_api_key @@ -184,7 +165,7 @@ EXAMPLES = ''' datacenter: ES appliance: C5A349786169F140BCBC335675014C08 count: 3 - wait: yes + wait: true wait_timeout: 600 wait_interval: 10 ssh_key: SSH_PUBLIC_KEY @@ -206,19 +187,19 @@ EXAMPLES = ''' auth_token: oneandone_private_api_key state: stopped server: 'node01' -''' +""" -RETURN = ''' +RETURN = r""" servers: - description: Information about each server that was processed - type: list - sample: '[{"hostname": "my-server", "id": "server-id"}]' - returned: always -''' + description: Information about each server that was processed. + type: list + sample: + - {"hostname": "my-server", "id": "server-id"} + returned: always +""" import os import time -from ansible.module_utils.six.moves import xrange from ansible.module_utils.basic import AnsibleModule from ansible_collections.community.general.plugins.module_utils.oneandone import ( get_datacenter, @@ -534,7 +515,7 @@ def startstop_server(module, oneandone_conn): # Resolve server server = get_server(oneandone_conn, server_id, True) if server: - # Attempt to change the server state, only if it's not already there + # Attempt to change the server state, only if it is not already there # or on its way. 
try: if state == 'stopped' and server['status']['state'] == 'POWERED_ON': @@ -594,7 +575,7 @@ def _auto_increment_hostname(count, hostname): return [ hostname % i - for i in xrange(1, count + 1) + for i in range(1, count + 1) ] @@ -606,7 +587,7 @@ def _auto_increment_description(count, description): if '%' in description: return [ description % i - for i in xrange(1, count + 1) + for i in range(1, count + 1) ] else: return [description] * count diff --git a/plugins/modules/identity/onepassword_info.py b/plugins/modules/onepassword_info.py similarity index 65% rename from plugins/modules/identity/onepassword_info.py rename to plugins/modules/onepassword_info.py index 6621092303..e60f060b0e 100644 --- a/plugins/modules/identity/onepassword_info.py +++ b/plugins/modules/onepassword_info.py @@ -1,119 +1,119 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- # -# (c) 2018, Ryan Conway (@rylon) -# (c) 2018, Scott Buchanan (onepassword.py used as starting point) -# (c) 2018, Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# Copyright (c) 2018, Ryan Conway (@rylon) +# Copyright (c) 2018, Scott Buchanan (onepassword.py used as starting point) +# Copyright (c) 2018, Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = ''' +DOCUMENTATION = r""" module: onepassword_info author: - - Ryan Conway (@Rylon) + - Ryan Conway (@Rylon) requirements: - - C(op) 1Password command line utility. See U(https://support.1password.com/command-line/) + - C(op) 1Password command line utility. See U(https://support.1password.com/command-line/) notes: - - Tested with C(op) version 0.5.5 - - "Based on the C(onepassword) lookup plugin by Scott Buchanan ." 
+ - Tested with C(op) version 0.5.5. + - Based on the P(community.general.onepassword#lookup) lookup plugin by Scott Buchanan . short_description: Gather items from 1Password description: - - M(community.general.onepassword_info) wraps the C(op) command line utility to fetch data about one or more 1Password items. - - A fatal error occurs if any of the items being searched for can not be found. - - Recommend using with the C(no_log) option to avoid logging the values of the secrets being retrieved. - - This module was called C(onepassword_facts) before Ansible 2.9, returning C(ansible_facts). - Note that the M(community.general.onepassword_info) module no longer returns C(ansible_facts)! - You must now use the C(register) option to use the facts in other tasks. + - M(community.general.onepassword_info) wraps the C(op) command line utility to fetch data about one or more 1Password items. + - A fatal error occurs if any of the items being searched for can not be found. + - Recommend using with the C(no_log) option to avoid logging the values of the secrets being retrieved. +extends_documentation_fragment: + - community.general.attributes + - community.general.attributes.info_module options: - search_terms: - type: list - elements: dict + search_terms: + type: list + elements: dict + description: + - A list of one or more search terms. + - Each search term can either be a simple string or it can be a dictionary for more control. + - When passing a simple string, O(search_terms[].field) is assumed to be V(password). + - When passing a dictionary, the following fields are available. + suboptions: + name: + type: str description: - - A list of one or more search terms. - - Each search term can either be a simple string or it can be a dictionary for more control. - - When passing a simple string, I(field) is assumed to be C(password). - - When passing a dictionary, the following fields are available. 
- suboptions: - name: - type: str - description: - - The name of the 1Password item to search for (required). - field: - type: str - description: - - The name of the field to search for within this item (optional, defaults to "password" (or "document" if the item has an attachment). - section: - type: str - description: - - The name of a section within this item containing the specified field (optional, will search all sections if not specified). - vault: - type: str - description: - - The name of the particular 1Password vault to search, useful if your 1Password user has access to multiple vaults (optional). - required: True - auto_login: - type: dict + - The name of the 1Password item to search for (required). + field: + type: str description: - - A dictionary containing authentication details. If this is set, M(community.general.onepassword_info) - will attempt to sign in to 1Password automatically. - - Without this option, you must have already logged in via the 1Password CLI before running Ansible. - - It is B(highly) recommended to store 1Password credentials in an Ansible Vault. Ensure that the key used to encrypt - the Ansible Vault is equal to or greater in strength than the 1Password master password. - suboptions: - subdomain: - type: str - description: - - 1Password subdomain name (.1password.com). - - If this is not specified, the most recent subdomain will be used. - username: - type: str - description: - - 1Password username. - - Only required for initial sign in. - master_password: - type: str - description: - - The master password for your subdomain. - - This is always required when specifying C(auto_login). - required: True - secret_key: - type: str - description: - - The secret key for your subdomain. - - Only required for initial sign in. 
- default: {} - required: False - cli_path: - type: path - description: Used to specify the exact path to the C(op) command line interface - required: False - default: 'op' -''' + - The name of the field to search for within this item (optional, defaults to V(password), or V(document) if the + item has an attachment). + section: + type: str + description: + - The name of a section within this item containing the specified field (optional, it searches all sections if not + specified). + vault: + type: str + description: + - The name of the particular 1Password vault to search, useful if your 1Password user has access to multiple vaults + (optional). + required: true + auto_login: + type: dict + description: + - A dictionary containing authentication details. If this is set, the module attempts to sign in to 1Password automatically. + - Without this option, you must have already logged in using the 1Password CLI before running Ansible. + - It is B(highly) recommended to store 1Password credentials in an Ansible Vault. Ensure that the key used to encrypt + the Ansible Vault is equal to or greater in strength than the 1Password master password. + suboptions: + subdomain: + type: str + description: + - 1Password subdomain name (V(subdomain).1password.com). + - If this is not specified, the most recent subdomain is used. + username: + type: str + description: + - 1Password username. + - Only required for initial sign in. + master_password: + type: str + description: + - The master password for your subdomain. + - This is always required when specifying O(auto_login). + required: true + secret_key: + type: str + description: + - The secret key for your subdomain. + - Only required for initial sign in. + required: false + cli_path: + type: path + description: Used to specify the exact path to the C(op) command line interface. 
+ required: false + default: 'op' +""" -EXAMPLES = ''' +EXAMPLES = r""" # Gather secrets from 1Password, assuming there is a 'password' field: - name: Get a password community.general.onepassword_info: search_terms: My 1Password item delegate_to: localhost register: my_1password_item - no_log: true # Don't want to log the secrets to the console! + no_log: true # Don't want to log the secrets to the console! # Gather secrets from 1Password, with more advanced search terms: - name: Get a password community.general.onepassword_info: search_terms: - - name: My 1Password item - field: Custom field name # optional, defaults to 'password' - section: Custom section name # optional, defaults to 'None' - vault: Name of the vault # optional, only necessary if there is more than 1 Vault available + - name: My 1Password item + field: Custom field name # optional, defaults to 'password' + section: Custom section name # optional, defaults to 'None' + vault: Name of the vault # optional, only necessary if there is more than 1 Vault available delegate_to: localhost register: my_1password_item - no_log: True # Don't want to log the secrets to the console! + no_log: true # Don't want to log the secrets to the console! # Gather secrets combining simple and advanced search terms to retrieve two items, one of which we fetch two # fields. In the first 'password' is fetched, as a field name is not specified (default behaviour) and in the @@ -121,39 +121,39 @@ EXAMPLES = ''' - name: Get a password community.general.onepassword_info: search_terms: - - My 1Password item # 'name' is optional when passing a simple string... 
- - name: My Other 1Password item # ...but it can also be set for consistency - - name: My 1Password item - field: Custom field name # optional, defaults to 'password' - section: Custom section name # optional, defaults to 'None' - vault: Name of the vault # optional, only necessary if there is more than 1 Vault available + - My 1Password item # 'name' is optional when passing a simple string... + - name: My Other 1Password item # ...but it can also be set for consistency + - name: My 1Password item + field: Custom field name # optional, defaults to 'password' + section: Custom section name # optional, defaults to 'None' + vault: Name of the vault # optional, only necessary if there is more than 1 Vault available - name: A 1Password item with document attachment delegate_to: localhost register: my_1password_item - no_log: true # Don't want to log the secrets to the console! + no_log: true # Don't want to log the secrets to the console! - name: Debug a password (for example) ansible.builtin.debug: msg: "{{ my_1password_item['onepassword']['My 1Password item'] }}" -''' +""" -RETURN = ''' ---- +RETURN = r""" # One or more dictionaries for each matching item from 1Password, along with the appropriate fields. # This shows the response you would expect to receive from the third example documented above. onepassword: - description: Dictionary of each 1password item matching the given search terms, shows what would be returned from the third example above. - returned: success - type: dict - sample: - "My 1Password item": - password: the value of this field - Custom field name: the value of this field - "My Other 1Password item": - password: the value of this field - "A 1Password item with document attachment": - document: the contents of the document attached to this item -''' + description: Dictionary of each 1password item matching the given search terms, shows what would be returned from the third + example above. 
+ returned: success + type: dict + sample: + "My 1Password item": + password: the value of this field + Custom field name: the value of this field + "My Other 1Password item": + password: the value of this field + "A 1Password item with document attachment": + document: the contents of the document attached to this item +""" import errno @@ -206,7 +206,7 @@ class OnePasswordInfo(object): def _parse_field(self, data_json, item_id, field_name, section_title=None): data = json.loads(data_json) - if ('documentAttributes' in data['details']): + if 'documentAttributes' in data['details']: # This is actually a document, let's fetch the document data instead! document = self._run(["get", "document", data['overview']['title']]) return {'document': document[1].strip()} @@ -216,7 +216,7 @@ class OnePasswordInfo(object): # Some types of 1Password items have a 'password' field directly alongside the 'fields' attribute, # not inside it, so we need to check there first. - if (field_name in data['details']): + if field_name in data['details']: return {field_name: data['details'][field_name]} # Otherwise we continue looking inside the 'fields' attribute for the specified field. 
@@ -372,7 +372,7 @@ def main(): username=dict(type='str'), master_password=dict(required=True, type='str', no_log=True), secret_key=dict(type='str', no_log=True), - ), default=None), + )), search_terms=dict(required=True, type='list', elements='dict'), ), supports_check_mode=True diff --git a/plugins/modules/remote_management/oneview/oneview_datacenter_info.py b/plugins/modules/oneview_datacenter_info.py similarity index 72% rename from plugins/modules/remote_management/oneview/oneview_datacenter_info.py rename to plugins/modules/oneview_datacenter_info.py index bf3e9a8772..cf9f10af79 100644 --- a/plugins/modules/remote_management/oneview/oneview_datacenter_info.py +++ b/plugins/modules/oneview_datacenter_info.py @@ -1,44 +1,45 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- # Copyright (c) 2016-2017 Hewlett Packard Enterprise Development LP -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: oneview_datacenter_info short_description: Retrieve information about the OneView Data Centers description: - - Retrieve information about the OneView Data Centers. - - This module was called C(oneview_datacenter_facts) before Ansible 2.9, returning C(ansible_facts). - Note that the M(community.general.oneview_datacenter_info) module no longer returns C(ansible_facts)! + - Retrieve information about the OneView Data Centers. 
requirements: - - "hpOneView >= 2.0.1" + - "hpOneView >= 2.0.1" author: - - Alex Monteiro (@aalexmonteiro) - - Madhav Bharadwaj (@madhav-bharadwaj) - - Priyanka Sood (@soodpr) - - Ricardo Galeno (@ricardogpsf) + - Alex Monteiro (@aalexmonteiro) + - Madhav Bharadwaj (@madhav-bharadwaj) + - Priyanka Sood (@soodpr) + - Ricardo Galeno (@ricardogpsf) +attributes: + check_mode: + version_added: 3.3.0 + # This was backported to 2.5.4 and 1.3.11 as well, since this was a bugfix options: - name: - description: - - Data Center name. - type: str - options: - description: - - "Retrieve additional information. Options available: 'visualContent'." - type: list - elements: str + name: + description: + - Data Center name. + type: str + options: + description: + - 'Retrieve additional information. Options available: V(visualContent).' + type: list + elements: str extends_documentation_fragment: -- community.general.oneview -- community.general.oneview.factsparams + - community.general.oneview + - community.general.oneview.factsparams + - community.general.attributes + - community.general.attributes.info_module +""" -''' - -EXAMPLES = ''' +EXAMPLES = r""" - name: Gather information about all Data Centers community.general.oneview_datacenter_info: hostname: 172.16.101.48 @@ -102,19 +103,19 @@ EXAMPLES = ''' - name: Print fetched information about Data Center Visual Content ansible.builtin.debug: msg: "{{ result.datacenter_visual_content }}" -''' +""" -RETURN = ''' +RETURN = r""" datacenters: - description: Has all the OneView information about the Data Centers. - returned: Always, but can be null. - type: dict + description: Has all the OneView information about the Data Centers. + returned: Always, but can be null. + type: dict datacenter_visual_content: - description: Has information about the Data Center Visual Content. - returned: When requested, but can be null. - type: dict -''' + description: Has information about the Data Center Visual Content. 
+ returned: When requested, but can be null. + type: dict +""" from ansible_collections.community.general.plugins.module_utils.oneview import OneViewModuleBase diff --git a/plugins/modules/remote_management/oneview/oneview_enclosure_info.py b/plugins/modules/oneview_enclosure_info.py similarity index 73% rename from plugins/modules/remote_management/oneview/oneview_enclosure_info.py rename to plugins/modules/oneview_enclosure_info.py index 18e245d617..b57c8210f4 100644 --- a/plugins/modules/remote_management/oneview/oneview_enclosure_info.py +++ b/plugins/modules/oneview_enclosure_info.py @@ -1,46 +1,46 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- -# Copyright: (c) 2016-2017, Hewlett Packard Enterprise Development LP -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# Copyright (c) 2016-2017, Hewlett Packard Enterprise Development LP +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: oneview_enclosure_info short_description: Retrieve information about one or more Enclosures description: - - Retrieve information about one or more of the Enclosures from OneView. - - This module was called C(oneview_enclosure_facts) before Ansible 2.9, returning C(ansible_facts). - Note that the M(community.general.oneview_enclosure_info) module no longer returns C(ansible_facts)! + - Retrieve information about one or more of the Enclosures from OneView. 
requirements: - - hpOneView >= 2.0.1 + - hpOneView >= 2.0.1 author: - - Felipe Bulsoni (@fgbulsoni) - - Thiago Miotto (@tmiotto) - - Adriane Cardozo (@adriane-cardozo) + - Felipe Bulsoni (@fgbulsoni) + - Thiago Miotto (@tmiotto) + - Adriane Cardozo (@adriane-cardozo) +attributes: + check_mode: + version_added: 3.3.0 + # This was backported to 2.5.4 and 1.3.11 as well, since this was a bugfix options: - name: - description: - - Enclosure name. - type: str - options: - description: - - "List with options to gather additional information about an Enclosure and related resources. - Options allowed: C(script), C(environmentalConfiguration), and C(utilization). For the option C(utilization), - you can provide specific parameters." - type: list - elements: raw + name: + description: + - Enclosure name. + type: str + options: + description: + - 'List with options to gather additional information about an Enclosure and related resources. Options allowed: V(script), + V(environmentalConfiguration), and V(utilization). For the option V(utilization), you can provide specific parameters.' 
+ type: list + elements: raw extends_documentation_fragment: -- community.general.oneview -- community.general.oneview.factsparams + - community.general.oneview + - community.general.oneview.factsparams + - community.general.attributes + - community.general.attributes.info_module +""" -''' - -EXAMPLES = ''' +EXAMPLES = r""" - name: Gather information about all Enclosures community.general.oneview_enclosure_info: hostname: 172.16.101.48 @@ -70,7 +70,7 @@ EXAMPLES = ''' delegate_to: localhost register: result -- name: Print fetched information about paginated, filtered ans sorted list of Enclosures +- name: Print fetched information about paginated, filtered and sorted list of Enclosures ansible.builtin.debug: msg: "{{ result.enclosures }}" @@ -93,9 +93,9 @@ EXAMPLES = ''' community.general.oneview_enclosure_info: name: Test-Enclosure options: - - script # optional - - environmentalConfiguration # optional - - utilization # optional + - script # optional + - environmentalConfiguration # optional + - utilization # optional hostname: 172.16.101.48 username: administrator password: my_password @@ -121,11 +121,11 @@ EXAMPLES = ''' msg: "{{ result.enclosure_utilization }}" - name: "Gather information about an Enclosure with temperature data at a resolution of one sample per day, between two - specified dates" + specified dates" community.general.oneview_enclosure_info: name: Test-Enclosure options: - - utilization: # optional + - utilization: # optional fields: AmbientTemperature filter: - startDate=2016-07-01T14:29:42.000Z @@ -147,29 +147,29 @@ EXAMPLES = ''' - name: Print fetched information about Enclosure Utilization ansible.builtin.debug: msg: "{{ result.enclosure_utilization }}" -''' +""" -RETURN = ''' +RETURN = r""" enclosures: - description: Has all the OneView information about the Enclosures. - returned: Always, but can be null. - type: dict + description: Has all the OneView information about the Enclosures. + returned: Always, but can be null. 
+ type: dict enclosure_script: - description: Has all the OneView information about the script of an Enclosure. - returned: When requested, but can be null. - type: str + description: Has all the OneView information about the script of an Enclosure. + returned: When requested, but can be null. + type: str enclosure_environmental_configuration: - description: Has all the OneView information about the environmental configuration of an Enclosure. - returned: When requested, but can be null. - type: dict + description: Has all the OneView information about the environmental configuration of an Enclosure. + returned: When requested, but can be null. + type: dict enclosure_utilization: - description: Has all the OneView information about the utilization of an Enclosure. - returned: When requested, but can be null. - type: dict -''' + description: Has all the OneView information about the utilization of an Enclosure. + returned: When requested, but can be null. + type: dict +""" from ansible_collections.community.general.plugins.module_utils.oneview import OneViewModuleBase diff --git a/plugins/modules/remote_management/oneview/oneview_ethernet_network.py b/plugins/modules/oneview_ethernet_network.py similarity index 81% rename from plugins/modules/remote_management/oneview/oneview_ethernet_network.py rename to plugins/modules/oneview_ethernet_network.py index 99b5d0fed9..1a50d9ea65 100644 --- a/plugins/modules/remote_management/oneview/oneview_ethernet_network.py +++ b/plugins/modules/oneview_ethernet_network.py @@ -1,45 +1,48 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- # Copyright (c) 2016-2017 Hewlett Packard Enterprise Development LP -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import (absolute_import, division, print_function) -__metaclass__ 
= type +from __future__ import annotations -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: oneview_ethernet_network short_description: Manage OneView Ethernet Network resources description: - - Provides an interface to manage Ethernet Network resources. Can create, update, or delete. + - Provides an interface to manage Ethernet Network resources. Can create, update, or delete. requirements: - - hpOneView >= 3.1.0 + - hpOneView >= 3.1.0 author: - - Felipe Bulsoni (@fgbulsoni) - - Thiago Miotto (@tmiotto) - - Adriane Cardozo (@adriane-cardozo) + - Felipe Bulsoni (@fgbulsoni) + - Thiago Miotto (@tmiotto) + - Adriane Cardozo (@adriane-cardozo) +attributes: + check_mode: + support: none + diff_mode: + support: none options: - state: - description: - - Indicates the desired state for the Ethernet Network resource. - - C(present) will ensure data properties are compliant with OneView. - - C(absent) will remove the resource from OneView, if it exists. - - C(default_bandwidth_reset) will reset the network connection template to the default. - type: str - default: present - choices: [present, absent, default_bandwidth_reset] - data: - description: - - List with Ethernet Network properties. - type: dict - required: true + state: + description: + - Indicates the desired state for the Ethernet Network resource. + - V(present) ensures data properties are compliant with OneView. + - V(absent) removes the resource from OneView, if it exists. + - V(default_bandwidth_reset) resets the network connection template to the default. + type: str + default: present + choices: [present, absent, default_bandwidth_reset] + data: + description: + - List with Ethernet Network properties. 
+ type: dict + required: true extends_documentation_fragment: -- community.general.oneview -- community.general.oneview.validateetag + - community.general.oneview + - community.general.oneview.validateetag + - community.general.attributes +""" -''' - -EXAMPLES = ''' +EXAMPLES = r""" - name: Ensure that the Ethernet Network is present using the default configuration community.general.oneview_ethernet_network: config: '/etc/oneview/oneview_config.json' @@ -57,8 +60,8 @@ EXAMPLES = ''' name: 'Test Ethernet Network' purpose: Management bandwidth: - maximumBandwidth: 3000 - typicalBandwidth: 2000 + maximumBandwidth: 3000 + typicalBandwidth: 2000 delegate_to: localhost - name: Ensure that the Ethernet Network is present with name 'Renamed Ethernet Network' @@ -100,24 +103,24 @@ EXAMPLES = ''' data: name: 'Test Ethernet Network' delegate_to: localhost -''' +""" -RETURN = ''' +RETURN = r""" ethernet_network: - description: Has the facts about the Ethernet Networks. - returned: On state 'present'. Can be null. - type: dict + description: Has the facts about the Ethernet Networks. + returned: On O(state=present). Can be null. + type: dict ethernet_network_bulk: - description: Has the facts about the Ethernet Networks affected by the bulk insert. - returned: When 'vlanIdRange' attribute is in data argument. Can be null. - type: dict + description: Has the facts about the Ethernet Networks affected by the bulk insert. + returned: When V(vlanIdRange) attribute is in O(data) argument. Can be null. + type: dict ethernet_network_connection_template: - description: Has the facts about the Ethernet Network Connection Template. - returned: On state 'default_bandwidth_reset'. Can be null. - type: dict -''' + description: Has the facts about the Ethernet Network Connection Template. + returned: On O(state=default_bandwidth_reset). Can be null. 
+ type: dict +""" from ansible_collections.community.general.plugins.module_utils.oneview import OneViewModuleBase, OneViewModuleResourceNotFound diff --git a/plugins/modules/remote_management/oneview/oneview_ethernet_network_info.py b/plugins/modules/oneview_ethernet_network_info.py similarity index 72% rename from plugins/modules/remote_management/oneview/oneview_ethernet_network_info.py rename to plugins/modules/oneview_ethernet_network_info.py index f1b55165b1..9528323fcf 100644 --- a/plugins/modules/remote_management/oneview/oneview_ethernet_network_info.py +++ b/plugins/modules/oneview_ethernet_network_info.py @@ -1,43 +1,44 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- # Copyright (c) 2016-2017 Hewlett Packard Enterprise Development LP -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: oneview_ethernet_network_info short_description: Retrieve the information about one or more of the OneView Ethernet Networks description: - - Retrieve the information about one or more of the Ethernet Networks from OneView. - - This module was called C(oneview_ethernet_network_facts) before Ansible 2.9, returning C(ansible_facts). - Note that the M(community.general.oneview_ethernet_network_info) module no longer returns C(ansible_facts)! + - Retrieve the information about one or more of the Ethernet Networks from OneView. 
requirements: - - hpOneView >= 2.0.1 + - hpOneView >= 2.0.1 author: - - Felipe Bulsoni (@fgbulsoni) - - Thiago Miotto (@tmiotto) - - Adriane Cardozo (@adriane-cardozo) + - Felipe Bulsoni (@fgbulsoni) + - Thiago Miotto (@tmiotto) + - Adriane Cardozo (@adriane-cardozo) +attributes: + check_mode: + version_added: 3.3.0 + # This was backported to 2.5.4 and 1.3.11 as well, since this was a bugfix options: - name: - description: - - Ethernet Network name. - type: str - options: - description: - - "List with options to gather additional information about an Ethernet Network and related resources. - Options allowed: C(associatedProfiles) and C(associatedUplinkGroups)." - type: list - elements: str + name: + description: + - Ethernet Network name. + type: str + options: + description: + - 'List with options to gather additional information about an Ethernet Network and related resources. Options allowed: + V(associatedProfiles) and V(associatedUplinkGroups).' + type: list + elements: str extends_documentation_fragment: -- community.general.oneview -- community.general.oneview.factsparams + - community.general.oneview + - community.general.oneview.factsparams + - community.general.attributes + - community.general.attributes.info_module +""" -''' - -EXAMPLES = ''' +EXAMPLES = r""" - name: Gather information about all Ethernet Networks community.general.oneview_ethernet_network_info: config: /etc/oneview/oneview_config.json @@ -91,24 +92,24 @@ EXAMPLES = ''' - name: Print fetched information about Ethernet Network Associated Uplink Groups ansible.builtin.debug: msg: "{{ result.enet_associated_uplink_groups }}" -''' +""" -RETURN = ''' +RETURN = r""" ethernet_networks: - description: Has all the OneView information about the Ethernet Networks. - returned: Always, but can be null. - type: dict + description: Has all the OneView information about the Ethernet Networks. + returned: Always, but can be null. 
+ type: dict enet_associated_profiles: - description: Has all the OneView information about the profiles which are using the Ethernet network. - returned: When requested, but can be null. - type: dict + description: Has all the OneView information about the profiles which are using the Ethernet network. + returned: When requested, but can be null. + type: dict enet_associated_uplink_groups: - description: Has all the OneView information about the uplink sets which are using the Ethernet network. - returned: When requested, but can be null. - type: dict -''' + description: Has all the OneView information about the uplink sets which are using the Ethernet network. + returned: When requested, but can be null. + type: dict +""" from ansible_collections.community.general.plugins.module_utils.oneview import OneViewModuleBase diff --git a/plugins/modules/remote_management/oneview/oneview_fc_network.py b/plugins/modules/oneview_fc_network.py similarity index 72% rename from plugins/modules/remote_management/oneview/oneview_fc_network.py rename to plugins/modules/oneview_fc_network.py index 59984ee8b6..0b20a96625 100644 --- a/plugins/modules/remote_management/oneview/oneview_fc_network.py +++ b/plugins/modules/oneview_fc_network.py @@ -1,42 +1,45 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- # Copyright (c) 2016-2017 Hewlett Packard Enterprise Development LP -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: oneview_fc_network -short_description: Manage OneView Fibre Channel Network resources. 
+short_description: Manage OneView Fibre Channel Network resources description: - - Provides an interface to manage Fibre Channel Network resources. Can create, update, and delete. + - Provides an interface to manage Fibre Channel Network resources. Can create, update, and delete. requirements: - - "hpOneView >= 4.0.0" + - "hpOneView >= 4.0.0" author: "Felipe Bulsoni (@fgbulsoni)" +attributes: + check_mode: + support: none + diff_mode: + support: none options: - state: - description: - - Indicates the desired state for the Fibre Channel Network resource. - C(present) will ensure data properties are compliant with OneView. - C(absent) will remove the resource from OneView, if it exists. - type: str - choices: ['present', 'absent'] - required: true - data: - description: - - List with the Fibre Channel Network properties. - type: dict - required: true + state: + description: + - Indicates the desired state for the Fibre Channel Network resource. + - V(present) ensures data properties are compliant with OneView. + - V(absent) removes the resource from OneView, if it exists. + type: str + choices: ['present', 'absent'] + required: true + data: + description: + - List with the Fibre Channel Network properties. + type: dict + required: true extends_documentation_fragment: -- community.general.oneview -- community.general.oneview.validateetag + - community.general.oneview + - community.general.oneview.validateetag + - community.general.attributes +""" -''' - -EXAMPLES = ''' +EXAMPLES = r""" - name: Ensure that the Fibre Channel Network is present using the default configuration community.general.oneview_fc_network: config: "{{ config_file_path }}" @@ -68,14 +71,14 @@ EXAMPLES = ''' state: absent data: name: 'New FC Network' -''' +""" -RETURN = ''' +RETURN = r""" fc_network: - description: Has the facts about the managed OneView FC Network. - returned: On state 'present'. Can be null. - type: dict -''' + description: Has the facts about the managed OneView FC Network. 
+ returned: On O(state=present). Can be null. + type: dict +""" from ansible_collections.community.general.plugins.module_utils.oneview import OneViewModuleBase diff --git a/plugins/modules/remote_management/oneview/oneview_fc_network_info.py b/plugins/modules/oneview_fc_network_info.py similarity index 67% rename from plugins/modules/remote_management/oneview/oneview_fc_network_info.py rename to plugins/modules/oneview_fc_network_info.py index 40fed8d017..525659e207 100644 --- a/plugins/modules/remote_management/oneview/oneview_fc_network_info.py +++ b/plugins/modules/oneview_fc_network_info.py @@ -1,38 +1,39 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- # Copyright (c) 2016-2017 Hewlett Packard Enterprise Development LP -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: oneview_fc_network_info short_description: Retrieve the information about one or more of the OneView Fibre Channel Networks description: - - Retrieve the information about one or more of the Fibre Channel Networks from OneView. - - This module was called C(oneview_fc_network_facts) before Ansible 2.9, returning C(ansible_facts). - Note that the M(community.general.oneview_fc_network_info) module no longer returns C(ansible_facts)! + - Retrieve the information about one or more of the Fibre Channel Networks from OneView. 
requirements: - - hpOneView >= 2.0.1 + - hpOneView >= 2.0.1 author: - - Felipe Bulsoni (@fgbulsoni) - - Thiago Miotto (@tmiotto) - - Adriane Cardozo (@adriane-cardozo) + - Felipe Bulsoni (@fgbulsoni) + - Thiago Miotto (@tmiotto) + - Adriane Cardozo (@adriane-cardozo) +attributes: + check_mode: + version_added: 3.3.0 + # This was backported to 2.5.4 and 1.3.11 as well, since this was a bugfix options: - name: - description: - - Fibre Channel Network name. - type: str + name: + description: + - Fibre Channel Network name. + type: str extends_documentation_fragment: -- community.general.oneview -- community.general.oneview.factsparams + - community.general.oneview + - community.general.oneview.factsparams + - community.general.attributes + - community.general.attributes.info_module +""" -''' - -EXAMPLES = ''' +EXAMPLES = r""" - name: Gather information about all Fibre Channel Networks community.general.oneview_fc_network_info: config: /etc/oneview/oneview_config.json @@ -68,14 +69,14 @@ EXAMPLES = ''' - name: Print fetched information about Fibre Channel Network found by name ansible.builtin.debug: msg: "{{ result.fc_networks }}" -''' +""" -RETURN = ''' +RETURN = r""" fc_networks: - description: Has all the OneView information about the Fibre Channel Networks. - returned: Always, but can be null. - type: dict -''' + description: Has all the OneView information about the Fibre Channel Networks. + returned: Always, but can be null. 
+ type: dict +""" from ansible_collections.community.general.plugins.module_utils.oneview import OneViewModuleBase @@ -84,8 +85,8 @@ class FcNetworkInfoModule(OneViewModuleBase): def __init__(self): argument_spec = dict( - name=dict(required=False, type='str'), - params=dict(required=False, type='dict') + name=dict(type='str'), + params=dict(type='dict') ) super(FcNetworkInfoModule, self).__init__( diff --git a/plugins/modules/remote_management/oneview/oneview_fcoe_network.py b/plugins/modules/oneview_fcoe_network.py similarity index 71% rename from plugins/modules/remote_management/oneview/oneview_fcoe_network.py rename to plugins/modules/oneview_fcoe_network.py index ef24f8fc8e..0212ea0b64 100644 --- a/plugins/modules/remote_management/oneview/oneview_fcoe_network.py +++ b/plugins/modules/oneview_fcoe_network.py @@ -1,43 +1,46 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- # Copyright (c) 2016-2017 Hewlett Packard Enterprise Development LP -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: oneview_fcoe_network short_description: Manage OneView FCoE Network resources description: - - Provides an interface to manage FCoE Network resources. Can create, update, or delete. + - Provides an interface to manage FCoE Network resources. Can create, update, or delete. requirements: - - "python >= 2.7.9" - - "hpOneView >= 4.0.0" + - "Python >= 2.7.9" + - "hpOneView >= 4.0.0" author: "Felipe Bulsoni (@fgbulsoni)" +attributes: + check_mode: + support: none + diff_mode: + support: none options: - state: - description: - - Indicates the desired state for the FCoE Network resource. 
- C(present) will ensure data properties are compliant with OneView. - C(absent) will remove the resource from OneView, if it exists. - type: str - default: present - choices: ['present', 'absent'] - data: - description: - - List with FCoE Network properties. - type: dict - required: true + state: + description: + - Indicates the desired state for the FCoE Network resource. + - V(present) ensures data properties are compliant with OneView. + - V(absent) removes the resource from OneView, if it exists. + type: str + default: present + choices: ['present', 'absent'] + data: + description: + - List with FCoE Network properties. + type: dict + required: true extends_documentation_fragment: -- community.general.oneview -- community.general.oneview.validateetag + - community.general.oneview + - community.general.oneview.validateetag + - community.general.attributes +""" -''' - -EXAMPLES = ''' +EXAMPLES = r""" - name: Ensure that FCoE Network is present using the default configuration community.general.oneview_fcoe_network: config: '/etc/oneview/oneview_config.json' @@ -65,14 +68,14 @@ EXAMPLES = ''' data: name: New FCoE Network delegate_to: localhost -''' +""" -RETURN = ''' +RETURN = r""" fcoe_network: - description: Has the facts about the OneView FCoE Networks. - returned: On state 'present'. Can be null. - type: dict -''' + description: Has the facts about the OneView FCoE Networks. + returned: On O(state=present). Can be null. 
+ type: dict +""" from ansible_collections.community.general.plugins.module_utils.oneview import OneViewModuleBase diff --git a/plugins/modules/remote_management/oneview/oneview_fcoe_network_info.py b/plugins/modules/oneview_fcoe_network_info.py similarity index 70% rename from plugins/modules/remote_management/oneview/oneview_fcoe_network_info.py rename to plugins/modules/oneview_fcoe_network_info.py index e581bff862..b1b4f49fda 100644 --- a/plugins/modules/remote_management/oneview/oneview_fcoe_network_info.py +++ b/plugins/modules/oneview_fcoe_network_info.py @@ -1,37 +1,38 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- # Copyright (c) 2016-2017 Hewlett Packard Enterprise Development LP -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: oneview_fcoe_network_info short_description: Retrieve the information about one or more of the OneView FCoE Networks description: - - Retrieve the information about one or more of the FCoE Networks from OneView. - - This module was called C(oneview_fcoe_network_facts) before Ansible 2.9, returning C(ansible_facts). - Note that the M(community.general.oneview_fcoe_network_info) module no longer returns C(ansible_facts)! + - Retrieve the information about one or more of the FCoE Networks from OneView. 
requirements: - - hpOneView >= 2.0.1 + - hpOneView >= 2.0.1 author: - - Felipe Bulsoni (@fgbulsoni) - - Thiago Miotto (@tmiotto) - - Adriane Cardozo (@adriane-cardozo) + - Felipe Bulsoni (@fgbulsoni) + - Thiago Miotto (@tmiotto) + - Adriane Cardozo (@adriane-cardozo) +attributes: + check_mode: + version_added: 3.3.0 + # This was backported to 2.5.4 and 1.3.11 as well, since this was a bugfix options: - name: - description: - - FCoE Network name. - type: str + name: + description: + - FCoE Network name. + type: str extends_documentation_fragment: -- community.general.oneview -- community.general.oneview.factsparams + - community.general.oneview + - community.general.oneview.factsparams + - community.general.attributes + - community.general.attributes.info_module +""" -''' - -EXAMPLES = ''' +EXAMPLES = r""" - name: Gather information about all FCoE Networks community.general.oneview_fcoe_network_info: config: /etc/oneview/oneview_config.json @@ -67,14 +68,14 @@ EXAMPLES = ''' - name: Print fetched information about FCoE Network found by name ansible.builtin.debug: msg: "{{ result.fcoe_networks }}" -''' +""" -RETURN = ''' +RETURN = r""" fcoe_networks: - description: Has all the OneView information about the FCoE Networks. - returned: Always, but can be null. - type: dict -''' + description: Has all the OneView information about the FCoE Networks. + returned: Always, but can be null. 
+ type: dict +""" from ansible_collections.community.general.plugins.module_utils.oneview import OneViewModuleBase diff --git a/plugins/modules/remote_management/oneview/oneview_logical_interconnect_group.py b/plugins/modules/oneview_logical_interconnect_group.py similarity index 75% rename from plugins/modules/remote_management/oneview/oneview_logical_interconnect_group.py rename to plugins/modules/oneview_logical_interconnect_group.py index e833f9e092..9f33726e8c 100644 --- a/plugins/modules/remote_management/oneview/oneview_logical_interconnect_group.py +++ b/plugins/modules/oneview_logical_interconnect_group.py @@ -1,45 +1,48 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- -# Copyright: (c) 2016-2017, Hewlett Packard Enterprise Development LP -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# Copyright (c) 2016-2017, Hewlett Packard Enterprise Development LP +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: oneview_logical_interconnect_group short_description: Manage OneView Logical Interconnect Group resources description: - - Provides an interface to manage Logical Interconnect Group resources. Can create, update, or delete. + - Provides an interface to manage Logical Interconnect Group resources. Can create, update, or delete. 
requirements: - - hpOneView >= 4.0.0 + - hpOneView >= 4.0.0 author: - - Felipe Bulsoni (@fgbulsoni) - - Thiago Miotto (@tmiotto) - - Adriane Cardozo (@adriane-cardozo) + - Felipe Bulsoni (@fgbulsoni) + - Thiago Miotto (@tmiotto) + - Adriane Cardozo (@adriane-cardozo) +attributes: + check_mode: + support: none + diff_mode: + support: none options: - state: - description: - - Indicates the desired state for the Logical Interconnect Group resource. - C(absent) will remove the resource from OneView, if it exists. - C(present) will ensure data properties are compliant with OneView. - type: str - choices: [absent, present] - default: present - data: - description: - - List with the Logical Interconnect Group properties. - type: dict - required: true + state: + description: + - Indicates the desired state for the Logical Interconnect Group resource. + - V(absent) removes the resource from OneView, if it exists. + - V(present) ensures data properties are compliant with OneView. + type: str + choices: [absent, present] + default: present + data: + description: + - List with the Logical Interconnect Group properties. 
+ type: dict + required: true extends_documentation_fragment: -- community.general.oneview -- community.general.oneview.validateetag + - community.general.oneview + - community.general.oneview.validateetag + - community.general.attributes +""" -''' - -EXAMPLES = ''' +EXAMPLES = r""" - name: Ensure that the Logical Interconnect Group is present community.general.oneview_logical_interconnect_group: config: /etc/oneview/oneview_config.json @@ -50,13 +53,13 @@ EXAMPLES = ''' enclosureType: C7000 interconnectMapTemplate: interconnectMapEntryTemplates: - - logicalDownlinkUri: ~ + - logicalDownlinkUri: logicalLocation: - locationEntries: - - relativeValue: 1 - type: Bay - - relativeValue: 1 - type: Enclosure + locationEntries: + - relativeValue: 1 + type: Bay + - relativeValue: 1 + type: Enclosure permittedInterconnectTypeName: HP VC Flex-10/10D Module # Alternatively you can inform permittedInterconnectTypeUri delegate_to: localhost @@ -88,14 +91,14 @@ EXAMPLES = ''' data: name: New Logical Interconnect Group delegate_to: localhost -''' +""" -RETURN = ''' +RETURN = r""" logical_interconnect_group: - description: Has the facts about the OneView Logical Interconnect Group. - returned: On state 'present'. Can be null. - type: dict -''' + description: Has the facts about the OneView Logical Interconnect Group. + returned: On O(state=present). Can be null. 
+ type: dict +""" from ansible_collections.community.general.plugins.module_utils.oneview import OneViewModuleBase, OneViewModuleResourceNotFound diff --git a/plugins/modules/remote_management/oneview/oneview_logical_interconnect_group_info.py b/plugins/modules/oneview_logical_interconnect_group_info.py similarity index 71% rename from plugins/modules/remote_management/oneview/oneview_logical_interconnect_group_info.py rename to plugins/modules/oneview_logical_interconnect_group_info.py index 436dd5d62b..25a278b15a 100644 --- a/plugins/modules/remote_management/oneview/oneview_logical_interconnect_group_info.py +++ b/plugins/modules/oneview_logical_interconnect_group_info.py @@ -1,38 +1,39 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- -# Copyright: (c) 2016-2017, Hewlett Packard Enterprise Development LP -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# Copyright (c) 2016-2017, Hewlett Packard Enterprise Development LP +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: oneview_logical_interconnect_group_info short_description: Retrieve information about one or more of the OneView Logical Interconnect Groups description: - - Retrieve information about one or more of the Logical Interconnect Groups from OneView - - This module was called C(oneview_logical_interconnect_group_facts) before Ansible 2.9, returning C(ansible_facts). - Note that the M(community.general.oneview_logical_interconnect_group_info) module no longer returns C(ansible_facts)! + - Retrieve information about one or more of the Logical Interconnect Groups from OneView. 
requirements: - - hpOneView >= 2.0.1 + - hpOneView >= 2.0.1 author: - - Felipe Bulsoni (@fgbulsoni) - - Thiago Miotto (@tmiotto) - - Adriane Cardozo (@adriane-cardozo) + - Felipe Bulsoni (@fgbulsoni) + - Thiago Miotto (@tmiotto) + - Adriane Cardozo (@adriane-cardozo) +attributes: + check_mode: + version_added: 3.3.0 + # This was backported to 2.5.4 and 1.3.11 as well, since this was a bugfix options: - name: - description: - - Logical Interconnect Group name. - type: str + name: + description: + - Logical Interconnect Group name. + type: str extends_documentation_fragment: -- community.general.oneview -- community.general.oneview.factsparams + - community.general.oneview + - community.general.oneview.factsparams + - community.general.attributes + - community.general.attributes.info_module +""" -''' - -EXAMPLES = ''' +EXAMPLES = r""" - name: Gather information about all Logical Interconnect Groups community.general.oneview_logical_interconnect_group_info: hostname: 172.16.101.48 @@ -80,14 +81,14 @@ EXAMPLES = ''' - name: Print fetched information about Logical Interconnect Group found by name ansible.builtin.debug: msg: "{{ result.logical_interconnect_groups }}" -''' +""" -RETURN = ''' +RETURN = r""" logical_interconnect_groups: - description: Has all the OneView information about the Logical Interconnect Groups. - returned: Always, but can be null. - type: dict -''' + description: Has all the OneView information about the Logical Interconnect Groups. + returned: Always, but can be null. 
+ type: dict +""" from ansible_collections.community.general.plugins.module_utils.oneview import OneViewModuleBase diff --git a/plugins/modules/remote_management/oneview/oneview_network_set.py b/plugins/modules/oneview_network_set.py similarity index 76% rename from plugins/modules/remote_management/oneview/oneview_network_set.py rename to plugins/modules/oneview_network_set.py index 3a2632b765..a7a9592a5b 100644 --- a/plugins/modules/remote_management/oneview/oneview_network_set.py +++ b/plugins/modules/oneview_network_set.py @@ -1,45 +1,48 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- # Copyright (c) 2016-2017 Hewlett Packard Enterprise Development LP -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: oneview_network_set short_description: Manage HPE OneView Network Set resources description: - - Provides an interface to manage Network Set resources. Can create, update, or delete. + - Provides an interface to manage Network Set resources. Can create, update, or delete. requirements: - - hpOneView >= 4.0.0 + - hpOneView >= 4.0.0 author: - - Felipe Bulsoni (@fgbulsoni) - - Thiago Miotto (@tmiotto) - - Adriane Cardozo (@adriane-cardozo) + - Felipe Bulsoni (@fgbulsoni) + - Thiago Miotto (@tmiotto) + - Adriane Cardozo (@adriane-cardozo) +attributes: + check_mode: + support: none + diff_mode: + support: none options: - state: - description: - - Indicates the desired state for the Network Set resource. - - C(present) will ensure data properties are compliant with OneView. - - C(absent) will remove the resource from OneView, if it exists. 
- type: str - default: present - choices: ['present', 'absent'] - data: - description: - - List with the Network Set properties. - type: dict - required: true + state: + description: + - Indicates the desired state for the Network Set resource. + - V(present) ensures data properties are compliant with OneView. + - V(absent) removes the resource from OneView, if it exists. + type: str + default: present + choices: ['present', 'absent'] + data: + description: + - List with the Network Set properties. + type: dict + required: true extends_documentation_fragment: -- community.general.oneview -- community.general.oneview.validateetag + - community.general.oneview + - community.general.oneview.validateetag + - community.general.attributes +""" -''' - -EXAMPLES = ''' +EXAMPLES = r""" - name: Create a Network Set community.general.oneview_network_set: config: /etc/oneview/oneview_config.json @@ -47,8 +50,8 @@ EXAMPLES = ''' data: name: OneViewSDK Test Network Set networkUris: - - Test Ethernet Network_1 # can be a name - - /rest/ethernet-networks/e4360c9d-051d-4931-b2aa-7de846450dd8 # or a URI + - Test Ethernet Network_1 # can be a name + - /rest/ethernet-networks/e4360c9d-051d-4931-b2aa-7de846450dd8 # or a URI delegate_to: localhost - name: Update the Network Set name to 'OneViewSDK Test Network Set - Renamed' and change the associated networks @@ -67,7 +70,7 @@ EXAMPLES = ''' config: /etc/oneview/oneview_config.json state: absent data: - name: OneViewSDK Test Network Set - Renamed + name: OneViewSDK Test Network Set - Renamed delegate_to: localhost - name: Update the Network set with two scopes @@ -80,14 +83,14 @@ EXAMPLES = ''' - /rest/scopes/01SC123456 - /rest/scopes/02SC123456 delegate_to: localhost -''' +""" -RETURN = ''' +RETURN = r""" network_set: - description: Has the facts about the Network Set. - returned: On state 'present', but can be null. - type: dict -''' + description: Has the facts about the Network Set. + returned: On O(state=present), but can be null. 
+ type: dict +""" from ansible_collections.community.general.plugins.module_utils.oneview import OneViewModuleBase, OneViewModuleResourceNotFound diff --git a/plugins/modules/remote_management/oneview/oneview_network_set_info.py b/plugins/modules/oneview_network_set_info.py similarity index 74% rename from plugins/modules/remote_management/oneview/oneview_network_set_info.py rename to plugins/modules/oneview_network_set_info.py index 2d610f2b57..4b413f278e 100644 --- a/plugins/modules/remote_management/oneview/oneview_network_set_info.py +++ b/plugins/modules/oneview_network_set_info.py @@ -1,46 +1,46 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- # Copyright (c) 2016-2017 Hewlett Packard Enterprise Development LP -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: oneview_network_set_info short_description: Retrieve information about the OneView Network Sets description: - - Retrieve information about the Network Sets from OneView. - - This module was called C(oneview_network_set_facts) before Ansible 2.9, returning C(ansible_facts). - Note that the M(community.general.oneview_network_set_info) module no longer returns C(ansible_facts)! + - Retrieve information about the Network Sets from OneView. 
requirements: - - hpOneView >= 2.0.1 + - hpOneView >= 2.0.1 author: - - Felipe Bulsoni (@fgbulsoni) - - Thiago Miotto (@tmiotto) - - Adriane Cardozo (@adriane-cardozo) + - Felipe Bulsoni (@fgbulsoni) + - Thiago Miotto (@tmiotto) + - Adriane Cardozo (@adriane-cardozo) +attributes: + check_mode: + version_added: 3.3.0 + # This was backported to 2.5.4 and 1.3.11 as well, since this was a bugfix options: - name: - description: - - Network Set name. - type: str + name: + description: + - Network Set name. + type: str - options: - description: - - "List with options to gather information about Network Set. - Option allowed: C(withoutEthernet). - The option C(withoutEthernet) retrieves the list of network_sets excluding Ethernet networks." - type: list - elements: str + options: + description: + - 'List with options to gather information about Network Set. Option allowed: V(withoutEthernet). The option V(withoutEthernet) + retrieves the list of network_sets excluding Ethernet networks.' + type: list + elements: str extends_documentation_fragment: -- community.general.oneview -- community.general.oneview.factsparams + - community.general.oneview + - community.general.oneview.factsparams + - community.general.attributes + - community.general.attributes.info_module +""" -''' - -EXAMPLES = ''' +EXAMPLES = r""" - name: Gather information about all Network Sets community.general.oneview_network_set_info: hostname: 172.16.101.48 @@ -81,7 +81,7 @@ EXAMPLES = ''' password: my_password api_version: 500 options: - - withoutEthernet + - withoutEthernet no_log: true delegate_to: localhost register: result @@ -113,7 +113,7 @@ EXAMPLES = ''' api_version: 500 name: Name of the Network Set options: - - withoutEthernet + - withoutEthernet no_log: true delegate_to: localhost register: result @@ -121,14 +121,14 @@ EXAMPLES = ''' - name: Print fetched information about Network Set found by name, excluding Ethernet networks ansible.builtin.debug: msg: "{{ result.network_sets }}" -''' +""" 
-RETURN = ''' +RETURN = r""" network_sets: - description: Has all the OneView information about the Network Sets. - returned: Always, but can be empty. - type: dict -''' + description: Has all the OneView information about the Network Sets. + returned: Always, but can be empty. + type: dict +""" from ansible_collections.community.general.plugins.module_utils.oneview import OneViewModuleBase diff --git a/plugins/modules/remote_management/oneview/oneview_san_manager.py b/plugins/modules/oneview_san_manager.py similarity index 83% rename from plugins/modules/remote_management/oneview/oneview_san_manager.py rename to plugins/modules/oneview_san_manager.py index 20870a31d5..105aca72ac 100644 --- a/plugins/modules/remote_management/oneview/oneview_san_manager.py +++ b/plugins/modules/oneview_san_manager.py @@ -1,46 +1,49 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- # Copyright (c) 2016-2017 Hewlett Packard Enterprise Development LP -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: oneview_san_manager short_description: Manage OneView SAN Manager resources description: - - Provides an interface to manage SAN Manager resources. Can create, update, or delete. + - Provides an interface to manage SAN Manager resources. Can create, update, or delete. 
requirements: - - hpOneView >= 3.1.1 + - hpOneView >= 3.1.1 author: - - Felipe Bulsoni (@fgbulsoni) - - Thiago Miotto (@tmiotto) - - Adriane Cardozo (@adriane-cardozo) + - Felipe Bulsoni (@fgbulsoni) + - Thiago Miotto (@tmiotto) + - Adriane Cardozo (@adriane-cardozo) +attributes: + check_mode: + support: none + diff_mode: + support: none options: - state: - description: - - Indicates the desired state for the Uplink Set resource. - - C(present) ensures data properties are compliant with OneView. - - C(absent) removes the resource from OneView, if it exists. - - C(connection_information_set) updates the connection information for the SAN Manager. This operation is non-idempotent. - type: str - default: present - choices: [present, absent, connection_information_set] - data: - description: - - List with SAN Manager properties. - type: dict - required: true + state: + description: + - Indicates the desired state for the Uplink Set resource. + - V(present) ensures data properties are compliant with OneView. + - V(absent) removes the resource from OneView, if it exists. + - V(connection_information_set) updates the connection information for the SAN Manager. This operation is non-idempotent. + type: str + default: present + choices: [present, absent, connection_information_set] + data: + description: + - List with SAN Manager properties. 
+ type: dict + required: true extends_documentation_fragment: -- community.general.oneview -- community.general.oneview.validateetag + - community.general.oneview + - community.general.oneview.validateetag + - community.general.attributes +""" -''' - -EXAMPLES = ''' +EXAMPLES = r""" - name: Creates a Device Manager for the Brocade SAN provider with the given hostname and credentials community.general.oneview_san_manager: config: /etc/oneview/oneview_config.json @@ -116,14 +119,14 @@ EXAMPLES = ''' data: name: '172.18.15.1' delegate_to: localhost -''' +""" -RETURN = ''' +RETURN = r""" san_manager: - description: Has the OneView facts about the SAN Manager. - returned: On state 'present'. Can be null. - type: dict -''' + description: Has the OneView facts about the SAN Manager. + returned: On O(state=present). Can be null. + type: dict +""" from ansible_collections.community.general.plugins.module_utils.oneview import OneViewModuleBase, OneViewModuleValueError diff --git a/plugins/modules/remote_management/oneview/oneview_san_manager_info.py b/plugins/modules/oneview_san_manager_info.py similarity index 65% rename from plugins/modules/remote_management/oneview/oneview_san_manager_info.py rename to plugins/modules/oneview_san_manager_info.py index 284371cafc..e158a40533 100644 --- a/plugins/modules/remote_management/oneview/oneview_san_manager_info.py +++ b/plugins/modules/oneview_san_manager_info.py @@ -1,45 +1,46 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- # Copyright (c) 2016-2017 Hewlett Packard Enterprise Development LP -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: 
oneview_san_manager_info short_description: Retrieve information about one or more of the OneView SAN Managers description: - - Retrieve information about one or more of the SAN Managers from OneView - - This module was called C(oneview_san_manager_facts) before Ansible 2.9, returning C(ansible_facts). - Note that the M(community.general.oneview_san_manager_info) module no longer returns C(ansible_facts)! + - Retrieve information about one or more of the SAN Managers from OneView. requirements: - - hpOneView >= 2.0.1 + - hpOneView >= 2.0.1 author: - - Felipe Bulsoni (@fgbulsoni) - - Thiago Miotto (@tmiotto) - - Adriane Cardozo (@adriane-cardozo) + - Felipe Bulsoni (@fgbulsoni) + - Thiago Miotto (@tmiotto) + - Adriane Cardozo (@adriane-cardozo) +attributes: + check_mode: + version_added: 3.3.0 + # This was backported to 2.5.4 and 1.3.11 as well, since this was a bugfix options: - provider_display_name: - description: - - Provider Display Name. - type: str - params: - description: - - List of params to delimit, filter and sort the list of resources. - - "params allowed: - - C(start): The first item to return, using 0-based indexing. - - C(count): The number of resources to return. - - C(query): A general query string to narrow the list of resources returned. - - C(sort): The sort order of the returned data set." - type: dict + provider_display_name: + description: + - Provider Display Name. + type: str + params: + description: + - List of params to delimit, filter and sort the list of resources. + - 'Params allowed:' + - 'V(start): The first item to return, using 0-based indexing.' + - 'V(count): The number of resources to return.' + - 'V(query): A general query string to narrow the list of resources returned.' + - 'V(sort): The sort order of the returned data set.' 
+ type: dict extends_documentation_fragment: -- community.general.oneview + - community.general.oneview + - community.general.attributes + - community.general.attributes.info_module +""" -''' - -EXAMPLES = ''' +EXAMPLES = r""" - name: Gather information about all SAN Managers community.general.oneview_san_manager_info: config: /etc/oneview/oneview_config.json @@ -75,14 +76,14 @@ EXAMPLES = ''' - name: Print fetched information about SAN Manager found by provider display name ansible.builtin.debug: msg: "{{ result.san_managers }}" -''' +""" -RETURN = ''' +RETURN = r""" san_managers: - description: Has all the OneView information about the SAN Managers. - returned: Always, but can be null. - type: dict -''' + description: Has all the OneView information about the SAN Managers. + returned: Always, but can be null. + type: dict +""" from ansible_collections.community.general.plugins.module_utils.oneview import OneViewModuleBase diff --git a/plugins/modules/online_server_info.py b/plugins/modules/online_server_info.py new file mode 100644 index 0000000000..3c241d062b --- /dev/null +++ b/plugins/modules/online_server_info.py @@ -0,0 +1,176 @@ +#!/usr/bin/python +# Copyright (c) Ansible project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + + +DOCUMENTATION = r""" +module: online_server_info +short_description: Gather information about Online servers +description: + - Gather information about the servers. + - U(https://www.online.net/en/dedicated-server). 
+author: + - "Remy Leone (@remyleone)" +extends_documentation_fragment: + - community.general.online + - community.general.attributes + - community.general.attributes.info_module +""" + +EXAMPLES = r""" +- name: Gather Online server information + community.general.online_server_info: + api_token: '0d1627e8-bbf0-44c5-a46f-5c4d3aef033f' + register: result + +- ansible.builtin.debug: + msg: "{{ result.online_server_info }}" +""" + +RETURN = r""" +online_server_info: + description: + - Response from Online API. + - 'For more details please refer to: U(https://console.online.net/en/api/).' + returned: success + type: list + elements: dict + sample: + [ + { + "abuse": "abuse@example.com", + "anti_ddos": false, + "bmc": { + "session_key": null + }, + "boot_mode": "normal", + "contacts": { + "owner": "foobar", + "tech": "foobar" + }, + "disks": [ + { + "$ref": "/api/v1/server/hardware/disk/68452" + }, + { + "$ref": "/api/v1/server/hardware/disk/68453" + } + ], + "drive_arrays": [ + { + "disks": [ + { + "$ref": "/api/v1/server/hardware/disk/68452" + }, + { + "$ref": "/api/v1/server/hardware/disk/68453" + } + ], + "raid_controller": { + "$ref": "/api/v1/server/hardware/raidController/9910" + }, + "raid_level": "RAID1" + } + ], + "hardware_watch": true, + "hostname": "sd-42", + "id": 42, + "ip": [ + { + "address": "195.154.172.149", + "mac": "28:92:4a:33:5e:c6", + "reverse": "195-154-172-149.rev.poneytelecom.eu.", + "switch_port_state": "up", + "type": "public" + }, + { + "address": "10.90.53.212", + "mac": "28:92:4a:33:5e:c7", + "reverse": null, + "switch_port_state": "up", + "type": "private" + } + ], + "last_reboot": "2018-08-23T08:32:03.000Z", + "location": { + "block": "A", + "datacenter": "DC3", + "position": 19, + "rack": "A23", + "room": "4 4-4" + }, + "network": { + "ip": [ + "195.154.172.149" + ], + "ipfo": [], + "private": [ + "10.90.53.212" + ] + }, + "offer": "Pro-1-S-SATA", + "os": { + "name": "FreeBSD", + "version": "11.1-RELEASE" + }, + "power": "ON", + 
"proactive_monitoring": false, + "raid_controllers": [ + { + "$ref": "/api/v1/server/hardware/raidController/9910" + } + ], + "support": "Basic service level" + } + ] +""" + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.online import ( + Online, OnlineException, online_argument_spec +) + + +class OnlineServerInfo(Online): + + def __init__(self, module): + super(OnlineServerInfo, self).__init__(module) + self.name = 'api/v1/server' + + def _get_server_detail(self, server_path): + try: + return self.get(path=server_path).json + except OnlineException as exc: + self.module.fail_json(msg="A problem occurred while fetching: %s (%s)" % (server_path, exc)) + + def all_detailed_servers(self): + servers_api_path = self.get_resources() + + server_data = ( + self._get_server_detail(server_api_path) + for server_api_path in servers_api_path + ) + + return [s for s in server_data if s is not None] + + +def main(): + module = AnsibleModule( + argument_spec=online_argument_spec(), + supports_check_mode=True, + ) + + try: + servers_info = OnlineServerInfo(module).all_detailed_servers() + module.exit_json( + online_server_info=servers_info + ) + except OnlineException as exc: + module.fail_json(msg=exc.message) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/cloud/online/online_user_info.py b/plugins/modules/online_user_info.py similarity index 61% rename from plugins/modules/cloud/online/online_user_info.py rename to plugins/modules/online_user_info.py index cd1b6dfa45..61b2c23ae8 100644 --- a/plugins/modules/cloud/online/online_user_info.py +++ b/plugins/modules/online_user_info.py @@ -1,48 +1,49 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- -# -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# Copyright (c) Ansible project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# 
SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = r''' +DOCUMENTATION = r""" module: online_user_info -short_description: Gather information about Online user. +short_description: Gather information about Online user description: - Gather information about the user. author: - "Remy Leone (@remyleone)" extends_documentation_fragment: -- community.general.online -''' + - community.general.online + - community.general.attributes + - community.general.attributes.info_module +""" -EXAMPLES = r''' +EXAMPLES = r""" - name: Gather Online user info community.general.online_user_info: register: result - ansible.builtin.debug: msg: "{{ result.online_user_info }}" -''' +""" -RETURN = r''' +RETURN = r""" online_user_info: description: - Response from Online API. - - "For more details please refer to: U(https://console.online.net/en/api/)." + - 'For more details please refer to: U(https://console.online.net/en/api/).' 
returned: success type: dict sample: - "online_user_info": { - "company": "foobar LLC", - "email": "foobar@example.com", - "first_name": "foo", - "id": 42, - "last_name": "bar", - "login": "foobar" + { + "company": "foobar LLC", + "email": "foobar@example.com", + "first_name": "foo", + "id": 42, + "last_name": "bar", + "login": "foobar" } -''' +""" from ansible.module_utils.basic import AnsibleModule from ansible_collections.community.general.plugins.module_utils.online import ( diff --git a/plugins/modules/system/open_iscsi.py b/plugins/modules/open_iscsi.py similarity index 62% rename from plugins/modules/system/open_iscsi.py rename to plugins/modules/open_iscsi.py index 1768a8b6df..8ccd5351e2 100644 --- a/plugins/modules/system/open_iscsi.py +++ b/plugins/modules/open_iscsi.py @@ -1,129 +1,131 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- -# Copyright: (c) 2013, Serge van Ginderachter -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# Copyright (c) 2013, Serge van Ginderachter +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = r''' ---- +DOCUMENTATION = r""" module: open_iscsi author: -- Serge van Ginderachter (@srvg) + - Serge van Ginderachter (@srvg) short_description: Manage iSCSI targets with Open-iSCSI description: - - Discover targets on given portal, (dis)connect targets, mark targets to - manually or auto start, return device nodes of connected targets. + - Discover targets on given portal, (dis)connect targets, mark targets to manually or auto start, return device nodes of + connected targets. 
requirements: - - open_iscsi library and tools (iscsiadm) + - open_iscsi library and tools (iscsiadm) +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: none options: - portal: - description: - - The domain name or IP address of the iSCSI target. - type: str - aliases: [ ip ] - port: - description: - - The port on which the iSCSI target process listens. - type: str - default: '3260' - target: - description: - - The iSCSI target name. - type: str - aliases: [ name, targetname ] - login: - description: - - Whether the target node should be connected. - type: bool - aliases: [ state ] - node_auth: - description: - - The value for C(node.session.auth.authmethod). - type: str - default: CHAP - node_user: - description: - - The value for C(node.session.auth.username). - type: str - node_pass: - description: - - The value for C(node.session.auth.password). - type: str - node_user_in: - description: - - The value for C(node.session.auth.username_in). - type: str - version_added: 3.8.0 - node_pass_in: - description: - - The value for C(node.session.auth.password_in). - type: str - version_added: 3.8.0 - auto_node_startup: - description: - - Whether the target node should be automatically connected at startup. - type: bool - aliases: [ automatic ] - auto_portal_startup: - description: - - Whether the target node portal should be automatically connected at startup. - type: bool - version_added: 3.2.0 - discover: - description: - - Whether the list of target nodes on the portal should be - (re)discovered and added to the persistent iSCSI database. - - Keep in mind that C(iscsiadm) discovery resets configuration, like C(node.startup) - to manual, hence combined with C(auto_node_startup=yes) will always return - a changed state. - type: bool - default: false - show_nodes: - description: - - Whether the list of nodes in the persistent iSCSI database should be returned by the module. 
- type: bool - default: false - rescan: - description: - - Rescan an established session for discovering new targets. - - When I(target) is omitted, will rescan all sessions. - type: bool - default: false - version_added: 4.1.0 + portal: + description: + - The domain name or IP address of the iSCSI target. + type: str + aliases: [ip] + port: + description: + - The port on which the iSCSI target process listens. + type: str + default: '3260' + target: + description: + - The iSCSI target name. + type: str + aliases: [name, targetname] + login: + description: + - Whether the target node should be connected. + - When O(target) is omitted, it logins to all available. + type: bool + aliases: [state] + node_auth: + description: + - The value for C(node.session.auth.authmethod). + type: str + default: CHAP + node_user: + description: + - The value for C(node.session.auth.username). + type: str + node_pass: + description: + - The value for C(node.session.auth.password). + type: str + node_user_in: + description: + - The value for C(node.session.auth.username_in). + type: str + version_added: 3.8.0 + node_pass_in: + description: + - The value for C(node.session.auth.password_in). + type: str + version_added: 3.8.0 + auto_node_startup: + description: + - Whether the target node should be automatically connected at startup. + type: bool + aliases: [automatic] + auto_portal_startup: + description: + - Whether the target node portal should be automatically connected at startup. + type: bool + version_added: 3.2.0 + discover: + description: + - Whether the list of target nodes on the portal should be (re)discovered and added to the persistent iSCSI database. + - Keep in mind that C(iscsiadm) discovery resets configuration, like C(node.startup) to manual, hence combined with + O(auto_node_startup=true) always returns a changed state. 
+ type: bool + default: false + show_nodes: + description: + - Whether the list of nodes in the persistent iSCSI database should be returned by the module. + type: bool + default: false + rescan: + description: + - Rescan an established session for discovering new targets. + - When O(target) is omitted, it rescans all sessions. + type: bool + default: false + version_added: 4.1.0 +""" -''' - -EXAMPLES = r''' +EXAMPLES = r""" - name: Perform a discovery on sun.com and show available target nodes community.general.open_iscsi: - show_nodes: yes - discover: yes + show_nodes: true + discover: true portal: sun.com - name: Perform a discovery on 10.1.2.3 and show available target nodes community.general.open_iscsi: - show_nodes: yes - discover: yes + show_nodes: true + discover: true ip: 10.1.2.3 -# NOTE: Only works if exactly one target is exported to the initiator -- name: Discover targets on portal and login to the one available +- name: Discover targets on portal and login to the ones available community.general.open_iscsi: portal: '{{ iscsi_target }}' - login: yes - discover: yes + login: true + discover: true - name: Connect to the named target, after updating the local persistent database (cache) community.general.open_iscsi: - login: yes + login: true target: iqn.1986-03.com.sun:02:f8c1f9e0-c3ec-ec84-c9c9-8bfb0cd5de3d - name: Disconnect from the cached named target community.general.open_iscsi: - login: no + login: false target: iqn.1986-03.com.sun:02:f8c1f9e0-c3ec-ec84-c9c9-8bfb0cd5de3d - name: Override and disable automatic portal login on specific portal @@ -137,7 +139,7 @@ EXAMPLES = r''' community.general.open_iscsi: rescan: true target: iqn.1986-03.com.sun:02:f8c1f9e0-c3ec-ec84-c9c9-8bfb0cd5de3d -''' +""" import glob import os @@ -219,7 +221,7 @@ def target_loggedon(module, target, portal=None, port=None): module.fail_json(cmd=cmd, rc=rc, msg=err) -def target_login(module, target, portal=None, port=None): +def target_login(module, target, check_rc, 
portal=None, port=None): node_auth = module.params['node_auth'] node_user = module.params['node_user'] node_pass = module.params['node_pass'] @@ -232,21 +234,22 @@ def target_login(module, target, portal=None, port=None): ('node.session.auth.password', node_pass)] for (name, value) in params: cmd = [iscsiadm_cmd, '--mode', 'node', '--targetname', target, '--op=update', '--name', name, '--value', value] - module.run_command(cmd, check_rc=True) + module.run_command(cmd, check_rc=check_rc) if node_user_in: params = [('node.session.auth.username_in', node_user_in), ('node.session.auth.password_in', node_pass_in)] for (name, value) in params: - cmd = '%s --mode node --targetname %s --op=update --name %s --value %s' % (iscsiadm_cmd, target, name, value) - module.run_command(cmd, check_rc=True) + cmd = [iscsiadm_cmd, '--mode', 'node', '--targetname', target, '--op=update', '--name', name, '--value', value] + module.run_command(cmd, check_rc=check_rc) cmd = [iscsiadm_cmd, '--mode', 'node', '--targetname', target, '--login'] if portal is not None and port is not None: cmd.append('--portal') cmd.append('%s:%s' % (portal, port)) - module.run_command(cmd, check_rc=True) + rc, out, err = module.run_command(cmd, check_rc=check_rc) + return rc def target_logout(module, target): @@ -331,7 +334,10 @@ def main(): ), required_together=[['node_user', 'node_pass'], ['node_user_in', 'node_pass_in']], - required_if=[('discover', True, ['portal'])], + required_if=[ + ('discover', True, ['portal']), + ('auto_node_startup', True, ['target']), + ('auto_portal_startup', True, ['target'])], supports_check_mode=True, ) @@ -361,6 +367,8 @@ def main(): # return json dict result = {'changed': False} + login_to_all_nodes = False + check_rc = True if discover: if check: @@ -377,9 +385,10 @@ def main(): if login is not None or automatic is not None: if target is None: if len(nodes) > 1: - module.fail_json(msg="Need to specify a target") - else: - target = nodes[0] + # Disable strict return code 
checking if there are multiple targets + # That will allow to skip target where we have no rights to login + login_to_all_nodes = True + check_rc = False else: # check given target is in cache check_target = False @@ -394,26 +403,54 @@ def main(): result['nodes'] = nodes if login is not None: - loggedon = target_loggedon(module, target, portal, port) - if (login and loggedon) or (not login and not loggedon): - result['changed'] |= False - if login: - result['devicenodes'] = target_device_node(target) - elif not check: - if login: - target_login(module, target, portal, port) - # give udev some time - time.sleep(1) - result['devicenodes'] = target_device_node(target) - else: - target_logout(module, target) - result['changed'] |= True - result['connection_changed'] = True + if login_to_all_nodes: + result['devicenodes'] = [] + for index_target in nodes: + loggedon = target_loggedon(module, index_target, portal, port) + if (login and loggedon) or (not login and not loggedon): + result['changed'] |= False + if login: + result['devicenodes'] += target_device_node(index_target) + elif not check: + if login: + login_result = target_login(module, index_target, check_rc, portal, port) + # give udev some time + time.sleep(1) + result['devicenodes'] += target_device_node(index_target) + else: + target_logout(module, index_target) + # Check if there are multiple targets on a single portal and + # do not mark the task changed if host could not login to one of them + if len(nodes) > 1 and login_result == 24: + result['changed'] |= False + result['connection_changed'] = False + else: + result['changed'] |= True + result['connection_changed'] = True + else: + result['changed'] |= True + result['connection_changed'] = True else: - result['changed'] |= True - result['connection_changed'] = True + loggedon = target_loggedon(module, target, portal, port) + if (login and loggedon) or (not login and not loggedon): + result['changed'] |= False + if login: + result['devicenodes'] = 
target_device_node(target) + elif not check: + if login: + target_login(module, target, portal, port) + # give udev some time + time.sleep(1) + result['devicenodes'] = target_device_node(target) + else: + target_logout(module, target) + result['changed'] |= True + result['connection_changed'] = True + else: + result['changed'] |= True + result['connection_changed'] = True - if automatic is not None: + if automatic is not None and not login_to_all_nodes: isauto = target_isauto(module, target) if (automatic and isauto) or (not automatic and not isauto): result['changed'] |= False @@ -429,7 +466,7 @@ def main(): result['changed'] |= True result['automatic_changed'] = True - if automatic_portal is not None: + if automatic_portal is not None and not login_to_all_nodes: isauto = target_isauto(module, target, portal, port) if (automatic_portal and isauto) or (not automatic_portal and not isauto): result['changed'] |= False diff --git a/plugins/modules/packaging/os/openbsd_pkg.py b/plugins/modules/openbsd_pkg.py similarity index 81% rename from plugins/modules/packaging/os/openbsd_pkg.py rename to plugins/modules/openbsd_pkg.py index 6943569f8d..8d199a9da4 100644 --- a/plugins/modules/packaging/os/openbsd_pkg.py +++ b/plugins/modules/openbsd_pkg.py @@ -1,80 +1,87 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- -# (c) 2013, Patrik Lundin +# Copyright (c) 2013, Patrik Lundin # -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: openbsd_pkg author: -- Patrik Lundin (@eest) + - Patrik Lundin (@eest) short_description: Manage packages on OpenBSD description: - - Manage packages on OpenBSD using the pkg 
tools. -requirements: -- python >= 2.5 + - Manage packages on OpenBSD using the pkg tools. +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: partial + version_added: 9.1.0 + details: + - Only works when check mode is not enabled. options: - name: - description: - - A name or a list of names of the packages. - required: yes - type: list - elements: str - state: - description: - - C(present) will make sure the package is installed. - C(latest) will make sure the latest version of the package is installed. - C(absent) will make sure the specified package is not installed. - choices: [ absent, latest, present, installed, removed ] - default: present - type: str - build: - description: - - Build the package from source instead of downloading and installing - a binary. Requires that the port source tree is already installed. - Automatically builds and installs the 'sqlports' package, if it is - not already installed. - - Mutually exclusive with I(snapshot). - type: bool - default: no - snapshot: - description: - - Force C(%c) and C(%m) to expand to C(snapshots), even on a release kernel. - - Mutually exclusive with I(build). - type: bool - default: no - version_added: 1.3.0 - ports_dir: - description: - - When used in combination with the C(build) option, allows overriding - the default ports source directory. - default: /usr/ports - type: path - clean: - description: - - When updating or removing packages, delete the extra configuration - file(s) in the old packages which are annotated with @extra in - the packaging-list. - type: bool - default: no - quick: - description: - - Replace or delete packages quickly; do not bother with checksums - before removing normal files. - type: bool - default: no + name: + description: + - A name or a list of names of the packages. + required: true + type: list + elements: str + state: + description: + - V(present) ensures the package is installed. 
+ - V(latest) ensures the latest version of the package is installed. + - V(absent) ensures the specified package is not installed. + choices: [absent, latest, present, installed, removed] + default: present + type: str + build: + description: + - Build the package from source instead of downloading and installing a binary. Requires that the port source tree is + already installed. Automatically builds and installs the C(sqlports) package, if it is not already installed. + - Mutually exclusive with O(snapshot). + type: bool + default: false + snapshot: + description: + - Force C(%c) and C(%m) to expand to C(snapshots), even on a release kernel. + - Mutually exclusive with O(build). + type: bool + default: false + version_added: 1.3.0 + ports_dir: + description: + - When used in combination with the O(build) option, allows overriding the default ports source directory. + default: /usr/ports + type: path + clean: + description: + - When updating or removing packages, delete the extra configuration file(s) in the old packages which are annotated + with C(@extra) in the packaging-list. + type: bool + default: false + quick: + description: + - Replace or delete packages quickly; do not bother with checksums before removing normal files. + type: bool + default: false + autoremove: + description: + - Calls C(pkg_delete -a) to remove automatically installed packages which are no longer needed. + type: bool + default: false + version_added: 11.3.0 notes: - - When used with a `loop:` each package will be processed individually, - it is much more efficient to pass the list directly to the `name` option. -''' + - When used with a C(loop:) each package is processed individually, it is much more efficient to pass the list directly + to the O(name) option. 
+""" -EXAMPLES = ''' +EXAMPLES = r""" - name: Make sure nmap is installed community.general.openbsd_pkg: name: nmap @@ -94,7 +101,7 @@ EXAMPLES = ''' community.general.openbsd_pkg: name: nmap state: present - build: yes + build: true - name: Specify a pkg flavour with '--' community.general.openbsd_pkg: @@ -116,18 +123,28 @@ EXAMPLES = ''' name: '*' state: latest -- name: Purge a package and it's configuration files +- name: Purge a package and its configuration files community.general.openbsd_pkg: name: mpd - clean: yes + clean: true state: absent - name: Quickly remove a package without checking checksums community.general.openbsd_pkg: name: qt5 - quick: yes + quick: true state: absent -''' + +- name: Install packages, remove unused dependencies + community.general.openbsd_pkg: + name: ["tree", "mtr"] + autoremove: true + +- name: Remove all unused dependencies + community.general.openbsd_pkg: + name: '*' + autoremove: true +""" import os import platform @@ -146,7 +163,25 @@ def execute_command(cmd, module): # This makes run_command() use shell=False which we need to not cause shell # expansion of special characters like '*'. cmd_args = shlex.split(cmd) - return module.run_command(cmd_args) + + # We set TERM to 'dumb' to keep pkg_add happy if the machine running + # ansible is using a TERM that the managed machine does not know about, + # e.g.: "No progress meter: failed termcap lookup on xterm-kitty". + return module.run_command(cmd_args, environ_update={'TERM': 'dumb'}) + + +def get_all_installed(module): + """ + Get all installed packaged. Used to support diff mode + """ + command = 'pkg_info -Iq' + + rc, stdout, stderr = execute_command(command, module) + + if stderr: + module.fail_json(msg="failed in get_all_installed(): %s" % stderr) + + return stdout # Function used to find out if a package is currently installed. 
@@ -159,7 +194,11 @@ def get_package_state(names, pkg_spec, module): rc, stdout, stderr = execute_command(command, module) if stderr: - module.fail_json(msg="failed in get_package_state(): " + stderr) + match = re.search(r"^Can't find inst:%s$" % re.escape(name), stderr) + if match: + pkg_spec[name]['installed_state'] = False + else: + module.fail_json(msg="failed in get_package_state(): " + stderr) if stdout: # If the requested package name is just a stem, like "python", we may @@ -358,6 +397,30 @@ def package_absent(names, pkg_spec, module): pkg_spec[name]['changed'] = False +# Function used to remove unused dependencies. +def package_rm_unused_deps(pkg_spec, module): + rm_unused_deps_cmd = 'pkg_delete -Ia' + + if module.check_mode: + rm_unused_deps_cmd += 'n' + + if module.params['clean']: + rm_unused_deps_cmd += 'c' + + if module.params['quick']: + rm_unused_deps_cmd += 'q' + + # If we run the commands, we set changed to true to let + # the package list change detection code do the actual work. + + # Create a minimal pkg_spec entry for '*' to store return values. + pkg_spec['*'] = {} + + # Attempt to remove unused dependencies. + pkg_spec['*']['rc'], pkg_spec['*']['stdout'], pkg_spec['*']['stderr'] = execute_command(rm_unused_deps_cmd, module) + pkg_spec['*']['changed'] = True + + # Function used to parse the package name based on packages-specs(7). # The general name structure is "stem-version[-flavors]". # @@ -511,7 +574,7 @@ def upgrade_packages(pkg_spec, module): pkg_spec['*'] = {} # Attempt to upgrade all packages. - pkg_spec['*']['rc'], pkg_spec['*']['stdout'], pkg_spec['*']['stderr'] = execute_command("%s" % upgrade_cmd, module) + pkg_spec['*']['rc'], pkg_spec['*']['stdout'], pkg_spec['*']['stderr'] = execute_command(upgrade_cmd, module) # Try to find any occurrence of a package changing version like: # "bzip2-1.0.6->1.0.6p0: ok". 
@@ -542,6 +605,7 @@ def main(): ports_dir=dict(type='path', default='/usr/ports'), quick=dict(type='bool', default=False), clean=dict(type='bool', default=False), + autoremove=dict(type='bool', default=False), ), mutually_exclusive=[['snapshot', 'build']], supports_check_mode=True @@ -552,17 +616,17 @@ def main(): build = module.params['build'] ports_dir = module.params['ports_dir'] - rc = 0 - stdout = '' - stderr = '' result = {} result['name'] = name result['state'] = state result['build'] = build + result['diff'] = {} # The data structure used to keep track of package information. pkg_spec = {} + new_package_list = original_package_list = get_all_installed(module) + if build is True: if not os.path.isdir(ports_dir): module.fail_json(msg="the ports source directory %s does not exist" % (ports_dir)) @@ -583,11 +647,16 @@ def main(): asterisk_name = True if asterisk_name: - if state != 'latest': - module.fail_json(msg="the package name '*' is only valid when using state=latest") - else: + if state != 'latest' and not module.params['autoremove']: + module.fail_json(msg="the package name '*' is only valid when using state=latest or autoremove=true") + + if state == 'latest': # Perform an upgrade of all installed packages. upgrade_packages(pkg_spec, module) + + if module.params['autoremove']: + # Remove unused dependencies. + package_rm_unused_deps(pkg_spec, module) else: # Parse package names and put results in the pkg_spec dictionary. parse_package_name(name, pkg_spec, module) @@ -609,6 +678,10 @@ def main(): elif state == 'latest': package_latest(name, pkg_spec, module) + # Handle autoremove if requested for non-asterisk packages + if module.params['autoremove']: + package_rm_unused_deps(pkg_spec, module) + # The combined changed status for all requested packages. If anything # is changed this is set to True. 
combined_changed = False @@ -647,6 +720,11 @@ def main(): result['changed'] = combined_changed + if not module.check_mode: + new_package_list = get_all_installed(module) + result['diff'] = dict(before=original_package_list, after=new_package_list) + result['changed'] = (result['diff']['before'] != result['diff']['after']) + module.exit_json(**result) diff --git a/plugins/modules/identity/opendj/opendj_backendprop.py b/plugins/modules/opendj_backendprop.py similarity index 60% rename from plugins/modules/identity/opendj/opendj_backendprop.py rename to plugins/modules/opendj_backendprop.py index be118a505d..4f0940d391 100644 --- a/plugins/modules/identity/opendj/opendj_backendprop.py +++ b/plugins/modules/opendj_backendprop.py @@ -1,93 +1,98 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- -# Copyright: (c) 2016, Werner Dijkerman (ikben@werner-dijkerman.nl) -# Copyright: (c) 2017, Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# Copyright (c) 2016, Werner Dijkerman (ikben@werner-dijkerman.nl) +# Copyright (c) 2017, Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: opendj_backendprop -short_description: Will update the backend configuration of OpenDJ via the dsconfig set-backend-prop command. +short_description: Update the backend configuration of OpenDJ using the dsconfig set-backend-prop command description: - - This module will update settings for OpenDJ with the command set-backend-prop. - - It will check first via de get-backend-prop if configuration needs to be applied. + - This module updates settings for OpenDJ with the command C(set-backend-prop). 
+ - It checks first using C(get-backend-prop) if configuration needs to be applied. author: - - Werner Dijkerman (@dj-wasabi) + - Werner Dijkerman (@dj-wasabi) +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: none options: - opendj_bindir: - description: - - The path to the bin directory of OpenDJ. - required: false - default: /opt/opendj/bin - type: path - hostname: - description: - - The hostname of the OpenDJ server. - required: true - type: str - port: - description: - - The Admin port on which the OpenDJ instance is available. - required: true - type: str - username: - description: - - The username to connect to. - required: false - default: cn=Directory Manager - type: str - password: - description: - - The password for the cn=Directory Manager user. - - Either password or passwordfile is needed. - required: false - type: str - passwordfile: - description: - - Location to the password file which holds the password for the cn=Directory Manager user. - - Either password or passwordfile is needed. - required: false - type: path - backend: - description: - - The name of the backend on which the property needs to be updated. - required: true - type: str - name: - description: - - The configuration setting to update. - required: true - type: str - value: - description: - - The value for the configuration item. - required: true - type: str - state: - description: - - If configuration needs to be added/updated - required: false - default: "present" - type: str -''' + opendj_bindir: + description: + - The path to the bin directory of OpenDJ. + required: false + default: /opt/opendj/bin + type: path + hostname: + description: + - The hostname of the OpenDJ server. + required: true + type: str + port: + description: + - The Admin port on which the OpenDJ instance is available. + required: true + type: str + username: + description: + - The username to connect to. 
+ required: false + default: cn=Directory Manager + type: str + password: + description: + - The password for the C(cn=Directory Manager) user. + - Either password or passwordfile is needed. + required: false + type: str + passwordfile: + description: + - Location to the password file which holds the password for the C(cn=Directory Manager) user. + - Either password or passwordfile is needed. + required: false + type: path + backend: + description: + - The name of the backend on which the property needs to be updated. + required: true + type: str + name: + description: + - The configuration setting to update. + required: true + type: str + value: + description: + - The value for the configuration item. + required: true + type: str + state: + description: + - If configuration needs to be added/updated. + required: false + default: "present" + type: str +""" -EXAMPLES = ''' - - name: Add or update OpenDJ backend properties - action: opendj_backendprop - hostname=localhost - port=4444 - username="cn=Directory Manager" - password=password - backend=userRoot - name=index-entry-limit - value=5000 -''' +EXAMPLES = r""" +- name: Add or update OpenDJ backend properties + opendj_backendprop: + hostname: localhost + port: 4444 + username: "cn=Directory Manager" + password: password + backend: userRoot + name: index-entry-limit + value: 5000 +""" -RETURN = ''' -''' +RETURN = r""" +""" from ansible.module_utils.basic import AnsibleModule @@ -146,9 +151,9 @@ def main(): opendj_bindir=dict(default="/opt/opendj/bin", type="path"), hostname=dict(required=True), port=dict(required=True), - username=dict(default="cn=Directory Manager", required=False), - password=dict(required=False, no_log=True), - passwordfile=dict(required=False, type="path"), + username=dict(default="cn=Directory Manager"), + password=dict(no_log=True), + passwordfile=dict(type="path"), backend=dict(required=True), name=dict(required=True), value=dict(required=True), diff --git 
a/plugins/modules/system/openwrt_init.py b/plugins/modules/openwrt_init.py similarity index 74% rename from plugins/modules/system/openwrt_init.py rename to plugins/modules/openwrt_init.py index fa9488ecb2..abee16bbf3 100644 --- a/plugins/modules/system/openwrt_init.py +++ b/plugins/modules/openwrt_init.py @@ -1,50 +1,55 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- -# (c) 2016, Andrew Gaffney -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# Copyright (c) 2016, Andrew Gaffney +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import absolute_import, division, print_function +from __future__ import annotations -__metaclass__ = type -DOCUMENTATION = ''' +DOCUMENTATION = r""" module: openwrt_init author: - - "Andrew Gaffney (@agaffney)" -short_description: Manage services on OpenWrt. + - "Andrew Gaffney (@agaffney)" +short_description: Manage services on OpenWrt description: - - Controls OpenWrt services on remote hosts. + - Controls OpenWrt services on remote hosts. +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: none options: - name: - type: str - description: - - Name of the service. - required: true - aliases: ['service'] - state: - type: str - description: - - C(started)/C(stopped) are idempotent actions that will not run commands unless necessary. - C(restarted) will always bounce the service. C(reloaded) will always reload. - choices: [ 'started', 'stopped', 'restarted', 'reloaded' ] - enabled: - description: - - Whether the service should start on boot. B(At least one of state and enabled are required.) 
- type: bool - pattern: - type: str - description: - - If the service does not respond to the 'running' command, name a - substring to look for as would be found in the output of the I(ps) - command as a stand-in for a 'running' result. If the string is found, - the service will be assumed to be running. + name: + type: str + description: + - Name of the service. + required: true + aliases: ['service'] + state: + type: str + description: + - V(started)/V(stopped) are idempotent actions that do not run commands unless necessary. + - V(restarted) always bounces the service. + - V(reloaded) always reloads. + choices: ['started', 'stopped', 'restarted', 'reloaded'] + enabled: + description: + - Whether the service should start on boot. B(At least one) of O(state) and O(enabled) are required. + type: bool + pattern: + type: str + description: + - If the service does not respond to the C(running) command, name a substring to look for as would be found in the output + of the C(ps) command as a stand-in for a C(running) result. If the string is found, the service is assumed to be running. notes: - - One option other than name is required. + - One option other than O(name) is required. 
requirements: - - An OpenWrt system (with python) -''' + - An OpenWrt system (with python) +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: Start service httpd, if not running community.general.openwrt_init: state: started @@ -63,11 +68,11 @@ EXAMPLES = ''' - name: Enable service httpd community.general.openwrt_init: name: httpd - enabled: yes -''' + enabled: true +""" -RETURN = ''' -''' +RETURN = r""" +""" import os from ansible.module_utils.basic import AnsibleModule diff --git a/plugins/modules/opkg.py b/plugins/modules/opkg.py new file mode 100644 index 0000000000..a65c00193d --- /dev/null +++ b/plugins/modules/opkg.py @@ -0,0 +1,223 @@ +#!/usr/bin/python + +# Copyright (c) 2013, Patrick Pelletier +# Based on pacman (Afterburn) and pkgin (Shaun Zinck) modules +# +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + + +DOCUMENTATION = r""" +module: opkg +author: "Patrick Pelletier (@skinp)" +short_description: Package manager for OpenWrt and Openembedded/Yocto based Linux distributions +description: + - Manages ipk packages for OpenWrt and Openembedded/Yocto based Linux distributions. +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: none + diff_mode: + support: none +options: + name: + description: + - Name of package(s) to install/remove. + - V(NAME=VERSION) syntax is also supported to install a package in a certain version. See the examples. This only works + on Yocto based Linux distributions (opkg>=0.3.2) and not for OpenWrt. This is supported since community.general 6.2.0. + aliases: [pkg] + required: true + type: list + elements: str + state: + description: + - State of the package. + choices: ['present', 'absent', 'installed', 'removed'] + default: present + type: str + force: + description: + - The C(opkg --force) parameter used. 
+ choices: + - "depends" + - "maintainer" + - "reinstall" + - "overwrite" + - "downgrade" + - "space" + - "postinstall" + - "remove" + - "checksum" + - "removal-of-dependent-packages" + type: str + update_cache: + description: + - Update the package DB first. + default: false + type: bool + executable: + description: + - The executable location for C(opkg). + type: path + version_added: 7.2.0 +requirements: + - opkg + - python +""" + +EXAMPLES = r""" +- name: Install foo + community.general.opkg: + name: foo + state: present + +- name: Install foo in version 1.2 (opkg>=0.3.2 on Yocto based Linux distributions) + community.general.opkg: + name: foo=1.2 + state: present + +- name: Update cache and install foo + community.general.opkg: + name: foo + state: present + update_cache: true + +- name: Remove foo + community.general.opkg: + name: foo + state: absent + +- name: Remove foo and bar + community.general.opkg: + name: + - foo + - bar + state: absent + +- name: Install foo using overwrite option forcibly + community.general.opkg: + name: foo + state: present + force: overwrite +""" + +RETURN = r""" +version: + description: Version of opkg. 
+ type: str + returned: always + sample: "2.80.0" + version_added: 10.0.0 +""" + +import os +from ansible_collections.community.general.plugins.module_utils.cmd_runner import CmdRunner, cmd_runner_fmt +from ansible_collections.community.general.plugins.module_utils.module_helper import StateModuleHelper + + +class Opkg(StateModuleHelper): + module = dict( + argument_spec=dict( + name=dict(aliases=["pkg"], required=True, type="list", elements="str"), + state=dict(default="present", choices=["present", "installed", "absent", "removed"]), + force=dict(choices=["depends", "maintainer", "reinstall", "overwrite", "downgrade", "space", + "postinstall", "remove", "checksum", "removal-of-dependent-packages"]), + update_cache=dict(default=False, type='bool'), + executable=dict(type="path"), + ), + ) + + def __init_module__(self): + self.vars.set("install_c", 0, output=False, change=True) + self.vars.set("remove_c", 0, output=False, change=True) + + state_map = dict( + query="list-installed", + present="install", + installed="install", + absent="remove", + removed="remove", + ) + + dir, cmd = os.path.split(self.vars.executable) if self.vars.executable else (None, "opkg") + + self.runner = CmdRunner( + self.module, + command=cmd, + arg_formats=dict( + package=cmd_runner_fmt.as_list(), + state=cmd_runner_fmt.as_map(state_map), + force=cmd_runner_fmt.as_optval("--force-"), + update_cache=cmd_runner_fmt.as_bool("update"), + version=cmd_runner_fmt.as_fixed("--version"), + ), + path_prefix=dir, + ) + + with self.runner("version") as ctx: + rc, out, err = ctx.run() + self.vars.version = out.strip().replace("opkg version ", "") + + if self.vars.update_cache: + rc, dummy, dummy = self.runner("update_cache").run() + if rc != 0: + self.do_raise("could not update package db") + + @staticmethod + def split_name_and_version(package): + """ Split the name and the version when using the NAME=VERSION syntax """ + splitted = package.split('=', 1) + if len(splitted) == 1: + return splitted[0], 
None + else: + return splitted[0], splitted[1] + + def _package_in_desired_state(self, name, want_installed, version=None): + dummy, out, dummy = self.runner("state package").run(state="query", package=name) + + has_package = out.startswith(name + " - %s" % ("" if not version else (version + " "))) + return want_installed == has_package + + def state_present(self): + with self.runner("state force package") as ctx: + for package in self.vars.name: + pkg_name, pkg_version = self.split_name_and_version(package) + if not self._package_in_desired_state(pkg_name, want_installed=True, version=pkg_version) or self.vars.force == "reinstall": + ctx.run(package=package) + self.vars.set("run_info", ctx.run_info, verbosity=4) + if not self._package_in_desired_state(pkg_name, want_installed=True, version=pkg_version): + self.do_raise("failed to install %s" % package) + self.vars.install_c += 1 + if self.vars.install_c > 0: + self.vars.msg = "installed %s package(s)" % self.vars.install_c + else: + self.vars.msg = "package(s) already present" + + def state_absent(self): + with self.runner("state force package") as ctx: + for package in self.vars.name: + package, dummy = self.split_name_and_version(package) + if not self._package_in_desired_state(package, want_installed=False): + ctx.run(package=package) + self.vars.set("run_info", ctx.run_info, verbosity=4) + if not self._package_in_desired_state(package, want_installed=False): + self.do_raise("failed to remove %s" % package) + self.vars.remove_c += 1 + if self.vars.remove_c > 0: + self.vars.msg = "removed %s package(s)" % self.vars.remove_c + else: + self.vars.msg = "package(s) already absent" + + state_installed = state_present + state_removed = state_absent + + +def main(): + Opkg.execute() + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/system/osx_defaults.py b/plugins/modules/osx_defaults.py similarity index 78% rename from plugins/modules/system/osx_defaults.py rename to plugins/modules/osx_defaults.py 
index 45179dc7d2..f694dbaad2 100644 --- a/plugins/modules/system/osx_defaults.py +++ b/plugins/modules/osx_defaults.py @@ -1,26 +1,31 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- -# Copyright: (c) 2014, GeekChimp - Franck Nijhof (DO NOT CONTACT!) -# Copyright: (c) 2019, Ansible project -# Copyright: (c) 2019, Abhijeet Kasurde -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# Copyright (c) 2014, GeekChimp - Franck Nijhof (DO NOT CONTACT!) +# Copyright (c) 2019, Ansible project +# Copyright (c) 2019, Abhijeet Kasurde +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = r''' ---- +DOCUMENTATION = r""" module: osx_defaults author: # DO NOT RE-ADD GITHUB HANDLE! -- Franck Nijhof (!UNKNOWN) + - Franck Nijhof (!UNKNOWN) short_description: Manage macOS user defaults description: - - osx_defaults allows users to read, write, and delete macOS user defaults from Ansible scripts. - - macOS applications and other programs use the defaults system to record user preferences and other - information that must be maintained when the applications are not running (such as default font for new - documents, or the position of an Info panel). + - This module allows users to read, write, and delete macOS user defaults from Ansible scripts. + - MacOS applications and other programs use the defaults system to record user preferences and other information that must + be maintained when the applications are not running (such as default font for new documents, or the position of an Info + panel). 
+extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: none options: domain: description: @@ -30,7 +35,7 @@ options: host: description: - The host on which the preference should apply. - - The special value C(currentHost) corresponds to the C(-currentHost) switch of the defaults commandline tool. + - The special value V(currentHost) corresponds to the C(-currentHost) switch of the defaults commandline tool. type: str key: description: @@ -40,25 +45,31 @@ options: description: - The type of value to write. type: str - choices: [ array, bool, boolean, date, float, int, integer, string ] + choices: [array, bool, boolean, date, float, int, integer, string] default: string + check_type: + description: + - Checks if the type of the provided O(value) matches the type of an existing default. + - If the types do not match, raises an error. + type: bool + default: true + version_added: 8.6.0 array_add: description: - Add new elements to the array for a key which has an array as its value. type: bool - default: no + default: false value: description: - The value to write. - - Only required when C(state=present). + - Only required when O(state=present). type: raw state: description: - The state of the user defaults. - - If set to C(list) will query the given parameter specified by C(key). Returns 'null' is nothing found or mis-spelled. - - C(list) added in version 2.8. + - If set to V(list) it queries the given parameter specified by O(key). Returns V(null) is nothing found or misspelled. type: str - choices: [ absent, list, present ] + choices: [absent, list, present] default: present path: description: @@ -66,63 +77,67 @@ options: type: str default: /usr/bin:/usr/local/bin notes: - - Apple Mac caches defaults. You may need to logout and login to apply the changes. -''' + - Apple Mac caches defaults. You may need to logout and login to apply the changes. 
+""" -EXAMPLES = r''' -# TODO: Describe what happens in each example - -- community.general.osx_defaults: +EXAMPLES = r""" +- name: Set boolean valued key for application domain + community.general.osx_defaults: domain: com.apple.Safari key: IncludeInternalDebugMenu type: bool value: true state: present -- community.general.osx_defaults: +- name: Set string valued key for global domain + community.general.osx_defaults: domain: NSGlobalDomain key: AppleMeasurementUnits type: string value: Centimeters state: present -- community.general.osx_defaults: +- name: Set int valued key for arbitrary plist + community.general.osx_defaults: domain: /Library/Preferences/com.apple.SoftwareUpdate key: AutomaticCheckEnabled type: int value: 1 - become: yes + become: true -- community.general.osx_defaults: +- name: Set int valued key only for the current host + community.general.osx_defaults: domain: com.apple.screensaver host: currentHost key: showClock type: int value: 1 -- community.general.osx_defaults: +- name: Defaults to global domain and setting value + community.general.osx_defaults: key: AppleMeasurementUnits type: string value: Centimeters -- community.general.osx_defaults: +- name: Setting an array valued key + community.general.osx_defaults: key: AppleLanguages type: array value: - en - nl -- community.general.osx_defaults: +- name: Removing a key + community.general.osx_defaults: domain: com.geekchimp.macable key: ExampleKeyToRemove state: absent -''' +""" from datetime import datetime import re from ansible.module_utils.basic import AnsibleModule -from ansible.module_utils.six import binary_type, text_type # exceptions --------------------------------------------------------------- {{{ @@ -146,6 +161,7 @@ class OSXDefaults(object): self.domain = module.params['domain'] self.host = module.params['host'] self.key = module.params['key'] + self.check_type = module.params['check_type'] self.type = module.params['type'] self.array_add = module.params['array_add'] 
self.value = module.params['value'] @@ -172,7 +188,7 @@ class OSXDefaults(object): @staticmethod def is_int(value): as_str = str(value) - if (as_str.startswith("-")): + if as_str.startswith("-"): return as_str[1:].isdigit() else: return as_str.isdigit() @@ -183,7 +199,7 @@ class OSXDefaults(object): if data_type == "string": return str(value) elif data_type in ["bool", "boolean"]: - if isinstance(value, (binary_type, text_type)): + if isinstance(value, (bytes, str)): value = value.lower() if value in [True, 1, "true", "1", "yes"]: return True @@ -256,7 +272,7 @@ class OSXDefaults(object): # If the RC is not 0, then terrible happened! Ooooh nooo! if rc != 0: - raise OSXDefaultsException("An error occurred while reading key type from defaults: %s" % out) + raise OSXDefaultsException("An error occurred while reading key type from defaults: %s" % err) # Ok, lets parse the type from output data_type = out.strip().replace('Type is ', '') @@ -267,9 +283,9 @@ class OSXDefaults(object): # Strip output out = out.strip() - # An non zero RC at this point is kinda strange... + # A non zero RC at this point is kinda strange... 
if rc != 0: - raise OSXDefaultsException("An error occurred while reading key value from defaults: %s" % out) + raise OSXDefaultsException("An error occurred while reading key value from defaults: %s" % err) # Convert string to list when type is array if data_type == "array": @@ -303,16 +319,17 @@ class OSXDefaults(object): if not isinstance(value, list): value = [value] - rc, out, err = self.module.run_command(self._base_command() + ['write', self.domain, self.key, '-' + self.type] + value) + rc, out, err = self.module.run_command(self._base_command() + ['write', self.domain, self.key, '-' + self.type] + value, + expand_user_and_vars=False) if rc != 0: - raise OSXDefaultsException('An error occurred while writing value to defaults: %s' % out) + raise OSXDefaultsException('An error occurred while writing value to defaults: %s' % err) def delete(self): """ Deletes defaults key from domain """ rc, out, err = self.module.run_command(self._base_command() + ['delete', self.domain, self.key]) if rc != 0: - raise OSXDefaultsException("An error occurred while deleting key from defaults: %s" % out) + raise OSXDefaultsException("An error occurred while deleting key from defaults: %s" % err) # /commands ----------------------------------------------------------- }}} @@ -336,10 +353,11 @@ class OSXDefaults(object): self.delete() return True - # There is a type mismatch! Given type does not match the type in defaults - value_type = type(self.value) - if self.current_value is not None and not isinstance(self.current_value, value_type): - raise OSXDefaultsException("Type mismatch. Type in defaults: %s" % type(self.current_value).__name__) + # Check if there is a type mismatch, e.g. given type does not match the type in defaults + if self.check_type: + value_type = type(self.value) + if self.current_value is not None and not isinstance(self.current_value, value_type): + raise OSXDefaultsException("Type mismatch. 
Type in defaults: %s" % type(self.current_value).__name__) # Current value matches the given value. Nothing need to be done. Arrays need extra care if self.type == "array" and self.current_value is not None and not self.array_add and \ @@ -370,6 +388,7 @@ def main(): domain=dict(type='str', default='NSGlobalDomain'), host=dict(type='str'), key=dict(type='str', no_log=False), + check_type=dict(type='bool', default=True), type=dict(type='str', default='string', choices=['array', 'bool', 'boolean', 'date', 'float', 'int', 'integer', 'string']), array_add=dict(type='bool', default=False), value=dict(type='raw'), diff --git a/plugins/modules/cloud/ovh/ovh_ip_failover.py b/plugins/modules/ovh_ip_failover.py similarity index 68% rename from plugins/modules/cloud/ovh/ovh_ip_failover.py rename to plugins/modules/ovh_ip_failover.py index 26179eb8f7..a32db78451 100644 --- a/plugins/modules/cloud/ovh/ovh_ip_failover.py +++ b/plugins/modules/ovh_ip_failover.py @@ -1,88 +1,86 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- -# Copyright: Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# Copyright Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: ovh_ip_failover short_description: Manage OVH IP failover address description: - - Manage OVH (French European hosting provider) IP Failover Address. For now, this module can only be used to move - an ip failover (or failover block) between services + - Manage OVH (French European hosting provider) IP Failover Address. For now, this module can only be used to move an IP + failover (or failover block) between services. 
author: "Pascal HERAUD (@pascalheraud)" notes: - - Uses the python OVH Api U(https://github.com/ovh/python-ovh). - You have to create an application (a key and secret) with a consummer - key as described into U(https://docs.ovh.com/gb/en/customer/first-steps-with-ovh-api/) + - Uses the Python OVH API U(https://github.com/ovh/python-ovh). You have to create an application (a key and secret) with + a consumer key as described into U(https://docs.ovh.com/gb/en/customer/first-steps-with-ovh-api/). requirements: - - ovh >= 0.4.8 + - ovh >= 0.4.8 +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: none options: - name: - required: true - description: - - The IP address to manage (can be a single IP like 1.1.1.1 - or a block like 1.1.1.1/28 ) - type: str - service: - required: true - description: - - The name of the OVH service this IP address should be routed - type: str - endpoint: - required: true - description: - - The endpoint to use ( for instance ovh-eu) - type: str - wait_completion: - required: false - default: true - type: bool - description: - - If true, the module will wait for the IP address to be moved. - If false, exit without waiting. The taskId will be returned - in module output - wait_task_completion: - required: false - default: 0 - description: - - If not 0, the module will wait for this task id to be - completed. Use wait_task_completion if you want to wait for - completion of a previously executed task with - wait_completion=false. 
You can execute this module repeatedly on - a list of failover IPs using wait_completion=false (see examples) - type: int - application_key: - required: true - description: - - The applicationKey to use - type: str - application_secret: - required: true - description: - - The application secret to use - type: str - consumer_key: - required: true - description: - - The consumer key to use - type: str - timeout: - required: false - default: 120 - description: - - The timeout in seconds used to wait for a task to be - completed. Default is 120 seconds. - type: int + name: + required: true + description: + - The IP address to manage (can be a single IP like V(1.1.1.1) or a block like V(1.1.1.1/28)). + type: str + service: + required: true + description: + - The name of the OVH service this IP address should be routed. + type: str + endpoint: + required: true + description: + - The endpoint to use (for instance V(ovh-eu)). + type: str + wait_completion: + required: false + default: true + type: bool + description: + - If V(true), the module waits for the IP address to be moved. If false, exit without waiting. The C(taskId) is returned + in module output. + wait_task_completion: + required: false + default: 0 + description: + - If not V(0), the module waits for this task ID to be completed. Use O(wait_task_completion) if you want to wait for + completion of a previously executed task with O(wait_completion=false). You can execute this module repeatedly on + a list of failover IPs using O(wait_completion=false) (see examples). + type: int + application_key: + required: true + description: + - The applicationKey to use. + type: str + application_secret: + required: true + description: + - The application secret to use. + type: str + consumer_key: + required: true + description: + - The consumer key to use. + type: str + timeout: + required: false + default: 120 + description: + - The timeout in seconds used to wait for a task to be completed. Default is 120 seconds. 
+ type: int +""" -''' - -EXAMPLES = ''' +EXAMPLES = r""" # Route an IP address 1.1.1.1 to the service ns666.ovh.net - community.general.ovh_ip_failover: name: 1.1.1.1 @@ -108,12 +106,13 @@ EXAMPLES = ''' application_key: yourkey application_secret: yoursecret consumer_key: yourconsumerkey -''' +""" -RETURN = ''' -''' +RETURN = r""" +""" import time +from urllib.parse import quote_plus try: import ovh @@ -124,7 +123,6 @@ except ImportError: HAS_OVH = False from ansible.module_utils.basic import AnsibleModule -from ansible.module_utils.six.moves.urllib.parse import quote_plus def getOvhClient(ansibleModule): @@ -159,7 +157,7 @@ def waitForTaskDone(client, name, taskId, timeout): task = client.get('/ip/{0}/task/{1}'.format(quote_plus(name), taskId)) if task['status'] == 'done': return True - time.sleep(5) # Delay for 5 sec because it's long to wait completion, do not harass the API + time.sleep(5) # Delay for 5 sec to not harass the API currentTimeout -= 5 if currentTimeout < 0: return False diff --git a/plugins/modules/cloud/ovh/ovh_ip_loadbalancing_backend.py b/plugins/modules/ovh_ip_loadbalancing_backend.py similarity index 80% rename from plugins/modules/cloud/ovh/ovh_ip_loadbalancing_backend.py rename to plugins/modules/ovh_ip_loadbalancing_backend.py index 28d6f3a129..2c786022ba 100644 --- a/plugins/modules/cloud/ovh/ovh_ip_loadbalancing_backend.py +++ b/plugins/modules/ovh_ip_loadbalancing_backend.py @@ -1,85 +1,86 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- -# Copyright: Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# Copyright Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: 
ovh_ip_loadbalancing_backend short_description: Manage OVH IP LoadBalancing backends description: - - Manage OVH (French European hosting provider) LoadBalancing IP backends + - Manage OVH (French European hosting provider) LoadBalancing IP backends. author: Pascal Heraud (@pascalheraud) notes: - - Uses the python OVH Api U(https://github.com/ovh/python-ovh). - You have to create an application (a key and secret) with a consumer - key as described into U(https://docs.ovh.com/gb/en/customer/first-steps-with-ovh-api/) + - Uses the Python OVH API U(https://github.com/ovh/python-ovh). You have to create an application (a key and secret) with + a consumer key as described into U(https://docs.ovh.com/gb/en/customer/first-steps-with-ovh-api/). requirements: - - ovh > 0.3.5 + - ovh > 0.3.5 +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: none + diff_mode: + support: none options: - name: - required: true - description: - - Name of the LoadBalancing internal name (ip-X.X.X.X) - type: str - backend: - required: true - description: - - The IP address of the backend to update / modify / delete - type: str - state: - default: present - choices: ['present', 'absent'] - description: - - Determines whether the backend is to be created/modified - or deleted - type: str - probe: - default: 'none' - choices: ['none', 'http', 'icmp' , 'oco'] - description: - - Determines the type of probe to use for this backend - type: str - weight: - default: 8 - description: - - Determines the weight for this backend - type: int - endpoint: - required: true - description: - - The endpoint to use ( for instance ovh-eu) - type: str - application_key: - required: true - description: - - The applicationKey to use - type: str - application_secret: - required: true - description: - - The application secret to use - type: str - consumer_key: - required: true - description: - - The consumer key to use - type: str - timeout: - default: 120 - 
description: - - The timeout in seconds used to wait for a task to be - completed. - type: int + name: + required: true + description: + - Name of the LoadBalancing internal name (V(ip-X.X.X.X)). + type: str + backend: + required: true + description: + - The IP address of the backend to update / modify / delete. + type: str + state: + default: present + choices: ['present', 'absent'] + description: + - Determines whether the backend is to be created/modified or deleted. + type: str + probe: + default: 'none' + choices: ['none', 'http', 'icmp', 'oco'] + description: + - Determines the type of probe to use for this backend. + type: str + weight: + default: 8 + description: + - Determines the weight for this backend. + type: int + endpoint: + required: true + description: + - The endpoint to use (for instance V(ovh-eu)). + type: str + application_key: + required: true + description: + - The applicationKey to use. + type: str + application_secret: + required: true + description: + - The application secret to use. + type: str + consumer_key: + required: true + description: + - The consumer key to use. + type: str + timeout: + default: 120 + description: + - The timeout in seconds used to wait for a task to be completed. + type: int +""" -''' - -EXAMPLES = ''' +EXAMPLES = r""" - name: Adds or modify the backend '212.1.1.1' to a loadbalancing 'ip-1.1.1.1' ovh_ip_loadbalancing: name: ip-1.1.1.1 @@ -101,10 +102,10 @@ EXAMPLES = ''' application_key: yourkey application_secret: yoursecret consumer_key: yourconsumerkey -''' +""" -RETURN = ''' -''' +RETURN = r""" +""" import time @@ -241,7 +242,7 @@ def main(): 'parameters. 
Error returned by OVH api was : {0}' .format(apiError)) - if (backendProperties['weight'] != weight): + if backendProperties['weight'] != weight: # Change weight try: client.post( @@ -260,7 +261,7 @@ def main(): .format(apiError)) moduleChanged = True - if (backendProperties['probe'] != probe): + if backendProperties['probe'] != probe: # Change probe backendProperties['probe'] = probe try: diff --git a/plugins/modules/cloud/ovh/ovh_monthly_billing.py b/plugins/modules/ovh_monthly_billing.py similarity index 70% rename from plugins/modules/cloud/ovh/ovh_monthly_billing.py rename to plugins/modules/ovh_monthly_billing.py index 75c70a79ec..e297e8979d 100644 --- a/plugins/modules/cloud/ovh/ovh_monthly_billing.py +++ b/plugins/modules/ovh_monthly_billing.py @@ -1,52 +1,57 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- -# Copyright: (c) 2019, Francois Lallart (@fraff) -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# Copyright (c) 2019, Francois Lallart (@fraff) +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import absolute_import, division, print_function +from __future__ import annotations -__metaclass__ = type -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: ovh_monthly_billing author: Francois Lallart (@fraff) version_added: '0.2.0' short_description: Manage OVH monthly billing description: - - Enable monthly billing on OVH cloud intances (be aware OVH does not allow to disable it). -requirements: [ "ovh" ] + - Enable monthly billing on OVH cloud instances (be aware OVH does not allow to disable it). 
+requirements: ["ovh"] +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: none options: - project_id: - required: true - type: str - description: - - ID of the project, get it with U(https://api.ovh.com/console/#/cloud/project#GET) - instance_id: - required: true - type: str - description: - - ID of the instance, get it with U(https://api.ovh.com/console/#/cloud/project/%7BserviceName%7D/instance#GET) - endpoint: - type: str - description: - - The endpoint to use (for instance ovh-eu) - application_key: - type: str - description: - - The applicationKey to use - application_secret: - type: str - description: - - The application secret to use - consumer_key: - type: str - description: - - The consumer key to use -''' + project_id: + required: true + type: str + description: + - ID of the project, get it with U(https://api.ovh.com/console/#/cloud/project#GET). + instance_id: + required: true + type: str + description: + - ID of the instance, get it with U(https://api.ovh.com/console/#/cloud/project/%7BserviceName%7D/instance#GET). + endpoint: + type: str + description: + - The endpoint to use (for instance V(ovh-eu)). + application_key: + type: str + description: + - The applicationKey to use. + application_secret: + type: str + description: + - The application secret to use. + consumer_key: + type: str + description: + - The consumer key to use. 
+""" -EXAMPLES = ''' +EXAMPLES = r""" - name: Basic usage, using auth from /etc/ovh.conf community.general.ovh_monthly_billing: project_id: 0c727a20aa144485b70c44dee9123b46 @@ -67,13 +72,11 @@ EXAMPLES = ''' application_key: yourkey application_secret: yoursecret consumer_key: yourconsumerkey -''' +""" -RETURN = ''' -''' +RETURN = r""" +""" -import os -import sys import traceback try: @@ -93,10 +96,10 @@ def main(): argument_spec=dict( project_id=dict(required=True), instance_id=dict(required=True), - endpoint=dict(required=False), - application_key=dict(required=False, no_log=True), - application_secret=dict(required=False, no_log=True), - consumer_key=dict(required=False, no_log=True), + endpoint=dict(), + application_key=dict(no_log=True), + application_secret=dict(no_log=True), + consumer_key=dict(no_log=True), ), supports_check_mode=True ) diff --git a/plugins/modules/pacemaker_cluster.py b/plugins/modules/pacemaker_cluster.py new file mode 100644 index 0000000000..f72f0fa5e5 --- /dev/null +++ b/plugins/modules/pacemaker_cluster.py @@ -0,0 +1,169 @@ +#!/usr/bin/python + +# Copyright (c) 2016, Mathieu Bultel +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + +DOCUMENTATION = r""" +module: pacemaker_cluster +short_description: Manage pacemaker clusters +author: + - Mathieu Bultel (@matbu) + - Dexter Le (@munchtoast) +description: + - This module can manage a pacemaker cluster and nodes from Ansible using the pacemaker CLI. +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: none +options: + state: + description: + - Indicate desired state of the cluster. + - The value V(maintenance) has been added in community.general 11.1.0. 
+ choices: [cleanup, offline, online, restart, maintenance] + type: str + required: true + name: + description: + - Specify which node of the cluster you want to manage. V(null) == the cluster status itself, V(all) == check the status + of all nodes. + type: str + aliases: ['node'] + timeout: + description: + - Timeout period (in seconds) for polling the cluster operation. + type: int + default: 300 + force: + description: + - Force the change of the cluster state. + type: bool + default: true +""" + +EXAMPLES = r""" +- name: Set cluster Online + hosts: localhost + gather_facts: false + tasks: + - name: Get cluster state + community.general.pacemaker_cluster: + state: online +""" + +RETURN = r""" +out: + description: The output of the current state of the cluster. It returns a list of the nodes state. + type: str + sample: 'out: [[" overcloud-controller-0", " Online"]]}' + returned: always +""" + +from ansible_collections.community.general.plugins.module_utils.module_helper import StateModuleHelper +from ansible_collections.community.general.plugins.module_utils.pacemaker import pacemaker_runner, get_pacemaker_maintenance_mode + + +class PacemakerCluster(StateModuleHelper): + module = dict( + argument_spec=dict( + state=dict(type='str', choices=[ + 'cleanup', 'offline', 'online', 'restart', 'maintenance'], required=True), + name=dict(type='str', aliases=['node']), + timeout=dict(type='int', default=300), + force=dict(type='bool', default=True) + ), + supports_check_mode=True, + ) + default_state = "" + + def __init_module__(self): + self.runner = pacemaker_runner(self.module) + self.vars.set('apply_all', True if not self.module.params['name'] else False) + get_args = dict(cli_action='cluster', state='status', name=None, apply_all=self.vars.apply_all) + if self.module.params['state'] == "maintenance": + get_args['cli_action'] = "property" + get_args['state'] = "config" + get_args['name'] = "maintenance-mode" + elif self.module.params['state'] == "cleanup": + 
get_args['cli_action'] = "resource" + get_args['name'] = self.module.params['name'] + + self.vars.set('get_args', get_args) + self.vars.set('previous_value', self._get()['out']) + self.vars.set('value', self.vars.previous_value, change=True, diff=True) + + if self.module.params['state'] == "cleanup": + self.module.deprecate( + 'The value `cleanup` for "state" is being deprecated, use pacemaker_resource module instead.', + version='14.0.0', + collection_name='community.general' + ) + + def __quit_module__(self): + self.vars.set('value', self._get()['out']) + + def _process_command_output(self, fail_on_err, ignore_err_msg=""): + def process(rc, out, err): + if fail_on_err and rc != 0 and err and ignore_err_msg not in err: + self.do_raise('pcs failed with error (rc={0}): {1}'.format(rc, err)) + out = out.rstrip() + return None if out == "" else out + return process + + def _get(self): + with self.runner('cli_action state name') as ctx: + result = ctx.run(cli_action=self.vars.get_args['cli_action'], state=self.vars.get_args['state'], name=self.vars.get_args['name']) + return dict(rc=result[0], + out=(result[1] if result[1] != "" else None), + err=result[2]) + + def state_cleanup(self): + with self.runner('cli_action state name', output_process=self._process_command_output(True, "Fail"), check_mode_skip=True) as ctx: + ctx.run(cli_action='resource') + + def state_offline(self): + with self.runner('cli_action state name apply_all wait', + output_process=self._process_command_output(True, "not currently running"), + check_mode_skip=True) as ctx: + ctx.run(cli_action='cluster', apply_all=self.vars.apply_all, wait=self.module.params['timeout']) + + def state_online(self): + with self.runner('cli_action state name apply_all wait', + output_process=self._process_command_output(True, "currently running"), + check_mode_skip=True) as ctx: + ctx.run(cli_action='cluster', apply_all=self.vars.apply_all, wait=self.module.params['timeout']) + + if 
get_pacemaker_maintenance_mode(self.runner): + with self.runner('cli_action state name', output_process=self._process_command_output(True, "Fail"), check_mode_skip=True) as ctx: + ctx.run(cli_action='property', state='maintenance', name='maintenance-mode=false') + + def state_maintenance(self): + with self.runner('cli_action state name', + output_process=self._process_command_output(True, "Fail"), + check_mode_skip=True) as ctx: + ctx.run(cli_action='property', name='maintenance-mode=true') + + def state_restart(self): + with self.runner('cli_action state name apply_all wait', + output_process=self._process_command_output(True, "not currently running"), + check_mode_skip=True) as ctx: + ctx.run(cli_action='cluster', state='offline', apply_all=self.vars.apply_all, wait=self.module.params['timeout']) + ctx.run(cli_action='cluster', state='online', apply_all=self.vars.apply_all, wait=self.module.params['timeout']) + + if get_pacemaker_maintenance_mode(self.runner): + with self.runner('cli_action state name', output_process=self._process_command_output(True, "Fail"), check_mode_skip=True) as ctx: + ctx.run(cli_action='property', state='maintenance', name='maintenance-mode=false') + + +def main(): + PacemakerCluster.execute() + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/pacemaker_info.py b/plugins/modules/pacemaker_info.py new file mode 100644 index 0000000000..f57accd429 --- /dev/null +++ b/plugins/modules/pacemaker_info.py @@ -0,0 +1,107 @@ +#!/usr/bin/python + +# Copyright (c) 2025, Dexter Le +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + +DOCUMENTATION = r""" +module: pacemaker_info +short_description: Gather information about Pacemaker cluster +author: + - Dexter Le (@munchtoast) +version_added: 11.2.0 +description: + - Gather information about a Pacemaker cluster. 
+extends_documentation_fragment: + - community.general.attributes + - community.general.attributes.info_module +""" + +EXAMPLES = r""" +- name: Gather Pacemaker cluster info + community.general.pacemaker_info: + register: result + +- name: Debug cluster info + ansible.builtin.debug: + msg: "{{ result }}" +""" + +RETURN = r""" +version: + description: Pacemaker CLI version + returned: always + type: str +cluster_info: + description: Cluster information such as the name, UUID, and nodes. + returned: always + type: dict +resource_info: + description: All resources available on the cluster and their status. + returned: success + type: dict +stonith_info: + description: All STONITH information on the cluster. + returned: success + type: dict +constraint_info: + description: All cluster resource constraints on the cluster. + returned: success + type: dict +property_info: + description: All properties present on the cluster. + returned: success + type: dict +""" + +import json + +from ansible_collections.community.general.plugins.module_utils.module_helper import ModuleHelper +from ansible_collections.community.general.plugins.module_utils.pacemaker import pacemaker_runner + + +class PacemakerInfo(ModuleHelper): + module = dict( + argument_spec=dict(), + supports_check_mode=True, + ) + info_vars = { + "cluster_info": "cluster", + "resource_info": "resource", + "stonith_info": "stonith", + "constraint_info": "constraint", + "property_info": "property" + } + output_params = info_vars.keys() + + def __init_module__(self): + self.runner = pacemaker_runner(self.module) + with self.runner("version") as ctx: + rc, out, err = ctx.run() + self.vars.version = out.strip() + + def _process_command_output(self, cli_action=""): + def process(rc, out, err): + if rc != 0: + self.do_raise('pcs {0} config failed with error (rc={1}): {2}'.format(cli_action, rc, err)) + out = json.loads(out) + return None if out == "" else out + return process + + def _get_info(self, cli_action): + with 
self.runner("cli_action config output_format", output_process=self._process_command_output(cli_action)) as ctx: + return ctx.run(cli_action=cli_action, output_format="json") + + def __run__(self): + for key, cli_action in sorted(self.info_vars.items()): + self.vars.set(key, self._get_info(cli_action)) + + +def main(): + PacemakerInfo.execute() + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/pacemaker_resource.py b/plugins/modules/pacemaker_resource.py new file mode 100644 index 0000000000..c3c11f683a --- /dev/null +++ b/plugins/modules/pacemaker_resource.py @@ -0,0 +1,256 @@ +#!/usr/bin/python + +# Copyright (c) 2025, Dexter Le +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + +DOCUMENTATION = r""" +module: pacemaker_resource +short_description: Manage pacemaker resources +author: + - Dexter Le (@munchtoast) +version_added: 10.5.0 +description: + - This module can manage resources in a Pacemaker cluster using the pacemaker CLI. +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: none +options: + state: + description: + - Indicate desired state for cluster resource. + - The states V(cleanup) and V(cloned) have been added in community.general 11.3.0. + - If O(state=cloned) or O(state=present), you can set O(resource_clone_ids) and O(resource_clone_meta) to determine exactly what and how to clone. + choices: [present, absent, cloned, enabled, disabled, cleanup] + default: present + type: str + name: + description: + - Specify the resource name to create or clone to. + - This is required if O(state=present), O(state=absent), O(state=enabled), or O(state=disabled). + type: str + resource_type: + description: + - Resource type to create. 
+ type: dict + suboptions: + resource_name: + description: + - Specify the resource type name. + type: str + resource_standard: + description: + - Specify the resource type standard. + type: str + resource_provider: + description: + - Specify the resource type providers. + type: str + resource_option: + description: + - Specify the resource option to create. + type: list + elements: str + default: [] + resource_operation: + description: + - List of operations to associate with resource. + type: list + elements: dict + default: [] + suboptions: + operation_action: + description: + - Operation action to associate with resource. + type: str + operation_option: + description: + - Operation option to associate with action. + type: list + elements: str + resource_meta: + description: + - List of meta to associate with resource. + type: list + elements: str + resource_argument: + description: + - Action to associate with resource. + type: dict + suboptions: + argument_action: + description: + - Action to apply to resource. + type: str + choices: [clone, master, group, promotable] + argument_option: + description: + - Options to associate with resource action. + type: list + elements: str + resource_clone_ids: + description: + - List of clone resource IDs to clone from. + type: list + elements: str + version_added: 11.3.0 + resource_clone_meta: + description: + - List of metadata to associate with clone resource. + type: list + elements: str + version_added: 11.3.0 + wait: + description: + - Timeout period for polling the resource creation. 
+ type: int + default: 300 +""" + +EXAMPLES = r""" +--- +- name: Create pacemaker resource + hosts: localhost + gather_facts: false + tasks: + - name: Create virtual-ip resource + community.general.pacemaker_resource: + state: present + name: virtual-ip + resource_type: + resource_name: IPaddr2 + resource_option: + - "ip=[192.168.2.1]" + resource_argument: + argument_action: group + argument_option: + - master + resource_operation: + - operation_action: monitor + operation_option: + - interval=20 +""" + +RETURN = r""" +cluster_resources: + description: The cluster resource output message. + type: str + sample: "Assumed agent name ocf:heartbeat:IPaddr2 (deduced from IPaddr2)" + returned: always +""" + +from ansible_collections.community.general.plugins.module_utils.module_helper import StateModuleHelper +from ansible_collections.community.general.plugins.module_utils.pacemaker import pacemaker_runner, get_pacemaker_maintenance_mode + + +class PacemakerResource(StateModuleHelper): + module = dict( + argument_spec=dict( + state=dict(type='str', default='present', choices=[ + 'present', 'absent', 'cloned', 'enabled', 'disabled', 'cleanup']), + name=dict(type='str'), + resource_type=dict(type='dict', options=dict( + resource_name=dict(type='str'), + resource_standard=dict(type='str'), + resource_provider=dict(type='str'), + )), + resource_option=dict(type='list', elements='str', default=list()), + resource_operation=dict(type='list', elements='dict', default=list(), options=dict( + operation_action=dict(type='str'), + operation_option=dict(type='list', elements='str'), + )), + resource_meta=dict(type='list', elements='str'), + resource_argument=dict(type='dict', options=dict( + argument_action=dict(type='str', choices=['clone', 'master', 'group', 'promotable']), + argument_option=dict(type='list', elements='str'), + )), + resource_clone_ids=dict(type='list', elements='str'), + resource_clone_meta=dict(type='list', elements='str'), + wait=dict(type='int', default=300), + 
), + required_if=[ + ('state', 'present', ['resource_type', 'resource_option', 'name']), + ('state', 'absent', ['name']), + ('state', 'enabled', ['name']), + ('state', 'disabled', ['name']), + ], + supports_check_mode=True, + ) + + def __init_module__(self): + self.runner = pacemaker_runner(self.module) + self.vars.set('previous_value', self._get()['out']) + self.vars.set('value', self.vars.previous_value, change=True, diff=True) + self.module.params['name'] = self.module.params['name'] or None + + def __quit_module__(self): + self.vars.set('value', self._get()['out']) + + def _process_command_output(self, fail_on_err, ignore_err_msg=""): + def process(rc, out, err): + if fail_on_err and rc != 0 and err and ignore_err_msg not in err: + self.do_raise('pcs failed with error (rc={0}): {1}'.format(rc, err)) + out = out.rstrip() + return None if out == "" else out + return process + + def _get(self): + with self.runner('cli_action state name') as ctx: + result = ctx.run(cli_action="resource", state='status') + return dict(rc=result[0], + out=(result[1] if result[1] != "" else None), + err=result[2]) + + def fmt_as_stack_argument(self, value, arg): + if value is not None: + return [x for k in value for x in (arg, k)] + + def state_absent(self): + force = get_pacemaker_maintenance_mode(self.runner) + with self.runner('cli_action state name force', output_process=self._process_command_output(True, "does not exist"), check_mode_skip=True) as ctx: + ctx.run(cli_action='resource', force=force) + + def state_present(self): + with self.runner( + 'cli_action state name resource_type resource_option resource_operation resource_meta resource_argument ' + 'resource_clone_ids resource_clone_meta wait', + output_process=self._process_command_output(not get_pacemaker_maintenance_mode(self.runner), "already exists"), + check_mode_skip=True) as ctx: + ctx.run(cli_action='resource', resource_clone_ids=self.fmt_as_stack_argument(self.module.params["resource_clone_ids"], "clone")) + + def 
state_cloned(self): + with self.runner( + 'cli_action state name resource_clone_ids resource_clone_meta wait', + output_process=self._process_command_output( + not get_pacemaker_maintenance_mode(self.runner), + "already a clone resource"), check_mode_skip=True) as ctx: + ctx.run(cli_action='resource', resource_clone_meta=self.fmt_as_stack_argument(self.module.params["resource_clone_meta"], "meta")) + + def state_enabled(self): + with self.runner('cli_action state name', output_process=self._process_command_output(True, "Starting"), check_mode_skip=True) as ctx: + ctx.run(cli_action='resource') + + def state_disabled(self): + with self.runner('cli_action state name', output_process=self._process_command_output(True, "Stopped"), check_mode_skip=True) as ctx: + ctx.run(cli_action='resource') + + def state_cleanup(self): + runner_args = ['cli_action', 'state'] + if self.module.params['name']: + runner_args.append('name') + with self.runner(runner_args, output_process=self._process_command_output(True, "Clean"), check_mode_skip=True) as ctx: + ctx.run(cli_action='resource') + + +def main(): + PacemakerResource.execute() + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/pacemaker_stonith.py b/plugins/modules/pacemaker_stonith.py new file mode 100644 index 0000000000..f8c6bbddc4 --- /dev/null +++ b/plugins/modules/pacemaker_stonith.py @@ -0,0 +1,218 @@ +#!/usr/bin/python + +# Copyright (c) 2025, Dexter Le +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + +DOCUMENTATION = ''' +module: pacemaker_stonith +short_description: Manage Pacemaker STONITH +author: + - Dexter Le (@munchtoast) +version_added: 11.3.0 +description: + - This module manages STONITH in a Pacemaker cluster using the Pacemaker CLI. 
+seealso: + - name: Pacemaker STONITH documentation + description: Complete documentation for Pacemaker STONITH. + link: https://clusterlabs.org/projects/pacemaker/doc/3.0/Pacemaker_Explained/html/resources.html#stonith +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: partial + details: + - Only works when check mode is not enabled. +options: + state: + description: + - Indicate desired state for cluster STONITH. + choices: [present, absent, enabled, disabled] + default: present + type: str + name: + description: + - Specify the STONITH name to create. + required: true + type: str + stonith_type: + description: + - Specify the STONITH device type. + type: str + stonith_options: + description: + - Specify the STONITH option to create. + type: list + elements: str + default: [] + stonith_operations: + description: + - List of operations to associate with STONITH. + type: list + elements: dict + default: [] + suboptions: + operation_action: + description: + - Operation action to associate with STONITH. + type: str + operation_options: + description: + - Operation options to associate with action. + type: list + elements: str + stonith_metas: + description: + - List of metadata to associate with STONITH. + type: list + elements: str + stonith_argument: + description: + - Action to associate with STONITH. + type: dict + suboptions: + argument_action: + description: + - Action to apply to STONITH. + type: str + choices: [group, before, after] + argument_options: + description: + - Options to associate with STONITH action. + type: list + elements: str + agent_validation: + description: + - Enabled agent validation for STONITH creation. + type: bool + default: false + wait: + description: + - Timeout period for polling the STONITH creation. 
+ type: int + default: 300 +''' + +EXAMPLES = ''' +- name: Create virtual-ip STONITH + community.general.pacemaker_stonith: + state: present + name: virtual-stonith + stonith_type: fence_virt + stonith_options: + - "pcmk_host_list=f1" + stonith_operations: + - operation_action: monitor + operation_options: + - "interval=30s" +''' + +RETURN = ''' +previous_value: + description: The value of the STONITH before executing the module. + type: str + sample: " * virtual-stonith\t(stonith:fence_virt):\t Started" + returned: on success +value: + description: The value of the STONITH after executing the module. + type: str + sample: " * virtual-stonith\t(stonith:fence_virt):\t Started" + returned: on success +''' + +from ansible_collections.community.general.plugins.module_utils.module_helper import StateModuleHelper +from ansible_collections.community.general.plugins.module_utils.pacemaker import pacemaker_runner + + +class PacemakerStonith(StateModuleHelper): + module = dict( + argument_spec=dict( + state=dict(type='str', default='present', choices=['present', 'absent', 'enabled', 'disabled']), + name=dict(type='str', required=True), + stonith_type=dict(type='str'), + stonith_options=dict(type='list', elements='str', default=[]), + stonith_operations=dict(type='list', elements='dict', default=[], options=dict( + operation_action=dict(type='str'), + operation_options=dict(type='list', elements='str'), + )), + stonith_metas=dict(type='list', elements='str'), + stonith_argument=dict(type='dict', options=dict( + argument_action=dict(type='str', choices=['before', 'after', 'group']), + argument_options=dict(type='list', elements='str'), + )), + agent_validation=dict(type='bool', default=False), + wait=dict(type='int', default=300), + ), + required_if=[('state', 'present', ['stonith_type', 'stonith_options'])], + supports_check_mode=True + ) + + def __init_module__(self): + self.runner = pacemaker_runner(self.module) + self.vars.set('previous_value', self._get()['out']) + 
self.vars.set('value', self.vars.previous_value, change=True, diff=True) + + def __quit_module__(self): + self.vars.set('value', self._get()['out']) + + def _process_command_output(self, fail_on_err, ignore_err_msg=""): + def process(rc, out, err): + if fail_on_err and rc != 0 and err and ignore_err_msg not in err: + self.do_raise('pcs failed with error (rc={0}): {1}'.format(rc, err)) + out = out.rstrip() + return None if out == "" else out + return process + + def _get(self): + with self.runner('cli_action state name') as ctx: + result = ctx.run(cli_action='stonith', state='status') + return dict(rc=result[0], + out=result[1] if result[1] != "" else None, + err=result[2]) + + def fmt_stonith_resource(self): + return dict(resource_name=self.vars.stonith_type) + + # TODO: Pluralize operation_options in separate PR and remove this helper fmt function + def fmt_stonith_operations(self): + modified_stonith_operations = [] + for stonith_operation in self.vars.stonith_operations: + modified_stonith_operations.append(dict(operation_action=stonith_operation.get('operation_action'), + operation_option=stonith_operation.get('operation_options'))) + return modified_stonith_operations + + def state_absent(self): + with self.runner('cli_action state name', output_process=self._process_command_output(True, "does not exist"), check_mode_skip=True) as ctx: + ctx.run(cli_action='stonith') + + def state_present(self): + with self.runner( + 'cli_action state name resource_type resource_option resource_operation resource_meta resource_argument agent_validation wait', + output_process=self._process_command_output(True, "already exists"), + check_mode_skip=True) as ctx: + ctx.run(cli_action='stonith', + resource_type=self.fmt_stonith_resource(), + resource_option=self.vars.stonith_options, + resource_operation=self.fmt_stonith_operations(), + resource_meta=self.vars.stonith_metas, + resource_argument=self.vars.stonith_argument) + + def state_enabled(self): + with self.runner('cli_action 
state name', output_process=self._process_command_output(True, "Starting"), check_mode_skip=True) as ctx: + ctx.run(cli_action='stonith') + + def state_disabled(self): + with self.runner('cli_action state name', output_process=self._process_command_output(True, "Stopped"), check_mode_skip=True) as ctx: + ctx.run(cli_action='stonith') + + +def main(): + PacemakerStonith.execute() + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/packaging/language/ansible_galaxy_install.py b/plugins/modules/packaging/language/ansible_galaxy_install.py deleted file mode 100644 index 968a8d093d..0000000000 --- a/plugins/modules/packaging/language/ansible_galaxy_install.py +++ /dev/null @@ -1,348 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# (c) 2021, Alexei Znamensky -# -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - -DOCUMENTATION = """ -module: ansible_galaxy_install -author: - - "Alexei Znamensky (@russoz)" -short_description: Install Ansible roles or collections using ansible-galaxy -version_added: 3.5.0 -description: - - This module allows the installation of Ansible collections or roles using C(ansible-galaxy). -notes: - - > - B(Ansible 2.9/2.10): The C(ansible-galaxy) command changed significantly between Ansible 2.9 and - ansible-base 2.10 (later ansible-core 2.11). See comments in the parameters. -requirements: - - Ansible 2.9, ansible-base 2.10, or ansible-core 2.11 or newer -options: - type: - description: - - The type of installation performed by C(ansible-galaxy). - - If I(type) is C(both), then I(requirements_file) must be passed and it may contain both roles and collections. - - "Note however that the opposite is not true: if using a I(requirements_file), then I(type) can be any of the three choices." - - "B(Ansible 2.9): The option C(both) will have the same effect as C(role)." 
- type: str - choices: [collection, role, both] - required: true - name: - description: - - Name of the collection or role being installed. - - > - Versions can be specified with C(ansible-galaxy) usual formats. - For example, the collection C(community.docker:1.6.1) or the role C(ansistrano.deploy,3.8.0). - - I(name) and I(requirements_file) are mutually exclusive. - type: str - requirements_file: - description: - - Path to a file containing a list of requirements to be installed. - - It works for I(type) equals to C(collection) and C(role). - - I(name) and I(requirements_file) are mutually exclusive. - - "B(Ansible 2.9): It can only be used to install either I(type=role) or I(type=collection), but not both at the same run." - type: path - dest: - description: - - The path to the directory containing your collections or roles, according to the value of I(type). - - > - Please notice that C(ansible-galaxy) will not install collections with I(type=both), when I(requirements_file) - contains both roles and collections and I(dest) is specified. - type: path - no_deps: - description: - - Refrain from installing dependencies. - version_added: 4.5.0 - type: bool - default: false - force: - description: - - Force overwriting an existing role or collection. - - Using I(force=true) is mandatory when downgrading. - - "B(Ansible 2.9 and 2.10): Must be C(true) to upgrade roles and collections." - type: bool - default: false - ack_ansible29: - description: - - Acknowledge using Ansible 2.9 with its limitations, and prevents the module from generating warnings about them. - - This option is completely ignored if using a version of Ansible greater than C(2.9.x). - - Note that this option will be removed without any further deprecation warning once support - for Ansible 2.9 is removed from this module. - type: bool - default: false - ack_min_ansiblecore211: - description: - - Acknowledge the module is deprecating support for Ansible 2.9 and ansible-base 2.10. 
- - Support for those versions will be removed in community.general 8.0.0. - At the same time, this option will be removed without any deprecation warning! - - This option is completely ignored if using a version of ansible-core/ansible-base/Ansible greater than C(2.11). - - For the sake of conciseness, setting this parameter to C(true) implies I(ack_ansible29=true). - type: bool - default: false -""" - -EXAMPLES = """ -- name: Install collection community.network - community.general.ansible_galaxy_install: - type: collection - name: community.network - -- name: Install role at specific path - community.general.ansible_galaxy_install: - type: role - name: ansistrano.deploy - dest: /ansible/roles - -- name: Install collections and roles together - community.general.ansible_galaxy_install: - type: both - requirements_file: requirements.yml - -- name: Force-install collection community.network at specific version - community.general.ansible_galaxy_install: - type: collection - name: community.network:3.0.2 - force: true - -""" - -RETURN = """ - type: - description: The value of the I(type) parameter. - type: str - returned: always - name: - description: The value of the I(name) parameter. - type: str - returned: always - dest: - description: The value of the I(dest) parameter. - type: str - returned: always - requirements_file: - description: The value of the I(requirements_file) parameter. - type: str - returned: always - force: - description: The value of the I(force) parameter. - type: bool - returned: always - installed_roles: - description: - - If I(requirements_file) is specified instead, returns dictionary with all the roles installed per path. - - If I(name) is specified, returns that role name and the version installed per path. - - "B(Ansible 2.9): Returns empty because C(ansible-galaxy) has no C(list) subcommand." - type: dict - returned: always when installing roles - contains: - "": - description: Roles and versions for that path. 
- type: dict - sample: - /home/user42/.ansible/roles: - ansistrano.deploy: 3.9.0 - baztian.xfce: v0.0.3 - /custom/ansible/roles: - ansistrano.deploy: 3.8.0 - installed_collections: - description: - - If I(requirements_file) is specified instead, returns dictionary with all the collections installed per path. - - If I(name) is specified, returns that collection name and the version installed per path. - - "B(Ansible 2.9): Returns empty because C(ansible-galaxy) has no C(list) subcommand." - type: dict - returned: always when installing collections - contains: - "": - description: Collections and versions for that path - type: dict - sample: - /home/az/.ansible/collections/ansible_collections: - community.docker: 1.6.0 - community.general: 3.0.2 - /custom/ansible/ansible_collections: - community.general: 3.1.0 - new_collections: - description: New collections installed by this module. - returned: success - type: dict - sample: - community.general: 3.1.0 - community.docker: 1.6.1 - new_roles: - description: New roles installed by this module. - returned: success - type: dict - sample: - ansistrano.deploy: 3.8.0 - baztian.xfce: v0.0.3 -""" - -import re - -from ansible_collections.community.general.plugins.module_utils.module_helper import CmdModuleHelper, ArgFormat - - -class AnsibleGalaxyInstall(CmdModuleHelper): - _RE_GALAXY_VERSION = re.compile(r'^ansible-galaxy(?: \[core)? 
(?P\d+\.\d+\.\d+)(?:\.\w+)?(?:\])?') - _RE_LIST_PATH = re.compile(r'^# (?P.*)$') - _RE_LIST_COLL = re.compile(r'^(?P\w+\.\w+)\s+(?P[\d\.]+)\s*$') - _RE_LIST_ROLE = re.compile(r'^- (?P\w+\.\w+),\s+(?P[\d\.]+)\s*$') - _RE_INSTALL_OUTPUT = None # Set after determining ansible version, see __init_module__() - ansible_version = None - is_ansible29 = None - - output_params = ('type', 'name', 'dest', 'requirements_file', 'force', 'no_deps') - module = dict( - argument_spec=dict( - type=dict(type='str', choices=('collection', 'role', 'both'), required=True), - name=dict(type='str'), - requirements_file=dict(type='path'), - dest=dict(type='path'), - force=dict(type='bool', default=False), - no_deps=dict(type='bool', default=False), - ack_ansible29=dict(type='bool', default=False), - ack_min_ansiblecore211=dict(type='bool', default=False), - ), - mutually_exclusive=[('name', 'requirements_file')], - required_one_of=[('name', 'requirements_file')], - required_if=[('type', 'both', ['requirements_file'])], - supports_check_mode=False, - ) - - command = 'ansible-galaxy' - command_args_formats = dict( - type=dict(fmt=lambda v: [] if v == 'both' else [v]), - galaxy_cmd=dict(), - requirements_file=dict(fmt=('-r', '{0}'),), - dest=dict(fmt=('-p', '{0}'),), - force=dict(fmt="--force", style=ArgFormat.BOOLEAN), - no_deps=dict(fmt="--no-deps", style=ArgFormat.BOOLEAN), - ) - force_lang = "en_US.UTF-8" - check_rc = True - - def _get_ansible_galaxy_version(self): - ansible_galaxy = self.module.get_bin_path("ansible-galaxy", required=True) - dummy, out, dummy = self.module.run_command([ansible_galaxy, "--version"], check_rc=True) - line = out.splitlines()[0] - match = self._RE_GALAXY_VERSION.match(line) - if not match: - raise RuntimeError("Unable to determine ansible-galaxy version from: {0}".format(line)) - version = match.group("version") - version = tuple(int(x) for x in version.split('.')[:3]) - return version - - def __init_module__(self): - self.ansible_version = 
self._get_ansible_galaxy_version() - if self.ansible_version < (2, 11) and not self.vars.ack_min_ansiblecore211: - self.module.deprecate( - "Support for Ansible 2.9 and ansible-base 2.10 is being deprecated. " - "At the same time support for them is ended, also the ack_ansible29 option will be removed. " - "Upgrading is strongly recommended, or set 'ack_min_ansiblecore211' to supress this message.", - version="8.0.0", - collection_name="community.general", - ) - self.is_ansible29 = self.ansible_version < (2, 10) - if self.is_ansible29: - self._RE_INSTALL_OUTPUT = re.compile(r"^(?:.*Installing '(?P\w+\.\w+):(?P[\d\.]+)'.*" - r'|- (?P\w+\.\w+) \((?P[\d\.]+)\)' - r' was installed successfully)$') - else: - # Collection install output changed: - # ansible-base 2.10: "coll.name (x.y.z)" - # ansible-core 2.11+: "coll.name:x.y.z" - self._RE_INSTALL_OUTPUT = re.compile(r'^(?:(?P\w+\.\w+)(?: \(|:)(?P[\d\.]+)\)?' - r'|- (?P\w+\.\w+) \((?P[\d\.]+)\))' - r' was installed successfully$') - - @staticmethod - def _process_output_list(*args): - if "None of the provided paths were usable" in args[1]: - return [] - return args[1].splitlines() - - def _list_element(self, _type, path_re, elem_re): - params = ({'type': _type}, {'galaxy_cmd': 'list'}, 'dest') - elems = self.run_command(params=params, - publish_rc=False, publish_out=False, publish_err=False, publish_cmd=False, - process_output=self._process_output_list, - check_rc=False) - elems_dict = {} - current_path = None - for line in elems: - if line.startswith("#"): - match = path_re.match(line) - if not match: - continue - if self.vars.dest is not None and match.group('path') != self.vars.dest: - current_path = None - continue - current_path = match.group('path') if match else None - elems_dict[current_path] = {} - - elif current_path is not None: - match = elem_re.match(line) - if not match or (self.vars.name is not None and match.group('elem') != self.vars.name): - continue - elems_dict[current_path][match.group('elem')] = 
match.group('version') - return elems_dict - - def _list_collections(self): - return self._list_element('collection', self._RE_LIST_PATH, self._RE_LIST_COLL) - - def _list_roles(self): - return self._list_element('role', self._RE_LIST_PATH, self._RE_LIST_ROLE) - - def _setup29(self): - self.vars.set("new_collections", {}) - self.vars.set("new_roles", {}) - self.vars.set("ansible29_change", False, change=True, output=False) - if not (self.vars.ack_ansible29 or self.vars.ack_min_ansiblecore211): - self.module.warn("Ansible 2.9 or older: unable to retrieve lists of roles and collections already installed") - if self.vars.requirements_file is not None and self.vars.type == 'both': - self.module.warn("Ansible 2.9 or older: will install only roles from requirement files") - - def _setup210plus(self): - self.vars.set("new_collections", {}, change=True) - self.vars.set("new_roles", {}, change=True) - if self.vars.type != "collection": - self.vars.installed_roles = self._list_roles() - if self.vars.type != "roles": - self.vars.installed_collections = self._list_collections() - - def __run__(self): - if self.is_ansible29: - if self.vars.type == 'both': - raise ValueError("Type 'both' not supported in Ansible 2.9") - self._setup29() - else: - self._setup210plus() - params = ('type', {'galaxy_cmd': 'install'}, 'force', 'no_deps', 'dest', 'requirements_file', 'name') - self.run_command(params=params) - - def process_command_output(self, rc, out, err): - for line in out.splitlines(): - match = self._RE_INSTALL_OUTPUT.match(line) - if not match: - continue - if match.group("collection"): - self.vars.new_collections[match.group("collection")] = match.group("cversion") - if self.is_ansible29: - self.vars.ansible29_change = True - elif match.group("role"): - self.vars.new_roles[match.group("role")] = match.group("rversion") - if self.is_ansible29: - self.vars.ansible29_change = True - - -def main(): - galaxy = AnsibleGalaxyInstall() - galaxy.run() - - -if __name__ == '__main__': - 
main() diff --git a/plugins/modules/packaging/language/cpanm.py b/plugins/modules/packaging/language/cpanm.py deleted file mode 100644 index 8c8f2ea1c3..0000000000 --- a/plugins/modules/packaging/language/cpanm.py +++ /dev/null @@ -1,239 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# (c) 2012, Franck Cuny -# (c) 2021, Alexei Znamensky -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' ---- -module: cpanm -short_description: Manages Perl library dependencies. -description: - - Manage Perl library dependencies using cpanminus. -options: - name: - type: str - description: - - The Perl library to install. Valid values change according to the I(mode), see notes for more details. - - Note that for installing from a local path the parameter I(from_path) should be used. - aliases: [pkg] - from_path: - type: path - description: - - The local directory or C(tar.gz) file to install from. - notest: - description: - - Do not run unit tests. - type: bool - default: no - locallib: - description: - - Specify the install base to install modules. - type: path - mirror: - description: - - Specifies the base URL for the CPAN mirror to use. - type: str - mirror_only: - description: - - Use the mirror's index file instead of the CPAN Meta DB. - type: bool - default: no - installdeps: - description: - - Only install dependencies. - type: bool - default: no - version: - description: - - Version specification for the perl module. When I(mode) is C(new), C(cpanm) version operators are accepted. - type: str - executable: - description: - - Override the path to the cpanm executable. - type: path - mode: - description: - - Controls the module behavior. See notes below for more details. 
- type: str - choices: [compatibility, new] - default: compatibility - version_added: 3.0.0 - name_check: - description: - - When in C(new) mode, this parameter can be used to check if there is a module I(name) installed (at I(version), when specified). - type: str - version_added: 3.0.0 -notes: - - Please note that U(http://search.cpan.org/dist/App-cpanminus/bin/cpanm, cpanm) must be installed on the remote host. - - "This module now comes with a choice of execution I(mode): C(compatibility) or C(new)." - - "C(compatibility) mode:" - - When using C(compatibility) mode, the module will keep backward compatibility. This is the default mode. - - I(name) must be either a module name or a distribution file. - - > - If the perl module given by I(name) is installed (at the exact I(version) when specified), then nothing happens. - Otherwise, it will be installed using the C(cpanm) executable. - - I(name) cannot be an URL, or a git URL. - - C(cpanm) version specifiers do not work in this mode. - - "C(new) mode:" - - "When using C(new) mode, the module will behave differently" - - > - The I(name) parameter may refer to a module name, a distribution file, - a HTTP URL or a git repository URL as described in C(cpanminus) documentation. - - C(cpanm) version specifiers are recognized. 
-author: - - "Franck Cuny (@fcuny)" - - "Alexei Znamensky (@russoz)" -''' - -EXAMPLES = ''' -- name: Install Dancer perl package - community.general.cpanm: - name: Dancer - -- name: Install version 0.99_05 of the Plack perl package - community.general.cpanm: - name: MIYAGAWA/Plack-0.99_05.tar.gz - -- name: Install Dancer into the specified locallib - community.general.cpanm: - name: Dancer - locallib: /srv/webapps/my_app/extlib - -- name: Install perl dependencies from local directory - community.general.cpanm: - from_path: /srv/webapps/my_app/src/ - -- name: Install Dancer perl package without running the unit tests in indicated locallib - community.general.cpanm: - name: Dancer - notest: True - locallib: /srv/webapps/my_app/extlib - -- name: Install Dancer perl package from a specific mirror - community.general.cpanm: - name: Dancer - mirror: 'http://cpan.cpantesters.org/' - -- name: Install Dancer perl package into the system root path - become: yes - community.general.cpanm: - name: Dancer - -- name: Install Dancer if it is not already installed OR the installed version is older than version 1.0 - community.general.cpanm: - name: Dancer - version: '1.0' -''' - -import os - -from ansible_collections.community.general.plugins.module_utils.module_helper import ( - ModuleHelper, CmdMixin, ArgFormat -) - - -class CPANMinus(CmdMixin, ModuleHelper): - output_params = ['name', 'version'] - module = dict( - argument_spec=dict( - name=dict(type='str', aliases=['pkg']), - version=dict(type='str'), - from_path=dict(type='path'), - notest=dict(type='bool', default=False), - locallib=dict(type='path'), - mirror=dict(type='str'), - mirror_only=dict(type='bool', default=False), - installdeps=dict(type='bool', default=False), - executable=dict(type='path'), - mode=dict(type='str', choices=['compatibility', 'new'], default='compatibility'), - name_check=dict(type='str') - ), - required_one_of=[('name', 'from_path')], - - ) - command = 'cpanm' - command_args_formats = dict( - 
notest=dict(fmt="--notest", style=ArgFormat.BOOLEAN), - locallib=dict(fmt=('--local-lib', '{0}'),), - mirror=dict(fmt=('--mirror', '{0}'),), - mirror_only=dict(fmt="--mirror-only", style=ArgFormat.BOOLEAN), - installdeps=dict(fmt="--installdeps", style=ArgFormat.BOOLEAN), - ) - check_rc = True - - def __init_module__(self): - v = self.vars - if v.mode == "compatibility": - if v.name_check: - self.do_raise("Parameter name_check can only be used with mode=new") - else: - if v.name and v.from_path: - self.do_raise("Parameters 'name' and 'from_path' are mutually exclusive when 'mode=new'") - - self.command = self.module.get_bin_path(v.executable if v.executable else self.command) - self.vars.set("binary", self.command) - - def _is_package_installed(self, name, locallib, version): - if name is None or name.endswith('.tar.gz'): - return False - version = "" if version is None else " " + version - - env = {"PERL5LIB": "%s/lib/perl5" % locallib} if locallib else {} - cmd = ['perl', '-le', 'use %s%s;' % (name, version)] - rc, out, err = self.module.run_command(cmd, check_rc=False, environ_update=env) - - return rc == 0 - - def sanitize_pkg_spec_version(self, pkg_spec, version): - if version is None: - return pkg_spec - if pkg_spec.endswith('.tar.gz'): - self.do_raise(msg="parameter 'version' must not be used when installing from a file") - if os.path.isdir(pkg_spec): - self.do_raise(msg="parameter 'version' must not be used when installing from a directory") - if pkg_spec.endswith('.git'): - if version.startswith('~'): - self.do_raise(msg="operator '~' not allowed in version parameter when installing from git repository") - version = version if version.startswith('@') else '@' + version - elif version[0] not in ('@', '~'): - version = '~' + version - return pkg_spec + version - - def __run__(self): - v = self.vars - pkg_param = 'from_path' if v.from_path else 'name' - - if v.mode == 'compatibility': - if self._is_package_installed(v.name, v.locallib, v.version): - return - 
pkg_spec = v[pkg_param] - self.changed = self.run_command( - params=['notest', 'locallib', 'mirror', 'mirror_only', 'installdeps', {'name': pkg_spec}], - ) - else: - installed = self._is_package_installed(v.name_check, v.locallib, v.version) if v.name_check else False - if installed: - return - pkg_spec = self.sanitize_pkg_spec_version(v[pkg_param], v.version) - self.changed = self.run_command( - params=['notest', 'locallib', 'mirror', 'mirror_only', 'installdeps', {'name': pkg_spec}], - ) - - def process_command_output(self, rc, out, err): - if self.vars.mode == "compatibility" and rc != 0: - self.do_raise(msg=err, cmd=self.vars.cmd_args) - return 'is up to date' not in err and 'is up to date' not in out - - -def main(): - CPANMinus.execute() - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/packaging/language/pipx.py b/plugins/modules/packaging/language/pipx.py deleted file mode 100644 index 0d1103000a..0000000000 --- a/plugins/modules/packaging/language/pipx.py +++ /dev/null @@ -1,296 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# (c) 2021, Alexei Znamensky -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' ---- -module: pipx -short_description: Manages applications installed with pipx -version_added: 3.8.0 -description: - - Manage Python applications installed in isolated virtualenvs using pipx. -options: - state: - type: str - choices: [present, absent, install, uninstall, uninstall_all, inject, upgrade, upgrade_all, reinstall, reinstall_all] - default: install - description: - - Desired state for the application. - - The states C(present) and C(absent) are aliases to C(install) and C(uninstall), respectively. - name: - type: str - description: - - > - The name of the application to be installed. It must to be a simple package name. 
- For passing package specifications or installing from URLs or directories, - please use the I(source) option. - source: - type: str - description: - - > - If the application source, such as a package with version specifier, or an URL, - directory or any other accepted specification. See C(pipx) documentation for more details. - - When specified, the C(pipx) command will use I(source) instead of I(name). - install_deps: - description: - - Include applications of dependent packages. - - Only used when I(state=install) or I(state=upgrade). - type: bool - default: false - inject_packages: - description: - - Packages to be injected into an existing virtual environment. - - Only used when I(state=inject). - type: list - elements: str - force: - description: - - Force modification of the application's virtual environment. See C(pipx) for details. - - Only used when I(state=install), I(state=upgrade), I(state=upgrade_all), or I(state=inject). - type: bool - default: false - include_injected: - description: - - Upgrade the injected packages along with the application. - - Only used when I(state=upgrade) or I(state=upgrade_all). - type: bool - default: false - index_url: - description: - - Base URL of Python Package Index. - - Only used when I(state=install), I(state=upgrade), or I(state=inject). - type: str - python: - description: - - Python version to be used when creating the application virtual environment. Must be 3.6+. - - Only used when I(state=install), I(state=reinstall), or I(state=reinstall_all). - type: str - executable: - description: - - Path to the C(pipx) installed in the system. - - > - If not specified, the module will use C(python -m pipx) to run the tool, - using the same Python interpreter as ansible itself. - type: path - editable: - description: - - Install the project in editable mode. - type: bool - default: false - version_added: 4.6.0 - pip_args: - description: - - Arbitrary arguments to pass directly to C(pip). 
- type: str - version_added: 4.6.0 -notes: - - This module does not install the C(pipx) python package, however that can be easily done with the module M(ansible.builtin.pip). - - This module does not require C(pipx) to be in the shell C(PATH), but it must be loadable by Python as a module. - - Please note that C(pipx) requires Python 3.6 or above. - - > - This first implementation does not verify whether a specified version constraint has been installed or not. - Hence, when using version operators, C(pipx) module will always try to execute the operation, - even when the application was previously installed. - This feature will be added in the future. - - See also the C(pipx) documentation at U(https://pypa.github.io/pipx/). -author: - - "Alexei Znamensky (@russoz)" -''' - -EXAMPLES = ''' -- name: Install tox - community.general.pipx: - name: tox - -- name: Install tox from git repository - community.general.pipx: - name: tox - source: git+https://github.com/tox-dev/tox.git - -- name: Upgrade tox - community.general.pipx: - name: tox - state: upgrade - -- name: Reinstall black with specific Python version - community.general.pipx: - name: black - state: reinstall - python: 3.7 - -- name: Uninstall pycowsay - community.general.pipx: - name: pycowsay - state: absent -''' - - -import json - -from ansible_collections.community.general.plugins.module_utils.module_helper import ( - CmdStateModuleHelper, ArgFormat -) -from ansible.module_utils.facts.compat import ansible_facts - - -_state_map = dict( - present='install', - absent='uninstall', - uninstall_all='uninstall-all', - upgrade_all='upgrade-all', - reinstall_all='reinstall-all', -) - - -class PipX(CmdStateModuleHelper): - output_params = ['name', 'source', 'index_url', 'force', 'installdeps'] - module = dict( - argument_spec=dict( - state=dict(type='str', default='install', - choices=['present', 'absent', 'install', 'uninstall', 'uninstall_all', - 'inject', 'upgrade', 'upgrade_all', 'reinstall', 'reinstall_all']), 
- name=dict(type='str'), - source=dict(type='str'), - install_deps=dict(type='bool', default=False), - inject_packages=dict(type='list', elements='str'), - force=dict(type='bool', default=False), - include_injected=dict(type='bool', default=False), - index_url=dict(type='str'), - python=dict(type='str'), - executable=dict(type='path'), - editable=dict(type='bool', default=False), - pip_args=dict(type='str'), - ), - required_if=[ - ('state', 'present', ['name']), - ('state', 'install', ['name']), - ('state', 'absent', ['name']), - ('state', 'uninstall', ['name']), - ('state', 'inject', ['name', 'inject_packages']), - ], - supports_check_mode=True, - ) - command_args_formats = dict( - state=dict(fmt=lambda v: [_state_map.get(v, v)]), - name_source=dict(fmt=lambda n, s: [s] if s else [n], stars=1), - install_deps=dict(fmt="--include-deps", style=ArgFormat.BOOLEAN), - inject_packages=dict(fmt=lambda v: v), - force=dict(fmt="--force", style=ArgFormat.BOOLEAN), - include_injected=dict(fmt="--include-injected", style=ArgFormat.BOOLEAN), - index_url=dict(fmt=('--index-url', '{0}'),), - python=dict(fmt=('--python', '{0}'),), - _list=dict(fmt=('list', '--include-injected', '--json'), style=ArgFormat.BOOLEAN), - editable=dict(fmt="--editable", style=ArgFormat.BOOLEAN), - pip_args=dict(fmt=('--pip-args', '{0}'),), - ) - check_rc = True - run_command_fixed_options = dict( - environ_update={'USE_EMOJI': '0'} - ) - - def _retrieve_installed(self): - def process_list(rc, out, err): - if not out: - return {} - - results = {} - raw_data = json.loads(out) - for venv_name, venv in raw_data['venvs'].items(): - results[venv_name] = { - 'version': venv['metadata']['main_package']['package_version'], - 'injected': dict( - (k, v['package_version']) for k, v in venv['metadata']['injected_packages'].items() - ), - } - return results - - installed = self.run_command(params=[{'_list': True}], process_output=process_list, - publish_rc=False, publish_out=False, publish_err=False, 
publish_cmd=False) - - if self.vars.name is not None: - app_list = installed.get(self.vars.name) - if app_list: - return {self.vars.name: app_list} - else: - return {} - - return installed - - def __init_module__(self): - if self.vars.executable: - self.command = [self.vars.executable] - else: - facts = ansible_facts(self.module, gather_subset=['python']) - self.command = [facts['python']['executable'], '-m', 'pipx'] - - self.vars.set('application', self._retrieve_installed(), change=True, diff=True) - - def __quit_module__(self): - self.vars.application = self._retrieve_installed() - - def state_install(self): - if not self.vars.application or self.vars.force: - self.changed = True - if not self.module.check_mode: - self.run_command(params=[ - 'state', 'index_url', 'install_deps', 'force', 'python', 'editable', 'pip_args', - {'name_source': [self.vars.name, self.vars.source]}]) - - state_present = state_install - - def state_upgrade(self): - if not self.vars.application: - self.do_raise("Trying to upgrade a non-existent application: {0}".format(self.vars.name)) - if self.vars.force: - self.changed = True - if not self.module.check_mode: - self.run_command(params=['state', 'index_url', 'install_deps', 'force', 'editable', 'pip_args', 'name']) - - def state_uninstall(self): - if self.vars.application and not self.module.check_mode: - self.run_command(params=['state', 'name']) - - state_absent = state_uninstall - - def state_reinstall(self): - if not self.vars.application: - self.do_raise("Trying to reinstall a non-existent application: {0}".format(self.vars.name)) - self.changed = True - if not self.module.check_mode: - self.run_command(params=['state', 'name', 'python']) - - def state_inject(self): - if not self.vars.application: - self.do_raise("Trying to inject packages into a non-existent application: {0}".format(self.vars.name)) - if self.vars.force: - self.changed = True - if not self.module.check_mode: - self.run_command(params=['state', 'index_url', 'force', 
'editable', 'pip_args', 'name', 'inject_packages']) - - def state_uninstall_all(self): - if not self.module.check_mode: - self.run_command(params=['state']) - - def state_reinstall_all(self): - if not self.module.check_mode: - self.run_command(params=['state', 'python']) - - def state_upgrade_all(self): - if self.vars.force: - self.changed = True - if not self.module.check_mode: - self.run_command(params=['state', 'include_injected', 'force']) - - -def main(): - PipX.execute() - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/packaging/os/apt_rpm.py b/plugins/modules/packaging/os/apt_rpm.py deleted file mode 100644 index 95d0f64109..0000000000 --- a/plugins/modules/packaging/os/apt_rpm.py +++ /dev/null @@ -1,182 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Copyright: (c) 2013, Evgenii Terechkov -# Written by Evgenii Terechkov -# Based on urpmi module written by Philippe Makowski - -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - -DOCUMENTATION = ''' ---- -module: apt_rpm -short_description: apt_rpm package manager -description: - - Manages packages with I(apt-rpm). Both low-level (I(rpm)) and high-level (I(apt-get)) package manager binaries required. -options: - package: - description: - - list of packages to install, upgrade or remove. - required: true - aliases: [ name, pkg ] - type: list - elements: str - state: - description: - - Indicates the desired package state. - choices: [ absent, present, installed, removed ] - default: present - type: str - update_cache: - description: - - update the package database first C(apt-get update). 
- type: bool - default: no -author: -- Evgenii Terechkov (@evgkrsk) -''' - -EXAMPLES = ''' -- name: Install package foo - community.general.apt_rpm: - pkg: foo - state: present - -- name: Install packages foo and bar - community.general.apt_rpm: - pkg: - - foo - - bar - state: present - -- name: Remove package foo - community.general.apt_rpm: - pkg: foo - state: absent - -- name: Remove packages foo and bar - community.general.apt_rpm: - pkg: foo,bar - state: absent - -# bar will be the updated if a newer version exists -- name: Update the package database and install bar - community.general.apt_rpm: - name: bar - state: present - update_cache: yes -''' - -import json -import os -import shlex -import sys - -from ansible.module_utils.basic import AnsibleModule - -APT_PATH = "/usr/bin/apt-get" -RPM_PATH = "/usr/bin/rpm" - - -def query_package(module, name): - # rpm -q returns 0 if the package is installed, - # 1 if it is not installed - rc, out, err = module.run_command("%s -q %s" % (RPM_PATH, name)) - if rc == 0: - return True - else: - return False - - -def query_package_provides(module, name): - # rpm -q returns 0 if the package is installed, - # 1 if it is not installed - rc, out, err = module.run_command("%s -q --provides %s" % (RPM_PATH, name)) - return rc == 0 - - -def update_package_db(module): - rc, out, err = module.run_command("%s update" % APT_PATH) - - if rc != 0: - module.fail_json(msg="could not update package db: %s" % err) - - -def remove_packages(module, packages): - - remove_c = 0 - # Using a for loop in case of error, we can report the package that failed - for package in packages: - # Query the package first, to see if we even need to remove - if not query_package(module, package): - continue - - rc, out, err = module.run_command("%s -y remove %s" % (APT_PATH, package)) - - if rc != 0: - module.fail_json(msg="failed to remove %s: %s" % (package, err)) - - remove_c += 1 - - if remove_c > 0: - module.exit_json(changed=True, msg="removed %s 
package(s)" % remove_c) - - module.exit_json(changed=False, msg="package(s) already absent") - - -def install_packages(module, pkgspec): - - packages = "" - for package in pkgspec: - if not query_package_provides(module, package): - packages += "'%s' " % package - - if len(packages) != 0: - - rc, out, err = module.run_command("%s -y install %s" % (APT_PATH, packages)) - - installed = True - for packages in pkgspec: - if not query_package_provides(module, package): - installed = False - - # apt-rpm always have 0 for exit code if --force is used - if rc or not installed: - module.fail_json(msg="'apt-get -y install %s' failed: %s" % (packages, err)) - else: - module.exit_json(changed=True, msg="%s present(s)" % packages) - else: - module.exit_json(changed=False) - - -def main(): - module = AnsibleModule( - argument_spec=dict( - state=dict(type='str', default='present', choices=['absent', 'installed', 'present', 'removed']), - update_cache=dict(type='bool', default=False), - package=dict(type='list', elements='str', required=True, aliases=['name', 'pkg']), - ), - ) - - if not os.path.exists(APT_PATH) or not os.path.exists(RPM_PATH): - module.fail_json(msg="cannot find /usr/bin/apt-get and/or /usr/bin/rpm") - - p = module.params - - if p['update_cache']: - update_package_db(module) - - packages = p['package'] - - if p['state'] in ['installed', 'present']: - install_packages(module, packages) - - elif p['state'] in ['absent', 'removed']: - remove_packages(module, packages) - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/packaging/os/flatpak_remote.py b/plugins/modules/packaging/os/flatpak_remote.py deleted file mode 100644 index e0e4170f47..0000000000 --- a/plugins/modules/packaging/os/flatpak_remote.py +++ /dev/null @@ -1,213 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Copyright: (c) 2017 John Kwiatkoski (@JayKayy) -# Copyright: (c) 2018 Alexander Bethke (@oolongbrothers) -# Copyright: (c) 2017 Ansible Project -# GNU General Public 
License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - -DOCUMENTATION = r''' ---- -module: flatpak_remote -short_description: Manage flatpak repository remotes -description: -- Allows users to add or remove flatpak remotes. -- The flatpak remotes concept is comparable to what is called repositories in other packaging - formats. -- Currently, remote addition is only supported via I(flatpakrepo) file URLs. -- Existing remotes will not be updated. -- See the M(community.general.flatpak) module for managing flatpaks. -author: -- John Kwiatkoski (@JayKayy) -- Alexander Bethke (@oolongbrothers) -requirements: -- flatpak -options: - executable: - description: - - The path to the C(flatpak) executable to use. - - By default, this module looks for the C(flatpak) executable on the path. - type: str - default: flatpak - flatpakrepo_url: - description: - - The URL to the I(flatpakrepo) file representing the repository remote to add. - - When used with I(state=present), the flatpak remote specified under the I(flatpakrepo_url) - is added using the specified installation C(method). - - When used with I(state=absent), this is not required. - - Required when I(state=present). - type: str - method: - description: - - The installation method to use. - - Defines if the I(flatpak) is supposed to be installed globally for the whole C(system) - or only for the current C(user). - type: str - choices: [ system, user ] - default: system - name: - description: - - The desired name for the flatpak remote to be registered under on the managed host. - - When used with I(state=present), the remote will be added to the managed host under - the specified I(name). - - When used with I(state=absent) the remote with that name will be removed. - type: str - required: true - state: - description: - - Indicates the desired package state. 
- type: str - choices: [ absent, present ] - default: present -''' - -EXAMPLES = r''' -- name: Add the Gnome flatpak remote to the system installation - community.general.flatpak_remote: - name: gnome - state: present - flatpakrepo_url: https://sdk.gnome.org/gnome-apps.flatpakrepo - -- name: Add the flathub flatpak repository remote to the user installation - community.general.flatpak_remote: - name: flathub - state: present - flatpakrepo_url: https://dl.flathub.org/repo/flathub.flatpakrepo - method: user - -- name: Remove the Gnome flatpak remote from the user installation - community.general.flatpak_remote: - name: gnome - state: absent - method: user - -- name: Remove the flathub remote from the system installation - community.general.flatpak_remote: - name: flathub - state: absent -''' - -RETURN = r''' -command: - description: The exact flatpak command that was executed - returned: When a flatpak command has been executed - type: str - sample: "/usr/bin/flatpak remote-add --system flatpak-test https://dl.flathub.org/repo/flathub.flatpakrepo" -msg: - description: Module error message - returned: failure - type: str - sample: "Executable '/usr/local/bin/flatpak' was not found on the system." 
-rc: - description: Return code from flatpak binary - returned: When a flatpak command has been executed - type: int - sample: 0 -stderr: - description: Error output from flatpak binary - returned: When a flatpak command has been executed - type: str - sample: "error: GPG verification enabled, but no summary found (check that the configured URL in remote config is correct)\n" -stdout: - description: Output from flatpak binary - returned: When a flatpak command has been executed - type: str - sample: "flathub\tFlathub\thttps://dl.flathub.org/repo/\t1\t\n" -''' - -from ansible.module_utils.basic import AnsibleModule -from ansible.module_utils.common.text.converters import to_bytes, to_native - - -def add_remote(module, binary, name, flatpakrepo_url, method): - """Add a new remote.""" - global result - command = [binary, "remote-add", "--{0}".format(method), name, flatpakrepo_url] - _flatpak_command(module, module.check_mode, command) - result['changed'] = True - - -def remove_remote(module, binary, name, method): - """Remove an existing remote.""" - global result - command = [binary, "remote-delete", "--{0}".format(method), "--force", name] - _flatpak_command(module, module.check_mode, command) - result['changed'] = True - - -def remote_exists(module, binary, name, method): - """Check if the remote exists.""" - command = [binary, "remote-list", "-d", "--{0}".format(method)] - # The query operation for the remote needs to be run even in check mode - output = _flatpak_command(module, False, command) - for line in output.splitlines(): - listed_remote = line.split() - if len(listed_remote) == 0: - continue - if listed_remote[0] == to_native(name): - return True - return False - - -def _flatpak_command(module, noop, command): - global result - result['command'] = ' '.join(command) - if noop: - result['rc'] = 0 - return "" - - result['rc'], result['stdout'], result['stderr'] = module.run_command( - command, check_rc=True - ) - return result['stdout'] - - -def main(): - 
module = AnsibleModule( - argument_spec=dict( - name=dict(type='str', required=True), - flatpakrepo_url=dict(type='str'), - method=dict(type='str', default='system', - choices=['user', 'system']), - state=dict(type='str', default="present", - choices=['absent', 'present']), - executable=dict(type='str', default="flatpak") - ), - # This module supports check mode - supports_check_mode=True, - ) - - name = module.params['name'] - flatpakrepo_url = module.params['flatpakrepo_url'] - method = module.params['method'] - state = module.params['state'] - executable = module.params['executable'] - binary = module.get_bin_path(executable, None) - - if flatpakrepo_url is None: - flatpakrepo_url = '' - - global result - result = dict( - changed=False - ) - - # If the binary was not found, fail the operation - if not binary: - module.fail_json(msg="Executable '%s' was not found on the system." % executable, **result) - - remote_already_exists = remote_exists(module, binary, to_bytes(name), method) - - if state == 'present' and not remote_already_exists: - add_remote(module, binary, name, flatpakrepo_url, method) - elif state == 'absent' and remote_already_exists: - remove_remote(module, binary, name, method) - - module.exit_json(**result) - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/packaging/os/opkg.py b/plugins/modules/packaging/os/opkg.py deleted file mode 100644 index f7bc9ae842..0000000000 --- a/plugins/modules/packaging/os/opkg.py +++ /dev/null @@ -1,199 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# (c) 2013, Patrick Pelletier -# Based on pacman (Afterburn) and pkgin (Shaun Zinck) modules -# -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' ---- -module: opkg -author: "Patrick Pelletier (@skinp)" -short_description: Package manager for OpenWrt -description: - - Manages OpenWrt 
packages -options: - name: - description: - - Name of package(s) to install/remove. - aliases: [pkg] - required: true - type: list - elements: str - state: - description: - - State of the package. - choices: [ 'present', 'absent', 'installed', 'removed' ] - default: present - type: str - force: - description: - - The C(opkg --force) parameter used. - choices: - - "" - - "depends" - - "maintainer" - - "reinstall" - - "overwrite" - - "downgrade" - - "space" - - "postinstall" - - "remove" - - "checksum" - - "removal-of-dependent-packages" - type: str - update_cache: - description: - - Update the package DB first. - default: false - type: bool -requirements: - - opkg - - python -''' -EXAMPLES = ''' -- name: Install foo - community.general.opkg: - name: foo - state: present - -- name: Update cache and install foo - community.general.opkg: - name: foo - state: present - update_cache: yes - -- name: Remove foo - community.general.opkg: - name: foo - state: absent - -- name: Remove foo and bar - community.general.opkg: - name: - - foo - - bar - state: absent - -- name: Install foo using overwrite option forcibly - community.general.opkg: - name: foo - state: present - force: overwrite -''' - -from ansible.module_utils.basic import AnsibleModule -from ansible.module_utils.six.moves import shlex_quote - - -def update_package_db(module, opkg_path): - """ Updates packages list. """ - - rc, out, err = module.run_command("%s update" % opkg_path) - - if rc != 0: - module.fail_json(msg="could not update package db") - - -def query_package(module, opkg_path, name, state="present"): - """ Returns whether a package is installed or not. """ - - if state == "present": - - rc, out, err = module.run_command("%s list-installed | grep -q \"^%s \"" % (shlex_quote(opkg_path), shlex_quote(name)), use_unsafe_shell=True) - if rc == 0: - return True - - return False - - -def remove_packages(module, opkg_path, packages): - """ Uninstalls one or more packages if installed. 
""" - - p = module.params - force = p["force"] - if force: - force = "--force-%s" % force - - remove_c = 0 - # Using a for loop in case of error, we can report the package that failed - for package in packages: - # Query the package first, to see if we even need to remove - if not query_package(module, opkg_path, package): - continue - - rc, out, err = module.run_command("%s remove %s %s" % (opkg_path, force, package)) - - if query_package(module, opkg_path, package): - module.fail_json(msg="failed to remove %s: %s" % (package, out)) - - remove_c += 1 - - if remove_c > 0: - - module.exit_json(changed=True, msg="removed %s package(s)" % remove_c) - - module.exit_json(changed=False, msg="package(s) already absent") - - -def install_packages(module, opkg_path, packages): - """ Installs one or more packages if not already installed. """ - - p = module.params - force = p["force"] - if force: - force = "--force-%s" % force - - install_c = 0 - - for package in packages: - if query_package(module, opkg_path, package): - continue - - rc, out, err = module.run_command("%s install %s %s" % (opkg_path, force, package)) - - if not query_package(module, opkg_path, package): - module.fail_json(msg="failed to install %s: %s" % (package, out)) - - install_c += 1 - - if install_c > 0: - module.exit_json(changed=True, msg="installed %s package(s)" % (install_c)) - - module.exit_json(changed=False, msg="package(s) already present") - - -def main(): - module = AnsibleModule( - argument_spec=dict( - name=dict(aliases=["pkg"], required=True, type="list", elements="str"), - state=dict(default="present", choices=["present", "installed", "absent", "removed"]), - force=dict(default="", choices=["", "depends", "maintainer", "reinstall", "overwrite", "downgrade", "space", "postinstall", "remove", - "checksum", "removal-of-dependent-packages"]), - update_cache=dict(default=False, type='bool'), - ) - ) - - opkg_path = module.get_bin_path('opkg', True, ['/bin']) - - p = module.params - - if 
p["update_cache"]: - update_package_db(module, opkg_path) - - pkgs = p["name"] - - if p["state"] in ["present", "installed"]: - install_packages(module, opkg_path, pkgs) - - elif p["state"] in ["absent", "removed"]: - remove_packages(module, opkg_path, pkgs) - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/packaging/os/pacman_key.py b/plugins/modules/packaging/os/pacman_key.py deleted file mode 100644 index a40575b697..0000000000 --- a/plugins/modules/packaging/os/pacman_key.py +++ /dev/null @@ -1,314 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Copyright: (c) 2019, George Rawlinson -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - -DOCUMENTATION = ''' ---- -module: pacman_key -author: -- George Rawlinson (@grawlinson) -version_added: "3.2.0" -short_description: Manage pacman's list of trusted keys -description: -- Add or remove gpg keys from the pacman keyring. -notes: -- Use full-length key ID (40 characters). -- Keys will be verified when using I(data), I(file), or I(url) unless I(verify) is overridden. -- Keys will be locally signed after being imported into the keyring. -- If the key ID exists in the keyring, the key will not be added unless I(force_update) is specified. -- I(data), I(file), I(url), and I(keyserver) are mutually exclusive. -- Supports C(check_mode). -requirements: -- gpg -- pacman-key -options: - id: - description: - - The 40 character identifier of the key. - - Including this allows check mode to correctly report the changed state. - - Do not specify a subkey ID, instead specify the primary key ID. - required: true - type: str - data: - description: - - The keyfile contents to add to the keyring. - - Must be of C(PGP PUBLIC KEY BLOCK) type. - type: str - file: - description: - - The path to a keyfile on the remote server to add to the keyring. 
- - Remote file must be of C(PGP PUBLIC KEY BLOCK) type. - type: path - url: - description: - - The URL to retrieve keyfile from. - - Remote file must be of C(PGP PUBLIC KEY BLOCK) type. - type: str - keyserver: - description: - - The keyserver used to retrieve key from. - type: str - verify: - description: - - Whether or not to verify the keyfile's key ID against specified key ID. - type: bool - default: true - force_update: - description: - - This forces the key to be updated if it already exists in the keyring. - type: bool - default: false - keyring: - description: - - The full path to the keyring folder on the remote server. - - If not specified, module will use pacman's default (C(/etc/pacman.d/gnupg)). - - Useful if the remote system requires an alternative gnupg directory. - type: path - default: /etc/pacman.d/gnupg - state: - description: - - Ensures that the key is present (added) or absent (revoked). - default: present - choices: [ absent, present ] - type: str -''' - -EXAMPLES = ''' -- name: Import a key via local file - community.general.pacman_key: - data: "{{ lookup('file', 'keyfile.asc') }}" - state: present - -- name: Import a key via remote file - community.general.pacman_key: - file: /tmp/keyfile.asc - state: present - -- name: Import a key via url - community.general.pacman_key: - id: 01234567890ABCDE01234567890ABCDE12345678 - url: https://domain.tld/keys/keyfile.asc - state: present - -- name: Import a key via keyserver - community.general.pacman_key: - id: 01234567890ABCDE01234567890ABCDE12345678 - keyserver: keyserver.domain.tld - -- name: Import a key into an alternative keyring - community.general.pacman_key: - id: 01234567890ABCDE01234567890ABCDE12345678 - file: /tmp/keyfile.asc - keyring: /etc/pacman.d/gnupg-alternative - -- name: Remove a key from the keyring - community.general.pacman_key: - id: 01234567890ABCDE01234567890ABCDE12345678 - state: absent -''' - -RETURN = r''' # ''' - -import os.path -import tempfile -from 
ansible.module_utils.basic import AnsibleModule -from ansible.module_utils.urls import fetch_url -from ansible.module_utils.common.text.converters import to_native - - -class PacmanKey(object): - def __init__(self, module): - self.module = module - # obtain binary paths for gpg & pacman-key - self.gpg = module.get_bin_path('gpg', required=True) - self.pacman_key = module.get_bin_path('pacman-key', required=True) - - # obtain module parameters - keyid = module.params['id'] - url = module.params['url'] - data = module.params['data'] - file = module.params['file'] - keyserver = module.params['keyserver'] - verify = module.params['verify'] - force_update = module.params['force_update'] - keyring = module.params['keyring'] - state = module.params['state'] - self.keylength = 40 - - # sanitise key ID & check if key exists in the keyring - keyid = self.sanitise_keyid(keyid) - key_present = self.key_in_keyring(keyring, keyid) - - # check mode - if module.check_mode: - if state == "present": - changed = (key_present and force_update) or not key_present - module.exit_json(changed=changed) - elif state == "absent": - if key_present: - module.exit_json(changed=True) - module.exit_json(changed=False) - - if state == "present": - if key_present and not force_update: - module.exit_json(changed=False) - - if data: - file = self.save_key(data) - self.add_key(keyring, file, keyid, verify) - module.exit_json(changed=True) - elif file: - self.add_key(keyring, file, keyid, verify) - module.exit_json(changed=True) - elif url: - data = self.fetch_key(url) - file = self.save_key(data) - self.add_key(keyring, file, keyid, verify) - module.exit_json(changed=True) - elif keyserver: - self.recv_key(keyring, keyid, keyserver) - module.exit_json(changed=True) - elif state == "absent": - if key_present: - self.remove_key(keyring, keyid) - module.exit_json(changed=True) - module.exit_json(changed=False) - - def is_hexadecimal(self, string): - """Check if a given string is valid hexadecimal""" - 
try: - int(string, 16) - except ValueError: - return False - return True - - def sanitise_keyid(self, keyid): - """Sanitise given key ID. - - Strips whitespace, uppercases all characters, and strips leading `0X`. - """ - sanitised_keyid = keyid.strip().upper().replace(' ', '').replace('0X', '') - if len(sanitised_keyid) != self.keylength: - self.module.fail_json(msg="key ID is not full-length: %s" % sanitised_keyid) - if not self.is_hexadecimal(sanitised_keyid): - self.module.fail_json(msg="key ID is not hexadecimal: %s" % sanitised_keyid) - return sanitised_keyid - - def fetch_key(self, url): - """Downloads a key from url""" - response, info = fetch_url(self.module, url) - if info['status'] != 200: - self.module.fail_json(msg="failed to fetch key at %s, error was %s" % (url, info['msg'])) - return to_native(response.read()) - - def recv_key(self, keyring, keyid, keyserver): - """Receives key via keyserver""" - cmd = [self.pacman_key, '--gpgdir', keyring, '--keyserver', keyserver, '--recv-keys', keyid] - self.module.run_command(cmd, check_rc=True) - self.lsign_key(keyring, keyid) - - def lsign_key(self, keyring, keyid): - """Locally sign key""" - cmd = [self.pacman_key, '--gpgdir', keyring] - self.module.run_command(cmd + ['--lsign-key', keyid], check_rc=True) - - def save_key(self, data): - "Saves key data to a temporary file" - tmpfd, tmpname = tempfile.mkstemp() - self.module.add_cleanup_file(tmpname) - tmpfile = os.fdopen(tmpfd, "w") - tmpfile.write(data) - tmpfile.close() - return tmpname - - def add_key(self, keyring, keyfile, keyid, verify): - """Add key to pacman's keyring""" - if verify: - self.verify_keyfile(keyfile, keyid) - cmd = [self.pacman_key, '--gpgdir', keyring, '--add', keyfile] - self.module.run_command(cmd, check_rc=True) - self.lsign_key(keyring, keyid) - - def remove_key(self, keyring, keyid): - """Remove key from pacman's keyring""" - cmd = [self.pacman_key, '--gpgdir', keyring, '--delete', keyid] - self.module.run_command(cmd, 
check_rc=True) - - def verify_keyfile(self, keyfile, keyid): - """Verify that keyfile matches the specified key ID""" - if keyfile is None: - self.module.fail_json(msg="expected a key, got none") - elif keyid is None: - self.module.fail_json(msg="expected a key ID, got none") - - rc, stdout, stderr = self.module.run_command( - [ - self.gpg, - '--with-colons', - '--with-fingerprint', - '--batch', - '--no-tty', - '--show-keys', - keyfile - ], - check_rc=True, - ) - - extracted_keyid = None - for line in stdout.splitlines(): - if line.startswith('fpr:'): - extracted_keyid = line.split(':')[9] - break - - if extracted_keyid != keyid: - self.module.fail_json(msg="key ID does not match. expected %s, got %s" % (keyid, extracted_keyid)) - - def key_in_keyring(self, keyring, keyid): - "Check if the key ID is in pacman's keyring" - rc, stdout, stderr = self.module.run_command( - [ - self.gpg, - '--with-colons', - '--batch', - '--no-tty', - '--no-default-keyring', - '--keyring=%s/pubring.gpg' % keyring, - '--list-keys', keyid - ], - check_rc=False, - ) - if rc != 0: - if stderr.find("No public key") >= 0: - return False - else: - self.module.fail_json(msg="gpg returned an error: %s" % stderr) - return True - - -def main(): - module = AnsibleModule( - argument_spec=dict( - id=dict(type='str', required=True), - data=dict(type='str'), - file=dict(type='path'), - url=dict(type='str'), - keyserver=dict(type='str'), - verify=dict(type='bool', default=True), - force_update=dict(type='bool', default=False), - keyring=dict(type='path', default='/etc/pacman.d/gnupg'), - state=dict(type='str', default='present', choices=['absent', 'present']), - ), - supports_check_mode=True, - mutually_exclusive=(('data', 'file', 'url', 'keyserver'),), - required_if=[('state', 'present', ('data', 'file', 'url', 'keyserver'), True)], - ) - PacmanKey(module) - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/packaging/os/redhat_subscription.py 
b/plugins/modules/packaging/os/redhat_subscription.py deleted file mode 100644 index 7bb540b3f1..0000000000 --- a/plugins/modules/packaging/os/redhat_subscription.py +++ /dev/null @@ -1,947 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# James Laska (jlaska@redhat.com) -# -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' ---- -module: redhat_subscription -short_description: Manage registration and subscriptions to RHSM using the C(subscription-manager) command -description: - - Manage registration and subscription to the Red Hat Subscription Management entitlement platform using the C(subscription-manager) command -author: "Barnaby Court (@barnabycourt)" -notes: - - In order to register a system, subscription-manager requires either a username and password, or an activationkey and an Organization ID. - - Since 2.5 values for I(server_hostname), I(server_insecure), I(rhsm_baseurl), - I(server_proxy_hostname), I(server_proxy_port), I(server_proxy_user) and - I(server_proxy_password) are no longer taken from the C(/etc/rhsm/rhsm.conf) - config file and default to None. 
-requirements: - - subscription-manager -options: - state: - description: - - whether to register and subscribe (C(present)), or unregister (C(absent)) a system - choices: [ "present", "absent" ] - default: "present" - type: str - username: - description: - - access.redhat.com or Sat6 username - type: str - password: - description: - - access.redhat.com or Sat6 password - type: str - server_hostname: - description: - - Specify an alternative Red Hat Subscription Management or Sat6 server - type: str - server_insecure: - description: - - Enable or disable https server certificate verification when connecting to C(server_hostname) - type: str - server_prefix: - description: - - Specify the prefix when registering to the Red Hat Subscription Management or Sat6 server. - type: str - version_added: 3.3.0 - server_port: - description: - - Specify the port when registering to the Red Hat Subscription Management or Sat6 server. - type: str - version_added: 3.3.0 - rhsm_baseurl: - description: - - Specify CDN baseurl - type: str - rhsm_repo_ca_cert: - description: - - Specify an alternative location for a CA certificate for CDN - type: str - server_proxy_hostname: - description: - - Specify an HTTP proxy hostname. - type: str - server_proxy_port: - description: - - Specify an HTTP proxy port. - type: str - server_proxy_user: - description: - - Specify a user for HTTP proxy with basic authentication - type: str - server_proxy_password: - description: - - Specify a password for HTTP proxy with basic authentication - type: str - auto_attach: - description: - - Upon successful registration, auto-consume available subscriptions - - Added in favor of deprecated autosubscribe in 2.5. 
- type: bool - aliases: [autosubscribe] - activationkey: - description: - - supply an activation key for use with registration - type: str - org_id: - description: - - Organization ID to use in conjunction with activationkey - type: str - environment: - description: - - Register with a specific environment in the destination org. Used with Red Hat Satellite 6.x or Katello - type: str - pool: - description: - - | - Specify a subscription pool name to consume. Regular expressions accepted. Use I(pool_ids) instead if - possible, as it is much faster. Mutually exclusive with I(pool_ids). - default: '^$' - type: str - pool_ids: - description: - - | - Specify subscription pool IDs to consume. Prefer over I(pool) when possible as it is much faster. - A pool ID may be specified as a C(string) - just the pool ID (ex. C(0123456789abcdef0123456789abcdef)), - or as a C(dict) with the pool ID as the key, and a quantity as the value (ex. - C(0123456789abcdef0123456789abcdef: 2). If the quantity is provided, it is used to consume multiple - entitlements from a pool (the pool must support this). Mutually exclusive with I(pool). - default: [] - type: list - elements: raw - consumer_type: - description: - - The type of unit to register, defaults to system - type: str - consumer_name: - description: - - Name of the system to register, defaults to the hostname - type: str - consumer_id: - description: - - | - References an existing consumer ID to resume using a previous registration - for this system. If the system's identity certificate is lost or corrupted, - this option allows it to resume using its previous identity and subscriptions. - The default is to not specify a consumer ID so a new ID is created. 
- type: str - force_register: - description: - - Register the system even if it is already registered - type: bool - default: no - release: - description: - - Set a release version - type: str - syspurpose: - description: - - Set syspurpose attributes in file C(/etc/rhsm/syspurpose/syspurpose.json) - and synchronize these attributes with RHSM server. Syspurpose attributes help attach - the most appropriate subscriptions to the system automatically. When C(syspurpose.json) file - already contains some attributes, then new attributes overwrite existing attributes. - When some attribute is not listed in the new list of attributes, the existing - attribute will be removed from C(syspurpose.json) file. Unknown attributes are ignored. - type: dict - default: {} - suboptions: - usage: - description: Syspurpose attribute usage - type: str - role: - description: Syspurpose attribute role - type: str - service_level_agreement: - description: Syspurpose attribute service_level_agreement - type: str - addons: - description: Syspurpose attribute addons - type: list - elements: str - sync: - description: - - When this option is true, then syspurpose attributes are synchronized with - RHSM server immediately. When this option is false, then syspurpose attributes - will be synchronized with RHSM server by rhsmcertd daemon. - type: bool - default: no -''' - -EXAMPLES = ''' -- name: Register as user (joe_user) with password (somepass) and auto-subscribe to available content. - community.general.redhat_subscription: - state: present - username: joe_user - password: somepass - auto_attach: true - -- name: Same as above but subscribe to a specific pool by ID. - community.general.redhat_subscription: - state: present - username: joe_user - password: somepass - pool_ids: 0123456789abcdef0123456789abcdef - -- name: Register and subscribe to multiple pools. 
- community.general.redhat_subscription: - state: present - username: joe_user - password: somepass - pool_ids: - - 0123456789abcdef0123456789abcdef - - 1123456789abcdef0123456789abcdef - -- name: Same as above but consume multiple entitlements. - community.general.redhat_subscription: - state: present - username: joe_user - password: somepass - pool_ids: - - 0123456789abcdef0123456789abcdef: 2 - - 1123456789abcdef0123456789abcdef: 4 - -- name: Register and pull existing system data. - community.general.redhat_subscription: - state: present - username: joe_user - password: somepass - consumer_id: xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx - -- name: Register with activationkey and consume subscriptions matching Red Hat Enterprise Server or Red Hat Virtualization - community.general.redhat_subscription: - state: present - activationkey: 1-222333444 - org_id: 222333444 - pool: '^(Red Hat Enterprise Server|Red Hat Virtualization)$' - -- name: Update the consumed subscriptions from the previous example (remove Red Hat Virtualization subscription) - community.general.redhat_subscription: - state: present - activationkey: 1-222333444 - org_id: 222333444 - pool: '^Red Hat Enterprise Server$' - -- name: Register as user credentials into given environment (against Red Hat Satellite 6.x), and auto-subscribe. 
- community.general.redhat_subscription: - state: present - username: joe_user - password: somepass - environment: Library - auto_attach: true - -- name: Register as user (joe_user) with password (somepass) and a specific release - community.general.redhat_subscription: - state: present - username: joe_user - password: somepass - release: 7.4 - -- name: Register as user (joe_user) with password (somepass), set syspurpose attributes and synchronize them with server - community.general.redhat_subscription: - state: present - username: joe_user - password: somepass - auto_attach: true - syspurpose: - usage: "Production" - role: "Red Hat Enterprise Server" - service_level_agreement: "Premium" - addons: - - addon1 - - addon2 - sync: true -''' - -RETURN = ''' -subscribed_pool_ids: - description: List of pool IDs to which system is now subscribed - returned: success - type: complex - sample: { - "8a85f9815ab905d3015ab928c7005de4": "1" - } -''' - -from os.path import isfile -from os import unlink -import re -import shutil -import tempfile -import json - -from ansible.module_utils.basic import AnsibleModule -from ansible.module_utils.common.text.converters import to_native -from ansible.module_utils.six.moves import configparser - - -SUBMAN_CMD = None - - -class RegistrationBase(object): - - REDHAT_REPO = "/etc/yum.repos.d/redhat.repo" - - def __init__(self, module, username=None, password=None): - self.module = module - self.username = username - self.password = password - - def configure(self): - raise NotImplementedError("Must be implemented by a sub-class") - - def enable(self): - # Remove any existing redhat.repo - if isfile(self.REDHAT_REPO): - unlink(self.REDHAT_REPO) - - def register(self): - raise NotImplementedError("Must be implemented by a sub-class") - - def unregister(self): - raise NotImplementedError("Must be implemented by a sub-class") - - def unsubscribe(self): - raise NotImplementedError("Must be implemented by a sub-class") - - def 
update_plugin_conf(self, plugin, enabled=True): - plugin_conf = '/etc/yum/pluginconf.d/%s.conf' % plugin - - if isfile(plugin_conf): - tmpfd, tmpfile = tempfile.mkstemp() - shutil.copy2(plugin_conf, tmpfile) - cfg = configparser.ConfigParser() - cfg.read([tmpfile]) - - if enabled: - cfg.set('main', 'enabled', '1') - else: - cfg.set('main', 'enabled', '0') - - fd = open(tmpfile, 'w+') - cfg.write(fd) - fd.close() - self.module.atomic_move(tmpfile, plugin_conf) - - def subscribe(self, **kwargs): - raise NotImplementedError("Must be implemented by a sub-class") - - -class Rhsm(RegistrationBase): - def __init__(self, module, username=None, password=None): - RegistrationBase.__init__(self, module, username, password) - self.module = module - - def enable(self): - ''' - Enable the system to receive updates from subscription-manager. - This involves updating affected yum plugins and removing any - conflicting yum repositories. - ''' - RegistrationBase.enable(self) - self.update_plugin_conf('rhnplugin', False) - self.update_plugin_conf('subscription-manager', True) - - def configure(self, **kwargs): - ''' - Configure the system as directed for registration with RHSM - Raises: - * Exception - if error occurs while running command - ''' - - args = [SUBMAN_CMD, 'config'] - - # Pass supplied **kwargs as parameters to subscription-manager. Ignore - # non-configuration parameters and replace '_' with '.'. For example, - # 'server_hostname' becomes '--server.hostname'. 
- options = [] - for k, v in sorted(kwargs.items()): - if re.search(r'^(server|rhsm)_', k) and v is not None: - options.append('--%s=%s' % (k.replace('_', '.', 1), v)) - - # When there is nothing to configure, then it is not necessary - # to run config command, because it only returns current - # content of current configuration file - if len(options) == 0: - return - - args.extend(options) - - self.module.run_command(args, check_rc=True) - - @property - def is_registered(self): - ''' - Determine whether the current system - Returns: - * Boolean - whether the current system is currently registered to - RHSM. - ''' - - args = [SUBMAN_CMD, 'identity'] - rc, stdout, stderr = self.module.run_command(args, check_rc=False) - if rc == 0: - return True - else: - return False - - def register(self, username, password, auto_attach, activationkey, org_id, - consumer_type, consumer_name, consumer_id, force_register, environment, - rhsm_baseurl, server_insecure, server_hostname, server_proxy_hostname, - server_proxy_port, server_proxy_user, server_proxy_password, release): - ''' - Register the current system to the provided RHSM or Sat6 server - Raises: - * Exception - if error occurs while running command - ''' - args = [SUBMAN_CMD, 'register'] - - # Generate command arguments - if force_register: - args.extend(['--force']) - - if rhsm_baseurl: - args.extend(['--baseurl', rhsm_baseurl]) - - if server_insecure: - args.extend(['--insecure']) - - if server_hostname: - args.extend(['--serverurl', server_hostname]) - - if org_id: - args.extend(['--org', org_id]) - - if server_proxy_hostname and server_proxy_port: - args.extend(['--proxy', server_proxy_hostname + ':' + server_proxy_port]) - - if server_proxy_user: - args.extend(['--proxyuser', server_proxy_user]) - - if server_proxy_password: - args.extend(['--proxypassword', server_proxy_password]) - - if activationkey: - args.extend(['--activationkey', activationkey]) - else: - if auto_attach: - args.append('--auto-attach') - if 
username: - args.extend(['--username', username]) - if password: - args.extend(['--password', password]) - if consumer_type: - args.extend(['--type', consumer_type]) - if consumer_name: - args.extend(['--name', consumer_name]) - if consumer_id: - args.extend(['--consumerid', consumer_id]) - if environment: - args.extend(['--environment', environment]) - - if release: - args.extend(['--release', release]) - - rc, stderr, stdout = self.module.run_command(args, check_rc=True, expand_user_and_vars=False) - - def unsubscribe(self, serials=None): - ''' - Unsubscribe a system from subscribed channels - Args: - serials(list or None): list of serials to unsubscribe. If - serials is none or an empty list, then - all subscribed channels will be removed. - Raises: - * Exception - if error occurs while running command - ''' - items = [] - if serials is not None and serials: - items = ["--serial=%s" % s for s in serials] - if serials is None: - items = ["--all"] - - if items: - args = [SUBMAN_CMD, 'unsubscribe'] + items - rc, stderr, stdout = self.module.run_command(args, check_rc=True) - return serials - - def unregister(self): - ''' - Unregister a currently registered system - Raises: - * Exception - if error occurs while running command - ''' - args = [SUBMAN_CMD, 'unregister'] - rc, stderr, stdout = self.module.run_command(args, check_rc=True) - self.update_plugin_conf('rhnplugin', False) - self.update_plugin_conf('subscription-manager', False) - - def subscribe(self, regexp): - ''' - Subscribe current system to available pools matching the specified - regular expression. It matches regexp against available pool ids first. - If any pool ids match, subscribe to those pools and return. - - If no pool ids match, then match regexp against available pool product - names. Note this can still easily match many many pools. Then subscribe - to those pools. - - Since a pool id is a more specific match, we only fallback to matching - against names if we didn't match pool ids. 
- - Raises: - * Exception - if error occurs while running command - ''' - # See https://github.com/ansible/ansible/issues/19466 - - # subscribe to pools whose pool id matches regexp (and only the pool id) - subscribed_pool_ids = self.subscribe_pool(regexp) - - # If we found any matches, we are done - # Don't attempt to match pools by product name - if subscribed_pool_ids: - return subscribed_pool_ids - - # We didn't match any pool ids. - # Now try subscribing to pools based on product name match - # Note: This can match lots of product names. - subscribed_by_product_pool_ids = self.subscribe_product(regexp) - if subscribed_by_product_pool_ids: - return subscribed_by_product_pool_ids - - # no matches - return [] - - def subscribe_by_pool_ids(self, pool_ids): - """ - Try to subscribe to the list of pool IDs - """ - available_pools = RhsmPools(self.module) - - available_pool_ids = [p.get_pool_id() for p in available_pools] - - for pool_id, quantity in sorted(pool_ids.items()): - if pool_id in available_pool_ids: - args = [SUBMAN_CMD, 'attach', '--pool', pool_id] - if quantity is not None: - args.extend(['--quantity', to_native(quantity)]) - rc, stderr, stdout = self.module.run_command(args, check_rc=True) - else: - self.module.fail_json(msg='Pool ID: %s not in list of available pools' % pool_id) - return pool_ids - - def subscribe_pool(self, regexp): - ''' - Subscribe current system to available pools matching the specified - regular expression - Raises: - * Exception - if error occurs while running command - ''' - - # Available pools ready for subscription - available_pools = RhsmPools(self.module) - - subscribed_pool_ids = [] - for pool in available_pools.filter_pools(regexp): - pool.subscribe() - subscribed_pool_ids.append(pool.get_pool_id()) - return subscribed_pool_ids - - def subscribe_product(self, regexp): - ''' - Subscribe current system to available pools matching the specified - regular expression - Raises: - * Exception - if error occurs while running 
command - ''' - - # Available pools ready for subscription - available_pools = RhsmPools(self.module) - - subscribed_pool_ids = [] - for pool in available_pools.filter_products(regexp): - pool.subscribe() - subscribed_pool_ids.append(pool.get_pool_id()) - return subscribed_pool_ids - - def update_subscriptions(self, regexp): - changed = False - consumed_pools = RhsmPools(self.module, consumed=True) - pool_ids_to_keep = [p.get_pool_id() for p in consumed_pools.filter_pools(regexp)] - pool_ids_to_keep.extend([p.get_pool_id() for p in consumed_pools.filter_products(regexp)]) - - serials_to_remove = [p.Serial for p in consumed_pools if p.get_pool_id() not in pool_ids_to_keep] - serials = self.unsubscribe(serials=serials_to_remove) - - subscribed_pool_ids = self.subscribe(regexp) - - if subscribed_pool_ids or serials: - changed = True - return {'changed': changed, 'subscribed_pool_ids': subscribed_pool_ids, - 'unsubscribed_serials': serials} - - def update_subscriptions_by_pool_ids(self, pool_ids): - changed = False - consumed_pools = RhsmPools(self.module, consumed=True) - - existing_pools = {} - for p in consumed_pools: - existing_pools[p.get_pool_id()] = p.QuantityUsed - - serials_to_remove = [p.Serial for p in consumed_pools if pool_ids.get(p.get_pool_id(), 0) != p.QuantityUsed] - serials = self.unsubscribe(serials=serials_to_remove) - - missing_pools = {} - for pool_id, quantity in sorted(pool_ids.items()): - if existing_pools.get(pool_id, 0) != quantity: - missing_pools[pool_id] = quantity - - self.subscribe_by_pool_ids(missing_pools) - - if missing_pools or serials: - changed = True - return {'changed': changed, 'subscribed_pool_ids': list(missing_pools.keys()), - 'unsubscribed_serials': serials} - - def sync_syspurpose(self): - """ - Try to synchronize syspurpose attributes with server - """ - args = [SUBMAN_CMD, 'status'] - rc, stdout, stderr = self.module.run_command(args, check_rc=False) - - -class RhsmPool(object): - ''' - Convenience class for housing 
subscription information - ''' - - def __init__(self, module, **kwargs): - self.module = module - for k, v in kwargs.items(): - setattr(self, k, v) - - def __str__(self): - return str(self.__getattribute__('_name')) - - def get_pool_id(self): - return getattr(self, 'PoolId', getattr(self, 'PoolID')) - - def subscribe(self): - args = "subscription-manager attach --pool %s" % self.get_pool_id() - rc, stdout, stderr = self.module.run_command(args, check_rc=True) - if rc == 0: - return True - else: - return False - - -class RhsmPools(object): - """ - This class is used for manipulating pools subscriptions with RHSM - """ - - def __init__(self, module, consumed=False): - self.module = module - self.products = self._load_product_list(consumed) - - def __iter__(self): - return self.products.__iter__() - - def _load_product_list(self, consumed=False): - """ - Loads list of all available or consumed pools for system in data structure - - Args: - consumed(bool): if True list consumed pools, else list available pools (default False) - """ - args = "subscription-manager list" - if consumed: - args += " --consumed" - else: - args += " --available" - lang_env = dict(LANG='C', LC_ALL='C', LC_MESSAGES='C') - rc, stdout, stderr = self.module.run_command(args, check_rc=True, environ_update=lang_env) - - products = [] - for line in stdout.split('\n'): - # Remove leading+trailing whitespace - line = line.strip() - # An empty line implies the end of a output group - if len(line) == 0: - continue - # If a colon ':' is found, parse - elif ':' in line: - (key, value) = line.split(':', 1) - key = key.strip().replace(" ", "") # To unify - value = value.strip() - if key in ['ProductName', 'SubscriptionName']: - # Remember the name for later processing - products.append(RhsmPool(self.module, _name=value, key=value)) - elif products: - # Associate value with most recently recorded product - products[-1].__setattr__(key, value) - # FIXME - log some warning? 
- # else: - # warnings.warn("Unhandled subscription key/value: %s/%s" % (key,value)) - return products - - def filter_pools(self, regexp='^$'): - ''' - Return a list of RhsmPools whose pool id matches the provided regular expression - ''' - r = re.compile(regexp) - for product in self.products: - if r.search(product.get_pool_id()): - yield product - - def filter_products(self, regexp='^$'): - ''' - Return a list of RhsmPools whose product name matches the provided regular expression - ''' - r = re.compile(regexp) - for product in self.products: - if r.search(product._name): - yield product - - -class SysPurpose(object): - """ - This class is used for reading and writing to syspurpose.json file - """ - - SYSPURPOSE_FILE_PATH = "/etc/rhsm/syspurpose/syspurpose.json" - - ALLOWED_ATTRIBUTES = ['role', 'usage', 'service_level_agreement', 'addons'] - - def __init__(self, path=None): - """ - Initialize class used for reading syspurpose json file - """ - self.path = path or self.SYSPURPOSE_FILE_PATH - - def update_syspurpose(self, new_syspurpose): - """ - Try to update current syspurpose with new attributes from new_syspurpose - """ - syspurpose = {} - syspurpose_changed = False - for key, value in new_syspurpose.items(): - if key in self.ALLOWED_ATTRIBUTES: - if value is not None: - syspurpose[key] = value - elif key == 'sync': - pass - else: - raise KeyError("Attribute: %s not in list of allowed attributes: %s" % - (key, self.ALLOWED_ATTRIBUTES)) - current_syspurpose = self._read_syspurpose() - if current_syspurpose != syspurpose: - syspurpose_changed = True - # Update current syspurpose with new values - current_syspurpose.update(syspurpose) - # When some key is not listed in new syspurpose, then delete it from current syspurpose - # and ignore custom attributes created by user (e.g. 
"foo": "bar") - for key in list(current_syspurpose): - if key in self.ALLOWED_ATTRIBUTES and key not in syspurpose: - del current_syspurpose[key] - self._write_syspurpose(current_syspurpose) - return syspurpose_changed - - def _write_syspurpose(self, new_syspurpose): - """ - This function tries to update current new_syspurpose attributes to - json file. - """ - with open(self.path, "w") as fp: - fp.write(json.dumps(new_syspurpose, indent=2, ensure_ascii=False, sort_keys=True)) - - def _read_syspurpose(self): - """ - Read current syspurpuse from json file. - """ - current_syspurpose = {} - try: - with open(self.path, "r") as fp: - content = fp.read() - except IOError: - pass - else: - current_syspurpose = json.loads(content) - return current_syspurpose - - -def main(): - - # Load RHSM configuration from file - rhsm = Rhsm(None) - - # Note: the default values for parameters are: - # 'type': 'str', 'default': None, 'required': False - # So there is no need to repeat these values for each parameter. 
- module = AnsibleModule( - argument_spec={ - 'state': {'default': 'present', 'choices': ['present', 'absent']}, - 'username': {}, - 'password': {'no_log': True}, - 'server_hostname': {}, - 'server_insecure': {}, - 'server_prefix': {}, - 'server_port': {}, - 'rhsm_baseurl': {}, - 'rhsm_repo_ca_cert': {}, - 'auto_attach': {'aliases': ['autosubscribe'], 'type': 'bool'}, - 'activationkey': {'no_log': True}, - 'org_id': {}, - 'environment': {}, - 'pool': {'default': '^$'}, - 'pool_ids': {'default': [], 'type': 'list', 'elements': 'raw'}, - 'consumer_type': {}, - 'consumer_name': {}, - 'consumer_id': {}, - 'force_register': {'default': False, 'type': 'bool'}, - 'server_proxy_hostname': {}, - 'server_proxy_port': {}, - 'server_proxy_user': {}, - 'server_proxy_password': {'no_log': True}, - 'release': {}, - 'syspurpose': { - 'type': 'dict', - 'options': { - 'role': {}, - 'usage': {}, - 'service_level_agreement': {}, - 'addons': {'type': 'list', 'elements': 'str'}, - 'sync': {'type': 'bool', 'default': False} - } - } - }, - required_together=[['username', 'password'], - ['server_proxy_hostname', 'server_proxy_port'], - ['server_proxy_user', 'server_proxy_password']], - mutually_exclusive=[['activationkey', 'username'], - ['activationkey', 'consumer_id'], - ['activationkey', 'environment'], - ['activationkey', 'auto_attach'], - ['pool', 'pool_ids']], - required_if=[['state', 'present', ['username', 'activationkey'], True]], - ) - - rhsm.module = module - state = module.params['state'] - username = module.params['username'] - password = module.params['password'] - server_hostname = module.params['server_hostname'] - server_insecure = module.params['server_insecure'] - server_prefix = module.params['server_prefix'] - server_port = module.params['server_port'] - rhsm_baseurl = module.params['rhsm_baseurl'] - rhsm_repo_ca_cert = module.params['rhsm_repo_ca_cert'] - auto_attach = module.params['auto_attach'] - activationkey = module.params['activationkey'] - org_id = 
module.params['org_id'] - if activationkey and not org_id: - module.fail_json(msg='org_id is required when using activationkey') - environment = module.params['environment'] - pool = module.params['pool'] - pool_ids = {} - for value in module.params['pool_ids']: - if isinstance(value, dict): - if len(value) != 1: - module.fail_json(msg='Unable to parse pool_ids option.') - pool_id, quantity = list(value.items())[0] - else: - pool_id, quantity = value, None - pool_ids[pool_id] = quantity - consumer_type = module.params["consumer_type"] - consumer_name = module.params["consumer_name"] - consumer_id = module.params["consumer_id"] - force_register = module.params["force_register"] - server_proxy_hostname = module.params['server_proxy_hostname'] - server_proxy_port = module.params['server_proxy_port'] - server_proxy_user = module.params['server_proxy_user'] - server_proxy_password = module.params['server_proxy_password'] - release = module.params['release'] - syspurpose = module.params['syspurpose'] - - global SUBMAN_CMD - SUBMAN_CMD = module.get_bin_path('subscription-manager', True) - - syspurpose_changed = False - if syspurpose is not None: - try: - syspurpose_changed = SysPurpose().update_syspurpose(syspurpose) - except Exception as err: - module.fail_json(msg="Failed to update syspurpose attributes: %s" % to_native(err)) - - # Ensure system is registered - if state == 'present': - - # Register system - if rhsm.is_registered and not force_register: - if syspurpose and 'sync' in syspurpose and syspurpose['sync'] is True: - try: - rhsm.sync_syspurpose() - except Exception as e: - module.fail_json(msg="Failed to synchronize syspurpose attributes: %s" % to_native(e)) - if pool != '^$' or pool_ids: - try: - if pool_ids: - result = rhsm.update_subscriptions_by_pool_ids(pool_ids) - else: - result = rhsm.update_subscriptions(pool) - except Exception as e: - module.fail_json(msg="Failed to update subscriptions for '%s': %s" % (server_hostname, to_native(e))) - else: - 
module.exit_json(**result) - else: - if syspurpose_changed is True: - module.exit_json(changed=True, msg="Syspurpose attributes changed.") - else: - module.exit_json(changed=False, msg="System already registered.") - else: - try: - rhsm.enable() - rhsm.configure(**module.params) - rhsm.register(username, password, auto_attach, activationkey, org_id, - consumer_type, consumer_name, consumer_id, force_register, - environment, rhsm_baseurl, server_insecure, server_hostname, - server_proxy_hostname, server_proxy_port, server_proxy_user, server_proxy_password, release) - if syspurpose and 'sync' in syspurpose and syspurpose['sync'] is True: - rhsm.sync_syspurpose() - if pool_ids: - subscribed_pool_ids = rhsm.subscribe_by_pool_ids(pool_ids) - elif pool != '^$': - subscribed_pool_ids = rhsm.subscribe(pool) - else: - subscribed_pool_ids = [] - except Exception as e: - module.fail_json(msg="Failed to register with '%s': %s" % (server_hostname, to_native(e))) - else: - module.exit_json(changed=True, - msg="System successfully registered to '%s'." % server_hostname, - subscribed_pool_ids=subscribed_pool_ids) - - # Ensure system is *not* registered - if state == 'absent': - if not rhsm.is_registered: - module.exit_json(changed=False, msg="System already unregistered.") - else: - try: - rhsm.unsubscribe() - rhsm.unregister() - except Exception as e: - module.fail_json(msg="Failed to unregister: %s" % to_native(e)) - else: - module.exit_json(changed=True, msg="System successfully unregistered from %s." 
% server_hostname) - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/packaging/os/rhn_channel.py b/plugins/modules/packaging/os/rhn_channel.py deleted file mode 100644 index e3a1ae3098..0000000000 --- a/plugins/modules/packaging/os/rhn_channel.py +++ /dev/null @@ -1,193 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Copyright: (c) Vincent Van de Kussen -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - -DOCUMENTATION = ''' ---- -module: rhn_channel -short_description: Adds or removes Red Hat software channels -description: - - Adds or removes Red Hat software channels. -author: -- Vincent Van der Kussen (@vincentvdk) -notes: - - This module fetches the system id from RHN. - - This module doesn't support I(check_mode). -options: - name: - description: - - Name of the software channel. - required: true - type: str - sysname: - description: - - Name of the system as it is known in RHN/Satellite. - required: true - type: str - state: - description: - - Whether the channel should be present or not, taking action if the state is different from what is stated. - default: present - choices: [ present, absent ] - type: str - url: - description: - - The full URL to the RHN/Satellite API. - required: true - type: str - user: - description: - - RHN/Satellite login. - required: true - type: str - password: - description: - - RHN/Satellite password. - aliases: [pwd] - required: true - type: str - validate_certs: - description: - - If C(False), SSL certificates will not be validated. - - This should only set to C(False) when used on self controlled sites - using self-signed certificates, and you are absolutely sure that nobody - can modify traffic between the module and the site. 
- type: bool - default: true - version_added: '0.2.0' -''' - -EXAMPLES = ''' -- name: Add a Red Hat software channel - community.general.rhn_channel: - name: rhel-x86_64-server-v2vwin-6 - sysname: server01 - url: https://rhn.redhat.com/rpc/api - user: rhnuser - password: guessme - delegate_to: localhost -''' - -import ssl -from ansible.module_utils.common.text.converters import to_text -from ansible.module_utils.basic import AnsibleModule -from ansible.module_utils.six.moves import xmlrpc_client - - -def get_systemid(client, session, sysname): - systems = client.system.listUserSystems(session) - for system in systems: - if system.get('name') == sysname: - idres = system.get('id') - idd = int(idres) - return idd - - -def subscribe_channels(channelname, client, session, sysname, sys_id): - channels = base_channels(client, session, sys_id) - channels.append(channelname) - return client.system.setChildChannels(session, sys_id, channels) - - -def unsubscribe_channels(channelname, client, session, sysname, sys_id): - channels = base_channels(client, session, sys_id) - channels.remove(channelname) - return client.system.setChildChannels(session, sys_id, channels) - - -def base_channels(client, session, sys_id): - basechan = client.channel.software.listSystemChannels(session, sys_id) - try: - chans = [item['label'] for item in basechan] - except KeyError: - chans = [item['channel_label'] for item in basechan] - return chans - - -def main(): - - module = AnsibleModule( - argument_spec=dict( - state=dict(type='str', default='present', choices=['present', 'absent']), - name=dict(type='str', required=True), - sysname=dict(type='str', required=True), - url=dict(type='str', required=True), - user=dict(type='str', required=True), - password=dict(type='str', required=True, aliases=['pwd'], no_log=True), - validate_certs=dict(type='bool', default=True), - ) - ) - - state = module.params['state'] - channelname = module.params['name'] - systname = module.params['sysname'] - saturl = 
module.params['url'] - user = module.params['user'] - password = module.params['password'] - validate_certs = module.params['validate_certs'] - - ssl_context = None - if not validate_certs: - try: # Python 2.7.9 and newer - ssl_context = ssl.create_unverified_context() - except AttributeError: # Legacy Python that doesn't verify HTTPS certificates by default - ssl_context = ssl._create_unverified_context() - else: # Python 2.7.8 and older - ssl._create_default_https_context = ssl._create_unverified_https_context - - # initialize connection - if ssl_context: - client = xmlrpc_client.ServerProxy(saturl, context=ssl_context) - else: - client = xmlrpc_client.Server(saturl) - - try: - session = client.auth.login(user, password) - except Exception as e: - module.fail_json(msg="Unable to establish session with Satellite server: %s " % to_text(e)) - - if not session: - module.fail_json(msg="Failed to establish session with Satellite server.") - - # get systemid - try: - sys_id = get_systemid(client, session, systname) - except Exception as e: - module.fail_json(msg="Unable to get system id: %s " % to_text(e)) - - if not sys_id: - module.fail_json(msg="Failed to get system id.") - - # get channels for system - try: - chans = base_channels(client, session, sys_id) - except Exception as e: - module.fail_json(msg="Unable to get channel information: %s " % to_text(e)) - - try: - if state == 'present': - if channelname in chans: - module.exit_json(changed=False, msg="Channel %s already exists" % channelname) - else: - subscribe_channels(channelname, client, session, systname, sys_id) - module.exit_json(changed=True, msg="Channel %s added" % channelname) - - if state == 'absent': - if channelname not in chans: - module.exit_json(changed=False, msg="Not subscribed to channel %s." 
% channelname) - else: - unsubscribe_channels(channelname, client, session, systname, sys_id) - module.exit_json(changed=True, msg="Channel %s removed" % channelname) - except Exception as e: - module.fail_json(msg='Unable to %s channel (%s): %s' % ('add' if state == 'present' else 'remove', channelname, to_text(e))) - finally: - client.auth.logout(session) - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/packaging/os/rhn_register.py b/plugins/modules/packaging/os/rhn_register.py deleted file mode 100644 index 08e9a99e9a..0000000000 --- a/plugins/modules/packaging/os/rhn_register.py +++ /dev/null @@ -1,447 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Copyright: (c) James Laska -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - -DOCUMENTATION = r''' ---- -module: rhn_register -short_description: Manage Red Hat Network registration using the C(rhnreg_ks) command -description: - - Manage registration to the Red Hat Network. -author: -- James Laska (@jlaska) -notes: - - This is for older Red Hat products. You probably want the M(community.general.redhat_subscription) module instead. - - In order to register a system, C(rhnreg_ks) requires either a username and password, or an activationkey. -requirements: - - rhnreg_ks - - either libxml2 or lxml -options: - state: - description: - - Whether to register (C(present)), or unregister (C(absent)) a system. - type: str - choices: [ absent, present ] - default: present - username: - description: - - Red Hat Network username. - type: str - password: - description: - - Red Hat Network password. - type: str - server_url: - description: - - Specify an alternative Red Hat Network server URL. - - The default is the current value of I(serverURL) from C(/etc/sysconfig/rhn/up2date). 
- type: str - activationkey: - description: - - Supply an activation key for use with registration. - type: str - profilename: - description: - - Supply an profilename for use with registration. - type: str - force: - description: - - Force registration, even if system is already registered. - type: bool - default: no - version_added: 2.0.0 - ca_cert: - description: - - Supply a custom ssl CA certificate file for use with registration. - type: path - aliases: [ sslcacert ] - systemorgid: - description: - - Supply an organizational id for use with registration. - type: str - channels: - description: - - Optionally specify a list of channels to subscribe to upon successful registration. - type: list - elements: str - default: [] - enable_eus: - description: - - If C(no), extended update support will be requested. - type: bool - default: no - nopackages: - description: - - If C(yes), the registered node will not upload its installed packages information to Satellite server. - type: bool - default: no -''' - -EXAMPLES = r''' -- name: Unregister system from RHN - community.general.rhn_register: - state: absent - username: joe_user - password: somepass - -- name: Register as user with password and auto-subscribe to available content - community.general.rhn_register: - state: present - username: joe_user - password: somepass - -- name: Register with activationkey and enable extended update support - community.general.rhn_register: - state: present - activationkey: 1-222333444 - enable_eus: yes - -- name: Register with activationkey and set a profilename which may differ from the hostname - community.general.rhn_register: - state: present - activationkey: 1-222333444 - profilename: host.example.com.custom - -- name: Register as user with password against a satellite server - community.general.rhn_register: - state: present - username: joe_user - password: somepass - server_url: https://xmlrpc.my.satellite/XMLRPC - -- name: Register as user with password and enable channels 
- community.general.rhn_register: - state: present - username: joe_user - password: somepass - channels: rhel-x86_64-server-6-foo-1,rhel-x86_64-server-6-bar-1 - -- name: Force-register as user with password to ensure registration is current on server - community.general.rhn_register: - state: present - username: joe_user - password: somepass - server_url: https://xmlrpc.my.satellite/XMLRPC - force: yes -''' - -RETURN = r''' -# Default return values -''' - -import os -import sys - -# Attempt to import rhn client tools -sys.path.insert(0, '/usr/share/rhn') -try: - import up2date_client - import up2date_client.config - HAS_UP2DATE_CLIENT = True -except ImportError: - HAS_UP2DATE_CLIENT = False - -# INSERT REDHAT SNIPPETS -from ansible_collections.community.general.plugins.module_utils import redhat -from ansible.module_utils.basic import AnsibleModule -from ansible.module_utils.six.moves import urllib, xmlrpc_client - - -class Rhn(redhat.RegistrationBase): - - def __init__(self, module=None, username=None, password=None): - redhat.RegistrationBase.__init__(self, module, username, password) - self.config = self.load_config() - self.server = None - self.session = None - - def logout(self): - if self.session is not None: - self.server.auth.logout(self.session) - - def load_config(self): - ''' - Read configuration from /etc/sysconfig/rhn/up2date - ''' - if not HAS_UP2DATE_CLIENT: - return None - - config = up2date_client.config.initUp2dateConfig() - - return config - - @property - def server_url(self): - return self.config['serverURL'] - - @property - def hostname(self): - ''' - Return the non-xmlrpc RHN hostname. This is a convenience method - used for displaying a more readable RHN hostname. 
- - Returns: str - ''' - url = urllib.parse.urlparse(self.server_url) - return url[1].replace('xmlrpc.', '') - - @property - def systemid(self): - systemid = None - xpath_str = "//member[name='system_id']/value/string" - - if os.path.isfile(self.config['systemIdPath']): - fd = open(self.config['systemIdPath'], 'r') - xml_data = fd.read() - fd.close() - - # Ugh, xml parsing time ... - # First, try parsing with libxml2 ... - if systemid is None: - try: - import libxml2 - doc = libxml2.parseDoc(xml_data) - ctxt = doc.xpathNewContext() - systemid = ctxt.xpathEval(xpath_str)[0].content - doc.freeDoc() - ctxt.xpathFreeContext() - except ImportError: - pass - - # m-kay, let's try with lxml now ... - if systemid is None: - try: - from lxml import etree - root = etree.fromstring(xml_data) - systemid = root.xpath(xpath_str)[0].text - except ImportError: - raise Exception('"libxml2" or "lxml" is required for this module.') - - # Strip the 'ID-' prefix - if systemid is not None and systemid.startswith('ID-'): - systemid = systemid[3:] - - return int(systemid) - - @property - def is_registered(self): - ''' - Determine whether the current system is registered. - - Returns: True|False - ''' - return os.path.isfile(self.config['systemIdPath']) - - def configure_server_url(self, server_url): - ''' - Configure server_url for registration - ''' - - self.config.set('serverURL', server_url) - self.config.save() - - def enable(self): - ''' - Prepare the system for RHN registration. This includes ... - * enabling the rhnplugin yum plugin - * disabling the subscription-manager yum plugin - ''' - redhat.RegistrationBase.enable(self) - self.update_plugin_conf('rhnplugin', True) - self.update_plugin_conf('subscription-manager', False) - - def register(self, enable_eus=False, activationkey=None, profilename=None, sslcacert=None, systemorgid=None, nopackages=False): - ''' - Register system to RHN. If enable_eus=True, extended update - support will be requested. 
- ''' - register_cmd = ['/usr/sbin/rhnreg_ks', '--force'] - if self.username: - register_cmd.extend(['--username', self.username, '--password', self.password]) - if self.server_url: - register_cmd.extend(['--serverUrl', self.server_url]) - if enable_eus: - register_cmd.append('--use-eus-channel') - if nopackages: - register_cmd.append('--nopackages') - if activationkey is not None: - register_cmd.extend(['--activationkey', activationkey]) - if profilename is not None: - register_cmd.extend(['--profilename', profilename]) - if sslcacert is not None: - register_cmd.extend(['--sslCACert', sslcacert]) - if systemorgid is not None: - register_cmd.extend(['--systemorgid', systemorgid]) - rc, stdout, stderr = self.module.run_command(register_cmd, check_rc=True) - - def api(self, method, *args): - ''' - Convenience RPC wrapper - ''' - if self.server is None: - if self.hostname != 'rhn.redhat.com': - url = "https://%s/rpc/api" % self.hostname - else: - url = "https://xmlrpc.%s/rpc/api" % self.hostname - self.server = xmlrpc_client.ServerProxy(url) - self.session = self.server.auth.login(self.username, self.password) - - func = getattr(self.server, method) - return func(self.session, *args) - - def unregister(self): - ''' - Unregister a previously registered system - ''' - - # Initiate RPC connection - self.api('system.deleteSystems', [self.systemid]) - - # Remove systemid file - os.unlink(self.config['systemIdPath']) - - def subscribe(self, channels): - if not channels: - return - - if self._is_hosted(): - current_channels = self.api('channel.software.listSystemChannels', self.systemid) - new_channels = [item['channel_label'] for item in current_channels] - new_channels.extend(channels) - return self.api('channel.software.setSystemChannels', self.systemid, list(new_channels)) - - else: - current_channels = self.api('channel.software.listSystemChannels', self.systemid) - current_channels = [item['label'] for item in current_channels] - new_base = None - new_childs = [] - for 
ch in channels: - if ch in current_channels: - continue - if self.api('channel.software.getDetails', ch)['parent_channel_label'] == '': - new_base = ch - else: - if ch not in new_childs: - new_childs.append(ch) - out_base = 0 - out_childs = 0 - - if new_base: - out_base = self.api('system.setBaseChannel', self.systemid, new_base) - - if new_childs: - out_childs = self.api('system.setChildChannels', self.systemid, new_childs) - - return out_base and out_childs - - def _is_hosted(self): - ''' - Return True if we are running against Hosted (rhn.redhat.com) or - False otherwise (when running against Satellite or Spacewalk) - ''' - return 'rhn.redhat.com' in self.hostname - - -def main(): - - module = AnsibleModule( - argument_spec=dict( - state=dict(type='str', default='present', choices=['absent', 'present']), - username=dict(type='str'), - password=dict(type='str', no_log=True), - server_url=dict(type='str'), - activationkey=dict(type='str', no_log=True), - profilename=dict(type='str'), - ca_cert=dict(type='path', aliases=['sslcacert']), - systemorgid=dict(type='str'), - enable_eus=dict(type='bool', default=False), - force=dict(type='bool', default=False), - nopackages=dict(type='bool', default=False), - channels=dict(type='list', elements='str', default=[]), - ), - # username/password is required for state=absent, or if channels is not empty - # (basically anything that uses self.api requires username/password) but it doesn't - # look like we can express that with required_if/required_together/mutually_exclusive - - # only username+password can be used for unregister - required_if=[['state', 'absent', ['username', 'password']]], - ) - - if not HAS_UP2DATE_CLIENT: - module.fail_json(msg="Unable to import up2date_client. 
Is 'rhn-client-tools' installed?") - - server_url = module.params['server_url'] - username = module.params['username'] - password = module.params['password'] - - state = module.params['state'] - force = module.params['force'] - activationkey = module.params['activationkey'] - profilename = module.params['profilename'] - sslcacert = module.params['ca_cert'] - systemorgid = module.params['systemorgid'] - channels = module.params['channels'] - enable_eus = module.params['enable_eus'] - nopackages = module.params['nopackages'] - - rhn = Rhn(module=module, username=username, password=password) - - # use the provided server url and persist it to the rhn config. - if server_url: - rhn.configure_server_url(server_url) - - if not rhn.server_url: - module.fail_json( - msg="No serverURL was found (from either the 'server_url' module arg or the config file option 'serverURL' in /etc/sysconfig/rhn/up2date)" - ) - - # Ensure system is registered - if state == 'present': - - # Check for missing parameters ... - if not (activationkey or rhn.username or rhn.password): - module.fail_json(msg="Missing arguments, must supply an activationkey (%s) or username (%s) and password (%s)" % (activationkey, rhn.username, - rhn.password)) - if not activationkey and not (rhn.username and rhn.password): - module.fail_json(msg="Missing arguments, If registering without an activationkey, must supply username or password") - - # Register system - if rhn.is_registered and not force: - module.exit_json(changed=False, msg="System already registered.") - - try: - rhn.enable() - rhn.register(enable_eus, activationkey, profilename, sslcacert, systemorgid, nopackages) - rhn.subscribe(channels) - except Exception as exc: - module.fail_json(msg="Failed to register with '%s': %s" % (rhn.hostname, exc)) - finally: - rhn.logout() - - module.exit_json(changed=True, msg="System successfully registered to '%s'." 
% rhn.hostname) - - # Ensure system is *not* registered - if state == 'absent': - if not rhn.is_registered: - module.exit_json(changed=False, msg="System already unregistered.") - - if not (rhn.username and rhn.password): - module.fail_json(msg="Missing arguments, the system is currently registered and unregistration requires a username and password") - - try: - rhn.unregister() - except Exception as exc: - module.fail_json(msg="Failed to unregister: %s" % exc) - finally: - rhn.logout() - - module.exit_json(changed=True, msg="System successfully unregistered from %s." % rhn.hostname) - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/packaging/os/rpm_ostree_pkg.py b/plugins/modules/packaging/os/rpm_ostree_pkg.py deleted file mode 100644 index 38e2486ddc..0000000000 --- a/plugins/modules/packaging/os/rpm_ostree_pkg.py +++ /dev/null @@ -1,174 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# Copyright: (c) 2018, Dusty Mabe -# Copyright: (c) 2018, Ansible Project -# Copyright: (c) 2021, Abhijeet Kasurde -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - - -DOCUMENTATION = r''' ---- -module: rpm_ostree_pkg -short_description: Install or uninstall overlay additional packages -version_added: "2.0.0" -description: - - Install or uninstall overlay additional packages using C(rpm-ostree) command. -options: - name: - description: - - Name of overlay package to install or remove. - required: true - type: list - elements: str - aliases: [ pkg ] - state: - description: - - State of the overlay package. - - C(present) simply ensures that a desired package is installed. - - C(absent) removes the specified package. - choices: [ 'absent', 'present' ] - default: 'present' - type: str -author: -- Dusty Mabe (@dustymabe) -- Abhijeet Kasurde (@Akasurde) -notes: -- Does not support C(check_mode). 
-''' - -EXAMPLES = r''' -- name: Install overlay package - community.general.rpm_ostree_pkg: - name: nfs-utils - state: present - -- name: Remove overlay package - community.general.rpm_ostree_pkg: - name: nfs-utils - state: absent -''' - -RETURN = r''' -rc: - description: Return code of rpm-ostree command. - returned: always - type: int - sample: 0 -changed: - description: State changes. - returned: always - type: bool - sample: True -action: - description: Action performed. - returned: always - type: str - sample: 'install' -packages: - description: A list of packages specified. - returned: always - type: list - sample: ['nfs-utils'] -stdout: - description: Stdout of rpm-ostree command. - returned: always - type: str - sample: 'Staging deployment...done\n...' -stderr: - description: Stderr of rpm-ostree command. - returned: always - type: str - sample: '' -cmd: - description: Full command used for performed action. - returned: always - type: str - sample: 'rpm-ostree uninstall --allow-inactive --idempotent --unchanged-exit-77 nfs-utils' -''' - -from ansible.module_utils.basic import AnsibleModule - - -class RpmOstreePkg: - def __init__(self, module): - self.module = module - self.params = module.params - self.state = module.params['state'] - - def ensure(self): - results = dict( - rc=0, - changed=False, - action='', - packages=[], - stdout='', - stderr='', - cmd='', - ) - - # Ensure rpm-ostree command exists - cmd = [self.module.get_bin_path('rpm-ostree', required=True)] - - # Decide action to perform - if self.state in ('present'): - results['action'] = 'install' - cmd.append('install') - elif self.state in ('absent'): - results['action'] = 'uninstall' - cmd.append('uninstall') - - # Additional parameters - cmd.extend(['--allow-inactive', '--idempotent', '--unchanged-exit-77']) - for pkg in self.params['name']: - cmd.append(pkg) - results['packages'].append(pkg) - - rc, out, err = self.module.run_command(cmd) - - results.update(dict( - rc=rc, - cmd=' 
'.join(cmd), - stdout=out, - stderr=err, - )) - - # A few possible options: - # - rc=0 - succeeded in making a change - # - rc=77 - no change was needed - # - rc=? - error - if rc == 0: - results['changed'] = True - elif rc == 77: - results['changed'] = False - results['rc'] = 0 - else: - self.module.fail_json(msg='non-zero return code', **results) - - self.module.exit_json(**results) - - -def main(): - module = AnsibleModule( - argument_spec=dict( - state=dict( - default="present", - choices=['absent', 'present'] - ), - name=dict( - aliases=["pkg"], - required=True, - type='list', - elements='str', - ), - ), - ) - - rpm_ostree_pkg = RpmOstreePkg(module) - rpm_ostree_pkg.ensure() - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/packaging/os/snap.py b/plugins/modules/packaging/os/snap.py deleted file mode 100644 index 9ac56d09bd..0000000000 --- a/plugins/modules/packaging/os/snap.py +++ /dev/null @@ -1,406 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Copyright: (c) 2021, Alexei Znamensky (russoz) -# Copyright: (c) 2021, Marcus Rickert -# Copyright: (c) 2018, Stanislas Lange (angristan) -# Copyright: (c) 2018, Victor Carceler - -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - -DOCUMENTATION = ''' ---- -module: snap -short_description: Manages snaps -description: - - "Manages snaps packages." -options: - name: - description: - - Name of the snaps. - required: true - type: list - elements: str - state: - description: - - Desired state of the package. - required: false - default: present - choices: [ absent, present, enabled, disabled ] - type: str - classic: - description: - - Confinement policy. The classic confinement allows a snap to have - the same level of access to the system as "classic" packages, - like those managed by APT. This option corresponds to the --classic argument. 
- This option can only be specified if there is a single snap in the task. - type: bool - required: false - default: no - channel: - description: - - Define which release of a snap is installed and tracked for updates. - This option can only be specified if there is a single snap in the task. - type: str - required: false - default: stable - options: - description: - - Set options with pattern C(key=value) or C(snap:key=value). If a snap name is given, the option will be applied - to that snap only. If the snap name is omitted, the options will be applied to all snaps listed in I(name). Options will - only be applied to active snaps. - required: false - type: list - elements: str - version_added: 4.4.0 - -author: - - Victor Carceler (@vcarceler) - - Stanislas Lange (@angristan) - -seealso: - - module: community.general.snap_alias -''' - -EXAMPLES = ''' -# Install "foo" and "bar" snap -- name: Install foo - community.general.snap: - name: - - foo - - bar - -# Install "foo" snap with options par1=A and par2=B -- name: Install "foo" with options - community.general.snap: - name: - - foo - options: - - par1=A - - par2=B - -# Install "foo" and "bar" snaps with common option com=A and specific options fooPar=X and barPar=Y -- name: Install "foo" and "bar" with options - community.general.snap: - name: - - foo - - bar - options: - - com=A - - foo:fooPar=X - - bar:barPar=Y - -# Remove "foo" snap -- name: Remove foo - community.general.snap: - name: foo - state: absent - -# Install a snap with classic confinement -- name: Install "foo" with option --classic - community.general.snap: - name: foo - classic: yes - -# Install a snap with from a specific channel -- name: Install "foo" with option --channel=latest/edge - community.general.snap: - name: foo - channel: latest/edge -''' - -RETURN = ''' -classic: - description: Whether or not the snaps were installed with the classic confinement - type: bool - returned: When snaps are installed -channel: - description: The channel 
the snaps were installed from - type: str - returned: When snaps are installed -cmd: - description: The command that was executed on the host - type: str - returned: When changed is true -snaps_installed: - description: The list of actually installed snaps - type: list - returned: When any snaps have been installed -snaps_removed: - description: The list of actually removed snaps - type: list - returned: When any snaps have been removed -options_changed: - description: The list of options set/changed in format C(snap:key=value). - type: list - returned: When any options have been changed/set - version_added: 4.4.0 -''' - -import re -import json -import numbers - -from ansible.module_utils.common.text.converters import to_native - -from ansible_collections.community.general.plugins.module_utils.module_helper import ( - CmdStateModuleHelper, ArgFormat -) - - -__state_map = dict( - present='install', - absent='remove', - enabled='enable', - disabled='disable', - info='info', # not public - list='list', # not public - set='set', # not public - get='get', # not public -) - - -def _state_map(value): - return [__state_map[value]] - - -class Snap(CmdStateModuleHelper): - __disable_re = re.compile(r'(?:\S+\s+){5}(?P\S+)') - __set_param_re = re.compile(r'(?P\S+:)?(?P\S+)=(?P\S+)') - module = dict( - argument_spec={ - 'name': dict(type='list', elements='str', required=True), - 'state': dict(type='str', default='present', - choices=['absent', 'present', 'enabled', 'disabled']), - 'classic': dict(type='bool', default=False), - 'channel': dict(type='str', default='stable'), - 'options': dict(type='list', elements='str'), - }, - supports_check_mode=True, - ) - command = "snap" - command_args_formats = dict( - actionable_snaps=dict(fmt=lambda v: v), - state=dict(fmt=_state_map), - classic=dict(fmt="--classic", style=ArgFormat.BOOLEAN), - channel=dict(fmt=lambda v: [] if v == 'stable' else ['--channel', '{0}'.format(v)]), - options=dict(fmt=list), - json_format=dict(fmt="-d", 
style=ArgFormat.BOOLEAN), - ) - check_rc = False - - @staticmethod - def _first_non_zero(a): - for elem in a: - if elem != 0: - return elem - - return 0 - - def _run_multiple_commands(self, commands): - outputs = [(c,) + self.run_command(params=c) for c in commands] - results = ([], [], [], []) - for output in outputs: - for i in range(4): - results[i].append(output[i]) - - return [ - '; '.join([to_native(x) for x in results[0]]), - self._first_non_zero(results[1]), - '\n'.join(results[2]), - '\n'.join(results[3]), - ] - - def convert_json_subtree_to_map(self, json_subtree, prefix=None): - option_map = {} - - if not isinstance(json_subtree, dict): - self.do_raise("Non-dict non-leaf element encountered while parsing option map. " - "The output format of 'snap set' may have changed. Aborting!") - - for key, value in json_subtree.items(): - full_key = key if prefix is None else prefix + "." + key - - if isinstance(value, (str, float, bool, numbers.Integral)): - option_map[full_key] = str(value) - - else: - option_map.update(self.convert_json_subtree_to_map(json_subtree=value, prefix=full_key)) - - return option_map - - def convert_json_to_map(self, json_string): - json_object = json.loads(json_string) - return self.convert_json_subtree_to_map(json_object) - - def retrieve_option_map(self, snap_name): - params = [{'state': 'get'}, {'name': snap_name}, {'json_format': True}] - rc, out, err = self.run_command(params=params) - - if rc != 0: - return {} - - result = out.splitlines() - - if "has no configuration" in result[0]: - return {} - - try: - option_map = self.convert_json_to_map(out) - - except Exception as e: - self.do_raise( - msg="Parsing option map returned by 'snap get {0}' triggers exception '{1}', output:\n'{2}'".format(snap_name, str(e), out)) - - return option_map - - def is_snap_installed(self, snap_name): - return 0 == self.run_command(params=[{'state': 'list'}, {'name': snap_name}])[0] - - def is_snap_enabled(self, snap_name): - rc, out, err = 
self.run_command(params=[{'state': 'list'}, {'name': snap_name}]) - if rc != 0: - return None - result = out.splitlines()[1] - match = self.__disable_re.match(result) - if not match: - self.do_raise(msg="Unable to parse 'snap list {0}' output:\n{1}".format(snap_name, out)) - notes = match.group('notes') - return "disabled" not in notes.split(',') - - def process_actionable_snaps(self, actionable_snaps): - self.changed = True - self.vars.snaps_installed = actionable_snaps - - if self.module.check_mode: - return - - params = ['state', 'classic', 'channel'] # get base cmd parts - has_one_pkg_params = bool(self.vars.classic) or self.vars.channel != 'stable' - has_multiple_snaps = len(actionable_snaps) > 1 - - if has_one_pkg_params and has_multiple_snaps: - commands = [params + [{'actionable_snaps': [s]}] for s in actionable_snaps] - else: - commands = [params + [{'actionable_snaps': actionable_snaps}]] - self.vars.cmd, rc, out, err = self._run_multiple_commands(commands) - - if rc == 0: - return - - classic_snap_pattern = re.compile(r'^error: This revision of snap "(?P\w+)"' - r' was published using classic confinement') - match = classic_snap_pattern.match(err) - if match: - err_pkg = match.group('package_name') - msg = "Couldn't install {name} because it requires classic confinement".format(name=err_pkg) - else: - msg = "Ooops! 
Snap installation failed while executing '{cmd}', please examine logs and " \ - "error output for more details.".format(cmd=self.vars.cmd) - self.do_raise(msg=msg) - - def state_present(self): - - self.vars.meta('classic').set(output=True) - self.vars.meta('channel').set(output=True) - actionable_snaps = [s for s in self.vars.name if not self.is_snap_installed(s)] - - if actionable_snaps: - self.process_actionable_snaps(actionable_snaps) - - self.set_options() - - def set_options(self): - if self.vars.options is None: - return - - actionable_snaps = [s for s in self.vars.name if self.is_snap_installed(s)] - overall_options_changed = [] - - for snap_name in actionable_snaps: - option_map = self.retrieve_option_map(snap_name=snap_name) - - options_changed = [] - - for option_string in self.vars.options: - match = self.__set_param_re.match(option_string) - - if not match: - msg = "Cannot parse set option '{option_string}'".format(option_string=option_string) - self.do_raise(msg) - - snap_prefix = match.group("snap_prefix") - selected_snap_name = snap_prefix[:-1] if snap_prefix else None - - if selected_snap_name is not None and selected_snap_name not in self.vars.name: - msg = "Snap option '{option_string}' refers to snap which is not in the list of snap names".format(option_string=option_string) - self.do_raise(msg) - - if selected_snap_name is None or (snap_name is not None and snap_name == selected_snap_name): - key = match.group("key") - value = match.group("value") - - if key not in option_map or key in option_map and option_map[key] != value: - option_without_prefix = key + "=" + value - option_with_prefix = option_string if selected_snap_name is not None else snap_name + ":" + option_string - options_changed.append(option_without_prefix) - overall_options_changed.append(option_with_prefix) - - if options_changed: - self.changed = True - - if not self.module.check_mode: - params = [{'state': 'set'}, {'name': snap_name}, {'options': options_changed}] - - rc, out, 
err = self.run_command(params=params) - - if rc != 0: - if 'has no "configure" hook' in err: - msg = "Snap '{snap}' does not have any configurable options".format(snap=snap_name) - self.do_raise(msg) - - msg = "Cannot set options '{options}' for snap '{snap}': error={error}".format( - options=" ".join(options_changed), snap=snap_name, error=err) - self.do_raise(msg) - - if overall_options_changed: - self.vars.options_changed = overall_options_changed - - def _generic_state_action(self, actionable_func, actionable_var, params=None): - actionable_snaps = [s for s in self.vars.name if actionable_func(s)] - if not actionable_snaps: - return - self.changed = True - self.vars[actionable_var] = actionable_snaps - if self.module.check_mode: - return - if params is None: - params = ['state'] - commands = [params + [{'actionable_snaps': actionable_snaps}]] - self.vars.cmd, rc, out, err = self._run_multiple_commands(commands) - if rc == 0: - return - msg = "Ooops! Snap operation failed while executing '{cmd}', please examine logs and " \ - "error output for more details.".format(cmd=self.vars.cmd) - self.do_raise(msg=msg) - - def state_absent(self): - self._generic_state_action(self.is_snap_installed, "snaps_removed", ['classic', 'channel', 'state']) - - def state_enabled(self): - self._generic_state_action(lambda s: not self.is_snap_enabled(s), "snaps_enabled", ['classic', 'channel', 'state']) - - def state_disabled(self): - self._generic_state_action(self.is_snap_enabled, "snaps_disabled", ['classic', 'channel', 'state']) - - -def main(): - snap = Snap() - snap.run() - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/packaging/os/snap_alias.py b/plugins/modules/packaging/os/snap_alias.py deleted file mode 100644 index 036be12004..0000000000 --- a/plugins/modules/packaging/os/snap_alias.py +++ /dev/null @@ -1,179 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# -# Copyright: (c) 2021, Alexei Znamensky (russoz) -# -# GNU General Public License v3.0+ 
(see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - -DOCUMENTATION = ''' ---- -module: snap_alias -short_description: Manages snap aliases -version_added: 4.0.0 -description: - - "Manages snaps aliases." -options: - state: - description: - - Desired state of the alias. - type: str - choices: [ absent, present ] - default: present - name: - description: - - Name of the snap. - type: str - alias: - description: - - Aliases to be created or removed. - type: list - elements: str - aliases: [aliases] - -author: - - Alexei Znamensky (@russoz) - -seealso: - - module: community.general.snap -''' - -EXAMPLES = ''' -# Install "foo" and "bar" snap -- name: Create snap alias - community.general.snap_alias: - name: hello-world - alias: hw - -- name: Create multiple aliases - community.general.snap_alias: - name: hello-world - aliases: - - hw - - hw2 - - hw3 - state: present # optional - -- name: Remove one specific aliases - community.general.snap_alias: - name: hw - state: absent - -- name: Remove all aliases for snap - community.general.snap_alias: - name: hello-world - state: absent -''' - -RETURN = ''' -snap_aliases: - description: The snap aliases after execution. If called in check mode, then the list represents the state before execution. 
- type: list - elements: str - returned: always -''' - - -import re - -from ansible_collections.community.general.plugins.module_utils.module_helper import ( - CmdStateModuleHelper -) - - -_state_map = dict( - present='alias', - absent='unalias', - info='aliases', -) - - -class SnapAlias(CmdStateModuleHelper): - _RE_ALIAS_LIST = re.compile(r"^(?P[\w-]+)\s+(?P[\w-]+)\s+.*$") - - module = dict( - argument_spec={ - 'state': dict(type='str', choices=['absent', 'present'], default='present'), - 'name': dict(type='str'), - 'alias': dict(type='list', elements='str', aliases=['aliases']), - }, - required_if=[ - ('state', 'present', ['name', 'alias']), - ('state', 'absent', ['name', 'alias'], True), - ], - supports_check_mode=True, - ) - command = "snap" - command_args_formats = dict( - _alias=dict(fmt=lambda v: [v]), - state=dict(fmt=lambda v: [_state_map[v]]), - ) - check_rc = False - - def _aliases(self): - n = self.vars.name - return {n: self._get_aliases_for(n)} if n else self._get_aliases() - - def __init_module__(self): - self.vars.set("snap_aliases", self._aliases(), change=True, diff=True) - - def __quit_module__(self): - self.vars.snap_aliases = self._aliases() - - def _get_aliases(self): - def process_get_aliases(rc, out, err): - if err: - return {} - aliases = [self._RE_ALIAS_LIST.match(a.strip()) for a in out.splitlines()[1:]] - snap_alias_list = [(entry.group("snap"), entry.group("alias")) for entry in aliases] - results = {} - for snap, alias in snap_alias_list: - results[snap] = results.get(snap, []) + [alias] - return results - - return self.run_command(params=[{'state': 'info'}, 'name'], check_rc=True, - publish_rc=False, publish_out=False, publish_err=False, publish_cmd=False, - process_output=process_get_aliases) - - def _get_aliases_for(self, name): - return self._get_aliases().get(name, []) - - def _has_alias(self, name=None, alias=None): - if name: - if name not in self.vars.snap_aliases: - return False - if alias is None: - return 
bool(self.vars.snap_aliases[name]) - return alias in self.vars.snap_aliases[name] - - return any(alias in aliases for aliases in self.vars.snap_aliases.values()) - - def state_present(self): - for alias in self.vars.alias: - if not self._has_alias(self.vars.name, alias): - self.changed = True - if not self.module.check_mode: - self.run_command(params=['state', 'name', {'_alias': alias}]) - - def state_absent(self): - if not self.vars.alias: - if self._has_alias(self.vars.name): - self.changed = True - if not self.module.check_mode: - self.run_command(params=['state', 'name']) - else: - for alias in self.vars.alias: - if self._has_alias(self.vars.name, alias): - self.changed = True - if not self.module.check_mode: - self.run_command(params=['state', {'_alias': alias}]) - - -def main(): - SnapAlias.execute() - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/cloud/packet/packet_device.py b/plugins/modules/packet_device.py similarity index 78% rename from plugins/modules/cloud/packet/packet_device.py rename to plugins/modules/packet_device.py index abafa51870..575d377b56 100644 --- a/plugins/modules/cloud/packet/packet_device.py +++ b/plugins/modules/packet_device.py @@ -1,42 +1,46 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- -# (c) 2016, Tomas Karasek -# (c) 2016, Matt Baldwin -# (c) 2016, Thibaud Morel l'Horset -# -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# Copyright (c) 2016, Tomas Karasek +# Copyright (c) 2016, Matt Baldwin +# Copyright (c) 2016, Thibaud Morel l'Horset +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: packet_device -short_description: Manage a bare metal server in the Packet Host. 
+short_description: Manage a bare metal server in the Packet Host description: - - Manage a bare metal server in the Packet Host (a "device" in the API terms). - - When the machine is created it can optionally wait for public IP address, or for active state. - - This module has a dependency on packet >= 1.0. - - API is documented at U(https://www.packet.net/developers/api/devices). - - + - Manage a bare metal server in the Packet Host (a "device" in the API terms). + - When the machine is created it can optionally wait for public IP address, or for active state. + - This module has a dependency on packet >= 1.0. + - API is documented at U(https://www.packet.net/developers/api/devices). author: - - Tomas Karasek (@t0mk) - - Matt Baldwin (@baldwinSPC) - - Thibaud Morel l'Horset (@teebes) + - Tomas Karasek (@t0mk) + - Matt Baldwin (@baldwinSPC) + - Thibaud Morel l'Horset (@teebes) + +extends_documentation_fragment: + - community.general.attributes + +attributes: + check_mode: + support: none + diff_mode: + support: none options: auth_token: description: - - Packet API token. You can also supply it in env var C(PACKET_API_TOKEN). + - Packet API token. You can also supply it in environment variable E(PACKET_API_TOKEN). type: str count: description: - - The number of devices to create. Count number can be included in hostname via the %d string formatter. + - The number of devices to create. Count number can be included in hostname using the C(%d) string formatter. default: 1 type: int @@ -73,8 +77,8 @@ options: hostnames: description: - A hostname of a device, or a list of hostnames. - - If given string or one-item list, you can use the C("%d") Python string format to expand numbers from I(count). - - If only one hostname, it might be expanded to list if I(count)>1. + - If given string or one-item list, you can use the C("%d") Python string format to expand numbers from O(count). + - If only one hostname, it might be expanded to list if O(count)>1. 
aliases: [name] type: list elements: str @@ -105,29 +109,32 @@ options: state: description: - Desired state of the device. - - If set to C(present) (the default), the module call will return immediately after the device-creating HTTP request successfully returns. - - If set to C(active), the module call will block until all the specified devices are in state active due to the Packet API, or until I(wait_timeout). + - If set to V(present) (the default), the module call returns immediately after the device-creating HTTP request successfully + returns. + - If set to V(active), the module call blocks until all the specified devices are in state active due to the Packet + API, or until O(wait_timeout). choices: [present, absent, active, inactive, rebooted] default: present type: str user_data: description: - - Userdata blob made available to the machine + - Userdata blob made available to the machine. type: str wait_for_public_IPv: description: - Whether to wait for the instance to be assigned a public IPv4/IPv6 address. - - If set to 4, it will wait until IPv4 is assigned to the instance. - - If set to 6, wait until public IPv6 is assigned to the instance. - choices: [4,6] + - If set to V(4), it waits until IPv4 is assigned to the instance. + - If set to V(6), it waits until public IPv6 is assigned to the instance. + choices: [4, 6] type: int wait_timeout: description: - - How long (seconds) to wait either for automatic IP address assignment, or for the device to reach the C(active) I(state). - - If I(wait_for_public_IPv) is set and I(state) is C(active), the module will wait for both events consequently, applying the timeout twice. + - How long (seconds) to wait either for automatic IP address assignment, or for the device to reach the V(active) state. + - If O(wait_for_public_IPv) is set and O(state=active), the module waits for both events consequently, applying the + timeout twice. 
default: 900 type: int @@ -136,6 +143,7 @@ options: - URL of custom iPXE script for provisioning. - More about custom iPXE for Packet devices at U(https://help.packet.net/technical/infrastructure/custom-ipxe). type: str + default: '' always_pxe: description: @@ -146,15 +154,11 @@ options: requirements: - - "packet-python >= 1.35" + - "packet-python >= 1.35" +""" -notes: - - Doesn't support check mode. - -''' - -EXAMPLES = ''' -# All the examples assume that you have your Packet API token in env var PACKET_API_TOKEN. +EXAMPLES = r""" +# All the examples assume that you have your Packet API token in environment variable PACKET_API_TOKEN. # You can also pass it to the auth_token parameter of the module instead. # Creating devices @@ -162,79 +166,79 @@ EXAMPLES = ''' - name: Create 1 device hosts: localhost tasks: - - community.general.packet_device: - project_id: 89b497ee-5afc-420a-8fb5-56984898f4df - hostnames: myserver - tags: ci-xyz - operating_system: ubuntu_16_04 - plan: baremetal_0 - facility: sjc1 + - community.general.packet_device: + project_id: 89b497ee-5afc-420a-8fb5-56984898f4df + hostnames: myserver + tags: ci-xyz + operating_system: ubuntu_16_04 + plan: baremetal_0 + facility: sjc1 -# Create the same device and wait until it is in state "active", (when it's +# Create the same device and wait until it is in state "active", (when it is # ready for other API operations). Fail if the device is not "active" in # 10 minutes. 
- name: Create device and wait up to 10 minutes for active state hosts: localhost tasks: - - community.general.packet_device: - project_id: 89b497ee-5afc-420a-8fb5-56984898f4df - hostnames: myserver - operating_system: ubuntu_16_04 - plan: baremetal_0 - facility: sjc1 - state: active - wait_timeout: 600 + - community.general.packet_device: + project_id: 89b497ee-5afc-420a-8fb5-56984898f4df + hostnames: myserver + operating_system: ubuntu_16_04 + plan: baremetal_0 + facility: sjc1 + state: active + wait_timeout: 600 - name: Create 3 ubuntu devices called server-01, server-02 and server-03 hosts: localhost tasks: - - community.general.packet_device: - project_id: 89b497ee-5afc-420a-8fb5-56984898f4df - hostnames: server-%02d - count: 3 - operating_system: ubuntu_16_04 - plan: baremetal_0 - facility: sjc1 + - community.general.packet_device: + project_id: 89b497ee-5afc-420a-8fb5-56984898f4df + hostnames: server-%02d + count: 3 + operating_system: ubuntu_16_04 + plan: baremetal_0 + facility: sjc1 - name: Create 3 coreos devices with userdata, wait until they get IPs and then wait for SSH hosts: localhost tasks: - - name: Create 3 devices and register their facts - community.general.packet_device: - hostnames: [coreos-one, coreos-two, coreos-three] - operating_system: coreos_stable - plan: baremetal_0 - facility: ewr1 - locked: true - project_id: 89b497ee-5afc-420a-8fb5-56984898f4df - wait_for_public_IPv: 4 - user_data: | - #cloud-config - ssh_authorized_keys: - - {{ lookup('file', 'my_packet_sshkey') }} - coreos: - etcd: - discovery: https://discovery.etcd.io/6a28e078895c5ec737174db2419bb2f3 - addr: $private_ipv4:4001 - peer-addr: $private_ipv4:7001 - fleet: - public-ip: $private_ipv4 - units: - - name: etcd.service - command: start - - name: fleet.service - command: start - register: newhosts + - name: Create 3 devices and register their facts + community.general.packet_device: + hostnames: [coreos-one, coreos-two, coreos-three] + operating_system: coreos_stable + 
plan: baremetal_0 + facility: ewr1 + locked: true + project_id: 89b497ee-5afc-420a-8fb5-56984898f4df + wait_for_public_IPv: 4 + user_data: | + #cloud-config + ssh_authorized_keys: + - {{ lookup('file', 'my_packet_sshkey') }} + coreos: + etcd: + discovery: https://discovery.etcd.io/6a28e078895c5ec737174db2419bb2f3 + addr: $private_ipv4:4001 + peer-addr: $private_ipv4:7001 + fleet: + public-ip: $private_ipv4 + units: + - name: etcd.service + command: start + - name: fleet.service + command: start + register: newhosts - - name: Wait for ssh - ansible.builtin.wait_for: - delay: 1 - host: "{{ item.public_ipv4 }}" - port: 22 - state: started - timeout: 500 - with_items: "{{ newhosts.devices }}" + - name: Wait for ssh + ansible.builtin.wait_for: + delay: 1 + host: "{{ item.public_ipv4 }}" + port: 22 + state: started + timeout: 500 + with_items: "{{ newhosts.devices }}" # Other states of devices @@ -242,31 +246,30 @@ EXAMPLES = ''' - name: Remove 3 devices by uuid hosts: localhost tasks: - - community.general.packet_device: - project_id: 89b497ee-5afc-420a-8fb5-56984898f4df - state: absent - device_ids: - - 1fb4faf8-a638-4ac7-8f47-86fe514c30d8 - - 2eb4faf8-a638-4ac7-8f47-86fe514c3043 - - 6bb4faf8-a638-4ac7-8f47-86fe514c301f -''' - -RETURN = ''' -changed: - description: True if a device was altered in any way (created, modified or removed) - type: bool - sample: True - returned: success + - community.general.packet_device: + project_id: 89b497ee-5afc-420a-8fb5-56984898f4df + state: absent + device_ids: + - 1fb4faf8-a638-4ac7-8f47-86fe514c30d8 + - 2eb4faf8-a638-4ac7-8f47-86fe514c3043 + - 6bb4faf8-a638-4ac7-8f47-86fe514c301f +""" +RETURN = r""" devices: - description: Information about each device that was processed - type: list - sample: '[{"hostname": "my-server.com", "id": "2a5122b9-c323-4d5c-b53c-9ad3f54273e7", - "public_ipv4": "147.229.15.12", "private-ipv4": "10.0.15.12", - "tags": [], "locked": false, "state": "provisioning", - "public_ipv6": ""2604:1380:2:5200::3"}]' 
- returned: success -''' # NOQA + description: Information about each device that was processed. + type: list + sample: + - "hostname": "my-server.com" + "id": "2a5122b9-c323-4d5c-b53c-9ad3f54273e7" + "public_ipv4": "147.229.15.12" + "private-ipv4": "10.0.15.12" + "tags": [] + "locked": false + "state": "provisioning" + "public_ipv6": "2604:1380:2:5200::3" + returned: success +""" import os @@ -284,8 +287,6 @@ try: except ImportError: HAS_PACKET_SDK = False -from ansible.module_utils.basic import AnsibleModule - NAME_RE = r'({0}|{0}{1}*{0})'.format(r'[a-zA-Z0-9]', r'[a-zA-Z0-9\-]') HOSTNAME_RE = r'({0}\.)*{0}$'.format(NAME_RE) @@ -411,12 +412,12 @@ def get_hostname_list(module): # at this point, hostnames is a list hostnames = [h.strip() for h in hostnames] - if (len(hostnames) > 1) and (count > 1): + if len(hostnames) > 1 and count > 1: _msg = ("If you set count>1, you should only specify one hostname " "with the %d formatter, not a list of hostnames.") raise Exception(_msg) - if (len(hostnames) == 1) and (count > 0): + if len(hostnames) == 1 and count > 0: hostname_spec = hostnames[0] count_range = range(count_offset, count_offset + count) if re.search(r"%\d{0,2}d", hostname_spec): diff --git a/plugins/modules/cloud/packet/packet_ip_subnet.py b/plugins/modules/packet_ip_subnet.py similarity index 78% rename from plugins/modules/cloud/packet/packet_ip_subnet.py rename to plugins/modules/packet_ip_subnet.py index 718de36f22..e3b0204158 100644 --- a/plugins/modules/cloud/packet/packet_ip_subnet.py +++ b/plugins/modules/packet_ip_subnet.py @@ -1,48 +1,54 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- -# Copyright: (c) 2019, Nurfet Becirevic -# Copyright: (c) 2017, Tomas Karasek -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# Copyright (c) 2019, Nurfet Becirevic +# Copyright (c) 2017, Tomas Karasek +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# 
SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import absolute_import, division, print_function +from __future__ import annotations -__metaclass__ = type -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: packet_ip_subnet -short_description: Assign IP subnet to a bare metal server. +short_description: Assign IP subnet to a bare metal server description: - - Assign or unassign IPv4 or IPv6 subnets to or from a device in the Packet host. - - IPv4 subnets must come from already reserved block. - - IPv6 subnets must come from publicly routable /56 block from your project. - - See U(https://support.packet.com/kb/articles/elastic-ips) for more info on IP block reservation. - + - Assign or unassign IPv4 or IPv6 subnets to or from a device in the Packet host. + - IPv4 subnets must come from already reserved block. + - IPv6 subnets must come from publicly routable /56 block from your project. + - See U(https://support.packet.com/kb/articles/elastic-ips) for more info on IP block reservation. version_added: '0.2.0' author: - - Tomas Karasek (@t0mk) - - Nurfet Becirevic (@nurfet-becirevic) + - Tomas Karasek (@t0mk) + - Nurfet Becirevic (@nurfet-becirevic) + +extends_documentation_fragment: + - community.general.attributes + +attributes: + check_mode: + support: full + diff_mode: + support: none options: auth_token: description: - - Packet API token. You can also supply it in env var C(PACKET_API_TOKEN). + - Packet API token. You can also supply it in environment variable E(PACKET_API_TOKEN). type: str hostname: description: - A hostname of a device to/from which to assign/remove a subnet. - required: False + required: false type: str device_id: description: - UUID of a device to/from which to assign/remove a subnet. - required: False + required: false type: str project_id: @@ -67,19 +73,20 @@ options: state: description: - Desired state of the IP subnet on the specified device. - - With state == C(present), you must specify either hostname or device_id. 
Subnet with given CIDR will then be assigned to the specified device. - - With state == C(absent), you can specify either hostname or device_id. The subnet will be removed from specified devices. - - If you leave both hostname and device_id empty, the subnet will be removed from any device it's assigned to. + - With O(state=present), you must specify either O(hostname) or O(device_id). Subnet with given CIDR is then assigned + to the specified device. + - With O(state=absent), you can specify either O(hostname) or O(device_id). The subnet is then removed from specified + devices. + - If you leave both O(hostname) and O(device_id) empty, the subnet is then removed from any device it is assigned to. choices: ['present', 'absent'] default: 'present' type: str requirements: - - "packet-python >= 1.35" - - "python >= 2.6" -''' + - "packet-python >= 1.35" +""" -EXAMPLES = ''' +EXAMPLES = r""" # All the examples assume that you have your Packet API token in env var PACKET_API_TOKEN. # You can also pass it to the auth_token parameter of the module instead. @@ -87,39 +94,33 @@ EXAMPLES = ''' hosts: localhost tasks: - - packet_device: - project_id: 89b497ee-5afc-420a-8fb5-56984898f4df - hostnames: myserver - operating_system: ubuntu_16_04 - plan: baremetal_0 - facility: sjc1 - state: active + - packet_device: + project_id: 89b497ee-5afc-420a-8fb5-56984898f4df + hostnames: myserver + operating_system: ubuntu_16_04 + plan: baremetal_0 + facility: sjc1 + state: active # Pick an IPv4 address from a block allocated to your project. 
- - community.general.packet_ip_subnet: - project_id: 89b497ee-5afc-420a-8fb5-56984898f4df - hostname: myserver - cidr: "147.75.201.78/32" + - community.general.packet_ip_subnet: + project_id: 89b497ee-5afc-420a-8fb5-56984898f4df + hostname: myserver + cidr: "147.75.201.78/32" # Release IP address 147.75.201.78 - name: Unassign IP address from any device in your project hosts: localhost tasks: - - community.general.packet_ip_subnet: - project_id: 89b497ee-5afc-420a-8fb5-56984898f4df - cidr: "147.75.201.78/32" - state: absent -''' - -RETURN = ''' -changed: - description: True if an IP address assignments were altered in any way (created or removed). - type: bool - sample: True - returned: success + - community.general.packet_ip_subnet: + project_id: 89b497ee-5afc-420a-8fb5-56984898f4df + cidr: "147.75.201.78/32" + state: absent +""" +RETURN = r""" device_id: type: str description: UUID of the device associated with the specified IP address. @@ -131,20 +132,20 @@ subnet: sample: address: 147.75.90.241 address_family: 4 - assigned_to: { href : /devices/61f9aa5e-0530-47f5-97c2-113828e61ed0 } + assigned_to: {href: /devices/61f9aa5e-0530-47f5-97c2-113828e61ed0} cidr: 31 created_at: '2017-08-07T15:15:30Z' - enabled: True + enabled: true gateway: 147.75.90.240 href: /ips/31eda960-0a16-4c0f-b196-f3dc4928529f id: 1eda960-0a16-4c0f-b196-f3dc4928529f - manageable: True - management: True + manageable: true + management: true netmask: 255.255.255.254 network: 147.75.90.240 - public: True + public: true returned: success -''' +""" import uuid @@ -216,7 +217,7 @@ def parse_subnet_cidr(cidr): try: prefixlen = int(prefixlen) except ValueError: - raise("Wrong prefix length in CIDR expression {0}".format(cidr)) + raise Exception("Wrong prefix length in CIDR expression {0}".format(cidr)) return addr, prefixlen diff --git a/plugins/modules/cloud/packet/packet_project.py b/plugins/modules/packet_project.py similarity index 85% rename from plugins/modules/cloud/packet/packet_project.py 
rename to plugins/modules/packet_project.py index c6502c6ea6..be69c3b5b8 100644 --- a/plugins/modules/cloud/packet/packet_project.py +++ b/plugins/modules/packet_project.py @@ -1,29 +1,35 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- -# Copyright: (c) 2019, Nurfet Becirevic -# Copyright: (c) 2019, Tomas Karasek -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# Copyright (c) 2019, Nurfet Becirevic +# Copyright (c) 2019, Tomas Karasek +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import absolute_import, division, print_function +from __future__ import annotations -__metaclass__ = type -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: packet_project -short_description: Create/delete a project in Packet host. +short_description: Create/delete a project in Packet host description: - - Create/delete a project in Packet host. - - API is documented at U(https://www.packet.com/developers/api/#projects). - + - Create/delete a project in Packet host. + - API is documented at U(https://www.packet.com/developers/api/#projects). version_added: '0.2.0' author: - - Tomas Karasek (@t0mk) - - Nurfet Becirevic (@nurfet-becirevic) + - Tomas Karasek (@t0mk) + - Nurfet Becirevic (@nurfet-becirevic) + +extends_documentation_fragment: + - community.general.attributes + +attributes: + check_mode: + support: full + diff_mode: + support: none options: state: @@ -41,13 +47,13 @@ options: auth_token: description: - - Packet api token. You can also supply it in env var C(PACKET_API_TOKEN). + - Packet API token. You can also supply it in environment variable E(PACKET_API_TOKEN). type: str name: - description: - - Name for/of the project. - type: str + description: + - Name for/of the project. 
+ type: str org_id: description: @@ -66,12 +72,10 @@ options: type: str requirements: - - "python >= 2.6" - - "packet-python >= 1.40" + - "packet-python >= 1.40" +""" -''' - -EXAMPLES = ''' +EXAMPLES = r""" # All the examples assume that you have your Packet API token in env var PACKET_API_TOKEN. # You can also pass the api token in module param auth_token. @@ -101,15 +105,9 @@ EXAMPLES = ''' community.general.packet_project: name: "newer project" payment_method: "the other visa" -''' - -RETURN = ''' -changed: - description: True if a project was created or removed. - type: bool - sample: True - returned: success +""" +RETURN = r""" name: description: Name of addressed project. type: str @@ -119,7 +117,7 @@ id: description: UUID of addressed project. type: str returned: success -''' +""" from ansible.module_utils.basic import AnsibleModule, env_fallback from ansible.module_utils.common.text.converters import to_native diff --git a/plugins/modules/cloud/packet/packet_sshkey.py b/plugins/modules/packet_sshkey.py similarity index 76% rename from plugins/modules/cloud/packet/packet_sshkey.py rename to plugins/modules/packet_sshkey.py index 77f3a70201..a6962e34c0 100644 --- a/plugins/modules/cloud/packet/packet_sshkey.py +++ b/plugins/modules/packet_sshkey.py @@ -1,60 +1,63 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- # Copyright 2016 Tomas Karasek -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: packet_sshkey -short_description: Create/delete an SSH key in Packet host. +short_description: Create/delete an SSH key in Packet host description: - - Create/delete an SSH key in Packet host. 
- - API is documented at U(https://www.packet.net/help/api/#page:ssh-keys,header:ssh-keys-ssh-keys-post). + - Create/delete an SSH key in Packet host. + - API is documented at U(https://www.packet.net/help/api/#page:ssh-keys,header:ssh-keys-ssh-keys-post). author: "Tomas Karasek (@t0mk) " +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: none + diff_mode: + support: none options: state: description: - - Indicate desired state of the target. + - Indicate desired state of the target. default: present choices: ['present', 'absent'] type: str auth_token: description: - - Packet API token. You can also supply it in env var C(PACKET_API_TOKEN). + - Packet API token. You can also supply it in environment variable E(PACKET_API_TOKEN). type: str label: description: - - Label for the key. If you keep it empty, it will be read from key string. + - Label for the key. If you keep it empty, it is read from key string. type: str aliases: [name] id: description: - - UUID of the key which you want to remove. + - UUID of the key which you want to remove. type: str fingerprint: description: - - Fingerprint of the key which you want to remove. + - Fingerprint of the key which you want to remove. type: str key: description: - - Public Key string ({type} {base64 encoded key} {description}). + - Public Key string (V({type} {base64 encoded key} {description})). type: str key_file: description: - - File with the public key. + - File with the public key. type: path requirements: - - "python >= 2.6" - packet-python +""" -''' - -EXAMPLES = ''' +EXAMPLES = r""" # All the examples assume that you have your Packet API token in env var PACKET_API_TOKEN. # You can also pass the api token in module param auth_token. @@ -77,27 +80,23 @@ EXAMPLES = ''' community.general.packet_sshkey: state: absent id: eef49903-7a09-4ca1-af67-4087c29ab5b6 -''' +""" -RETURN = ''' -changed: - description: True if a sshkey was created or removed. 
- type: bool - sample: True - returned: always +RETURN = r""" sshkeys: - description: Information about sshkeys that were created/removed. - type: list - sample: [ - { - "fingerprint": "5c:93:74:7c:ed:07:17:62:28:75:79:23:d6:08:93:46", - "id": "41d61bd8-3342-428b-a09c-e67bdd18a9b7", - "key": "ssh-dss AAAAB3NzaC1kc3MAAACBAIfNT5S0ncP4BBJBYNhNPxFF9lqVhfPeu6SM1LoCocxqDc1AT3zFRi8hjIf6TLZ2AA4FYbcAWxLMhiBxZRVldT9GdBXile78kAK5z3bKTwq152DCqpxwwbaTIggLFhsU8wrfBsPWnDuAxZ0h7mmrCjoLIE3CNLDA/NmV3iB8xMThAAAAFQCStcesSgR1adPORzBxTr7hug92LwAAAIBOProm3Gk+HWedLyE8IfofLaOeRnbBRHAOL4z0SexKkVOnQ/LGN/uDIIPGGBDYTvXgKZT+jbHeulRJ2jKgfSpGKN4JxFQ8uzVH492jEiiUJtT72Ss1dCV4PmyERVIw+f54itihV3z/t25dWgowhb0int8iC/OY3cGodlmYb3wdcQAAAIBuLbB45djZXzUkOTzzcRDIRfhaxo5WipbtEM2B1fuBt2gyrvksPpH/LK6xTjdIIb0CxPu4OCxwJG0aOz5kJoRnOWIXQGhH7VowrJhsqhIc8gN9ErbO5ea8b1L76MNcAotmBDeTUiPw01IJ8MdDxfmcsCslJKgoRKSmQpCwXQtN2g== tomk@hp2", - "label": "mynewkey33" - } + description: Information about sshkeys that were created/removed. + type: list + sample: + [ + { + "fingerprint": "5c:93:74:7c:ed:07:17:62:28:75:79:23:d6:08:93:46", + "id": "41d61bd8-3342-428b-a09c-e67bdd18a9b7", + "key": "ssh-dss AAAAB3NzaC1kc3MAAACBA ... 
MdDxfmcsCslJKgoRKSmQpCwXQtN2g== user@server", + "label": "mynewkey33" + } ] - returned: always -''' # NOQA + returned: always +""" import os import uuid @@ -175,7 +174,7 @@ def get_sshkey_selector(module): def act_on_sshkeys(target_state, module, packet_conn): selector = get_sshkey_selector(module) existing_sshkeys = packet_conn.list_ssh_keys() - matching_sshkeys = filter(selector, existing_sshkeys) + matching_sshkeys = list(filter(selector, existing_sshkeys)) changed = False if target_state == 'present': if matching_sshkeys == []: diff --git a/plugins/modules/cloud/packet/packet_volume.py b/plugins/modules/packet_volume.py similarity index 81% rename from plugins/modules/cloud/packet/packet_volume.py rename to plugins/modules/packet_volume.py index 4f10bdf45b..826d9bc854 100644 --- a/plugins/modules/cloud/packet/packet_volume.py +++ b/plugins/modules/packet_volume.py @@ -1,28 +1,34 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- -# Copyright: (c) 2019, Nurfet Becirevic -# Copyright: (c) 2017, Tomas Karasek -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# Copyright (c) 2019, Nurfet Becirevic +# Copyright (c) 2017, Tomas Karasek +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: packet_volume -short_description: Create/delete a volume in Packet host. +short_description: Create/delete a volume in Packet host description: - - Create/delete a volume in Packet host. - - API is documented at U(https://www.packet.com/developers/api/#volumes). - + - Create/delete a volume in Packet host. + - API is documented at U(https://www.packet.com/developers/api/#volumes). 
version_added: '0.2.0' author: - - Tomas Karasek (@t0mk) - - Nurfet Becirevic (@nurfet-becirevic) + - Tomas Karasek (@t0mk) + - Nurfet Becirevic (@nurfet-becirevic) + +extends_documentation_fragment: + - community.general.attributes + +attributes: + check_mode: + support: full + diff_mode: + support: none options: state: @@ -40,19 +46,18 @@ options: auth_token: description: - - Packet API token. You can also supply it in env var C(PACKET_API_TOKEN). + - Packet API token. You can also supply it in environment variable E(PACKET_API_TOKEN). type: str name: description: - - Selector for API-generated name of the volume + - Selector for API-generated name of the volume. type: str description: description: - User-defined description attribute for Packet volume. - - "It is used used as idempotent identifier - if volume with given - description exists, new one is not created." + - It is used as idempotent identifier - if volume with given description exists, new one is not created. type: str id: @@ -62,7 +67,7 @@ options: plan: description: - - storage_1 for standard tier, storage_2 for premium (performance) tier. + - V(storage_1) for standard tier, V(storage_2) for premium (performance) tier. - Tiers are described at U(https://www.packet.com/cloud/storage/). choices: ['storage_1', 'storage_2'] default: 'storage_1' @@ -81,9 +86,9 @@ options: locked: description: - - Create new volume locked. + - Create new volume locked. type: bool - default: False + default: false billing_cycle: description: @@ -101,23 +106,21 @@ options: snapshot_count: description: - How many snapshots to keep, a positive integer. - required: True + required: true type: int snapshot_frequency: description: - Frequency of snapshots. 
- required: True + required: true choices: ["15min", "1hour", "1day", "1week", "1month", "1year"] type: str requirements: - - "python >= 2.6" - "packet-python >= 1.35" +""" -''' - -EXAMPLES = ''' +EXAMPLES = r""" # All the examples assume that you have your Packet API token in env var PACKET_API_TOKEN. # You can also pass the api token in module param auth_token. @@ -145,25 +148,25 @@ EXAMPLES = ''' id: "{{ result_create.id }}" project_id: "{{ project_id }}" state: absent -''' +""" -RETURN = ''' +RETURN = r""" id: - description: UUID of specified volume - type: str - returned: success - sample: 53000fb2-ee46-4673-93a8-de2c2bdba33c + description: UUID of specified volume. + type: str + returned: success + sample: 53000fb2-ee46-4673-93a8-de2c2bdba33c name: - description: The API-generated name of the volume resource. - type: str - returned: if volume is attached/detached to/from some device - sample: "volume-a91dc506" + description: The API-generated name of the volume resource. + type: str + returned: if volume is attached/detached to/from some device + sample: "volume-a91dc506" description: - description: The user-defined description of the volume resource. - type: str - returned: success - sample: "Just another volume" -''' + description: The user-defined description of the volume resource. 
+ type: str + returned: success + sample: "Just another volume" +""" import uuid diff --git a/plugins/modules/cloud/packet/packet_volume_attachment.py b/plugins/modules/packet_volume_attachment.py similarity index 84% rename from plugins/modules/cloud/packet/packet_volume_attachment.py rename to plugins/modules/packet_volume_attachment.py index 9044fbcffa..4308233bc1 100644 --- a/plugins/modules/cloud/packet/packet_volume_attachment.py +++ b/plugins/modules/packet_volume_attachment.py @@ -1,32 +1,37 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- -# Copyright: (c) 2019, Nurfet Becirevic -# Copyright: (c) 2017, Tomas Karasek -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# Copyright (c) 2019, Nurfet Becirevic +# Copyright (c) 2017, Tomas Karasek +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: packet_volume_attachment -short_description: Attach/detach a volume to a device in the Packet host. +short_description: Attach/detach a volume to a device in the Packet host description: - - Attach/detach a volume to a device in the Packet host. - - API is documented at U(https://www.packet.com/developers/api/volumes/). - - "This module creates the attachment route in the Packet API. In order to discover - the block devices on the server, you have to run the Attach Scripts, - as documented at U(https://help.packet.net/technical/storage/packet-block-storage-linux)." - + - Attach/detach a volume to a device in the Packet host. + - API is documented at U(https://www.packet.com/developers/api/volumes/). + - This module creates the attachment route in the Packet API. 
In order to discover the block devices on the server, you + have to run the Attach Scripts, as documented at U(https://help.packet.net/technical/storage/packet-block-storage-linux). version_added: '0.2.0' author: - - Tomas Karasek (@t0mk) - - Nurfet Becirevic (@nurfet-becirevic) + - Tomas Karasek (@t0mk) + - Nurfet Becirevic (@nurfet-becirevic) + +extends_documentation_fragment: + - community.general.attributes + +attributes: + check_mode: + support: full + diff_mode: + support: none options: state: @@ -38,7 +43,7 @@ options: auth_token: description: - - Packet API token. You can also supply it in env var C(PACKET_API_TOKEN). + - Packet API token. You can also supply it in environment variable E(PACKET_API_TOKEN). type: str project_id: @@ -51,7 +56,7 @@ options: description: - Selector for the volume. - It can be a UUID, an API-generated volume name, or user-defined description string. - - 'Example values: 4a347482-b546-4f67-8300-fb5018ef0c5, volume-4a347482, "my volume"' + - 'Example values: V(4a347482-b546-4f67-8300-fb5018ef0c5), V(volume-4a347482), V(my volume).' type: str required: true @@ -59,16 +64,14 @@ options: description: - Selector for the device. - It can be a UUID of the device, or a hostname. - - 'Example values: 98a14f7a-3d27-4478-b7cf-35b5670523f3, "my device"' + - 'Example values: 98a14f7a-3d27-4478-b7cf-35b5670523f3, "my device".' type: str requirements: - - "python >= 2.6" - "packet-python >= 1.35" +""" -''' - -EXAMPLES = ''' +EXAMPLES = r""" # All the examples assume that you have your Packet API token in env var PACKET_API_TOKEN. # You can also pass the api token in module param auth_token. @@ -113,19 +116,19 @@ EXAMPLES = ''' volume: "{{ volname }}" device: "{{ devname }}" state: absent -''' +""" -RETURN = ''' +RETURN = r""" volume_id: - description: UUID of volume addressed by the module call. - type: str - returned: success + description: UUID of volume addressed by the module call. 
+ type: str + returned: success device_id: - description: UUID of device addressed by the module call. - type: str - returned: success -''' + description: UUID of device addressed by the module call. + type: str + returned: success +""" import uuid diff --git a/plugins/modules/packaging/os/pacman.py b/plugins/modules/pacman.py similarity index 68% rename from plugins/modules/packaging/os/pacman.py rename to plugins/modules/pacman.py index a3e0b2f589..49d6c9a571 100644 --- a/plugins/modules/packaging/os/pacman.py +++ b/plugins/modules/pacman.py @@ -1,156 +1,159 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- -# Copyright: (c) 2012, Afterburn -# Copyright: (c) 2013, Aaron Bull Schaefer -# Copyright: (c) 2015, Indrajit Raychaudhuri -# Copyright: (c) 2022, Jean Raby -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# Copyright (c) 2012, Afterburn +# Copyright (c) 2013, Aaron Bull Schaefer +# Copyright (c) 2015, Indrajit Raychaudhuri +# Copyright (c) 2022, Jean Raby +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import absolute_import, division, print_function +from __future__ import annotations -__metaclass__ = type -DOCUMENTATION = """ ---- +DOCUMENTATION = r""" module: pacman short_description: Manage packages with I(pacman) description: - - Manage packages with the I(pacman) package manager, which is used by Arch Linux and its variants. + - Manage packages with the I(pacman) package manager, which is used by Arch Linux and its variants. 
author: - - Indrajit Raychaudhuri (@indrajitr) - - Aaron Bull Schaefer (@elasticdog) - - Maxime de Roucy (@tchernomax) - - Jean Raby (@jraby) + - Indrajit Raychaudhuri (@indrajitr) + - Aaron Bull Schaefer (@elasticdog) + - Maxime de Roucy (@tchernomax) + - Jean Raby (@jraby) +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: full options: - name: - description: - - Name or list of names of the package(s) or file(s) to install, upgrade, or remove. - Can't be used in combination with C(upgrade). - aliases: [ package, pkg ] - type: list - elements: str - - state: - description: - - Whether to install (C(present) or C(installed), C(latest)), or remove (C(absent) or C(removed)) a package. - - C(present) and C(installed) will simply ensure that a desired package is installed. - - C(latest) will update the specified package if it is not of the latest available version. - - C(absent) and C(removed) will remove the specified package. - default: present - choices: [ absent, installed, latest, present, removed ] - type: str - - force: - description: - - When removing packages, forcefully remove them, without any checks. - Same as C(extra_args="--nodeps --nodeps"). - When combined with I(update_cache), force a refresh of all package databases. - Same as C(update_cache_extra_args="--refresh --refresh"). - default: no - type: bool - - remove_nosave: - description: - - When removing packages, do not save modified configuration files as C(.pacsave) files. - (passes C(--nosave) to pacman) - version_added: 4.6.0 - default: no - type: bool - - executable: - description: - - Path of the binary to use. This can either be C(pacman) or a pacman compatible AUR helper. - - Pacman compatibility is unfortunately ill defined, in particular, this modules makes - extensive use of the C(--print-format) directive which is known not to be implemented by - some AUR helpers (notably, C(yay)). 
- - Beware that AUR helpers might behave unexpectedly and are therefore not recommended. - default: pacman - type: str - version_added: 3.1.0 - - extra_args: - description: - - Additional option to pass to pacman when enforcing C(state). - default: - type: str - - update_cache: - description: - - Whether or not to refresh the master package lists. - - This can be run as part of a package installation or as a separate step. - - If not specified, it defaults to C(false). - - Please note that this option only had an influence on the module's C(changed) state - if I(name) and I(upgrade) are not specified before community.general 5.0.0. - See the examples for how to keep the old behavior. - type: bool - - update_cache_extra_args: - description: - - Additional option to pass to pacman when enforcing C(update_cache). - default: - type: str - - upgrade: - description: - - Whether or not to upgrade the whole system. - Can't be used in combination with C(name). - - If not specified, it defaults to C(false). - type: bool - - upgrade_extra_args: - description: - - Additional option to pass to pacman when enforcing C(upgrade). - default: - type: str - -notes: - - When used with a C(loop:) each package will be processed individually, - it is much more efficient to pass the list directly to the I(name) option. - - To use an AUR helper (I(executable) option), a few extra setup steps might be required beforehand. - For example, a dedicated build user with permissions to install packages could be necessary. -""" - -RETURN = """ -packages: + name: description: - - A list of packages that have been changed. - - Before community.general 4.5.0 this was only returned when I(upgrade=true). - In community.general 4.5.0, it was sometimes omitted when the package list is empty, - but since community.general 4.6.0 it is always returned when I(name) is specified or - I(upgrade=true). 
- returned: success and I(name) is specified or I(upgrade=true) + - Name or list of names of the package(s) or file(s) to install, upgrade, or remove. Cannot be used in combination with + O(upgrade). + aliases: [package, pkg] type: list elements: str - sample: [ package, other-package ] -cache_updated: + state: description: - - The changed status of C(pacman -Sy). - - Useful when I(name) or I(upgrade=true) are specified next to I(update_cache=true). - returned: success, when I(update_cache=true) + - Whether to install (V(present) or V(installed), V(latest)), or remove (V(absent) or V(removed)) a package. + - V(present) and V(installed) simply ensure that a desired package is installed. + - V(latest) updates the specified package if it is not of the latest available version. + - V(absent) and V(removed) remove the specified package. + default: present + choices: [absent, installed, latest, present, removed] + type: str + + force: + description: + - When removing packages, forcefully remove them, without any checks. Same as O(extra_args="--nodeps --nodeps"). + - When combined with O(update_cache), force a refresh of all package databases. Same as O(update_cache_extra_args="--refresh + --refresh"). + default: false type: bool - sample: false + + remove_nosave: + description: + - When removing packages, do not save modified configuration files as C(.pacsave) files. (passes C(--nosave) to pacman). version_added: 4.6.0 + default: false + type: bool -stdout: + executable: description: - - Output from pacman. - returned: success, when needed + - Path of the binary to use. This can either be C(pacman) or a pacman compatible AUR helper. + - Pacman compatibility is unfortunately ill defined, in particular, this modules makes extensive use of the C(--print-format) + directive which is known not to be implemented by some AUR helpers (notably, C(yay)). + - Beware that AUR helpers might behave unexpectedly and are therefore not recommended. 
+ default: pacman type: str - sample: ":: Synchronizing package databases... core is up to date :: Starting full system upgrade..." - version_added: 4.1.0 + version_added: 3.1.0 -stderr: + extra_args: description: - - Error output from pacman. - returned: success, when needed + - Additional option to pass to pacman when enforcing O(state). + default: '' type: str - sample: "warning: libtool: local (2.4.6+44+gb9b44533-14) is newer than core (2.4.6+42+gb88cebd5-15)\nwarning ..." - version_added: 4.1.0 + + update_cache: + description: + - Whether or not to refresh the master package lists. + - This can be run as part of a package installation or as a separate step. + - If not specified, it defaults to V(false). + - Please note that this option only had an influence on the module's C(changed) state if O(name) and O(upgrade) are + not specified before community.general 5.0.0. See the examples for how to keep the old behavior. + type: bool + + update_cache_extra_args: + description: + - Additional option to pass to pacman when enforcing O(update_cache). + default: '' + type: str + + upgrade: + description: + - Whether or not to upgrade the whole system. Cannot be used in combination with O(name). + - If not specified, it defaults to V(false). + type: bool + + upgrade_extra_args: + description: + - Additional option to pass to pacman when enforcing O(upgrade). + default: '' + type: str + + reason: + description: + - The install reason to set for the packages. + choices: [dependency, explicit] + type: str + version_added: 5.4.0 + + reason_for: + description: + - Set the install reason for V(all) packages or only for V(new) packages. + - In case of O(state=latest) already installed packages which are updated to a newer version are not counted as V(new). 
+ default: new + choices: [all, new] + type: str + version_added: 5.4.0 + +notes: + - When used with a C(loop:) each package is processed individually, it is much more efficient to pass the list directly + to the O(name) option. + - To use an AUR helper (O(executable) option), a few extra setup steps might be required beforehand. For example, a dedicated + build user with permissions to install packages could be necessary. + - 'In the tests, while using C(yay) as the O(executable) option, the module failed to install AUR packages with the error: + C(error: target not found: ). This is caused by an incompatibility of yay with the arguments passed by this module. + See L(yay bug #1744 report for details, https://github.com/Jguer/yay/issues/1744).' + - The common return values `stdout` and `stderr` are returned upon success, when needed, since community.general 4.1.0. """ -EXAMPLES = """ +RETURN = r""" +packages: + description: + - A list of packages that have been changed. + - Before community.general 4.5.0 this was only returned when O(upgrade=true). In community.general 4.5.0, it was sometimes + omitted when the package list is empty, but since community.general 4.6.0 it is always returned when O(name) is specified + or O(upgrade=true). + returned: success and O(name) is specified or O(upgrade=true) + type: list + elements: str + sample: ["package", "other-package"] + +cache_updated: + description: + - The changed status of C(pacman -Sy). + - Useful when O(name) or O(upgrade=true) are specified next to O(update_cache=true). 
+ returned: success, when O(update_cache=true) + type: bool + sample: false + version_added: 4.6.0 +""" + +EXAMPLES = r""" - name: Install package foo from repo community.general.pacman: name: foo @@ -181,7 +184,7 @@ EXAMPLES = """ community.general.pacman: name: foo state: latest - update_cache: yes + update_cache: true - name: Remove packages foo and bar community.general.pacman: @@ -198,11 +201,11 @@ EXAMPLES = """ - name: Run the equivalent of "pacman -Sy" as a separate step community.general.pacman: - update_cache: yes + update_cache: true - name: Run the equivalent of "pacman -Su" as a separate step community.general.pacman: - upgrade: yes + upgrade: true - name: Run the equivalent of "pacman -Syu" as a separate step # Since community.general 5.0.0 the 'changed' state of this call @@ -215,16 +218,31 @@ EXAMPLES = """ # register: result # changed_when: result.packages | length > 0 community.general.pacman: - update_cache: yes - upgrade: yes + update_cache: true + upgrade: true - name: Run the equivalent of "pacman -Rdd", force remove package baz community.general.pacman: name: baz state: absent - force: yes + force: true + +- name: Install foo as dependency and leave reason untouched if already installed + community.general.pacman: + name: foo + state: present + reason: dependency + reason_for: new + +- name: Run the equivalent of "pacman -S --asexplicit", mark foo as explicit and install it if not present + community.general.pacman: + name: foo + state: present + reason: explicit + reason_for: all """ +import re import shlex from ansible.module_utils.basic import AnsibleModule from collections import defaultdict, namedtuple @@ -325,13 +343,21 @@ class Pacman(object): self.install_packages(pkgs) self.success() - # This shouldn't happen... 
- self.fail("This is a bug") + # This happens if an empty list has been provided for name + self.add_exit_infos(msg='Nothing to do') + self.success() def install_packages(self, pkgs): pkgs_to_install = [] pkgs_to_install_from_url = [] + pkgs_to_set_reason = [] for p in pkgs: + if self.m.params["reason"] and ( + p.name not in self.inventory["pkg_reasons"] + or self.m.params["reason_for"] == "all" + and self.inventory["pkg_reasons"][p.name] != self.m.params["reason"] + ): + pkgs_to_set_reason.append(p.name) if p.source_is_URL: # URL packages bypass the latest / upgradable_pkgs test # They go through the dry-run to let pacman decide if they will be installed @@ -344,7 +370,7 @@ class Pacman(object): ): pkgs_to_install.append(p) - if len(pkgs_to_install) == 0 and len(pkgs_to_install_from_url) == 0: + if len(pkgs_to_install) == 0 and len(pkgs_to_install_from_url) == 0 and len(pkgs_to_set_reason) == 0: self.exit_params["packages"] = [] self.add_exit_infos("package(s) already installed") return @@ -373,12 +399,17 @@ class Pacman(object): for p in name_ver: # With Pacman v6.0.1 - libalpm v13.0.1, --upgrade outputs "loading packages..." on stdout. strip that. # When installing from URLs, pacman can also output a 'nothing to do' message. strip that too. 
- if "loading packages" in p or "there is nothing to do" in p: + if "loading packages" in p or "there is nothing to do" in p or 'Avoid running' in p: continue name, version = p.split() if name in self.inventory["installed_pkgs"]: - before.append("%s-%s" % (name, self.inventory["installed_pkgs"][name])) - after.append("%s-%s" % (name, version)) + before.append("%s-%s-%s" % (name, self.inventory["installed_pkgs"][name], self.inventory["pkg_reasons"][name])) + if name in pkgs_to_set_reason: + after.append("%s-%s-%s" % (name, version, self.m.params["reason"])) + elif name in self.inventory["pkg_reasons"]: + after.append("%s-%s-%s" % (name, version, self.inventory["pkg_reasons"][name])) + else: + after.append("%s-%s" % (name, version)) to_be_installed.append(name) return (to_be_installed, before, after) @@ -398,7 +429,7 @@ class Pacman(object): before.extend(b) after.extend(a) - if len(installed_pkgs) == 0: + if len(installed_pkgs) == 0 and len(pkgs_to_set_reason) == 0: # This can happen with URL packages if pacman decides there's nothing to do self.exit_params["packages"] = [] self.add_exit_infos("package(s) already installed") @@ -411,9 +442,11 @@ class Pacman(object): "after": "\n".join(sorted(after)) + "\n" if after else "", } + changed_reason_pkgs = [p for p in pkgs_to_set_reason if p not in installed_pkgs] + if self.m.check_mode: - self.add_exit_infos("Would have installed %d packages" % len(installed_pkgs)) - self.exit_params["packages"] = sorted(installed_pkgs) + self.add_exit_infos("Would have installed %d packages" % (len(installed_pkgs) + len(changed_reason_pkgs))) + self.exit_params["packages"] = sorted(installed_pkgs + changed_reason_pkgs) return # actually do it @@ -430,8 +463,22 @@ class Pacman(object): if pkgs_to_install_from_url: _install_packages_for_real("--upgrade", pkgs_to_install_from_url) - self.exit_params["packages"] = installed_pkgs - self.add_exit_infos("Installed %d package(s)" % len(installed_pkgs)) + # set reason + if pkgs_to_set_reason: + 
cmd = [self.pacman_path, "--noconfirm", "--database"] + if self.m.params["reason"] == "dependency": + cmd.append("--asdeps") + else: + cmd.append("--asexplicit") + cmd.extend(pkgs_to_set_reason) + + rc, stdout, stderr = self.m.run_command(cmd, check_rc=False) + if rc != 0: + self.fail("Failed to install package(s)", cmd=cmd, stdout=stdout, stderr=stderr) + self.add_exit_infos(stdout=stdout, stderr=stderr) + + self.exit_params["packages"] = sorted(installed_pkgs + changed_reason_pkgs) + self.add_exit_infos("Installed %d package(s)" % (len(installed_pkgs) + len(changed_reason_pkgs))) def remove_packages(self, pkgs): # filter out pkgs that are already absent @@ -613,8 +660,9 @@ class Pacman(object): stderr=stderr, rc=rc, ) - # With Pacman v6.0.1 - libalpm v13.0.1, --upgrade outputs "loading packages..." on stdout. strip that - stdout = stdout.replace("loading packages...\n", "") + # With Pacman v6.0.1 - libalpm v13.0.1, --upgrade outputs " filename_without_extension downloading..." if the URL is unseen. + # In all cases, pacman outputs "loading packages..." on stdout. 
strip both + stdout = stdout.splitlines()[-1] is_URL = True pkg_name = stdout.strip() pkg_list.append(Package(name=pkg_name, source=pkg, source_is_URL=is_URL)) @@ -630,6 +678,7 @@ class Pacman(object): "available_pkgs": {pkgname: version}, "available_groups": {groupname: set(pkgnames)}, "upgradable_pkgs": {pkgname: (current_version,latest_version)}, + "pkg_reasons": {pkgname: reason}, } Fails the module if a package requested for install cannot be found @@ -638,11 +687,12 @@ class Pacman(object): installed_pkgs = {} dummy, stdout, dummy = self.m.run_command([self.pacman_path, "--query"], check_rc=True) # Format of a line: "pacman 6.0.1-2" + query_re = re.compile(r'^\s*(?P\S+)\s+(?P\S+)\s*$') for l in stdout.splitlines(): - l = l.strip() - if not l: + query_match = query_re.match(l) + if not query_match: continue - pkg, ver = l.split() + pkg, ver = query_match.groups() installed_pkgs[pkg] = ver installed_groups = defaultdict(set) @@ -653,11 +703,12 @@ class Pacman(object): # base-devel file # base-devel findutils # ... + query_groups_re = re.compile(r'^\s*(?P\S+)\s+(?P\S+)\s*$') for l in stdout.splitlines(): - l = l.strip() - if not l: + query_groups_match = query_groups_re.match(l) + if not query_groups_match: continue - group, pkgname = l.split() + group, pkgname = query_groups_match.groups() installed_groups[group].add(pkgname) available_pkgs = {} @@ -679,11 +730,12 @@ class Pacman(object): # vim-plugins vim-airline-themes # vim-plugins vim-ale # ... 
+ sync_groups_re = re.compile(r'^\s*(?P\S+)\s+(?P\S+)\s*$') for l in stdout.splitlines(): - l = l.strip() - if not l: + sync_groups_match = sync_groups_re.match(l) + if not sync_groups_match: continue - group, pkg = l.split() + group, pkg = sync_groups_match.groups() available_groups[group].add(pkg) upgradable_pkgs = {} @@ -691,9 +743,14 @@ class Pacman(object): [self.pacman_path, "--query", "--upgrades"], check_rc=False ) + stdout = stdout.splitlines() + if stdout and "Avoid running" in stdout[0]: + stdout = stdout[1:] + stdout = "\n".join(stdout) + # non-zero exit with nothing in stdout -> nothing to upgrade, all good # stderr can have warnings, so not checked here - if rc == 1 and stdout == "": + if rc == 1 and not stdout: pass # nothing to upgrade elif rc == 0: # Format of lines: @@ -703,7 +760,7 @@ class Pacman(object): l = l.strip() if not l: continue - if "[ignored]" in l: + if "[ignored]" in l or "Avoid running" in l: continue s = l.split() if len(s) != 4: @@ -722,12 +779,31 @@ class Pacman(object): rc=rc, ) + pkg_reasons = {} + dummy, stdout, dummy = self.m.run_command([self.pacman_path, "--query", "--explicit"], check_rc=True) + # Format of a line: "pacman 6.0.1-2" + for l in stdout.splitlines(): + l = l.strip() + if not l: + continue + pkg = l.split()[0] + pkg_reasons[pkg] = "explicit" + dummy, stdout, dummy = self.m.run_command([self.pacman_path, "--query", "--deps"], check_rc=True) + # Format of a line: "pacman 6.0.1-2" + for l in stdout.splitlines(): + l = l.strip() + if not l: + continue + pkg = l.split()[0] + pkg_reasons[pkg] = "dependency" + return dict( installed_pkgs=installed_pkgs, installed_groups=installed_groups, available_pkgs=available_pkgs, available_groups=available_groups, upgradable_pkgs=upgradable_pkgs, + pkg_reasons=pkg_reasons, ) @@ -748,6 +824,8 @@ def setup_module(): upgrade_extra_args=dict(type="str", default=""), update_cache=dict(type="bool"), update_cache_extra_args=dict(type="str", default=""), + reason=dict(type="str", 
choices=["explicit", "dependency"]), + reason_for=dict(type="str", default="new", choices=["new", "all"]), ), required_one_of=[["name", "update_cache", "upgrade"]], mutually_exclusive=[["name", "upgrade"]], diff --git a/plugins/modules/pacman_key.py b/plugins/modules/pacman_key.py new file mode 100644 index 0000000000..001416e855 --- /dev/null +++ b/plugins/modules/pacman_key.py @@ -0,0 +1,369 @@ +#!/usr/bin/python + +# Copyright (c) 2019, George Rawlinson +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + +DOCUMENTATION = r""" +module: pacman_key +author: + - George Rawlinson (@grawlinson) +version_added: "3.2.0" +short_description: Manage pacman's list of trusted keys +description: + - Add or remove gpg keys from the pacman keyring. +notes: + - Use full-length key ID (40 characters). + - Keys are verified when using O(data), O(file), or O(url) unless O(verify) is overridden. + - Keys are locally signed after being imported into the keyring. + - If the key ID exists in the keyring, the key is not added unless O(force_update) is specified. + - O(data), O(file), O(url), and O(keyserver) are mutually exclusive. +requirements: + - gpg + - pacman-key +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: none +options: + id: + description: + - The 40 character identifier of the key. + - Including this allows check mode to correctly report the changed state. + - Do not specify a subkey ID, instead specify the primary key ID. + required: true + type: str + data: + description: + - The keyfile contents to add to the keyring. + - Must be of C(PGP PUBLIC KEY BLOCK) type. + type: str + file: + description: + - The path to a keyfile on the remote server to add to the keyring. + - Remote file must be of C(PGP PUBLIC KEY BLOCK) type. 
+ type: path + url: + description: + - The URL to retrieve keyfile from. + - Remote file must be of C(PGP PUBLIC KEY BLOCK) type. + type: str + keyserver: + description: + - The keyserver used to retrieve key from. + type: str + verify: + description: + - Whether or not to verify the keyfile's key ID against specified key ID. + type: bool + default: true + force_update: + description: + - This forces the key to be updated if it already exists in the keyring. + type: bool + default: false + keyring: + description: + - The full path to the keyring folder on the remote server. + - If not specified, module uses pacman's default (V(/etc/pacman.d/gnupg)). + - Useful if the remote system requires an alternative gnupg directory. + type: path + default: /etc/pacman.d/gnupg + state: + description: + - Ensures that the key is V(present) (added) or V(absent) (revoked). + default: present + choices: [absent, present] + type: str + ensure_trusted: + description: + - Ensure that the key is trusted (signed by the Pacman machine key and not expired). 
+ type: bool + default: false + version_added: 11.0.0 +""" + +EXAMPLES = r""" +- name: Import a key via local file + community.general.pacman_key: + id: 01234567890ABCDE01234567890ABCDE12345678 + data: "{{ lookup('file', 'keyfile.asc') }}" + state: present + +- name: Import a key via remote file + community.general.pacman_key: + id: 01234567890ABCDE01234567890ABCDE12345678 + file: /tmp/keyfile.asc + state: present + +- name: Import a key via url + community.general.pacman_key: + id: 01234567890ABCDE01234567890ABCDE12345678 + url: https://domain.tld/keys/keyfile.asc + state: present + +- name: Import a key via keyserver + community.general.pacman_key: + id: 01234567890ABCDE01234567890ABCDE12345678 + keyserver: keyserver.domain.tld + +- name: Import a key into an alternative keyring + community.general.pacman_key: + id: 01234567890ABCDE01234567890ABCDE12345678 + file: /tmp/keyfile.asc + keyring: /etc/pacman.d/gnupg-alternative + +- name: Remove a key from the keyring + community.general.pacman_key: + id: 01234567890ABCDE01234567890ABCDE12345678 + state: absent +""" + +RETURN = r""" # """ + +import os.path +import tempfile +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.urls import fetch_url +from ansible.module_utils.common.text.converters import to_native + + +class GpgListResult(object): + """Wraps gpg --list-* output.""" + + def __init__(self, line): + self._parts = line.split(':') + + @property + def kind(self): + return self._parts[0] + + @property + def valid(self): + return self._parts[1] + + @property + def is_fully_valid(self): + return self.valid == 'f' + + @property + def key(self): + return self._parts[4] + + @property + def user_id(self): + return self._parts[9] + + +def gpg_get_first_attr_of_kind(lines, kind, attr): + for line in lines: + glr = GpgListResult(line) + if glr.kind == kind: + return getattr(glr, attr) + + +def gpg_get_all_attrs_of_kind(lines, kind, attr): + result = [] + for line in lines: + glr = 
GpgListResult(line) + if glr.kind == kind: + result.append(getattr(glr, attr)) + return result + + +class PacmanKey(object): + def __init__(self, module): + self.module = module + # obtain binary paths for gpg & pacman-key + self.gpg_binary = module.get_bin_path('gpg', required=True) + self.pacman_key_binary = module.get_bin_path('pacman-key', required=True) + + # obtain module parameters + keyid = module.params['id'] + url = module.params['url'] + data = module.params['data'] + file = module.params['file'] + keyserver = module.params['keyserver'] + verify = module.params['verify'] + force_update = module.params['force_update'] + keyring = module.params['keyring'] + state = module.params['state'] + ensure_trusted = module.params['ensure_trusted'] + self.keylength = 40 + + # sanitise key ID & check if key exists in the keyring + keyid = self.sanitise_keyid(keyid) + key_validity = self.key_validity(keyring, keyid) + key_present = len(key_validity) > 0 + key_valid = any(key_validity) + + # check mode + if module.check_mode: + if state == 'present': + changed = (key_present and force_update) or not key_present + if not changed and ensure_trusted: + changed = not (key_valid and self.key_is_trusted(keyring, keyid)) + module.exit_json(changed=changed) + if state == 'absent': + module.exit_json(changed=key_present) + + if state == 'present': + trusted = key_valid and self.key_is_trusted(keyring, keyid) + if not force_update and key_present and (not ensure_trusted or trusted): + module.exit_json(changed=False) + changed = False + if data: + file = self.save_key(data) + self.add_key(keyring, file, keyid, verify) + changed = True + elif file: + self.add_key(keyring, file, keyid, verify) + changed = True + elif url: + data = self.fetch_key(url) + file = self.save_key(data) + self.add_key(keyring, file, keyid, verify) + changed = True + elif keyserver: + self.recv_key(keyring, keyid, keyserver) + changed = True + if changed or (ensure_trusted and not trusted): + 
self.lsign_key(keyring=keyring, keyid=keyid) + changed = True + module.exit_json(changed=changed) + elif state == 'absent': + if key_present: + self.remove_key(keyring, keyid) + module.exit_json(changed=True) + module.exit_json(changed=False) + + def gpg(self, args, keyring=None, **kwargs): + cmd = [self.gpg_binary] + if keyring: + cmd.append('--homedir={keyring}'.format(keyring=keyring)) + cmd.extend(['--no-permission-warning', '--with-colons', '--quiet', '--batch', '--no-tty']) + return self.module.run_command(cmd + args, **kwargs) + + def pacman_key(self, args, keyring, **kwargs): + return self.module.run_command( + [self.pacman_key_binary, '--gpgdir', keyring] + args, + **kwargs) + + def pacman_machine_key(self, keyring): + unused_rc, stdout, unused_stderr = self.gpg(['--list-secret-key'], keyring=keyring) + return gpg_get_first_attr_of_kind(stdout.splitlines(), 'sec', 'key') + + def is_hexadecimal(self, string): + """Check if a given string is valid hexadecimal""" + try: + int(string, 16) + except ValueError: + return False + return True + + def sanitise_keyid(self, keyid): + """Sanitise given key ID. + + Strips whitespace, uppercases all characters, and strips leading `0X`. 
+ """ + sanitised_keyid = keyid.strip().upper().replace(' ', '').replace('0X', '') + if len(sanitised_keyid) != self.keylength: + self.module.fail_json(msg="key ID is not full-length: %s" % sanitised_keyid) + if not self.is_hexadecimal(sanitised_keyid): + self.module.fail_json(msg="key ID is not hexadecimal: %s" % sanitised_keyid) + return sanitised_keyid + + def fetch_key(self, url): + """Downloads a key from url""" + response, info = fetch_url(self.module, url) + if info['status'] != 200: + self.module.fail_json(msg="failed to fetch key at %s, error was %s" % (url, info['msg'])) + return to_native(response.read()) + + def recv_key(self, keyring, keyid, keyserver): + """Receives key via keyserver""" + self.pacman_key(['--keyserver', keyserver, '--recv-keys', keyid], keyring=keyring, check_rc=True) + + def lsign_key(self, keyring, keyid): + """Locally sign key""" + self.pacman_key(['--lsign-key', keyid], keyring=keyring, check_rc=True) + + def save_key(self, data): + "Saves key data to a temporary file" + tmpfd, tmpname = tempfile.mkstemp() + self.module.add_cleanup_file(tmpname) + tmpfile = os.fdopen(tmpfd, "w") + tmpfile.write(data) + tmpfile.close() + return tmpname + + def add_key(self, keyring, keyfile, keyid, verify): + """Add key to pacman's keyring""" + if verify: + self.verify_keyfile(keyfile, keyid) + self.pacman_key(['--add', keyfile], keyring=keyring, check_rc=True) + + def remove_key(self, keyring, keyid): + """Remove key from pacman's keyring""" + self.pacman_key(['--delete', keyid], keyring=keyring, check_rc=True) + + def verify_keyfile(self, keyfile, keyid): + """Verify that keyfile matches the specified key ID""" + if keyfile is None: + self.module.fail_json(msg="expected a key, got none") + elif keyid is None: + self.module.fail_json(msg="expected a key ID, got none") + + rc, stdout, stderr = self.gpg( + ['--with-fingerprint', '--show-keys', keyfile], + check_rc=True, + ) + + extracted_keyid = gpg_get_first_attr_of_kind(stdout.splitlines(), 'fpr', 
'user_id') + if extracted_keyid != keyid: + self.module.fail_json(msg="key ID does not match. expected %s, got %s" % (keyid, extracted_keyid)) + + def key_validity(self, keyring, keyid): + "Check if the key ID is in pacman's keyring and not expired" + rc, stdout, stderr = self.gpg(['--no-default-keyring', '--list-keys', keyid], keyring=keyring, check_rc=False) + if rc != 0: + if stderr.find("No public key") >= 0: + return [] + else: + self.module.fail_json(msg="gpg returned an error: %s" % stderr) + return gpg_get_all_attrs_of_kind(stdout.splitlines(), 'uid', 'is_fully_valid') + + def key_is_trusted(self, keyring, keyid): + """Check if key is signed and not expired.""" + unused_rc, stdout, unused_stderr = self.gpg(['--check-signatures', keyid], keyring=keyring) + return self.pacman_machine_key(keyring) in gpg_get_all_attrs_of_kind(stdout.splitlines(), 'sig', 'key') + + +def main(): + module = AnsibleModule( + argument_spec=dict( + id=dict(type='str', required=True), + data=dict(type='str'), + file=dict(type='path'), + url=dict(type='str'), + keyserver=dict(type='str'), + verify=dict(type='bool', default=True), + force_update=dict(type='bool', default=False), + keyring=dict(type='path', default='/etc/pacman.d/gnupg'), + ensure_trusted=dict(type='bool', default=False), + state=dict(type='str', default='present', choices=['absent', 'present']), + ), + supports_check_mode=True, + mutually_exclusive=(('data', 'file', 'url', 'keyserver'),), + required_if=[('state', 'present', ('data', 'file', 'url', 'keyserver'), True)], + ) + PacmanKey(module) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/monitoring/pagerduty.py b/plugins/modules/pagerduty.py similarity index 66% rename from plugins/modules/monitoring/pagerduty.py rename to plugins/modules/pagerduty.py index dba931ab96..2219d87928 100644 --- a/plugins/modules/monitoring/pagerduty.py +++ b/plugins/modules/pagerduty.py @@ -1,84 +1,89 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- # -# Copyright: 
Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# Copyright Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = ''' - +DOCUMENTATION = r""" module: pagerduty short_description: Create PagerDuty maintenance windows description: - - This module will let you create PagerDuty maintenance windows + - This module lets you create PagerDuty maintenance windows. author: - - "Andrew Newdigate (@suprememoocow)" - - "Dylan Silva (@thaumos)" - - "Justin Johns (!UNKNOWN)" - - "Bruce Pennypacker (@bpennypacker)" + - "Andrew Newdigate (@suprememoocow)" + - "Dylan Silva (@thaumos)" + - "Justin Johns (!UNKNOWN)" + - "Bruce Pennypacker (@bpennypacker)" requirements: - - PagerDuty API access + - PagerDuty API access +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: none + diff_mode: + support: none options: - state: - type: str - description: - - Create a maintenance window or get a list of ongoing windows. - required: true - choices: [ "running", "started", "ongoing", "absent" ] - name: - type: str - description: - - PagerDuty unique subdomain. Obsolete. It is not used with PagerDuty REST v2 API. - user: - type: str - description: - - PagerDuty user ID. Obsolete. Please, use I(token) for authorization. - token: - type: str - description: - - A pagerduty token, generated on the pagerduty site. It is used for authorization. - required: true - requester_id: - type: str - description: - - ID of user making the request. Only needed when creating a maintenance_window. - service: - type: list - elements: str - description: - - A comma separated list of PagerDuty service IDs. 
- aliases: [ services ] - window_id: - type: str - description: - - ID of maintenance window. Only needed when absent a maintenance_window. - hours: - type: str - description: - - Length of maintenance window in hours. - default: '1' - minutes: - type: str - description: - - Maintenance window in minutes (this is added to the hours). - default: '0' - desc: - type: str - description: - - Short description of maintenance window. - default: Created by Ansible - validate_certs: - description: - - If C(no), SSL certificates will not be validated. This should only be used - on personally controlled sites using self-signed certificates. - type: bool - default: 'yes' -''' + state: + type: str + description: + - Create a maintenance window or get a list of ongoing windows. + required: true + choices: ["running", "started", "ongoing", "absent"] + name: + type: str + description: + - PagerDuty unique subdomain. Obsolete. It is not used with PagerDuty REST v2 API. + user: + type: str + description: + - PagerDuty user ID. Obsolete. Please, use O(token) for authorization. + token: + type: str + description: + - A pagerduty token, generated on the pagerduty site. It is used for authorization. + required: true + requester_id: + type: str + description: + - ID of user making the request. Only needed when creating a maintenance_window. + service: + type: list + elements: str + description: + - A comma separated list of PagerDuty service IDs. + aliases: [services] + window_id: + type: str + description: + - ID of maintenance window. Only needed when absent a maintenance_window. + hours: + type: str + description: + - Length of maintenance window in hours. + default: '1' + minutes: + type: str + description: + - Maintenance window in minutes (this is added to the hours). + default: '0' + desc: + type: str + description: + - Short description of maintenance window. + default: Created by Ansible + validate_certs: + description: + - If V(false), SSL certificates are not validated. 
This should only be used on personally controlled sites using self-signed + certificates. + type: bool + default: true +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: List ongoing maintenance windows using a token community.general.pagerduty: name: companyabc @@ -135,7 +140,7 @@ EXAMPLES = ''' token: yourtoken state: absent window_id: "{{ pd_window.result.maintenance_windows[0].id }}" -''' +""" import datetime import json @@ -143,6 +148,10 @@ import json from ansible.module_utils.basic import AnsibleModule from ansible.module_utils.urls import fetch_url +from ansible_collections.community.general.plugins.module_utils.datetime import ( + now, +) + class PagerDutyRequest(object): def __init__(self, module, name, user, token): @@ -192,15 +201,15 @@ class PagerDutyRequest(object): return False, json_out, True def _create_services_payload(self, service): - if (isinstance(service, list)): + if isinstance(service, list): return [{'id': s, 'type': 'service_reference'} for s in service] else: return [{'id': service, 'type': 'service_reference'}] def _compute_start_end_time(self, hours, minutes): - now = datetime.datetime.utcnow() - later = now + datetime.timedelta(hours=int(hours), minutes=int(minutes)) - start = now.strftime("%Y-%m-%dT%H:%M:%SZ") + now_t = now() + later = now_t + datetime.timedelta(hours=int(hours), minutes=int(minutes)) + start = now_t.strftime("%Y-%m-%dT%H:%M:%SZ") end = later.strftime("%Y-%m-%dT%H:%M:%SZ") return start, end @@ -231,15 +240,15 @@ def main(): module = AnsibleModule( argument_spec=dict( state=dict(required=True, choices=['running', 'started', 'ongoing', 'absent']), - name=dict(required=False), - user=dict(required=False), + name=dict(), + user=dict(), token=dict(required=True, no_log=True), - service=dict(required=False, type='list', elements='str', aliases=["services"]), - window_id=dict(required=False), - requester_id=dict(required=False), - hours=dict(default='1', required=False), # @TODO change to int? 
- minutes=dict(default='0', required=False), # @TODO change to int? - desc=dict(default='Created by Ansible', required=False), + service=dict(type='list', elements='str', aliases=["services"]), + window_id=dict(), + requester_id=dict(), + hours=dict(default='1'), # @TODO change to int? + minutes=dict(default='0'), # @TODO change to int? + desc=dict(default='Created by Ansible'), validate_certs=dict(default=True, type='bool'), ) ) diff --git a/plugins/modules/pagerduty_alert.py b/plugins/modules/pagerduty_alert.py new file mode 100644 index 0000000000..215ad2d821 --- /dev/null +++ b/plugins/modules/pagerduty_alert.py @@ -0,0 +1,416 @@ +#!/usr/bin/python +# Copyright Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + + +DOCUMENTATION = r""" +module: pagerduty_alert +short_description: Trigger, acknowledge or resolve PagerDuty incidents +description: + - This module lets you trigger, acknowledge or resolve a PagerDuty incident by sending events. +author: + - "Amanpreet Singh (@ApsOps)" + - "Xiao Shen (@xshen1)" +requirements: + - PagerDuty API access +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: none +options: + name: + type: str + description: + - PagerDuty unique subdomain. Obsolete. It is not used with PagerDuty REST v2 API. + api_key: + type: str + description: + - The pagerduty API key (readonly access), generated on the pagerduty site. + - Required if O(api_version=v1). + integration_key: + type: str + description: + - The GUID of one of your 'Generic API' services. + - This is the 'integration key' listed on a 'Integrations' tab of PagerDuty service. + service_id: + type: str + description: + - ID of PagerDuty service when incidents are triggered, acknowledged or resolved. + - Required if O(api_version=v1). 
+ service_key: + type: str + description: + - The GUID of one of your 'Generic API' services. Obsolete. Please use O(integration_key). + state: + type: str + description: + - Type of event to be sent. + required: true + choices: + - 'triggered' + - 'acknowledged' + - 'resolved' + api_version: + type: str + description: + - The API version we want to use to run the module. + - V1 is more limited with option we can provide to trigger incident. + - V2 has more variables for example, O(severity), O(source), O(custom_details) and so on. + default: 'v1' + choices: + - 'v1' + - 'v2' + version_added: 7.4.0 + client: + type: str + description: + - The name of the monitoring client that is triggering this event. + client_url: + type: str + description: + - The URL of the monitoring client that is triggering this event. + component: + type: str + description: + - Component of the source machine that is responsible for the event, for example C(mysql) or C(eth0). + version_added: 7.4.0 + custom_details: + type: dict + description: + - Additional details about the event and affected system. + - A dictionary with custom keys and values. + version_added: 7.4.0 + desc: + type: str + description: + - For O(state=triggered) - Required. Short description of the problem that led to this trigger. This field (or a truncated + version) is used when generating phone calls, SMS messages and alert emails. It also appears on the incidents tables + in the PagerDuty UI. The maximum length is 1024 characters. + - For O(state=acknowledged) or O(state=resolved) - Text that appears in the incident's log associated with this event. + default: Created via Ansible + incident_class: + type: str + description: + - The class/type of the event, for example C(ping failure) or C(cpu load). + version_added: 7.4.0 + incident_key: + type: str + description: + - Identifies the incident to which this O(state) should be applied. 
+ - For O(state=triggered) - If there is no open (in other words unresolved) incident with this key, a new one is created. + If there is already an open incident with a matching key, this event is appended to that incident's log. The event + key provides an easy way to 'de-dup' problem reports. If no O(incident_key) is provided, then it is generated by PagerDuty. + - For O(state=acknowledged) or O(state=resolved) - This should be the incident_key you received back when the incident + was first opened by a trigger event. Acknowledge events referencing resolved or nonexistent incidents is discarded. + link_url: + type: str + description: + - Relevant link URL to the alert. For example, the website or the job link. + version_added: 7.4.0 + link_text: + type: str + description: + - A short description of the O(link_url). + version_added: 7.4.0 + source: + type: str + description: + - The unique location of the affected system, preferably a hostname or FQDN. + - Required in case of O(state=trigger) and O(api_version=v2). + version_added: 7.4.0 + severity: + type: str + description: + - The perceived severity of the status the event is describing with respect to the affected system. + - Required in case of O(state=trigger) and O(api_version=v2). 
+ default: 'critical' + choices: + - 'critical' + - 'warning' + - 'error' + - 'info' + version_added: 7.4.0 +""" + +EXAMPLES = r""" +- name: Trigger an incident with just the basic options + community.general.pagerduty_alert: + name: companyabc + integration_key: xxx + api_key: yourapikey + service_id: PDservice + state: triggered + desc: problem that led to this trigger + +- name: Trigger an incident with more options + community.general.pagerduty_alert: + integration_key: xxx + api_key: yourapikey + service_id: PDservice + state: triggered + desc: problem that led to this trigger + incident_key: somekey + client: Sample Monitoring Service + client_url: http://service.example.com + +- name: Acknowledge an incident based on incident_key + community.general.pagerduty_alert: + integration_key: xxx + api_key: yourapikey + service_id: PDservice + state: acknowledged + incident_key: somekey + desc: "some text for incident's log" + +- name: Resolve an incident based on incident_key + community.general.pagerduty_alert: + integration_key: xxx + api_key: yourapikey + service_id: PDservice + state: resolved + incident_key: somekey + desc: "some text for incident's log" + +- name: Trigger an v2 incident with just the basic options + community.general.pagerduty_alert: + integration_key: xxx + api_version: v2 + source: My Ansible Script + state: triggered + desc: problem that led to this trigger + +- name: Trigger an v2 incident with more options + community.general.pagerduty_alert: + integration_key: xxx + api_version: v2 + source: My Ansible Script + state: triggered + desc: problem that led to this trigger + incident_key: somekey + client: Sample Monitoring Service + client_url: http://service.example.com + component: mysql + incident_class: ping failure + link_url: https://pagerduty.com + link_text: PagerDuty + +- name: Acknowledge an incident based on incident_key using v2 + community.general.pagerduty_alert: + api_version: v2 + integration_key: xxx + incident_key: somekey 
+ state: acknowledged + +- name: Resolve an incident based on incident_key + community.general.pagerduty_alert: + api_version: v2 + integration_key: xxx + incident_key: somekey + state: resolved +""" +import json + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.urls import fetch_url +from datetime import datetime +from urllib.parse import urlparse, urlencode, urlunparse + + +def check(module, name, state, service_id, integration_key, api_key, incident_key=None, http_call=fetch_url): + url = 'https://api.pagerduty.com/incidents' + headers = { + "Content-type": "application/json", + "Authorization": "Token token=%s" % api_key, + 'Accept': 'application/vnd.pagerduty+json;version=2' + } + + params = { + 'service_ids[]': service_id, + 'sort_by': 'incident_number:desc', + 'time_zone': 'UTC' + } + if incident_key: + params['incident_key'] = incident_key + + url_parts = list(urlparse(url)) + url_parts[4] = urlencode(params, True) + + url = urlunparse(url_parts) + + response, info = http_call(module, url, method='get', headers=headers) + + if info['status'] != 200: + module.fail_json(msg="failed to check current incident status." 
+ "Reason: %s" % info['msg']) + + incidents = json.loads(response.read())["incidents"] + msg = "No corresponding incident" + + if len(incidents) == 0: + if state in ('acknowledged', 'resolved'): + return msg, False + return msg, True + elif state != incidents[0]["status"]: + return incidents[0], True + + return incidents[0], False + + +def send_event_v1(module, service_key, event_type, desc, + incident_key=None, client=None, client_url=None): + url = "https://events.pagerduty.com/generic/2010-04-15/create_event.json" + headers = { + "Content-type": "application/json" + } + + data = { + "service_key": service_key, + "event_type": event_type, + "incident_key": incident_key, + "description": desc, + "client": client, + "client_url": client_url + } + + response, info = fetch_url(module, url, method='post', + headers=headers, data=json.dumps(data)) + if info['status'] != 200: + module.fail_json(msg="failed to %s. Reason: %s" % + (event_type, info['msg'])) + json_out = json.loads(response.read()) + return json_out + + +def send_event_v2(module, service_key, event_type, payload, link, + incident_key=None, client=None, client_url=None): + url = "https://events.pagerduty.com/v2/enqueue" + headers = { + "Content-type": "application/json" + } + data = { + "routing_key": service_key, + "event_action": event_type, + "payload": payload, + "client": client, + "client_url": client_url, + } + if link: + data["links"] = [link] + if incident_key: + data["dedup_key"] = incident_key + if event_type != "trigger": + data.pop("payload") + response, info = fetch_url(module, url, method="post", + headers=headers, data=json.dumps(data)) + if info["status"] != 202: + module.fail_json(msg="failed to %s. 
Reason: %s" % + (event_type, info['msg'])) + json_out = json.loads(response.read()) + return json_out, True + + +def main(): + module = AnsibleModule( + argument_spec=dict( + name=dict(), + api_key=dict(no_log=True), + integration_key=dict(no_log=True), + service_id=dict(), + service_key=dict(no_log=True), + state=dict( + required=True, choices=['triggered', 'acknowledged', 'resolved'] + ), + api_version=dict(type='str', default='v1', choices=['v1', 'v2']), + client=dict(), + client_url=dict(), + component=dict(), + custom_details=dict(type='dict'), + desc=dict(default='Created via Ansible'), + incident_class=dict(), + incident_key=dict(no_log=False), + link_url=dict(), + link_text=dict(), + source=dict(), + severity=dict( + default='critical', choices=['critical', 'warning', 'error', 'info'] + ), + ), + required_if=[ + ('api_version', 'v1', ['service_id', 'api_key']), + ('state', 'acknowledged', ['incident_key']), + ('state', 'resolved', ['incident_key']), + ], + required_one_of=[('service_key', 'integration_key')], + supports_check_mode=True, + ) + + name = module.params['name'] + service_id = module.params.get('service_id') + integration_key = module.params.get('integration_key') + service_key = module.params.get('service_key') + api_key = module.params.get('api_key') + state = module.params.get('state') + client = module.params.get('client') + client_url = module.params.get('client_url') + desc = module.params.get('desc') + incident_key = module.params.get('incident_key') + payload = { + 'summary': desc, + 'source': module.params.get('source'), + 'timestamp': datetime.now().isoformat(), + 'severity': module.params.get('severity'), + 'component': module.params.get('component'), + 'class': module.params.get('incident_class'), + 'custom_details': module.params.get('custom_details'), + } + link = {} + if module.params.get('link_url'): + link['href'] = module.params.get('link_url') + if module.params.get('link_text'): + link['text'] = module.params.get('link_text') 
+ if integration_key is None: + integration_key = service_key + module.warn( + '"service_key" is obsolete parameter and will be removed.' + ' Please, use "integration_key" instead' + ) + + state_event_dict = { + 'triggered': 'trigger', + 'acknowledged': 'acknowledge', + 'resolved': 'resolve', + } + + event_type = state_event_dict[state] + if module.params.get('api_version') == 'v1': + out, changed = check(module, name, state, service_id, + integration_key, api_key, incident_key) + if not module.check_mode and changed is True: + out = send_event_v1(module, integration_key, event_type, desc, + incident_key, client, client_url) + else: + changed = True + if event_type == 'trigger' and not payload['source']: + module.fail_json(msg='"service" is a required variable for v2 api endpoint.') + out, changed = send_event_v2( + module, + integration_key, + event_type, + payload, + link, + incident_key, + client, + client_url, + ) + + module.exit_json(result=out, changed=changed) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/monitoring/pagerduty_change.py b/plugins/modules/pagerduty_change.py similarity index 73% rename from plugins/modules/monitoring/pagerduty_change.py rename to plugins/modules/pagerduty_change.py index 358a69612e..2cd33a0da8 100644 --- a/plugins/modules/monitoring/pagerduty_change.py +++ b/plugins/modules/pagerduty_change.py @@ -1,28 +1,36 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- -# Copyright: (c) 2020, Adam Vaughan (@adamvaughan) avaughan@pagerduty.com -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# Copyright (c) 2020, Adam Vaughan (@adamvaughan) avaughan@pagerduty.com +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = ''' 
+DOCUMENTATION = r""" module: pagerduty_change short_description: Track a code or infrastructure change as a PagerDuty change event version_added: 1.3.0 description: - - This module will let you create a PagerDuty change event each time the module is run. - - This is not an idempotent action and a new change event will be created each time it is run. + - This module lets you create a PagerDuty change event each time the module is run. + - This is not an idempotent action and a new change event is created each time it is run. author: - Adam Vaughan (@adamvaughan) requirements: - PagerDuty integration key +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + details: + - Check mode simply does nothing except returning C(changed=true) in case the O(url) seems to be correct. + diff_mode: + support: none options: integration_key: description: - - The integration key that identifies the service the change was made to. - This can be found by adding an integration to a service in PagerDuty. + - The integration key that identifies the service the change was made to. This can be found by adding an integration + to a service in PagerDuty. required: true type: str summary: @@ -51,7 +59,7 @@ options: type: str environment: description: - - The environment name, typically C(production), C(staging), etc. + - The environment name, typically V(production), V(staging), and so on. required: false type: str link_url: @@ -72,16 +80,14 @@ options: type: str validate_certs: description: - - If C(no), SSL certificates for the target URL will not be validated. - This should only be used on personally controlled sites using self-signed certificates. + - If V(false), SSL certificates for the target URL are not validated. This should only be used on personally controlled + sites using self-signed certificates. required: false - default: yes + default: true type: bool -notes: - - Supports C(check_mode). 
Note that check mode simply does nothing except returning C(changed=true) in case the I(url) seems to be correct. -''' +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: Track the deployment as a PagerDuty change event community.general.pagerduty_change: integration_key: abc123abc123abc123abc123abc123ab @@ -98,11 +104,14 @@ EXAMPLES = ''' environment: production link_url: https://github.com/ansible-collections/community.general/pull/1269 link_text: View changes on GitHub -''' +""" from ansible.module_utils.urls import fetch_url from ansible.module_utils.basic import AnsibleModule -from datetime import datetime + +from ansible_collections.community.general.plugins.module_utils.datetime import ( + now, +) def main(): @@ -110,15 +119,14 @@ def main(): argument_spec=dict( integration_key=dict(required=True, type='str', no_log=True), summary=dict(required=True, type='str'), - source=dict(required=False, default='Ansible', type='str'), - user=dict(required=False, type='str'), - repo=dict(required=False, type='str'), - revision=dict(required=False, type='str'), - environment=dict(required=False, type='str'), - link_url=dict(required=False, type='str'), - link_text=dict(required=False, type='str'), - url=dict(required=False, - default='https://events.pagerduty.com/v2/change/enqueue', type='str'), + source=dict(default='Ansible', type='str'), + user=dict(type='str'), + repo=dict(type='str'), + revision=dict(type='str'), + environment=dict(type='str'), + link_url=dict(type='str'), + link_text=dict(type='str'), + url=dict(default='https://events.pagerduty.com/v2/change/enqueue', type='str'), validate_certs=dict(default=True, type='bool') ), supports_check_mode=True @@ -153,8 +161,7 @@ def main(): if module.params['environment']: custom_details['environment'] = module.params['environment'] - now = datetime.utcnow() - timestamp = now.strftime("%Y-%m-%dT%H:%M:%S.%fZ") + timestamp = now().strftime("%Y-%m-%dT%H:%M:%S.%fZ") payload = { 'summary': module.params['summary'], diff --git 
a/plugins/modules/monitoring/pagerduty_user.py b/plugins/modules/pagerduty_user.py similarity index 73% rename from plugins/modules/monitoring/pagerduty_user.py rename to plugins/modules/pagerduty_user.py index 4b20a32108..7e000f1e8f 100644 --- a/plugins/modules/monitoring/pagerduty_user.py +++ b/plugins/modules/pagerduty_user.py @@ -1,65 +1,68 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- -# Copyright: (c) 2020, Zainab Alsaffar -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# Copyright (c) 2020, Zainab Alsaffar +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = r''' ---- +DOCUMENTATION = r""" module: pagerduty_user short_description: Manage a user account on PagerDuty description: - - This module manages the creation/removal of a user account on PagerDuty. + - This module manages the creation/removal of a user account on PagerDuty. version_added: '1.3.0' author: Zainab Alsaffar (@zanssa) requirements: - - pdpyras python module = 4.1.1 - - PagerDuty API Access + - pdpyras python module = 4.1.1 + - PagerDuty API Access +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: none options: - access_token: - description: - - An API access token to authenticate with the PagerDuty REST API. - required: true - type: str - pd_user: - description: - - Name of the user in PagerDuty. - required: true - type: str - pd_email: - description: - - The user's email address. - - I(pd_email) is the unique identifier used and cannot be updated using this module. - required: true - type: str - pd_role: - description: - - The user's role. 
- choices: ['global_admin', 'manager', 'responder', 'observer', 'stakeholder', 'limited_stakeholder', 'restricted_access'] - default: 'responder' - type: str - state: - description: - - State of the user. - - On C(present), it creates a user if the user doesn't exist. - - On C(absent), it removes a user if the account exists. - choices: ['present', 'absent'] - default: 'present' - type: str - pd_teams: - description: - - The teams to which the user belongs. - - Required if I(state=present). - type: list - elements: str -notes: - - Supports C(check_mode). -''' + access_token: + description: + - An API access token to authenticate with the PagerDuty REST API. + required: true + type: str + pd_user: + description: + - Name of the user in PagerDuty. + required: true + type: str + pd_email: + description: + - The user's email address. + - O(pd_email) is the unique identifier used and cannot be updated using this module. + required: true + type: str + pd_role: + description: + - The user's role. + choices: ['global_admin', 'manager', 'responder', 'observer', 'stakeholder', 'limited_stakeholder', 'restricted_access'] + default: 'responder' + type: str + state: + description: + - State of the user. + - On V(present), it creates a user if the user does not exist. + - On V(absent), it removes a user if the account exists. + choices: ['present', 'absent'] + default: 'present' + type: str + pd_teams: + description: + - The teams to which the user belongs. + - Required if O(state=present). 
+ type: list + elements: str +""" -EXAMPLES = r''' +EXAMPLES = r""" - name: Create a user account on PagerDuty community.general.pagerduty_user: access_token: 'Your_Access_token' @@ -75,27 +78,16 @@ EXAMPLES = r''' pd_user: user_full_name pd_email: user_email state: "absent" -''' +""" -RETURN = r''' # ''' +RETURN = r""" # """ -from ansible.module_utils.basic import AnsibleModule, missing_required_lib -import traceback from os import path +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils import deps -try: - from pdpyras import APISession - HAS_PD_PY = True -except ImportError: - HAS_PD_PY = False - PD_IMPORT_ERR = traceback.format_exc() - -try: - from pdpyras import PDClientError - HAS_PD_CLIENT_ERR = True -except ImportError: - HAS_PD_CLIENT_ERR = False - PD_CLIENT_ERR_IMPORT_ERR = traceback.format_exc() +with deps.declare("pdpyras", url="https://github.com/PagerDuty/pdpyras"): + from pdpyras import APISession, PDClientError class PagerDutyUser(object): @@ -194,16 +186,12 @@ def main(): state=dict(type='str', default='present', choices=['present', 'absent']), pd_role=dict(type='str', default='responder', choices=['global_admin', 'manager', 'responder', 'observer', 'stakeholder', 'limited_stakeholder', 'restricted_access']), - pd_teams=dict(type='list', elements='str', required=False)), + pd_teams=dict(type='list', elements='str')), required_if=[['state', 'present', ['pd_teams']], ], supports_check_mode=True, ) - if not HAS_PD_PY: - module.fail_json(msg=missing_required_lib('pdpyras', url='https://github.com/PagerDuty/pdpyras'), exception=PD_IMPORT_ERR) - - if not HAS_PD_CLIENT_ERR: - module.fail_json(msg=missing_required_lib('PDClientError', url='https://github.com/PagerDuty/pdpyras'), exception=PD_CLIENT_ERR_IMPORT_ERR) + deps.validate(module) access_token = module.params['access_token'] pd_user = module.params['pd_user'] diff --git a/plugins/modules/system/pam_limits.py 
b/plugins/modules/pam_limits.py similarity index 80% rename from plugins/modules/system/pam_limits.py rename to plugins/modules/pam_limits.py index 17b1ea1304..33a4eb7909 100644 --- a/plugins/modules/system/pam_limits.py +++ b/plugins/modules/pam_limits.py @@ -1,22 +1,29 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- -# Copyright: (c) 2014, Sebastien Rohaut -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# Copyright (c) 2014, Sebastien Rohaut +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = r''' ---- +DOCUMENTATION = r""" module: pam_limits author: -- "Sebastien Rohaut (@usawa)" + - "Sebastien Rohaut (@usawa)" short_description: Modify Linux PAM limits description: - - The C(pam_limits) module modifies PAM limits. - - The default file is C(/etc/security/limits.conf). + - The M(community.general.pam_limits) module modifies PAM limits. + - The default file is V(/etc/security/limits.conf). - For the full documentation, see C(man 5 limits.conf). +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + version_added: 2.0.0 + diff_mode: + support: full + version_added: 2.0.0 options: domain: type: str @@ -28,63 +35,63 @@ options: description: - Limit type, see C(man 5 limits.conf) for an explanation. required: true - choices: [ "hard", "soft", "-" ] + choices: ["hard", "soft", "-"] limit_item: type: str description: - The limit to be set. 
required: true choices: - - "core" - - "data" - - "fsize" - - "memlock" - - "nofile" - - "rss" - - "stack" - - "cpu" - - "nproc" - - "as" - - "maxlogins" - - "maxsyslogins" - - "priority" - - "locks" - - "sigpending" - - "msgqueue" - - "nice" - - "rtprio" - - "chroot" + - "core" + - "data" + - "fsize" + - "memlock" + - "nofile" + - "rss" + - "stack" + - "cpu" + - "nproc" + - "as" + - "maxlogins" + - "maxsyslogins" + - "priority" + - "locks" + - "sigpending" + - "msgqueue" + - "nice" + - "rtprio" + - "chroot" value: type: str description: - The value of the limit. - - Value must either be C(unlimited), C(infinity) or C(-1), all of which indicate no limit, or a limit of 0 or larger. - - Value must be a number in the range -20 to 19 inclusive, if I(limit_item) is set to C(nice) or C(priority). + - Value must either be V(unlimited), V(infinity) or V(-1), all of which indicate no limit, or a limit of 0 or larger. + - Value must be a number in the range -20 to 19 inclusive, if O(limit_item) is set to V(nice) or V(priority). - Refer to the C(man 5 limits.conf) manual pages for more details. required: true backup: description: - - Create a backup file including the timestamp information so you can get - the original file back if you somehow clobbered it incorrectly. + - Create a backup file including the timestamp information so you can get the original file back if you somehow clobbered + it incorrectly. required: false type: bool - default: "no" + default: false use_min: description: - - If set to C(yes), the minimal value will be used or conserved. - - If the specified value is inferior to the value in the file, - file content is replaced with the new value, else content is not modified. + - If set to V(true), the minimal value is used or conserved. + - If the specified value is inferior to the value in the file, file content is replaced with the new value, else content + is not modified. 
required: false type: bool - default: "no" + default: false use_max: description: - - If set to C(yes), the maximal value will be used or conserved. - - If the specified value is superior to the value in the file, - file content is replaced with the new value, else content is not modified. + - If set to V(true), the maximal value is used or conserved. + - If the specified value is superior to the value in the file, file content is replaced with the new value, else content + is not modified. required: false type: bool - default: "no" + default: false dest: type: str description: @@ -98,10 +105,10 @@ options: required: false default: '' notes: - - If I(dest) file does not exist, it is created. -''' + - If O(dest) file does not exist, it is created. +""" -EXAMPLES = r''' +EXAMPLES = r""" - name: Add or modify nofile soft limit for the user joe community.general.pam_limits: domain: joe @@ -115,7 +122,7 @@ EXAMPLES = r''' limit_type: hard limit_item: fsize value: 1000000 - use_max: yes + use_max: true - name: Add or modify memlock, both soft and hard, limit for the user james with a comment community.general.pam_limits: @@ -131,7 +138,7 @@ EXAMPLES = r''' limit_type: hard limit_item: nofile value: 39693561 -''' +""" import os import re @@ -165,7 +172,6 @@ def main(): limits_conf = '/etc/security/limits.conf' module = AnsibleModule( - # not checking because of daisy chain to file module argument_spec=dict( domain=dict(required=True, type='str'), limit_type=dict(required=True, type='str', choices=pam_types), @@ -175,7 +181,7 @@ def main(): use_min=dict(default=False, type='bool'), backup=dict(default=False, type='bool'), dest=dict(default=limits_conf, type='str'), - comment=dict(required=False, default='', type='str') + comment=dict(default='', type='str') ), supports_check_mode=True, ) @@ -191,6 +197,7 @@ def main(): new_comment = module.params['comment'] changed = False + does_not_exist = False if os.path.isfile(limits_conf): if not os.access(limits_conf, os.W_OK): @@ 
-198,7 +205,7 @@ def main(): else: limits_conf_dir = os.path.dirname(limits_conf) if os.path.isdir(limits_conf_dir) and os.access(limits_conf_dir, os.W_OK): - open(limits_conf, 'a').close() + does_not_exist = True changed = True else: module.fail_json(msg="directory %s is not writable (check presence, access rights, use sudo)" % limits_conf_dir) @@ -214,15 +221,20 @@ def main(): space_pattern = re.compile(r'\s+') + if does_not_exist: + lines = [] + else: + with open(limits_conf, 'rb') as f: + lines = list(f) + message = '' - f = open(limits_conf, 'rb') # Tempfile nf = tempfile.NamedTemporaryFile(mode='w+') found = False new_value = value - for line in f: + for line in lines: line = to_native(line, errors='surrogate_or_strict') if line.startswith('#'): nf.write(line) @@ -313,18 +325,18 @@ def main(): message = new_limit nf.write(new_limit) - f.close() nf.flush() - with open(limits_conf, 'r') as content: - content_current = content.read() - with open(nf.name, 'r') as content: content_new = content.read() if not module.check_mode: - # Copy tempfile to newfile - module.atomic_move(nf.name, limits_conf) + if does_not_exist: + with open(limits_conf, 'a'): + pass + + # Move tempfile to newfile + module.atomic_move(os.path.abspath(nf.name), os.path.abspath(limits_conf)) try: nf.close() @@ -334,7 +346,7 @@ def main(): res_args = dict( changed=changed, msg=message, - diff=dict(before=content_current, after=content_new), + diff=dict(before=b''.join(lines), after=content_new), ) if backup: diff --git a/plugins/modules/system/pamd.py b/plugins/modules/pamd.py similarity index 87% rename from plugins/modules/system/pamd.py rename to plugins/modules/pamd.py index dda504974d..bf3bc40ef3 100644 --- a/plugins/modules/system/pamd.py +++ b/plugins/modules/pamd.py @@ -1,103 +1,102 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- -# Copyright: (c) 2017, Kenneth D. 
Evensen -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# Copyright (c) 2017, Kenneth D. Evensen +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = r''' +DOCUMENTATION = r""" module: pamd author: - - Kenneth D. Evensen (@kevensen) + - Kenneth D. Evensen (@kevensen) short_description: Manage PAM Modules description: - Edit PAM service's type, control, module path and module arguments. - - In order for a PAM rule to be modified, the type, control and - module_path must match an existing rule. See man(5) pam.d for details. + - In order for a PAM rule to be modified, the type, control and module_path must match an existing rule. See man(5) pam.d + for details. notes: - This module does not handle authselect profiles. +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: none options: name: description: - - The name generally refers to the PAM service file to - change, for example system-auth. + - The name generally refers to the PAM service file to change, for example system-auth. type: str required: true type: description: - The type of the PAM rule being modified. - - The C(type), C(control) and C(module_path) all must match a rule to be modified. + - The O(type), O(control), and O(module_path) options all must match a rule to be modified. type: str required: true - choices: [ account, -account, auth, -auth, password, -password, session, -session ] + choices: [account, -account, auth, -auth, password, -password, session, -session] control: description: - The control of the PAM rule being modified. - - This may be a complicated control with brackets. 
If this is the case, be - sure to put "[bracketed controls]" in quotes. - - The C(type), C(control) and C(module_path) all must match a rule to be modified. + - This may be a complicated control with brackets. If this is the case, be sure to put "[bracketed controls]" in quotes. + - The O(type), O(control), and O(module_path) options all must match a rule to be modified. type: str required: true module_path: description: - The module path of the PAM rule being modified. - - The C(type), C(control) and C(module_path) all must match a rule to be modified. + - The O(type), O(control), and O(module_path) options all must match a rule to be modified. type: str required: true new_type: description: - - The new type to assign to the new rule. + - The new type to assign to the new rule. type: str - choices: [ account, -account, auth, -auth, password, -password, session, -session ] + choices: [account, -account, auth, -auth, password, -password, session, -session] new_control: description: - - The new control to assign to the new rule. + - The new control to assign to the new rule. type: str new_module_path: description: - - The new module path to be assigned to the new rule. + - The new module path to be assigned to the new rule. type: str module_arguments: description: - - When state is C(updated), the module_arguments will replace existing module_arguments. - - When state is C(args_absent) args matching those listed in module_arguments will be removed. - - When state is C(args_present) any args listed in module_arguments are added if - missing from the existing rule. - - Furthermore, if the module argument takes a value denoted by C(=), - the value will be changed to that specified in module_arguments. + - When O(state=updated), the O(module_arguments) replace existing module_arguments. + - When O(state=args_absent) args matching those listed in O(module_arguments) are removed. 
+ - When O(state=args_present) any args listed in O(module_arguments) are added if missing from the existing rule. + - Furthermore, if the module argument takes a value denoted by C(=), the value changes to that specified in module_arguments. type: list elements: str state: description: - - The default of C(updated) will modify an existing rule if type, - control and module_path all match an existing rule. - - With C(before), the new rule will be inserted before a rule matching type, - control and module_path. - - Similarly, with C(after), the new rule will be inserted after an existing rulematching type, - control and module_path. - - With either C(before) or C(after) new_type, new_control, and new_module_path must all be specified. - - If state is C(args_absent) or C(args_present), new_type, new_control, and new_module_path will be ignored. - - State C(absent) will remove the rule. The 'absent' state was added in Ansible 2.4. + - The default of V(updated) modifies an existing rule if type, control and module_path all match an existing rule. + - With V(before), the new rule is inserted before a rule matching type, control and module_path. + - Similarly, with V(after), the new rule is inserted after an existing rulematching type, control and module_path. + - With either V(before) or V(after) O(new_type), O(new_control), and O(new_module_path) must all be specified. + - If state is V(args_absent) or V(args_present), O(new_type), O(new_control), and O(new_module_path) are ignored. + - State V(absent) removes the rule. type: str - choices: [ absent, before, after, args_absent, args_present, updated ] + choices: [absent, before, after, args_absent, args_present, updated] default: updated path: description: - - This is the path to the PAM service files. + - This is the path to the PAM service files. 
type: path default: /etc/pam.d backup: - description: - - Create a backup file including the timestamp information so you can - get the original file back if you somehow clobbered it incorrectly. - type: bool - default: no -''' + description: + - Create a backup file including the timestamp information so you can get the original file back if you somehow clobbered + it incorrectly. + type: bool + default: false +""" -EXAMPLES = r''' +EXAMPLES = r""" - name: Update pamd rule's control in /etc/pam.d/system-auth community.general.pamd: name: system-auth @@ -125,8 +124,7 @@ EXAMPLES = r''' new_module_path: pam_faillock.so state: before -- name: Insert a new rule pam_wheel.so with argument 'use_uid' after an \ - existing rule pam_rootok.so +- name: Insert a new rule pam_wheel.so with argument 'use_uid' after an existing rule pam_rootok.so community.general.pamd: name: su type: auth @@ -153,11 +151,7 @@ EXAMPLES = r''' type: auth control: required module_path: pam_faillock.so - module_arguments: 'preauth - silent - deny=3 - unlock_time=604800 - fail_interval=900' + module_arguments: 'preauth silent deny=3 unlock_time=604800 fail_interval=900' state: updated - name: Remove specific arguments from a rule @@ -185,8 +179,8 @@ EXAMPLES = r''' control: '[success=1 default=ignore]' module_path: pam_succeed_if.so module_arguments: - - crond - - quiet + - crond + - quiet state: args_present - name: Module arguments requiring commas must be listed as a Yaml list @@ -196,7 +190,7 @@ EXAMPLES = r''' control: required module_path: pam_access.so module_arguments: - - listsep=, + - listsep=, state: args_present - name: Update specific argument value in a rule @@ -218,49 +212,20 @@ EXAMPLES = r''' type: auth module_path: pam_sss.so control: 'requisite' -''' +""" -RETURN = r''' +RETURN = r""" change_count: - description: How many rules were changed. - type: int - sample: 1 - returned: success -new_rule: - description: The changes to the rule. 
This was available in Ansible 2.4 and Ansible 2.5. It was removed in Ansible 2.6. - type: str - sample: None None None sha512 shadow try_first_pass use_authtok - returned: success -updated_rule_(n): - description: The rule(s) that was/were changed. This is only available in - Ansible 2.4 and was removed in Ansible 2.5. - type: str - sample: - - password sufficient pam_unix.so sha512 shadow try_first_pass - use_authtok - returned: success -action: - description: - - "That action that was taken and is one of: update_rule, - insert_before_rule, insert_after_rule, args_present, args_absent, - absent. This was available in Ansible 2.4 and removed in Ansible 2.8" - returned: always - type: str - sample: "update_rule" -dest: - description: - - "Path to pam.d service that was changed. This is only available in - Ansible 2.3 and was removed in Ansible 2.4." - returned: success - type: str - sample: "/etc/pam.d/system-auth" + description: How many rules were changed. + type: int + sample: 1 + returned: success backupdest: - description: - - "The file name of the backup file, if created." - returned: success - type: str -... -''' + description: + - The file name of the backup file, if created. 
+ returned: success + type: str +""" from ansible.module_utils.basic import AnsibleModule diff --git a/plugins/modules/system/parted.py b/plugins/modules/parted.py similarity index 73% rename from plugins/modules/system/parted.py rename to plugins/modules/parted.py index 3796cfc40b..11e4577667 100644 --- a/plugins/modules/system/parted.py +++ b/plugins/modules/parted.py @@ -1,90 +1,94 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- -# Copyright: (c) 2016, Fabrizio Colonna -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# Copyright (c) 2016, Fabrizio Colonna +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = r''' ---- +DOCUMENTATION = r""" author: - - Fabrizio Colonna (@ColOfAbRiX) + - Fabrizio Colonna (@ColOfAbRiX) module: parted short_description: Configure block device partitions description: - - This module allows configuring block device partition using the C(parted) - command line tool. For a full description of the fields and the options - check the GNU parted manual. + - This module allows configuring block device partition using the C(parted) command line tool. For a full description of + the fields and the options check the GNU parted manual. requirements: - - This module requires parted version 1.8.3 and above - - align option (except 'undefined') requires parted 2.1 and above - - If the version of parted is below 3.1, it requires a Linux version running - the sysfs file system C(/sys/). + - This module requires C(parted) version 1.8.3 and above. + - Option O(align) (except V(undefined)) requires C(parted) 2.1 or above. + - If the version of C(parted) is below 3.1, it requires a Linux version running the C(sysfs) file system C(/sys/). 
+ - Requires the C(resizepart) command when using the O(resize) parameter. +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: none options: device: - description: The block device (disk) where to operate. + description: + - The block device (disk) where to operate. + - Regular files can also be partitioned, but it is recommended to create a loopback device using C(losetup) to easily + access its partitions. type: str - required: True + required: true align: - description: Set alignment for newly created partitions. Use 'undefined' for parted default aligment. + description: + - Set alignment for newly created partitions. Use V(undefined) for parted default alignment. type: str - choices: [ cylinder, minimal, none, optimal, undefined ] + choices: [cylinder, minimal, none, optimal, undefined] default: optimal number: description: - - The number of the partition to work with or the number of the partition - that will be created. - - Required when performing any action on the disk, except fetching information. + - The partition number being affected. + - Required when performing any action on the disk, except fetching information. type: int unit: description: - - Selects the current default unit that Parted will use to display - locations and capacities on the disk and to interpret those given by the - user if they are not suffixed by an unit. - - When fetching information about a disk, it is always recommended to specify a unit. + - Selects the current default unit that Parted uses to display locations and capacities on the disk and to interpret + those given by the user if they are not suffixed by an unit. + - When fetching information about a disk, it is recommended to always specify a unit. 
type: str - choices: [ s, B, KB, KiB, MB, MiB, GB, GiB, TB, TiB, '%', cyl, chs, compact ] + choices: [s, B, KB, KiB, MB, MiB, GB, GiB, TB, TiB, '%', cyl, chs, compact] default: KiB label: description: - - Disk label type to use. - - If C(device) already contains different label, it will be changed to C(label) and any previous partitions will be lost. + - Disk label type or partition table to use. + - If O(device) already contains a different label, it is changed to O(label) and any previous partitions are lost. + - A O(name) must be specified for a V(gpt) partition table. type: str - choices: [ aix, amiga, bsd, dvh, gpt, loop, mac, msdos, pc98, sun ] + choices: [aix, amiga, bsd, dvh, gpt, loop, mac, msdos, pc98, sun] default: msdos part_type: description: - - May be specified only with 'msdos' or 'dvh' partition tables. - - A C(name) must be specified for a 'gpt' partition table. - - Neither C(part_type) nor C(name) may be used with a 'sun' partition table. + - May be specified only with O(label=msdos) or O(label=dvh). + - Neither O(part_type) nor O(name) may be used with O(label=sun). type: str - choices: [ extended, logical, primary ] + choices: [extended, logical, primary] default: primary part_start: description: - - Where the partition will start as offset from the beginning of the disk, - that is, the "distance" from the start of the disk. Negative numbers - specify distance from the end of the disk. - - The distance can be specified with all the units supported by parted - (except compat) and it is case sensitive, e.g. C(10GiB), C(15%). - - Using negative values may require setting of C(fs_type) (see notes). + - Where the partition starts as offset from the beginning of the disk, that is, the "distance" from the start of the + disk. Negative numbers specify distance from the end of the disk. + - The distance can be specified with all the units supported by parted (except compat) and it is case sensitive, for + example V(10GiB), V(15%). 
+ - Using negative values may require setting of O(fs_type) (see notes). type: str default: 0% part_end: description: - - Where the partition will end as offset from the beginning of the disk, - that is, the "distance" from the start of the disk. Negative numbers - specify distance from the end of the disk. - - The distance can be specified with all the units supported by parted - (except compat) and it is case sensitive, e.g. C(10GiB), C(15%). + - Where the partition ends as offset from the beginning of the disk, that is, the "distance" from the start of the disk. + Negative numbers specify distance from the end of the disk. + - The distance can be specified with all the units supported by parted (except compat) and it is case sensitive, for + example V(10GiB), V(15%). type: str default: 100% name: description: - - Sets the name for the partition number (GPT, Mac, MIPS and PC98 only). + - Sets the name for the partition number (GPT, Mac, MIPS and PC98 only). type: str flags: description: A list of the flags that has to be set on the partition. @@ -92,37 +96,35 @@ options: elements: str state: description: - - Whether to create or delete a partition. - - If set to C(info) the module will only return the device information. + - Whether to create or delete a partition. + - If set to V(info) the module only returns the device information. type: str - choices: [ absent, present, info ] + choices: [absent, present, info] default: info fs_type: description: - - If specified and the partition does not exist, will set filesystem type to given partition. - - Parameter optional, but see notes below about negative C(part_start) values. + - If specified and the partition does not exist, sets filesystem type to given partition. + - Parameter optional, but see notes below about negative O(part_start) values. type: str version_added: '0.2.0' resize: description: - - Call C(resizepart) on existing partitions to match the size specified by I(part_end). 
+ - Call C(resizepart) on existing partitions to match the size specified by O(part_end). type: bool default: false version_added: '1.3.0' notes: - - When fetching information about a new disk and when the version of parted - installed on the system is before version 3.1, the module queries the kernel - through C(/sys/) to obtain disk information. In this case the units CHS and - CYL are not supported. - - Negative C(part_start) start values were rejected if C(fs_type) was not given. - This bug was fixed in parted 3.2.153. If you want to use negative C(part_start), - specify C(fs_type) as well or make sure your system contains newer parted. -''' + - When fetching information about a new disk and when the version of parted installed on the system is before version 3.1, + the module queries the kernel through C(/sys/) to obtain disk information. In this case the units CHS and CYL are not + supported. + - Negative O(part_start) start values were rejected if O(fs_type) was not given. This bug was fixed in parted 3.2.153. If + you want to use negative O(part_start), specify O(fs_type) as well or make sure your system contains newer parted. +""" -RETURN = r''' +RETURN = r""" partition_info: - description: Current partition information + description: Current partition information. returned: success type: complex contains: @@ -133,40 +135,36 @@ partition_info: description: List of device partitions. type: list script: - description: parted script executed by module + description: Parted script executed by module. 
type: str - sample: { - "disk": { - "dev": "/dev/sdb", - "logical_block": 512, - "model": "VMware Virtual disk", - "physical_block": 512, - "size": 5.0, - "table": "msdos", - "unit": "gib" - }, - "partitions": [{ - "begin": 0.0, - "end": 1.0, - "flags": ["boot", "lvm"], - "fstype": "", - "name": "", - "num": 1, + sample: + "disk": + "dev": "/dev/sdb" + "logical_block": 512 + "model": "VMware Virtual disk" + "physical_block": 512 + "size": 5.0 + "table": "msdos" + "unit": "gib" + "partitions": + - "begin": 0.0 + "end": 1.0 + "flags": ["boot", "lvm"] + "fstype": "" + "name": "" + "num": 1 "size": 1.0 - }, { - "begin": 1.0, - "end": 5.0, - "flags": [], - "fstype": "", - "name": "", - "num": 2, + - "begin": 1.0 + "end": 5.0 + "flags": [] + "fstype": "" + "name": "" + "num": 2 "size": 4.0 - }], - "script": "unit KiB print " - } -''' + "script": "unit KiB print " +""" -EXAMPLES = r''' +EXAMPLES = r""" - name: Create a new ext4 primary partition community.general.parted: device: /dev/sdb @@ -191,7 +189,7 @@ EXAMPLES = r''' community.general.parted: device: /dev/sdb number: 2 - flags: [ lvm ] + flags: [lvm] state: present part_start: 1GiB @@ -222,7 +220,7 @@ EXAMPLES = r''' part_end: "100%" resize: true state: present -''' +""" from ansible.module_utils.basic import AnsibleModule @@ -362,7 +360,7 @@ def format_disk_size(size_bytes, unit): This function has been adapted from https://github.com/Distrotech/parted/blo b/279d9d869ff472c52b9ec2e180d568f0c99e30b0/libparted/unit.c """ - global units_si, units_iec + global units_si, units_iec # pylint: disable=global-variable-not-assigned unit = unit.lower() @@ -458,7 +456,7 @@ def get_device_info(device, unit): Fetches information about a disk and its partitions and it returns a dictionary. """ - global module, parted_exec + global module, parted_exec # pylint: disable=global-variable-not-assigned # If parted complains about missing labels, it means there are no partitions. 
# In this case only, use a custom function to fetch information and emulate @@ -467,12 +465,12 @@ def get_device_info(device, unit): if label_needed: return get_unlabeled_device_info(device, unit) - command = "%s -s -m %s -- unit '%s' print" % (parted_exec, device, unit) + command = [parted_exec, "-s", "-m", device, "--", "unit", unit, "print"] rc, out, err = module.run_command(command) if rc != 0 and 'unrecognised disk label' not in err: module.fail_json(msg=( "Error while getting device information with parted " - "script: '%s'" % command), + "script: '%s'" % " ".join(command)), rc=rc, out=out, err=err ) @@ -485,7 +483,7 @@ def check_parted_label(device): to 3.1 don't return data when there is no label. For more information see: http://upstream.rosalinux.ru/changelogs/libparted/3.1/changelog.html """ - global parted_exec + global parted_exec # pylint: disable=global-variable-not-assigned # Check the version parted_major, parted_minor, dummy = parted_version() @@ -493,7 +491,7 @@ def check_parted_label(device): return False # Older parted versions return a message in the stdout and RC > 0. - rc, out, err = module.run_command("%s -s -m %s print" % (parted_exec, device)) + rc, out, err = module.run_command([parted_exec, "-s", "-m", device, "print"]) if rc != 0 and 'unrecognised disk label' in out.lower(): return True @@ -531,9 +529,9 @@ def parted_version(): """ Returns the major and minor version of parted installed on the system. """ - global module, parted_exec + global module, parted_exec # pylint: disable=global-variable-not-assigned - rc, out, err = module.run_command("%s --version" % parted_exec) + rc, out, err = module.run_command([parted_exec, "--version"]) if rc != 0: module.fail_json( msg="Failed to get parted version.", rc=rc, out=out, err=err @@ -550,19 +548,29 @@ def parted(script, device, align): """ Runs a parted script. 
""" - global module, parted_exec + global module, parted_exec # pylint: disable=global-variable-not-assigned - align_option = '-a %s' % align + align_option = ['-a', align] if align == 'undefined': - align_option = '' + align_option = [] + + """ + Use option --fix (-f) if available. Versions prior + to 3.4.64 don't have it. For more information see: + http://savannah.gnu.org/news/?id=10114 + """ + if parted_version() >= (3, 4, 64): + script_option = ['-s', '-f'] + else: + script_option = ['-s'] if script and not module.check_mode: - command = "%s -s -m %s %s -- %s" % (parted_exec, align_option, device, script) + command = [parted_exec] + script_option + ['-m'] + align_option + [device, '--'] + script rc, out, err = module.run_command(command) if rc != 0: module.fail_json( - msg="Error while running parted script: %s" % command.strip(), + msg="Error while running parted script: %s" % " ".join(command).strip(), rc=rc, out=out, err=err ) @@ -572,11 +580,8 @@ def read_record(file_path, default=None): Reads the first line of a file and returns it. """ try: - f = open(file_path, 'r') - try: + with open(file_path, 'r') as f: return f.readline().strip() - finally: - f.close() except IOError: return default @@ -586,10 +591,7 @@ def part_exists(partitions, attribute, number): Looks if a partition that has a specific value for a specific attribute actually exists. 
""" - return any( - part[attribute] and - part[attribute] == number for part in partitions - ) + return any(part.get(attribute) == number for part in partitions) def check_size_format(size_str): @@ -601,11 +603,11 @@ def check_size_format(size_str): def main(): - global module, units_si, units_iec, parted_exec + global module, units_si, units_iec, parted_exec # pylint: disable=global-variable-not-assigned changed = False - output_script = "" - script = "" + output_script = [] + script = [] module = AnsibleModule( argument_spec=dict( device=dict(type='str', required=True), @@ -687,20 +689,19 @@ def main(): # Assign label if required mklabel_needed = current_device['generic'].get('table', None) != label if mklabel_needed: - script += "mklabel %s " % label + script += ["mklabel", label] # Create partition if required if part_type and (mklabel_needed or not part_exists(current_parts, 'num', number)): - script += "mkpart %s %s%s %s " % ( - part_type, - '%s ' % fs_type if fs_type is not None else '', - part_start, - part_end - ) + script += ["mkpart"] + script += [part_type] + if fs_type is not None: + script += [fs_type] + script += [part_start, part_end] # Set the unit of the run if unit and script: - script = "unit %s %s" % (unit, script) + script = ["unit", unit] + script # If partition exists, try to resize if resize and part_exists(current_parts, 'num', number): @@ -716,10 +717,7 @@ def main(): desired_part_end = convert_to_bytes(size, parsed_unit) if current_part_end != desired_part_end: - script += "resizepart %s %s " % ( - number, - part_end - ) + script += ["resizepart", str(number), part_end] # Execute the script and update the data structure. 
# This will create the partition for the next steps @@ -727,7 +725,7 @@ def main(): output_script += script parted(script, device, align) changed = True - script = "" + script = [] if not module.check_mode: current_parts = get_device_info(device, unit)['partitions'] @@ -740,10 +738,8 @@ def main(): # Assign name to the partition if name is not None and partition.get('name', None) != name: - # Wrap double quotes in single quotes so the shell doesn't strip - # the double quotes as those need to be included in the arg - # passed to parted - script += 'name %s \'"%s"\' ' % (number, name) + # The double quotes need to be included in the arg passed to parted + script += ['name', str(number), '"%s"' % name] # Manage flags if flags: @@ -757,14 +753,14 @@ def main(): flags_on = list(set(flags) - set(partition['flags'])) for f in flags_on: - script += "set %s %s on " % (number, f) + script += ["set", str(number), f, "on"] for f in flags_off: - script += "set %s %s off " % (number, f) + script += ["set", str(number), f, "off"] # Set the unit of the run if unit and script: - script = "unit %s %s" % (unit, script) + script = ["unit", unit] + script # Execute the script if script: @@ -775,21 +771,20 @@ def main(): elif state == 'absent': # Remove the partition if part_exists(current_parts, 'num', number) or module.check_mode: - script = "rm %s " % number + script = ["rm", str(number)] output_script += script changed = True parted(script, device, align) elif state == 'info': - output_script = "unit '%s' print " % unit - + output_script = ["unit", unit, "print"] # Final status of the device final_device_status = get_device_info(device, unit) module.exit_json( changed=changed, disk=final_device_status['generic'], partitions=final_device_status['partitions'], - script=output_script.strip() + script=output_script ) diff --git a/plugins/modules/packaging/language/pear.py b/plugins/modules/pear.py similarity index 65% rename from plugins/modules/packaging/language/pear.py rename to 
plugins/modules/pear.py index e8e36b3c56..c31845cf54 100644 --- a/plugins/modules/packaging/language/pear.py +++ b/plugins/modules/pear.py @@ -1,57 +1,64 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- -# (c) 2012, Afterburn -# (c) 2013, Aaron Bull Schaefer -# (c) 2015, Jonathan Lestrelin +# Copyright (c) 2012, Afterburn +# Copyright (c) 2013, Aaron Bull Schaefer +# Copyright (c) 2015, Jonathan Lestrelin # -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: pear short_description: Manage pear/pecl packages description: - - Manage PHP packages with the pear package manager. + - Manage PHP packages with the pear package manager. author: - - Jonathan Lestrelin (@jle64) + - Jonathan Lestrelin (@jle64) +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: none options: - name: - type: str - description: - - Name of the package to install, upgrade, or remove. - required: true - aliases: [pkg] - state: - type: str - description: - - Desired state of the package. - default: "present" - choices: ["present", "installed", "latest", "absent", "removed"] - executable: - type: path - description: - - Path to the pear executable. - prompts: - description: - - List of regular expressions that can be used to detect prompts during pear package installation to answer the expected question. - - Prompts will be processed in the same order as the packages list. - - You can optionnally specify an answer to any question in the list. - - If no answer is provided, the list item will only contain the regular expression. 
- - "To specify an answer, the item will be a dict with the regular expression as key and the answer as value C(my_regular_expression: 'an_answer')." - - You can provide a list containing items with or without answer. - - A prompt list can be shorter or longer than the packages list but will issue a warning. - - If you want to specify that a package will not need prompts in the middle of a list, C(null). - type: list - elements: raw - version_added: 0.2.0 -''' + name: + type: str + description: + - Name of the package to install, upgrade, or remove. + required: true + aliases: [pkg] + state: + type: str + description: + - Desired state of the package. + default: "present" + choices: ["present", "installed", "latest", "absent", "removed"] + executable: + type: path + description: + - Path to the pear executable. + prompts: + description: + - List of regular expressions that can be used to detect prompts during pear package installation to answer the expected + question. + - Prompts are processed in the same order as the packages list. + - You can optionally specify an answer to any question in the list. + - If no answer is provided, the list item must contain only the regular expression. + - "To specify an answer, the item must be a dictionary with the regular expression as key and the answer as value C(my_regular_expression: + 'an_answer')." + - You can provide a list containing items with or without answer. + - A prompt list can be shorter or longer than the packages list but it issues a warning. + - If you want to specify that a package does not need prompts in the middle of a list, V(null). 
+ type: list + elements: raw + version_added: 0.2.0 +""" -EXAMPLES = r''' +EXAMPLES = r""" - name: Install pear package community.general.pear: name: Net_URL2 @@ -67,19 +74,18 @@ EXAMPLES = r''' name: pecl/apcu state: present prompts: - - (.*)Enable internal debugging in APCu \[no\] + - (.*)Enable internal debugging in APCu \[no\] - name: Install pecl package with expected prompt and an answer community.general.pear: name: pecl/apcu state: present prompts: - - (.*)Enable internal debugging in APCu \[no\]: "yes" + - (.*)Enable internal debugging in APCu \[no\]: "yes" -- name: Install multiple pear/pecl packages at once with prompts. - Prompts will be processed on the same order as the packages order. - If there is more prompts than packages, packages without prompts will be installed without any prompt expected. - If there is more packages than prompts, additionnal prompts will be ignored. +- name: Install multiple pear/pecl packages at once with prompts. Prompts will be processed on the same order as the packages + order. If there is more prompts than packages, packages without prompts will be installed without any prompt expected. + If there is more packages than prompts, additional prompts will be ignored. community.general.pear: name: pecl/gnupg, pecl/apcu state: present @@ -87,10 +93,9 @@ EXAMPLES = r''' - I am a test prompt because gnupg doesnt asks anything - (.*)Enable internal debugging in APCu \[no\]: "yes" -- name: Install multiple pear/pecl packages at once skipping the first prompt. - Prompts will be processed on the same order as the packages order. - If there is more prompts than packages, packages without prompts will be installed without any prompt expected. - If there is more packages than prompts, additionnal prompts will be ignored. +- name: Install multiple pear/pecl packages at once skipping the first prompt. Prompts will be processed on the same order + as the packages order. 
If there is more prompts than packages, packages without prompts will be installed without any + prompt expected. If there is more packages than prompts, additional prompts will be ignored. community.general.pear: name: pecl/gnupg, pecl/apcu state: present @@ -107,7 +112,7 @@ EXAMPLES = r''' community.general.pear: name: Net_URL2,pecl/json_post state: absent -''' +""" import os @@ -144,34 +149,33 @@ def get_repository_version(pear_output): return None -def query_package(module, name, state="present"): +def query_package(module, name): """Query the package status in both the local system and the repository. Returns a boolean to indicate if the package is installed, and a second boolean to indicate if the package is up-to-date.""" - if state == "present": - lcmd = "%s info %s" % (_get_pear_path(module), name) - lrc, lstdout, lstderr = module.run_command(lcmd, check_rc=False) - if lrc != 0: - # package is not installed locally - return False, False - - rcmd = "%s remote-info %s" % (_get_pear_path(module), name) - rrc, rstdout, rstderr = module.run_command(rcmd, check_rc=False) - - # get the version installed locally (if any) - lversion = get_local_version(rstdout) - - # get the version in the repository - rversion = get_repository_version(rstdout) - - if rrc == 0: - # Return True to indicate that the package is installed locally, - # and the result of the version number comparison - # to determine if the package is up-to-date. 
- return True, (lversion == rversion) - + lcmd = [_get_pear_path(module), "info", name] + lrc, lstdout, lstderr = module.run_command(lcmd, check_rc=False) + if lrc != 0: + # package is not installed locally return False, False + rcmd = [_get_pear_path(module), "remote-info", name] + rrc, rstdout, rstderr = module.run_command(rcmd, check_rc=False) + + # get the version installed locally (if any) + lversion = get_local_version(rstdout) + + # get the version in the repository + rversion = get_repository_version(rstdout) + + if rrc == 0: + # Return True to indicate that the package is installed locally, + # and the result of the version number comparison + # to determine if the package is up-to-date. + return True, (lversion == rversion) + + return False, False + def remove_packages(module, packages): remove_c = 0 @@ -182,7 +186,7 @@ def remove_packages(module, packages): if not installed: continue - cmd = "%s uninstall %s" % (_get_pear_path(module), package) + cmd = [_get_pear_path(module), "uninstall", package] rc, stdout, stderr = module.run_command(cmd, check_rc=False) if rc != 0: @@ -219,7 +223,7 @@ def install_packages(module, state, packages, prompts): # Preparing prompts answer according to item type tmp_prompts = [] for _item in prompts: - # If the current item is a dict then we expect it's key to be the prompt regex and it's value to be the answer + # If the current item is a dict then we expect its key to be the prompt regex and its value to be the answer # We also expect here that the dict only has ONE key and the first key will be taken if isinstance(_item, dict): key = list(_item.keys())[0] @@ -251,7 +255,7 @@ def install_packages(module, state, packages, prompts): prompt_regex = None data = default_stdin - cmd = "%s %s %s" % (_get_pear_path(module), command, package) + cmd = [_get_pear_path(module), command, package] rc, stdout, stderr = module.run_command(cmd, check_rc=False, prompt_regex=prompt_regex, data=data, binary_data=True) if rc != 0: 
module.fail_json(msg="failed to install %s: %s" % (package, to_text(stdout + stderr))) @@ -286,8 +290,8 @@ def main(): argument_spec=dict( name=dict(aliases=['pkg'], required=True), state=dict(default='present', choices=['present', 'installed', "latest", 'absent', 'removed']), - executable=dict(default=None, required=False, type='path'), - prompts=dict(default=None, required=False, type='list', elements='raw'), + executable=dict(type='path'), + prompts=dict(type='list', elements='raw'), ), supports_check_mode=True) diff --git a/plugins/modules/system/pids.py b/plugins/modules/pids.py similarity index 82% rename from plugins/modules/system/pids.py rename to plugins/modules/pids.py index 1fd8014070..aa5f772201 100644 --- a/plugins/modules/system/pids.py +++ b/plugins/modules/pids.py @@ -1,19 +1,26 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- -# Copyright: (c) 2019, Saranya Sridharan -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import (absolute_import, division, print_function) +# Copyright (c) 2019, Saranya Sridharan +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = ''' +DOCUMENTATION = r""" module: pids -description: "Retrieves a list of PIDs of given process name in Ansible controller/controlled machines.Returns an empty list if no process in that name exists." -short_description: "Retrieves process IDs list if the process is running otherwise return empty list" +description: "Retrieves a list of PIDs of given process name in Ansible controller/controlled machines. Returns an empty list + if no process in that name exists." 
+short_description: Retrieves process IDs list if the process is running otherwise return empty list author: - Saranya Sridharan (@saranyasridharan) requirements: - psutil(python module) +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: none options: name: description: The name of the process(es) you want to get PID(s) for. @@ -23,17 +30,17 @@ options: type: str version_added: 3.0.0 ignore_case: - description: Ignore case in pattern if using the I(pattern) option. + description: Ignore case in pattern if using the O(pattern) option. type: bool default: false version_added: 3.0.0 -''' +""" -EXAMPLES = r''' +EXAMPLES = r""" # Pass the process name - name: Getting process IDs of the process community.general.pids: - name: python + name: python register: pids_of_python - name: Printing the process IDs obtained @@ -44,40 +51,35 @@ EXAMPLES = r''' community.general.pids: pattern: python(2(\.7)?|3(\.6)?)?\s+myapp\.py register: myapp_pids -''' +""" -RETURN = ''' +RETURN = r""" pids: - description: Process IDs of the given process + description: Process IDs of the given process. 
returned: list of none, one, or more process IDs type: list - sample: [100,200] -''' + sample: [100, 200] +""" import abc import re from os.path import basename -from ansible.module_utils import six -from ansible.module_utils.basic import AnsibleModule, missing_required_lib +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils import deps from ansible.module_utils.common.text.converters import to_native from ansible_collections.community.general.plugins.module_utils.version import LooseVersion -try: +with deps.declare("psutil"): import psutil - HAS_PSUTIL = True -except ImportError: - HAS_PSUTIL = False - class PSAdapterError(Exception): pass -@six.add_metaclass(abc.ABCMeta) -class PSAdapter(object): +class PSAdapter(object, metaclass=abc.ABCMeta): NAME_ATTRS = ('name', 'cmdline') PATTERN_ATTRS = ('name', 'exe', 'cmdline') @@ -106,7 +108,7 @@ class PSAdapter(object): attributes['cmdline'] and compare_lower(attributes['cmdline'][0], name)) def _get_proc_attributes(self, proc, *attributes): - return dict((attribute, self._get_attribute_from_proc(proc, attribute)) for attribute in attributes) + return {attribute: self._get_attribute_from_proc(proc, attribute) for attribute in attributes} @staticmethod @abc.abstractmethod @@ -176,8 +178,8 @@ def compare_lower(a, b): class Pids(object): def __init__(self, module): - if not HAS_PSUTIL: - module.fail_json(msg=missing_required_lib('psutil')) + + deps.validate(module) self._ps = PSAdapter.from_package(psutil) @@ -189,7 +191,7 @@ class Pids(object): self._pids = [] def execute(self): - if self._name: + if self._name is not None: self._pids = self._ps.get_pids_by_name(self._name) else: try: diff --git a/plugins/modules/monitoring/pingdom.py b/plugins/modules/pingdom.py similarity index 65% rename from plugins/modules/monitoring/pingdom.py rename to plugins/modules/pingdom.py index 23ed254543..5c6ad6f88c 100644 --- a/plugins/modules/monitoring/pingdom.py +++ 
b/plugins/modules/pingdom.py @@ -1,55 +1,60 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- -# Copyright: Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# Copyright Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = ''' - +DOCUMENTATION = r""" module: pingdom short_description: Pause/unpause Pingdom alerts description: - - This module will let you pause/unpause Pingdom alerts + - This module lets you pause/unpause Pingdom alerts. author: - - "Dylan Silva (@thaumos)" - - "Justin Johns (!UNKNOWN)" + - "Dylan Silva (@thaumos)" + - "Justin Johns (!UNKNOWN)" requirements: - - "This pingdom python library: https://github.com/mbabineau/pingdom-python" + - "This pingdom python library: U(https://github.com/mbabineau/pingdom-python)" +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: none + diff_mode: + support: none options: - state: - type: str - description: - - Define whether or not the check should be running or paused. - required: true - choices: [ "running", "paused", "started", "stopped" ] - checkid: - type: str - description: - - Pingdom ID of the check. - required: true - uid: - type: str - description: - - Pingdom user ID. - required: true - passwd: - type: str - description: - - Pingdom user password. - required: true - key: - type: str - description: - - Pingdom API key. - required: true + state: + type: str + description: + - Define whether or not the check should be running or paused. + required: true + choices: ["running", "paused", "started", "stopped"] + checkid: + type: str + description: + - Pingdom ID of the check. 
+ required: true + uid: + type: str + description: + - Pingdom user ID. + required: true + passwd: + type: str + description: + - Pingdom user password. + required: true + key: + type: str + description: + - Pingdom API key. + required: true notes: - - This module does not yet have support to add/remove checks. -''' + - This module does not yet have support to add/remove checks. +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: Pause the check with the ID of 12345 community.general.pingdom: uid: example@example.com @@ -65,7 +70,7 @@ EXAMPLES = ''' key: apipassword123 checkid: 12345 state: running -''' +""" import traceback @@ -125,10 +130,10 @@ def main(): passwd = module.params['passwd'] key = module.params['key'] - if (state == "paused" or state == "stopped"): + if state == "paused" or state == "stopped": (rc, name, result) = pause(checkid, uid, passwd, key) - if (state == "running" or state == "started"): + if state == "running" or state == "started": (rc, name, result) = unpause(checkid, uid, passwd, key) if rc != 0: diff --git a/plugins/modules/packaging/language/pip_package_info.py b/plugins/modules/pip_package_info.py similarity index 54% rename from plugins/modules/packaging/language/pip_package_info.py rename to plugins/modules/pip_package_info.py index 25825cefb1..bcb4d45753 100644 --- a/plugins/modules/packaging/language/pip_package_info.py +++ b/plugins/modules/pip_package_info.py @@ -1,36 +1,39 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- -# (c) 2018, Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# Copyright (c) 2018, Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later # started out with AWX's scan_packages module -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = ''' 
+DOCUMENTATION = r""" module: pip_package_info -short_description: pip package information +short_description: Pip package information description: - - Return information about installed pip packages + - Return information about installed pip packages. +extends_documentation_fragment: + - community.general.attributes + - community.general.attributes.info_module options: clients: description: - - A list of the pip executables that will be used to get the packages. - They can be supplied with the full path or just the executable name, i.e `pip3.7`. + - A list of the pip executables that are used to get the packages. They can be supplied with the full path or just the + executable name, for example V(pip3.7). default: ['pip'] - required: False + required: false type: list elements: path requirements: - - The requested pip executables must be installed on the target. + - pip >= 20.3b1 (necessary for the C(--format) option) + - The requested C(pip) executables must be installed on the target. author: - Matthew Jones (@matburt) - Brian Coca (@bcoca) - Adam Miller (@maxamillion) -''' +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: Just get the list from default pip community.general.pip_package_info: @@ -41,52 +44,55 @@ EXAMPLES = ''' - name: Get from specific paths (virtualenvs?) community.general.pip_package_info: clients: '/home/me/projec42/python/pip3.5' -''' +""" -RETURN = ''' +RETURN = r""" packages: - description: a dictionary of installed package data + description: A dictionary of installed package data. returned: always type: dict contains: python: - description: A dictionary with each pip client which then contains a list of dicts with python package information + description: A dictionary with each pip client which then contains a list of dicts with python package information. 
returned: always type: dict sample: - "packages": { + { + "packages": { "pip": { - "Babel": [ - { - "name": "Babel", - "source": "pip", - "version": "2.6.0" - } - ], - "Flask": [ - { - "name": "Flask", - "source": "pip", - "version": "1.0.2" - } - ], - "Flask-SQLAlchemy": [ - { - "name": "Flask-SQLAlchemy", - "source": "pip", - "version": "2.3.2" - } - ], - "Jinja2": [ - { - "name": "Jinja2", - "source": "pip", - "version": "2.10" - } - ], - }, + "Babel": [ + { + "name": "Babel", + "source": "pip", + "version": "2.6.0" + } + ], + "Flask": [ + { + "name": "Flask", + "source": "pip", + "version": "1.0.2" + } + ], + "Flask-SQLAlchemy": [ + { + "name": "Flask-SQLAlchemy", + "source": "pip", + "version": "2.3.2" + } + ], + "Jinja2": [ + { + "name": "Jinja2", + "source": "pip", + "version": "2.10" + } + ] + } + } } -''' +""" + import json import os @@ -97,13 +103,13 @@ from ansible.module_utils.facts.packages import CLIMgr class PIP(CLIMgr): - def __init__(self, pip): + def __init__(self, pip, module): self.CLI = pip + self.module = module def list_installed(self): - global module - rc, out, err = module.run_command([self._cli, 'list', '-l', '--format=json']) + rc, out, err = self.module.run_command([self._cli, 'list', '-l', '--format=json']) if rc != 0: raise Exception("Unable to list packages rc=%s : %s" % (rc, err)) return json.loads(out) @@ -116,7 +122,6 @@ class PIP(CLIMgr): def main(): # start work - global module module = AnsibleModule( argument_spec=dict( clients=dict(type='list', elements='path', default=['pip']), @@ -133,7 +138,7 @@ def main(): module.warn('Skipping invalid pip client: %s' % (pip)) continue try: - pip_mgr = PIP(pip) + pip_mgr = PIP(pip, module) if pip_mgr.is_available(): found += 1 packages[pip] = pip_mgr.get_packages() diff --git a/plugins/modules/pipx.py b/plugins/modules/pipx.py new file mode 100644 index 0000000000..4d4f7227f7 --- /dev/null +++ b/plugins/modules/pipx.py @@ -0,0 +1,441 @@ +#!/usr/bin/python + +# Copyright (c) 2021, Alexei 
Znamensky +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + + +DOCUMENTATION = r""" +module: pipx +short_description: Manages applications installed with pipx +version_added: 3.8.0 +description: + - Manage Python applications installed in isolated virtualenvs using pipx. +extends_documentation_fragment: + - community.general.attributes + - community.general.pipx +attributes: + check_mode: + support: full + diff_mode: + support: full +options: + state: + type: str + choices: + - present + - absent + - install + - install_all + - uninstall + - uninstall_all + - inject + - uninject + - upgrade + - upgrade_shared + - upgrade_all + - reinstall + - reinstall_all + - latest + - pin + - unpin + default: install + description: + - Desired state for the application. + - The states V(present) and V(absent) are aliases to V(install) and V(uninstall), respectively. + - The state V(latest) is equivalent to executing the task twice, with state V(install) and then V(upgrade). It was added + in community.general 5.5.0. + - The states V(install_all), V(uninject), V(upgrade_shared), V(pin) and V(unpin) are only available in C(pipx>=1.6.0), + make sure to have a compatible version when using this option. These states have been added in community.general 9.4.0. + name: + type: str + description: + - The name of the application and also the name of the Python package being installed. + - In C(pipx) documentation it is also referred to as the name of the virtual environment where the application is installed. + - If O(name) is a simple package name without version specifiers, then that name is used as the Python package name + to be installed. + - Starting in community.general 10.7.0, you can use package specifiers when O(state=present) or O(state=install). For + example, O(name=tox<4.0.0) or O(name=tox>3.0.27). 
+ - Please note that when you use O(state=present) and O(name) with version specifiers, contrary to the behavior of C(pipx), + this module honors the version specifier and installs a version of the application that satisfies it. If you want + to ensure the reinstallation of the application even when the version specifier is met, then you must use O(force=true), + or perhaps use O(state=upgrade) instead. + - Use O(source) for installing from URLs or directories. + source: + type: str + description: + - Source for the package. This option is used when O(state=install) or O(state=latest), and it is ignored with other + states. + - Use O(source) when installing a Python package with version specifier, or from a local path, from a VCS URL or compressed + file. + - The value of this option is passed as-is to C(pipx). + - O(name) is still required when using O(source) to establish the application name without fetching the package from + a remote source. + - The module is not idempotent when using O(source). + install_apps: + description: + - Add apps from the injected packages. + - Only used when O(state=inject). + type: bool + default: false + version_added: 6.5.0 + install_deps: + description: + - Include applications of dependent packages. + - Only used when O(state=install), O(state=latest), or O(state=inject). + type: bool + default: false + inject_packages: + description: + - Packages to be injected into an existing virtual environment. + - Only used when O(state=inject). + type: list + elements: str + force: + description: + - Force modification of the application's virtual environment. See C(pipx) for details. + - Only used when O(state=install), O(state=upgrade), O(state=upgrade_all), O(state=latest), or O(state=inject). + - The module is not idempotent when O(force=true). + type: bool + default: false + include_injected: + description: + - Upgrade the injected packages along with the application. 
+ - Only used when O(state=upgrade), O(state=upgrade_all), or O(state=latest). + - This is used with O(state=upgrade) and O(state=latest) since community.general 6.6.0. + type: bool + default: false + index_url: + description: + - Base URL of Python Package Index. + - Only used when O(state=install), O(state=upgrade), O(state=latest), or O(state=inject). + type: str + python: + description: + - Python version to be used when creating the application virtual environment. Must be 3.6+. + - Only used when O(state=install), O(state=latest), O(state=reinstall), or O(state=reinstall_all). + type: str + system_site_packages: + description: + - Give application virtual environment access to the system site-packages directory. + - Only used when O(state=install) or O(state=latest). + type: bool + default: false + version_added: 6.6.0 + editable: + description: + - Install the project in editable mode. + type: bool + default: false + version_added: 4.6.0 + pip_args: + description: + - Arbitrary arguments to pass directly to C(pip). + type: str + version_added: 4.6.0 + suffix: + description: + - Optional suffix for virtual environment and executable names. + - B(Warning:) C(pipx) documentation states this is an B(experimental) feature subject to change. + type: str + version_added: 9.3.0 + global: + version_added: 9.4.0 + spec_metadata: + description: + - Spec metadata file for O(state=install_all). + - This content of the file is usually generated with C(pipx list --json), and it can be obtained with M(community.general.pipx_info) + with O(community.general.pipx_info#module:include_raw=true) and obtaining the content from the RV(community.general.pipx_info#module:raw_output). + type: path + version_added: 9.4.0 +requirements: + - When using O(name) with version specifiers, the Python package C(packaging) is required. + - If the package C(packaging) is at a version lesser than C(22.0.0), it fails silently when processing invalid specifiers, + like C(tox<<<<4.0). 
+author: + - "Alexei Znamensky (@russoz)" +""" + +EXAMPLES = r""" +- name: Install tox + community.general.pipx: + name: tox + +- name: Install tox from git repository + community.general.pipx: + name: tox + source: git+https://github.com/tox-dev/tox.git + +- name: Upgrade tox + community.general.pipx: + name: tox + state: upgrade + +- name: Install or upgrade tox with extra 'docs' + community.general.pipx: + name: tox + source: tox[docs] + state: latest + +- name: Reinstall black with specific Python version + community.general.pipx: + name: black + state: reinstall + python: 3.7 + +- name: Uninstall pycowsay + community.general.pipx: + name: pycowsay + state: absent + +- name: Install multiple packages from list + vars: + pipx_packages: + - pycowsay + - black + - tox + community.general.pipx: + name: "{{ item }}" + state: latest + with_items: "{{ pipx_packages }}" +""" + +RETURN = r""" +version: + description: Version of pipx. + type: str + returned: always + sample: "1.7.1" + version_added: 10.1.0 +""" + + +from ansible_collections.community.general.plugins.module_utils.module_helper import StateModuleHelper +from ansible_collections.community.general.plugins.module_utils.pipx import pipx_runner, pipx_common_argspec, make_process_dict +from ansible_collections.community.general.plugins.module_utils.pkg_req import PackageRequirement +from ansible_collections.community.general.plugins.module_utils.version import LooseVersion + +from ansible.module_utils.facts.compat import ansible_facts + + +def _make_name(name, suffix): + return name if suffix is None else "{0}{1}".format(name, suffix) + + +class PipX(StateModuleHelper): + output_params = ['name', 'source', 'index_url', 'force', 'installdeps'] + argument_spec = dict( + state=dict(type='str', default='install', + choices=[ + 'present', 'absent', 'install', 'install_all', 'uninstall', 'uninstall_all', 'inject', 'uninject', + 'upgrade', 'upgrade_shared', 'upgrade_all', 'reinstall', 'reinstall_all', 'latest', 'pin', 
'unpin', + ]), + name=dict(type='str'), + source=dict(type='str'), + install_apps=dict(type='bool', default=False), + install_deps=dict(type='bool', default=False), + inject_packages=dict(type='list', elements='str'), + force=dict(type='bool', default=False), + include_injected=dict(type='bool', default=False), + index_url=dict(type='str'), + python=dict(type='str'), + system_site_packages=dict(type='bool', default=False), + editable=dict(type='bool', default=False), + pip_args=dict(type='str'), + suffix=dict(type='str'), + spec_metadata=dict(type='path'), + ) + argument_spec.update(pipx_common_argspec) + + module = dict( + argument_spec=argument_spec, + required_if=[ + ('state', 'present', ['name']), + ('state', 'install', ['name']), + ('state', 'install_all', ['spec_metadata']), + ('state', 'absent', ['name']), + ('state', 'uninstall', ['name']), + ('state', 'upgrade', ['name']), + ('state', 'reinstall', ['name']), + ('state', 'latest', ['name']), + ('state', 'inject', ['name', 'inject_packages']), + ('state', 'pin', ['name']), + ('state', 'unpin', ['name']), + ], + required_by=dict( + suffix="name", + ), + supports_check_mode=True, + ) + + def _retrieve_installed(self): + output_process = make_process_dict(include_injected=True) + installed, dummy = self.runner('_list global', output_process=output_process).run() + + if self.app_name is None: + return installed + + return {k: v for k, v in installed.items() if k == self.app_name} + + def __init_module__(self): + if self.vars.executable: + self.command = [self.vars.executable] + else: + facts = ansible_facts(self.module, gather_subset=['python']) + self.command = [facts['python']['executable'], '-m', 'pipx'] + self.runner = pipx_runner(self.module, self.command) + + pkg_req = PackageRequirement(self.module, self.vars.name) + self.parsed_name = pkg_req.parsed_name + self.parsed_req = pkg_req.requirement + self.app_name = _make_name(self.parsed_name, self.vars.suffix) + + self.vars.set('application', 
self._retrieve_installed(), change=True, diff=True) + + with self.runner("version") as ctx: + rc, out, err = ctx.run() + self.vars.version = out.strip() + + if LooseVersion(self.vars.version) < LooseVersion("1.7.0"): + self.do_raise("The pipx tool must be at least at version 1.7.0") + + def __quit_module__(self): + self.vars.application = self._retrieve_installed() + + def _capture_results(self, ctx): + self.vars.stdout = ctx.results_out + self.vars.stderr = ctx.results_err + self.vars.cmd = ctx.cmd + self.vars.set('run_info', ctx.run_info, verbosity=4) + + def state_install(self): + # If we have a version spec and no source, use the version spec as source + if self.parsed_req and not self.vars.source: + self.vars.source = self.vars.name + + if self.vars.application.get(self.app_name): + is_installed = True + version_match = self.vars.application[self.app_name]['version'] in self.parsed_req.specifier if self.parsed_req else True + force = self.vars.force or (not version_match) + else: + is_installed = False + version_match = False + force = self.vars.force + + if is_installed and version_match and not force: + return + + self.changed = True + args_order = 'state global index_url install_deps force python system_site_packages editable pip_args suffix name_source' + with self.runner(args_order, check_mode_skip=True) as ctx: + ctx.run(name_source=[self.parsed_name, self.vars.source], force=force) + self._capture_results(ctx) + + state_present = state_install + + def state_install_all(self): + self.changed = True + with self.runner('state global index_url force python system_site_packages editable pip_args spec_metadata', check_mode_skip=True) as ctx: + ctx.run() + self._capture_results(ctx) + + def state_upgrade(self): + name = _make_name(self.vars.name, self.vars.suffix) + if not self.vars.application: + self.do_raise("Trying to upgrade a non-existent application: {0}".format(name)) + if self.vars.force: + self.changed = True + + with self.runner('state global 
include_injected index_url force editable pip_args name', check_mode_skip=True) as ctx: + ctx.run(name=name) + self._capture_results(ctx) + + def state_uninstall(self): + if self.vars.application: + name = _make_name(self.vars.name, self.vars.suffix) + with self.runner('state global name', check_mode_skip=True) as ctx: + ctx.run(name=name) + self._capture_results(ctx) + + state_absent = state_uninstall + + def state_reinstall(self): + name = _make_name(self.vars.name, self.vars.suffix) + if not self.vars.application: + self.do_raise("Trying to reinstall a non-existent application: {0}".format(name)) + self.changed = True + with self.runner('state global name python', check_mode_skip=True) as ctx: + ctx.run(name=name) + self._capture_results(ctx) + + def state_inject(self): + name = _make_name(self.vars.name, self.vars.suffix) + if not self.vars.application: + self.do_raise("Trying to inject packages into a non-existent application: {0}".format(name)) + if self.vars.force: + self.changed = True + with self.runner('state global index_url install_apps install_deps force editable pip_args name inject_packages', check_mode_skip=True) as ctx: + ctx.run(name=name) + self._capture_results(ctx) + + def state_uninject(self): + name = _make_name(self.vars.name, self.vars.suffix) + if not self.vars.application: + self.do_raise("Trying to uninject packages into a non-existent application: {0}".format(name)) + with self.runner('state global name inject_packages', check_mode_skip=True) as ctx: + ctx.run(name=name) + self._capture_results(ctx) + + def state_uninstall_all(self): + with self.runner('state global', check_mode_skip=True) as ctx: + ctx.run() + self._capture_results(ctx) + + def state_reinstall_all(self): + with self.runner('state global python', check_mode_skip=True) as ctx: + ctx.run() + self._capture_results(ctx) + + def state_upgrade_all(self): + if self.vars.force: + self.changed = True + with self.runner('state global include_injected force', check_mode_skip=True) 
as ctx: + ctx.run() + self._capture_results(ctx) + + def state_upgrade_shared(self): + with self.runner('state global pip_args', check_mode_skip=True) as ctx: + ctx.run() + self._capture_results(ctx) + + def state_latest(self): + if not self.vars.application or self.vars.force: + self.changed = True + args_order = 'state global index_url install_deps force python system_site_packages editable pip_args suffix name_source' + with self.runner(args_order, check_mode_skip=True) as ctx: + ctx.run(state='install', name_source=[self.vars.name, self.vars.source]) + self._capture_results(ctx) + + with self.runner('state global include_injected index_url force editable pip_args name', check_mode_skip=True) as ctx: + ctx.run(state='upgrade') + self._capture_results(ctx) + + def state_pin(self): + with self.runner('state global name', check_mode_skip=True) as ctx: + ctx.run() + self._capture_results(ctx) + + def state_unpin(self): + with self.runner('state global name', check_mode_skip=True) as ctx: + ctx.run() + self._capture_results(ctx) + + +def main(): + PipX.execute() + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/pipx_info.py b/plugins/modules/pipx_info.py new file mode 100644 index 0000000000..85d094c837 --- /dev/null +++ b/plugins/modules/pipx_info.py @@ -0,0 +1,197 @@ +#!/usr/bin/python + +# Copyright (c) 2021, Alexei Znamensky +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + + +DOCUMENTATION = r""" +module: pipx_info +short_description: Retrieves information about applications installed with pipx +version_added: 5.6.0 +description: + - Retrieve details about Python applications installed in isolated virtualenvs using pipx. 
+extends_documentation_fragment: + - community.general.attributes + - community.general.attributes.info_module + - community.general.pipx +options: + name: + description: + - Name of an application installed with C(pipx). + type: str + include_deps: + description: + - Include dependent packages in the output. + type: bool + default: false + include_injected: + description: + - Include injected packages in the output. + type: bool + default: false + include_raw: + description: + - Returns the raw output of C(pipx list --json). + - The raw output is not affected by O(include_deps) or O(include_injected). + type: bool + default: false + global: + version_added: 9.3.0 +author: + - "Alexei Znamensky (@russoz)" +""" + +EXAMPLES = r""" +- name: retrieve all installed applications + community.general.pipx_info: {} + +- name: retrieve all installed applications, include dependencies and injected packages + community.general.pipx_info: + include_deps: true + include_injected: true + +- name: retrieve application tox + community.general.pipx_info: + name: tox + include_deps: true + +- name: retrieve application ansible-lint, include dependencies + community.general.pipx_info: + name: ansible-lint + include_deps: true +""" + +RETURN = r""" +application: + description: The list of installed applications. + returned: success + type: list + elements: dict + contains: + name: + description: The name of the installed application. + returned: success + type: str + sample: "tox" + version: + description: The version of the installed application. + returned: success + type: str + sample: "3.24.0" + dependencies: + description: The dependencies of the installed application, when O(include_deps=true). + returned: success + type: list + elements: str + sample: ["virtualenv"] + injected: + description: The injected packages for the installed application, when O(include_injected=true). 
+ returned: success + type: dict + sample: + licenses: "0.6.1" + pinned: + description: + - Whether the installed application is pinned or not. + - When using C(pipx<=1.6.0), this returns C(null). + returned: success + type: bool + sample: + pinned: true + version_added: 10.0.0 + +raw_output: + description: The raw output of the C(pipx list) command, when O(include_raw=true). Used for debugging. + returned: success + type: dict + +cmd: + description: Command executed to obtain the list of installed applications. + returned: success + type: list + elements: str + sample: + [ + "/usr/bin/python3.10", + "-m", + "pipx", + "list", + "--include-injected", + "--json" + ] + +version: + description: Version of pipx. + type: str + returned: always + sample: "1.7.1" + version_added: 10.1.0 +""" + +from ansible_collections.community.general.plugins.module_utils.module_helper import ModuleHelper +from ansible_collections.community.general.plugins.module_utils.pipx import pipx_runner, pipx_common_argspec, make_process_dict +from ansible_collections.community.general.plugins.module_utils.version import LooseVersion + +from ansible.module_utils.facts.compat import ansible_facts + + +class PipXInfo(ModuleHelper): + output_params = ['name'] + argument_spec = dict( + name=dict(type='str'), + include_deps=dict(type='bool', default=False), + include_injected=dict(type='bool', default=False), + include_raw=dict(type='bool', default=False), + ) + argument_spec.update(pipx_common_argspec) + module = dict( + argument_spec=argument_spec, + supports_check_mode=True, + ) + + def __init_module__(self): + if self.vars.executable: + self.command = [self.vars.executable] + else: + facts = ansible_facts(self.module, gather_subset=['python']) + self.command = [facts['python']['executable'], '-m', 'pipx'] + self.runner = pipx_runner(self.module, self.command) + with self.runner("version") as ctx: + rc, out, err = ctx.run() + self.vars.version = out.strip() + + if LooseVersion(self.vars.version) < 
LooseVersion("1.7.0"): + self.do_raise("The pipx tool must be at least at version 1.7.0") + + def __run__(self): + output_process = make_process_dict(self.vars.include_injected, self.vars.include_deps) + with self.runner('_list global', output_process=output_process) as ctx: + applications, raw_data = ctx.run() + if self.vars.include_raw: + self.vars.raw_output = raw_data + + if self.vars.name: + self.vars.application = [ + v + for k, v in applications.items() + if k == self.vars.name + ] + else: + self.vars.application = list(applications.values()) + self._capture_results(ctx) + + def _capture_results(self, ctx): + self.vars.cmd = ctx.cmd + if self.verbosity >= 4: + self.vars.run_info = ctx.run_info + + +def main(): + PipXInfo.execute() + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/packaging/os/pkg5.py b/plugins/modules/pkg5.py similarity index 77% rename from plugins/modules/packaging/os/pkg5.py rename to plugins/modules/pkg5.py index 266c073f37..1055d9090f 100644 --- a/plugins/modules/packaging/os/pkg5.py +++ b/plugins/modules/pkg5.py @@ -1,42 +1,47 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- -# Copyright: (c) 2014, Peter Oliver -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# Copyright (c) 2014, Peter Oliver +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: pkg5 author: -- Peter Oliver (@mavit) + - Peter Oliver (@mavit) short_description: Manages packages with the Solaris 11 Image Packaging System description: - IPS packages are the native packages in Solaris 11 and higher. 
notes: - The naming of IPS packages is explained at U(http://www.oracle.com/technetwork/articles/servers-storage-admin/ips-package-versioning-2232906.html). +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: none options: name: description: - An FRMI of the package(s) to be installed/removed/updated. - - Multiple packages may be specified, separated by C(,). + - Multiple packages may be specified, separated by V(,). required: true type: list elements: str state: description: - - Whether to install (I(present), I(latest)), or remove (I(absent)) a package. - choices: [ absent, latest, present, installed, removed, uninstalled ] + - Whether to install (V(present), V(latest)), or remove (V(absent)) a package. + choices: [absent, latest, present, installed, removed, uninstalled] default: present type: str accept_licenses: description: - Accept any licences. type: bool - default: no - aliases: [ accept, accept_licences ] + default: false + aliases: [accept, accept_licences] be_name: description: - Creates a new boot environment with the given name. @@ -45,9 +50,15 @@ options: description: - Refresh publishers before execution. type: bool - default: yes -''' -EXAMPLES = ''' + default: true + verbose: + description: + - Set to V(true) to disable quiet execution. 
+ type: bool + default: false + version_added: 9.0.0 +""" +EXAMPLES = r""" - name: Install Vim community.general.pkg5: name: editor/vim @@ -55,7 +66,7 @@ EXAMPLES = ''' - name: Install Vim without refreshing publishers community.general.pkg5: name: editor/vim - refresh: no + refresh: false - name: Remove finger daemon community.general.pkg5: @@ -65,9 +76,9 @@ EXAMPLES = ''' - name: Install several packages at once community.general.pkg5: name: - - /file/gnu-findutils - - /text/gnu-grep -''' + - /file/gnu-findutils + - /text/gnu-grep +""" import re @@ -82,6 +93,7 @@ def main(): accept_licenses=dict(type='bool', default=False, aliases=['accept', 'accept_licences']), be_name=dict(type='str'), refresh=dict(type='bool', default=True), + verbose=dict(type='bool', default=False), ), supports_check_mode=True, ) @@ -148,9 +160,15 @@ def ensure(module, state, packages, params): else: no_refresh = ['--no-refresh'] + if params['verbose']: + verbosity = [] + else: + verbosity = ['-q'] + to_modify = list(filter(behaviour[state]['filter'], packages)) if to_modify: - rc, out, err = module.run_command(['pkg', behaviour[state]['subcommand']] + dry_run + accept_licenses + beadm + no_refresh + ['-q', '--'] + to_modify) + rc, out, err = module.run_command( + ['pkg', behaviour[state]['subcommand']] + dry_run + accept_licenses + beadm + no_refresh + verbosity + ['--'] + to_modify) response['rc'] = rc response['results'].append(out) response['msg'] += err diff --git a/plugins/modules/packaging/os/pkg5_publisher.py b/plugins/modules/pkg5_publisher.py similarity index 88% rename from plugins/modules/packaging/os/pkg5_publisher.py rename to plugins/modules/pkg5_publisher.py index 95d577655f..8ff9463c6b 100644 --- a/plugins/modules/packaging/os/pkg5_publisher.py +++ b/plugins/modules/pkg5_publisher.py @@ -1,40 +1,43 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- # Copyright 2014 Peter Oliver # -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# GNU 
General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: pkg5_publisher author: "Peter Oliver (@mavit)" short_description: Manages Solaris 11 Image Packaging System publishers description: - IPS packages are the native packages in Solaris 11 and higher. - - This modules will configure which publishers a client will download IPS - packages from. + - This module configures which publishers a client downloads IPS packages from. +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: none + diff_mode: + support: none options: name: description: - The publisher's name. required: true - aliases: [ publisher ] + aliases: [publisher] type: str state: description: - Whether to ensure that a publisher is present or absent. default: present - choices: [ present, absent ] + choices: [present, absent] type: str sticky: description: - - Packages installed from a sticky repository can only receive updates - from that repository. + - Packages installed from a sticky repository can only receive updates from that repository. type: bool enabled: description: @@ -52,8 +55,8 @@ options: - Multiple values may be provided. 
type: list elements: str -''' -EXAMPLES = ''' +""" +EXAMPLES = r""" - name: Fetch packages for the solaris publisher direct from Oracle community.general.pkg5_publisher: name: solaris @@ -64,7 +67,7 @@ EXAMPLES = ''' community.general.pkg5_publisher: name: site origin: 'https://pkg.example.com/site/' -''' +""" from ansible.module_utils.basic import AnsibleModule @@ -175,9 +178,7 @@ def get_publishers(module): name = values['publisher'] if name not in publishers: - publishers[name] = dict( - (k, values[k]) for k in ['sticky', 'enabled'] - ) + publishers[name] = {k: values[k] for k in ['sticky', 'enabled']} publishers[name]['origin'] = [] publishers[name]['mirror'] = [] diff --git a/plugins/modules/packaging/os/pkgin.py b/plugins/modules/pkgin.py similarity index 78% rename from plugins/modules/packaging/os/pkgin.py rename to plugins/modules/pkgin.py index dc7204e60d..e350f977ef 100644 --- a/plugins/modules/packaging/os/pkgin.py +++ b/plugins/modules/pkgin.py @@ -1,5 +1,4 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- # Copyright (c) 2013 Shaun Zinck # Copyright (c) 2015 Lawrence Leonard Gilbert @@ -9,69 +8,73 @@ # Based on pacman module written by Afterburn # that was based on apt module written by Matthew Williams # -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: pkgin -short_description: Package manager for SmartOS, NetBSD, et al. +short_description: Package manager for SmartOS, NetBSD, et al description: - - "The standard package manager for SmartOS, but also usable on NetBSD - or any OS that uses C(pkgsrc). 
(Home: U(http://pkgin.net/))" + - 'The standard package manager for SmartOS, but also usable on NetBSD or any OS that uses C(pkgsrc). (Home: U(http://pkgin.net/)).' author: - - "Larry Gilbert (@L2G)" - - "Shaun Zinck (@szinck)" - - "Jasper Lievisse Adriaanse (@jasperla)" + - "Larry Gilbert (@L2G)" + - "Shaun Zinck (@szinck)" + - "Jasper Lievisse Adriaanse (@jasperla)" notes: - - "Known bug with pkgin < 0.8.0: if a package is removed and another - package depends on it, the other package will be silently removed as - well. New to Ansible 1.9: check-mode support." + - 'Known bug with pkgin < 0.8.0: if a package is removed and another package depends on it, the other package is silently + removed as well.' +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: none options: - name: - description: - - Name of package to install/remove; - - multiple names may be given, separated by commas - aliases: [pkg] - type: list - elements: str - state: - description: - - Intended state of the package - choices: [ 'present', 'absent' ] - default: present - type: str - update_cache: - description: - - Update repository database. Can be run with other steps or on it's own. - type: bool - default: no - upgrade: - description: - - Upgrade main packages to their newer versions - type: bool - default: no - full_upgrade: - description: - - Upgrade all packages to their newer versions - type: bool - default: no - clean: - description: - - Clean packages cache - type: bool - default: no - force: - description: - - Force package reinstall - type: bool - default: no -''' + name: + description: + - Name of package to install/remove; + - Multiple names may be given, separated by commas. + aliases: [pkg] + type: list + elements: str + state: + description: + - Intended state of the package. + choices: ['present', 'absent'] + default: present + type: str + update_cache: + description: + - Update repository database. 
Can be run with other steps or on its own. + type: bool + default: false + upgrade: + description: + - Upgrade main packages to their newer versions. + type: bool + default: false + full_upgrade: + description: + - Upgrade all packages to their newer versions. + type: bool + default: false + clean: + description: + - Clean packages cache. + type: bool + default: false + force: + description: + - Force package reinstall. + type: bool + default: false +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: Install package foo community.general.pkgin: name: foo @@ -85,7 +88,7 @@ EXAMPLES = ''' - name: Update cache and install foo package community.general.pkgin: name: foo - update_cache: yes + update_cache: true - name: Remove package foo community.general.pkgin: @@ -99,25 +102,25 @@ EXAMPLES = ''' - name: Update repositories as a separate step community.general.pkgin: - update_cache: yes + update_cache: true - name: Upgrade main packages (equivalent to pkgin upgrade) community.general.pkgin: - upgrade: yes + upgrade: true - name: Upgrade all packages (equivalent to pkgin full-upgrade) community.general.pkgin: - full_upgrade: yes + full_upgrade: true - name: Force-upgrade all packages (equivalent to pkgin -F full-upgrade) community.general.pkgin: - full_upgrade: yes - force: yes + full_upgrade: true + force: true - name: Clean packages cache (equivalent to pkgin clean) community.general.pkgin: - clean: yes -''' + clean: true +""" import re @@ -137,18 +140,18 @@ def query_package(module, name): """ # test whether '-p' (parsable) flag is supported. - rc, out, err = module.run_command("%s -p -v" % PKGIN_PATH) + rc, out, err = module.run_command([PKGIN_PATH, "-p", "-v"]) if rc == 0: - pflag = '-p' + pflag = ['-p'] splitchar = ';' else: - pflag = '' + pflag = [] splitchar = ' ' # Use "pkgin search" to find the package. The regular expression will # only match on the complete name. 
- rc, out, err = module.run_command("%s %s search \"^%s$\"" % (PKGIN_PATH, pflag, name)) + rc, out, err = module.run_command([PKGIN_PATH] + pflag + ["search", "^%s$" % name]) # rc will not be 0 unless the search was a success if rc == 0: @@ -166,6 +169,13 @@ def query_package(module, name): # '<' - installed but out of date # '=' - installed and up to date # '>' - installed but newer than the repository version + + if (package in ('reading local summary...', + 'processing local summary...', + 'downloading pkg_summary.xz done.')) or \ + (package.startswith('processing remote summary (')): + continue + pkgname_with_version, raw_state = package.split(splitchar)[0:2] # Search for package, stripping version @@ -219,22 +229,19 @@ def format_pkgin_command(module, command, package=None): # an empty string. Some commands (e.g. 'update') will ignore extra # arguments, however this behaviour cannot be relied on for others. if package is None: - package = "" + packages = [] + else: + packages = [package] if module.params["force"]: - force = "-F" + force = ["-F"] else: - force = "" - - vars = {"pkgin": PKGIN_PATH, - "command": command, - "package": package, - "force": force} + force = [] if module.check_mode: - return "%(pkgin)s -n %(command)s %(package)s" % vars + return [PKGIN_PATH, "-n", command] + packages else: - return "%(pkgin)s -y %(force)s %(command)s %(package)s" % vars + return [PKGIN_PATH, "-y"] + force + [command] + packages def remove_packages(module, packages): @@ -309,7 +316,7 @@ def do_upgrade_packages(module, full=False): format_pkgin_command(module, cmd)) if rc == 0: - if re.search('^nothing to do.\n$', out): + if re.search('^(.*\n|)nothing to do.\n$', out): module.exit_json(changed=False, msg="nothing left to upgrade") else: module.fail_json(msg="could not %s packages" % cmd, stdout=out, stderr=err) diff --git a/plugins/modules/packaging/os/pkgng.py b/plugins/modules/pkgng.py similarity index 76% rename from plugins/modules/packaging/os/pkgng.py rename to 
plugins/modules/pkgng.py index ff7e45fa96..fe559940a7 100644 --- a/plugins/modules/packaging/os/pkgng.py +++ b/plugins/modules/pkgng.py @@ -1,112 +1,112 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- -# (c) 2013, bleader +# Copyright (c) 2013, bleader # Written by bleader # Based on pkgin module written by Shaun Zinck # that was based on pacman module written by Afterburn # that was based on apt module written by Matthew Williams # -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: pkgng short_description: Package manager for FreeBSD >= 9.0 description: - - Manage binary packages for FreeBSD using 'pkgng' which is available in versions after 9.0. + - Manage binary packages for FreeBSD using C(pkgng) which is available in versions after 9.0. +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: none options: - name: - description: - - Name or list of names of packages to install/remove. - - "With I(name=*), I(state: latest) will operate, but I(state: present) and I(state: absent) will be noops." - - > - Warning: In Ansible 2.9 and earlier this module had a misfeature - where I(name=*) with I(state: latest) or I(state: present) would - install every package from every package repository, filling up - the machines disk. Avoid using them unless you are certain that - your role will only be used with newer versions. - required: true - aliases: [pkg] - type: list - elements: str - state: - description: - - State of the package. 
- - 'Note: "latest" added in 2.7' - choices: [ 'present', 'latest', 'absent' ] - required: false - default: present - type: str - cached: - description: - - Use local package base instead of fetching an updated one. - type: bool - required: false - default: no - annotation: - description: - - A list of keyvalue-pairs of the form - C(<+/-/:>[=]). A C(+) denotes adding an annotation, a - C(-) denotes removing an annotation, and C(:) denotes modifying an - annotation. - If setting or modifying annotations, a value must be provided. - required: false - type: list - elements: str - pkgsite: - description: - - For pkgng versions before 1.1.4, specify packagesite to use - for downloading packages. If not specified, use settings from - C(/usr/local/etc/pkg.conf). - - For newer pkgng versions, specify a the name of a repository - configured in C(/usr/local/etc/pkg/repos). - required: false - type: str - rootdir: - description: - - For pkgng versions 1.5 and later, pkg will install all packages - within the specified root directory. - - Can not be used together with I(chroot) or I(jail) options. - required: false - type: path - chroot: - description: - - Pkg will chroot in the specified environment. - - Can not be used together with I(rootdir) or I(jail) options. - required: false - type: path - jail: - description: - - Pkg will execute in the given jail name or id. - - Can not be used together with I(chroot) or I(rootdir) options. - type: str - autoremove: - description: - - Remove automatically installed packages which are no longer needed. - required: false - type: bool - default: no - ignore_osver: - description: - - Ignore FreeBSD OS version check, useful on -STABLE and -CURRENT branches. - - Defines the C(IGNORE_OSVERSION) environment variable. - required: false - type: bool - default: no - version_added: 1.3.0 + name: + description: + - Name or list of names of packages to install/remove. 
+ - With O(name=*), O(state=latest) operates, but O(state=present) and O(state=absent) are noops. + required: true + aliases: [pkg] + type: list + elements: str + state: + description: + - State of the package. + choices: ['present', 'latest', 'absent'] + required: false + default: present + type: str + cached: + description: + - Use local package base instead of fetching an updated one. + type: bool + required: false + default: false + annotation: + description: + - A list of keyvalue-pairs of the form C(<+/-/:>[=]). A V(+) denotes adding an annotation, a V(-) denotes + removing an annotation, and V(:) denotes modifying an annotation. If setting or modifying annotations, a value must + be provided. + required: false + type: list + elements: str + pkgsite: + description: + - For C(pkgng) versions before 1.1.4, specify C(packagesite) to use for downloading packages. If not specified, use + settings from C(/usr/local/etc/pkg.conf). + - For newer C(pkgng) versions, specify a the name of a repository configured in C(/usr/local/etc/pkg/repos). + required: false + type: str + rootdir: + description: + - For C(pkgng) versions 1.5 and later, pkg installs all packages within the specified root directory. + - Can not be used together with O(chroot) or O(jail) options. + required: false + type: path + chroot: + description: + - Pkg chroots in the specified environment. + - Can not be used together with O(rootdir) or O(jail) options. + required: false + type: path + jail: + description: + - Pkg executes in the given jail name or ID. + - Can not be used together with O(chroot) or O(rootdir) options. + type: str + autoremove: + description: + - Remove automatically installed packages which are no longer needed. + required: false + type: bool + default: false + ignore_osver: + description: + - Ignore FreeBSD OS version check, useful on C(-STABLE) and C(-CURRENT) branches. + - Defines the E(IGNORE_OSVERSION) environment variable. 
+ required: false + type: bool + default: false + version_added: 1.3.0 + use_globs: + description: + - Treat the package names as shell glob patterns. + required: false + type: bool + default: true + version_added: 9.3.0 author: "bleader (@bleader)" notes: - - When using pkgsite, be careful that already in cache packages won't be downloaded again. - - When used with a `loop:` each package will be processed individually, - it is much more efficient to pass the list directly to the `name` option. -''' + - When using pkgsite, be careful that already in cache packages are not downloaded again. + - When used with a C(loop:) each package is processed individually, it is much more efficient to pass the list directly + to the O(name) option. +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: Install package foo community.general.pkgng: name: foo @@ -126,7 +126,6 @@ EXAMPLES = ''' - bar state: absent -# "latest" support added in 2.7 - name: Upgrade package baz community.general.pkgng: name: baz @@ -136,7 +135,13 @@ EXAMPLES = ''' community.general.pkgng: name: "*" state: latest -''' + +- name: Upgrade foo/bar + community.general.pkgng: + name: foo/bar + state: latest + use_globs: false +""" from collections import defaultdict @@ -146,12 +151,9 @@ from ansible.module_utils.basic import AnsibleModule def query_package(module, run_pkgng, name): - rc, out, err = run_pkgng('info', '-g', '-e', name) + rc, out, err = run_pkgng('info', '-e', name) - if rc == 0: - return True - - return False + return rc == 0 def query_update(module, run_pkgng, name): @@ -159,12 +161,9 @@ def query_update(module, run_pkgng, name): # Check to see if a package upgrade is available. 
# rc = 0, no updates available or package not installed # rc = 1, updates available - rc, out, err = run_pkgng('upgrade', '-g', '-n', name) + rc, out, err = run_pkgng('upgrade', '-n', name) - if rc == 1: - return True - - return False + return rc == 1 def pkgng_older_than(module, pkgng_path, compare_version): @@ -190,7 +189,7 @@ def upgrade_packages(module, run_pkgng): pkgng_args = ['upgrade'] pkgng_args.append('-n' if module.check_mode else '-y') - rc, out, err = run_pkgng(*pkgng_args) + rc, out, err = run_pkgng(*pkgng_args, check_rc=(not module.check_mode)) matches = re.findall('^Number of packages to be (?:upgraded|reinstalled): ([0-9]+)', out, re.MULTILINE) for match in matches: @@ -265,7 +264,7 @@ def install_packages(module, run_pkgng, packages, cached, state): action_count[action] += len(package_list) continue - pkgng_args = [action, '-g', '-U', '-y'] + package_list + pkgng_args = [action, '-U', '-y'] + package_list rc, out, err = run_pkgng(*pkgng_args) stdout += out stderr += err @@ -295,7 +294,7 @@ def install_packages(module, run_pkgng, packages, cached, state): def annotation_query(module, run_pkgng, package, tag): - rc, out, err = run_pkgng('info', '-g', '-A', package) + rc, out, err = run_pkgng('info', '-A', package) match = re.search(r'^\s*(?P%s)\s*:\s*(?P\w+)' % tag, out, flags=re.MULTILINE) if match: return match.group('value') @@ -421,16 +420,18 @@ def autoremove_packages(module, run_pkgng): def main(): module = AnsibleModule( argument_spec=dict( - state=dict(default="present", choices=["present", "latest", "absent"], required=False), + state=dict(default="present", choices=["present", "latest", "absent"]), name=dict(aliases=["pkg"], required=True, type='list', elements='str'), cached=dict(default=False, type='bool'), - ignore_osver=dict(default=False, required=False, type='bool'), - annotation=dict(required=False, type='list', elements='str'), - pkgsite=dict(required=False), - rootdir=dict(required=False, type='path'), - 
chroot=dict(required=False, type='path'), - jail=dict(required=False, type='str'), - autoremove=dict(default=False, type='bool')), + ignore_osver=dict(default=False, type='bool'), + annotation=dict(type='list', elements='str'), + pkgsite=dict(), + rootdir=dict(type='path'), + chroot=dict(type='path'), + jail=dict(type='str'), + autoremove=dict(default=False, type='bool'), + use_globs=dict(default=True, type='bool'), + ), supports_check_mode=True, mutually_exclusive=[["rootdir", "chroot", "jail"]]) @@ -471,6 +472,9 @@ def main(): def run_pkgng(action, *args, **kwargs): cmd = [pkgng_path, dir_arg, action] + if p["use_globs"] and action in ('info', 'install', 'upgrade',): + args = ('-g',) + args + pkgng_env = {'BATCH': 'yes'} if p["ignore_osver"]: diff --git a/plugins/modules/packaging/os/pkgutil.py b/plugins/modules/pkgutil.py similarity index 79% rename from plugins/modules/packaging/os/pkgutil.py rename to plugins/modules/pkgutil.py index 0f1daca4ef..3d4616bbcb 100644 --- a/plugins/modules/packaging/os/pkgutil.py +++ b/plugins/modules/pkgutil.py @@ -1,66 +1,72 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- -# Copyright: (c) 2013, Alexander Winkler +# Copyright (c) 2013, Alexander Winkler # based on svr4pkg by # Boyd Adamson (2012) # -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = r''' ---- +DOCUMENTATION = r""" module: pkgutil short_description: OpenCSW package management on Solaris description: -- This module installs, updates and removes packages from the OpenCSW project for Solaris. -- Unlike the M(community.general.svr4pkg) module, it will resolve and download dependencies. 
-- See U(https://www.opencsw.org/) for more information about the project. + - This module installs, updates and removes packages from the OpenCSW project for Solaris. + - Unlike the M(community.general.svr4pkg) module, it resolves and downloads dependencies. + - See U(https://www.opencsw.org/) for more information about the project. author: -- Alexander Winkler (@dermute) -- David Ponessa (@scathatheworm) + - Alexander Winkler (@dermute) + - David Ponessa (@scathatheworm) +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + details: + - In order to check the availability of packages, the catalog cache under C(/var/opt/csw/pkgutil) may be refreshed even + in check mode. + diff_mode: + support: none options: name: description: - - The name of the package. - - When using C(state=latest), this can be C('*'), which updates all installed packages managed by pkgutil. + - The name of the package. + - When using O(state=latest), this can be V('*'), which updates all installed packages managed by pkgutil. type: list required: true elements: str - aliases: [ pkg ] + aliases: [pkg] site: description: - - The repository path to install the package from. - - Its global definition is in C(/etc/opt/csw/pkgutil.conf). + - The repository path to install the package from. + - Its global definition is in C(/etc/opt/csw/pkgutil.conf). required: false type: str state: description: - - Whether to install (C(present)/C(installed)), or remove (C(absent)/C(removed)) packages. - - The upgrade (C(latest)) operation will update/install the packages to the latest version available. + - Whether to install (V(present)/V(installed)), or remove (V(absent)/V(removed)) packages. + - The upgrade (V(latest)) operation updates/installs the packages to the latest version available. 
type: str required: true - choices: [ absent, installed, latest, present, removed ] + choices: [absent, installed, latest, present, removed] update_catalog: description: - - If you always want to refresh your catalog from the mirror, even when it's not stale, set this to C(yes). + - If you always want to refresh your catalog from the mirror, even when it is not stale, set this to V(true). type: bool - default: no + default: false force: description: - - To allow the update process to downgrade packages to match what is present in the repository, set this to C(yes). - - This is useful for rolling back to stable from testing, or similar operations. + - To allow the update process to downgrade packages to match what is present in the repository, set this to V(true). + - This is useful for rolling back to stable from testing, or similar operations. type: bool default: false version_added: 1.2.0 -notes: -- In order to check the availability of packages, the catalog cache under C(/var/opt/csw/pkgutil) may be refreshed even in check mode. 
-''' +""" -EXAMPLES = r''' +EXAMPLES = r""" - name: Install a package community.general.pkgutil: name: CSWcommon @@ -80,8 +86,8 @@ EXAMPLES = r''' - name: Install several packages community.general.pkgutil: name: - - CSWsudo - - CSWtop + - CSWsudo + - CSWtop state: present - name: Update all packages @@ -93,10 +99,10 @@ EXAMPLES = r''' community.general.pkgutil: name: '*' state: latest - force: yes -''' + force: true +""" -RETURN = r''' # ''' +RETURN = r""" # """ from ansible.module_utils.basic import AnsibleModule diff --git a/plugins/modules/storage/pmem/pmem.py b/plugins/modules/pmem.py similarity index 81% rename from plugins/modules/storage/pmem/pmem.py rename to plugins/modules/pmem.py index b91bab5fad..527c94cb98 100644 --- a/plugins/modules/storage/pmem/pmem.py +++ b/plugins/modules/pmem.py @@ -1,54 +1,58 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- # Copyright (c) 2022, Masayoshi Mizuma -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = r''' ---- +DOCUMENTATION = r""" author: - - Masayoshi Mizuma (@mizumm) + - Masayoshi Mizuma (@mizumm) module: pmem short_description: Configure Intel Optane Persistent Memory modules version_added: 4.5.0 description: - - This module allows Configuring Intel Optane Persistent Memory modules - (PMem) using ipmctl and ndctl command line tools. + - This module allows Configuring Intel Optane Persistent Memory modules (PMem) using C(ipmctl) and C(ndctl) command line + tools. 
requirements: - - ipmctl and ndctl command line tools - - xmltodict + - C(ipmctl) and C(ndctl) command line tools + - xmltodict +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: none + diff_mode: + support: none options: appdirect: description: - - Percentage of the total capacity to use in AppDirect Mode (C(0)-C(100)). - - Create AppDirect capacity utilizing hardware interleaving across the - requested PMem modules if applicable given the specified target. - - Total of I(appdirect), I(memorymode) and I(reserved) must be C(100) + - Percentage of the total capacity to use in AppDirect Mode (V(0)-V(100)). + - Create AppDirect capacity utilizing hardware interleaving across the requested PMem modules if applicable given the + specified target. + - Total of O(appdirect), O(memorymode) and O(reserved) must be V(100). type: int appdirect_interleaved: description: - - Create AppDirect capacity that is interleaved any other PMem modules. + - Create AppDirect capacity that is interleaved any other PMem modules. type: bool required: false default: true memorymode: description: - - Percentage of the total capacity to use in Memory Mode (C(0)-C(100)). + - Percentage of the total capacity to use in Memory Mode (V(0)-V(100)). type: int reserved: description: - - Percentage of the capacity to reserve (C(0)-C(100)). I(reserved) will not be mapped - into the system physical address space and will be presented as reserved - capacity with Show Device and Show Memory Resources Commands. - - I(reserved) will be set automatically if this is not configured. + - Percentage of the capacity to reserve (V(0)-V(100)). O(reserved) is not mapped into the system physical address space + and is presented as reserved capacity with Show Device and Show Memory Resources Commands. + - O(reserved) is set automatically if this is not configured. 
type: int required: false socket: description: - - This enables to set the configuration for each socket by using the socket ID. - - Total of I(appdirect), I(memorymode) and I(reserved) must be C(100) within one socket. + - This enables to set the configuration for each socket by using the socket ID. + - Total of O(appdirect), O(memorymode) and O(reserved) must be V(100) within one socket. type: list elements: dict suboptions: @@ -58,106 +62,107 @@ options: required: true appdirect: description: - - Percentage of the total capacity to use in AppDirect Mode (C(0)-C(100)) within the socket ID. + - Percentage of the total capacity to use in AppDirect Mode (V(0)-V(100)) within the socket ID. type: int required: true appdirect_interleaved: description: - - Create AppDirect capacity that is interleaved any other PMem modules within the socket ID. + - Create AppDirect capacity that is interleaved any other PMem modules within the socket ID. type: bool required: false default: true memorymode: description: - - Percentage of the total capacity to use in Memory Mode (C(0)-C(100)) within the socket ID. + - Percentage of the total capacity to use in Memory Mode (V(0)-V(100)) within the socket ID. type: int required: true reserved: description: - - Percentage of the capacity to reserve (C(0)-C(100)) within the socket ID. + - Percentage of the capacity to reserve (V(0)-V(100)) within the socket ID. type: int namespace: description: - - This enables to set the configuration for the namespace of the PMem. + - This enables to set the configuration for the namespace of the PMem. type: list elements: dict suboptions: mode: description: - - The mode of namespace. The detail of the mode is in the man page of ndctl-create-namespace. + - The mode of namespace. The detail of the mode is in the man page of ndctl-create-namespace. type: str required: true choices: ['raw', 'sector', 'fsdax', 'devdax'] type: description: - - The type of namespace. 
The detail of the type is in the man page of ndctl-create-namespace. + - The type of namespace. The detail of the type is in the man page of ndctl-create-namespace. type: str required: false choices: ['pmem', 'blk'] size: description: - - The size of namespace. This option supports the suffixes C(k) or C(K) or C(KB) for KiB, - C(m) or C(M) or C(MB) for MiB, C(g) or C(G) or C(GB) for GiB and C(t) or C(T) or C(TB) for TiB. + - The size of namespace. This option supports the suffixes V(k) or V(K) or V(KB) for KiB, V(m) or V(M) or V(MB) + for MiB, V(g) or V(G) or V(GB) for GiB and V(t) or V(T) or V(TB) for TiB. - This option is required if multiple namespaces are configured. - - If this option is not set, all of the avaiable space of a region is configured. + - If this option is not set, all of the available space of a region is configured. type: str required: false namespace_append: description: - - Enable to append the new namespaces to the system. - - The default is C(false) so the all existing namespaces not listed in I(namespace) are removed. + - Enable to append the new namespaces to the system. + - The default is V(false) so the all existing namespaces not listed in O(namespace) are removed. type: bool default: false required: false -''' +""" -RETURN = r''' +RETURN = r""" reboot_required: - description: Indicates that the system reboot is required to complete the PMem configuration. - returned: success - type: bool - sample: True + description: Indicates that the system reboot is required to complete the PMem configuration. + returned: success + type: bool + sample: true result: - description: - - Shows the value of AppDirect, Memory Mode and Reserved size in bytes. - - If I(socket) argument is provided, shows the values in each socket with C(socket) which contains the socket ID. - - If I(namespace) argument is provided, shows the detail of each namespace. 
- returned: success - type: list - elements: dict - contains: - appdirect: - description: AppDirect size in bytes. - type: int - memorymode: - description: Memory Mode size in bytes. - type: int - reserved: - description: Reserved size in bytes. - type: int - socket: - description: The socket ID to be configured. - type: int - namespace: - description: The list of the detail of namespace. - type: list - sample: [ - { - "appdirect": 111669149696, - "memorymode": 970662608896, - "reserved": 3626500096, - "socket": 0 - }, - { - "appdirect": 111669149696, - "memorymode": 970662608896, - "reserved": 3626500096, - "socket": 1 - } - ] -''' + description: + - Shows the value of AppDirect, Memory Mode and Reserved size in bytes. + - If O(socket) argument is provided, shows the values in each socket with C(socket) which contains the socket ID. + - If O(namespace) argument is provided, shows the detail of each namespace. + returned: success + type: list + elements: dict + contains: + appdirect: + description: AppDirect size in bytes. + type: int + memorymode: + description: Memory Mode size in bytes. + type: int + reserved: + description: Reserved size in bytes. + type: int + socket: + description: The socket ID to be configured. + type: int + namespace: + description: The list of the detail of namespace. + type: list + sample: + [ + { + "appdirect": 111669149696, + "memorymode": 970662608896, + "reserved": 3626500096, + "socket": 0 + }, + { + "appdirect": 111669149696, + "memorymode": 970662608896, + "reserved": 3626500096, + "socket": 1 + } + ] +""" -EXAMPLES = r''' +EXAMPLES = r""" - name: Configure the Pmem as AppDirect 10, Memory Mode 70, and the Reserved 20 percent. community.general.pmem: appdirect: 10 @@ -172,7 +177,7 @@ EXAMPLES = r''' - name: Configure the Pmem as AppDirect with not interleaved 10, Memory Mode 70, and the Reserved 20 percent. 
community.general.pmem: appdirect: 10 - appdirect_interleaved: False + appdirect_interleaved: false memorymode: 70 - name: Configure the Pmem each socket. @@ -180,7 +185,7 @@ EXAMPLES = r''' socket: - id: 0 appdirect: 10 - appdirect_interleaved: False + appdirect_interleaved: false memorymode: 70 reserved: 20 - id: 1 @@ -197,7 +202,7 @@ EXAMPLES = r''' - size: 320MB type: pmem mode: sector -''' +""" import json import re @@ -211,6 +216,7 @@ except ImportError: XMLTODICT_LIBRARY_IMPORT_ERROR = traceback.format_exc() else: HAS_XMLTODICT_LIBRARY = True + XMLTODICT_LIBRARY_IMPORT_ERROR = None class PersistentMemory(object): @@ -529,7 +535,7 @@ class PersistentMemory(object): out = xmltodict.parse(goal, dict_constructor=dict)['ConfigGoalList']['ConfigGoal'] for entry in out: - # Probably it's a bug of ipmctl to show the socket goal + # Probably it is a bug of ipmctl to show the socket goal # which isn't specified by the -socket option. # Anyway, filter the noise out here: if skt and skt['id'] != int(entry['SocketID'], 16): diff --git a/plugins/modules/pnpm.py b/plugins/modules/pnpm.py new file mode 100644 index 0000000000..8d11c83077 --- /dev/null +++ b/plugins/modules/pnpm.py @@ -0,0 +1,457 @@ +#!/usr/bin/python + +# Copyright (c) 2023 Aritra Sen +# Copyright (c) 2017 Chris Hoffman +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + +DOCUMENTATION = r""" +module: pnpm +short_description: Manage Node.js packages with C(pnpm) +version_added: 7.4.0 +description: + - Manage Node.js packages with the L(pnpm package manager, https://pnpm.io/). 
+author: + - "Aritra Sen (@aretrosen)" + - "Chris Hoffman (@chrishoffman), creator of NPM Ansible module" +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: none +options: + name: + description: + - The name of a Node.js library to install. + - All packages in C(package.json) are installed if not provided. + type: str + required: false + alias: + description: + - Alias of the Node.js library. + type: str + required: false + path: + description: + - The base path to install the Node.js libraries. + type: path + required: false + version: + description: + - The version of the library to be installed, in semver format. + type: str + required: false + global: + description: + - Install the Node.js library globally. + required: false + default: false + type: bool + executable: + description: + - The executable location for pnpm. + - The default location it searches for is E(PATH), fails if not set. + type: path + required: false + ignore_scripts: + description: + - Use the C(--ignore-scripts) flag when installing. + required: false + type: bool + default: false + no_optional: + description: + - Do not install optional packages, equivalent to C(--no-optional). + required: false + type: bool + default: false + production: + description: + - Install dependencies in production mode. + - Pnpm ignores any dependencies under C(devDependencies) in package.json. + required: false + type: bool + default: false + dev: + description: + - Install dependencies in development mode. + - Pnpm ignores any regular dependencies in C(package.json). + required: false + default: false + type: bool + optional: + description: + - Install dependencies in optional mode. + required: false + default: false + type: bool + state: + description: + - Installation state of the named Node.js library. + - If V(absent) is selected, a name option must be provided. 
+ type: str + required: false + default: present + choices: ["present", "absent", "latest"] +requirements: + - Pnpm executable present in E(PATH). +""" + +EXAMPLES = r""" +- name: Install "tailwindcss" Node.js package. + community.general.pnpm: + name: tailwindcss + path: /app/location + +- name: Install "tailwindcss" Node.js package on version 3.3.2 + community.general.pnpm: + name: tailwindcss + version: 3.3.2 + path: /app/location + +- name: Install "tailwindcss" Node.js package globally. + community.general.pnpm: + name: tailwindcss + global: true + +- name: Install "tailwindcss" Node.js package as dev dependency. + community.general.pnpm: + name: tailwindcss + path: /app/location + dev: true + +- name: Install "tailwindcss" Node.js package as optional dependency. + community.general.pnpm: + name: tailwindcss + path: /app/location + optional: true + +- name: Install "tailwindcss" Node.js package version 0.1.3 as tailwind-1 + community.general.pnpm: + name: tailwindcss + alias: tailwind-1 + version: 0.1.3 + path: /app/location + +- name: Remove the globally-installed package "tailwindcss". + community.general.pnpm: + name: tailwindcss + global: true + state: absent + +- name: Install packages based on package.json. + community.general.pnpm: + path: /app/location + +- name: Update all packages in package.json to their latest version. 
+ community.general.pnpm: + path: /app/location + state: latest +""" + +import json +import os + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.common.text.converters import to_native + + +class Pnpm(object): + def __init__(self, module, **kwargs): + self.module = module + self.name = kwargs["name"] + self.alias = kwargs["alias"] + self.version = kwargs["version"] + self.path = kwargs["path"] + self.globally = kwargs["globally"] + self.executable = kwargs["executable"] + self.ignore_scripts = kwargs["ignore_scripts"] + self.no_optional = kwargs["no_optional"] + self.production = kwargs["production"] + self.dev = kwargs["dev"] + self.optional = kwargs["optional"] + + self.alias_name_ver = None + + if self.alias is not None: + self.alias_name_ver = self.alias + "@npm:" + + if self.name is not None: + self.alias_name_ver = (self.alias_name_ver or "") + self.name + if self.version is not None: + self.alias_name_ver = self.alias_name_ver + "@" + str(self.version) + else: + self.alias_name_ver = self.alias_name_ver + "@latest" + + def _exec(self, args, run_in_check_mode=False, check_rc=True): + if not self.module.check_mode or (self.module.check_mode and run_in_check_mode): + cmd = self.executable + args + + if self.globally: + cmd.append("-g") + + if self.ignore_scripts: + cmd.append("--ignore-scripts") + + if self.no_optional: + cmd.append("--no-optional") + + if self.production: + cmd.append("-P") + + if self.dev: + cmd.append("-D") + + if self.name and self.optional: + cmd.append("-O") + + # If path is specified, cd into that path and run the command. 
+ cwd = None + if self.path: + if not os.path.exists(self.path): + os.makedirs(self.path) + + if not os.path.isdir(self.path): + self.module.fail_json(msg="Path %s is not a directory" % self.path) + + if not self.alias_name_ver and not os.path.isfile( + os.path.join(self.path, "package.json") + ): + self.module.fail_json( + msg="package.json does not exist in provided path" + ) + + cwd = self.path + + _rc, out, err = self.module.run_command(cmd, check_rc=check_rc, cwd=cwd) + return out, err + + return None, None + + def missing(self): + if not os.path.isfile(os.path.join(self.path, "pnpm-lock.yaml")): + return True + + cmd = ["list", "--json"] + + if self.name is not None: + cmd.append(self.name) + + try: + out, err = self._exec(cmd, True, False) + if err is not None and err != "": + raise Exception(out) + + data = json.loads(out) + except Exception as e: + self.module.fail_json( + msg="Failed to parse pnpm output with error %s" % to_native(e) + ) + + if "error" in data: + return True + + data = data[0] + + for typedep in [ + "dependencies", + "devDependencies", + "optionalDependencies", + "unsavedDependencies", + ]: + if typedep not in data: + continue + + for dep, prop in data[typedep].items(): + if self.alias is not None and self.alias != dep: + continue + + name = prop["from"] if self.alias is not None else dep + if self.name != name: + continue + + if self.version is None or self.version == prop["version"]: + return False + + break + + return True + + def install(self): + if self.alias_name_ver is not None: + return self._exec(["add", self.alias_name_ver]) + return self._exec(["install"]) + + def update(self): + return self._exec(["update", "--latest"]) + + def uninstall(self): + if self.alias is not None: + return self._exec(["remove", self.alias]) + return self._exec(["remove", self.name]) + + def list_outdated(self): + if not os.path.isfile(os.path.join(self.path, "pnpm-lock.yaml")): + return list() + + cmd = ["outdated", "--format", "json"] + try: + out, 
err = self._exec(cmd, True, False) + + # BUG: It will not show correct error sometimes, like when it has + # plain text output intermingled with a {} + if err is not None and err != "": + raise Exception(out) + + # HACK: To fix the above bug, the following hack is implemented + data_lines = out.splitlines(True) + + out = None + for line in data_lines: + if len(line) > 0 and line[0] == "{": + out = line + continue + + if len(line) > 0 and line[0] == "}": + out += line + break + + if out is not None: + out += line + + data = json.loads(out) + except Exception as e: + self.module.fail_json( + msg="Failed to parse pnpm output with error %s" % to_native(e) + ) + + return data.keys() + + +def main(): + arg_spec = dict( + name=dict(), + alias=dict(), + path=dict(type="path"), + version=dict(), + executable=dict(type="path"), + ignore_scripts=dict(default=False, type="bool"), + no_optional=dict(default=False, type="bool"), + production=dict(default=False, type="bool"), + dev=dict(default=False, type="bool"), + optional=dict(default=False, type="bool"), + state=dict(default="present", choices=["present", "absent", "latest"]), + ) + arg_spec["global"] = dict(default=False, type="bool") + module = AnsibleModule(argument_spec=arg_spec, supports_check_mode=True) + + name = module.params["name"] + alias = module.params["alias"] + path = module.params["path"] + version = module.params["version"] + globally = module.params["global"] + ignore_scripts = module.params["ignore_scripts"] + no_optional = module.params["no_optional"] + production = module.params["production"] + dev = module.params["dev"] + optional = module.params["optional"] + state = module.params["state"] + + if module.params["executable"]: + executable = module.params["executable"].split(" ") + else: + executable = [module.get_bin_path("pnpm", True)] + + if name is None and version is not None: + module.fail_json(msg="version is meaningless when name is not provided") + + if name is None and alias is not None: + 
module.fail_json(msg="alias is meaningless when name is not provided") + + if path is None and not globally: + module.fail_json(msg="path must be specified when not using global") + elif path is not None and globally: + module.fail_json(msg="Cannot specify path when doing global installation") + + if globally and (production or dev or optional): + module.fail_json( + msg="Options production, dev, and optional is meaningless when installing packages globally" + ) + + if name is not None and path is not None and globally: + module.fail_json(msg="path should not be mentioned when installing globally") + + if production and dev and optional: + module.fail_json( + msg="Options production and dev and optional don't go together" + ) + + if production and dev: + module.fail_json(msg="Options production and dev don't go together") + + if production and optional: + module.fail_json(msg="Options production and optional don't go together") + + if dev and optional: + module.fail_json(msg="Options dev and optional don't go together") + + if name is not None and name[0:4] == "http" and version is not None: + module.fail_json(msg="Semver not supported on remote url downloads") + + if name is None and optional: + module.fail_json( + msg="Optional not available when package name not provided, use no_optional instead" + ) + + if state == "absent" and name is None: + module.fail_json(msg="Package name is required for uninstalling") + + if globally: + _rc, out, _err = module.run_command(executable + ["root", "-g"], check_rc=True) + path, _tail = os.path.split(out.strip()) + + pnpm = Pnpm( + module, + name=name, + alias=alias, + path=path, + version=version, + globally=globally, + executable=executable, + ignore_scripts=ignore_scripts, + no_optional=no_optional, + production=production, + dev=dev, + optional=optional, + ) + + changed = False + out = "" + err = "" + if state == "present": + if pnpm.missing(): + changed = True + out, err = pnpm.install() + elif state == "latest": + 
outdated = pnpm.list_outdated() + if name is not None: + if pnpm.missing() or name in outdated: + changed = True + out, err = pnpm.install() + elif len(outdated): + changed = True + out, err = pnpm.update() + else: # absent + if not pnpm.missing(): + changed = True + out, err = pnpm.uninstall() + + module.exit_json(changed=changed, out=out, err=err) + + +if __name__ == "__main__": + main() diff --git a/plugins/modules/packaging/os/portage.py b/plugins/modules/portage.py similarity index 68% rename from plugins/modules/packaging/os/portage.py rename to plugins/modules/portage.py index 2a8679dbbd..752960c042 100644 --- a/plugins/modules/packaging/os/portage.py +++ b/plugins/modules/portage.py @@ -1,187 +1,207 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- -# (c) 2016, William L Thomson Jr -# (c) 2013, Yap Sok Ann +# Copyright (c) 2016, William L Thomson Jr +# Copyright (c) 2013, Yap Sok Ann # Written by Yap Sok Ann # Modified by William L. Thomson Jr. # Based on apt module written by Matthew Williams # -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: portage short_description: Package manager for Gentoo description: - - Manages Gentoo packages + - Manages Gentoo packages. +extends_documentation_fragment: + - community.general.attributes + +attributes: + check_mode: + support: full + diff_mode: + support: none options: package: description: - - Package atom or set, e.g. C(sys-apps/foo) or C(>foo-2.13) or C(@world) + - Package atom or set, for example V(sys-apps/foo) or V(>foo-2.13) or V(@world). 
aliases: [name] type: list elements: str state: description: - - State of the package atom + - State of the package atom. default: "present" - choices: [ "present", "installed", "emerged", "absent", "removed", "unmerged", "latest" ] + choices: ["present", "installed", "emerged", "absent", "removed", "unmerged", "latest"] type: str update: description: - - Update packages to the best version available (--update) + - Update packages to the best version available (C(--update)). type: bool - default: no + default: false + + backtrack: + description: + - Set backtrack value (C(--backtrack)). + type: int + version_added: 5.8.0 deep: description: - - Consider the entire dependency tree of packages (--deep) + - Consider the entire dependency tree of packages (C(--deep)). type: bool - default: no + default: false newuse: description: - - Include installed packages where USE flags have changed (--newuse) + - Include installed packages where USE flags have changed (C(--newuse)). type: bool - default: no + default: false changed_use: description: - - Include installed packages where USE flags have changed, except when - - flags that the user has not enabled are added or removed - - (--changed-use) + - Include installed packages where USE flags have changed, except when. + - Flags that the user has not enabled are added or removed. + - (C(--changed-use)). type: bool - default: no + default: false oneshot: description: - - Do not add the packages to the world file (--oneshot) + - Do not add the packages to the world file (C(--oneshot)). type: bool - default: no + default: false noreplace: description: - - Do not re-emerge installed packages (--noreplace) + - Do not re-emerge installed packages (C(--noreplace)). type: bool - default: yes + default: true nodeps: description: - - Only merge packages but not their dependencies (--nodeps) + - Only merge packages but not their dependencies (C(--nodeps)). 
type: bool - default: no + default: false onlydeps: description: - - Only merge packages' dependencies but not the packages (--onlydeps) + - Only merge packages' dependencies but not the packages (C(--onlydeps)). type: bool - default: no + default: false depclean: description: - - Remove packages not needed by explicitly merged packages (--depclean) - - If no package is specified, clean up the world's dependencies - - Otherwise, --depclean serves as a dependency aware version of --unmerge + - Remove packages not needed by explicitly merged packages (C(--depclean)). + - If no package is specified, clean up the world's dependencies. + - Otherwise, C(--depclean) serves as a dependency aware version of C(--unmerge). type: bool - default: no + default: false quiet: description: - - Run emerge in quiet mode (--quiet) + - Run emerge in quiet mode (C(--quiet)). type: bool - default: no + default: false verbose: description: - - Run emerge in verbose mode (--verbose) + - Run emerge in verbose mode (C(--verbose)). type: bool - default: no + default: false + + select: + description: + - If set to V(true), explicitely add the package to the world file. + - Please note that this option is not used for idempotency, it is only used when actually installing a package. + type: bool + version_added: 8.6.0 sync: description: - - Sync package repositories first - - If yes, perform "emerge --sync" - - If web, perform "emerge-webrsync" - choices: [ "web", "yes", "no" ] + - Sync package repositories first. + - If V(yes), perform C(emerge --sync). + - If V(web), perform C(emerge-webrsync). + choices: ["web", "yes", "no"] type: str getbinpkgonly: description: - Merge only packages specified at C(PORTAGE_BINHOST) in C(make.conf). type: bool - default: no + default: false version_added: 1.3.0 getbinpkg: description: - Prefer packages specified at C(PORTAGE_BINHOST) in C(make.conf). type: bool - default: no + default: false usepkgonly: description: - Merge only binaries (no compiling). 
type: bool - default: no + default: false usepkg: description: - Tries to use the binary package(s) in the locally available packages directory. type: bool - default: no + default: false keepgoing: description: - Continue as much as possible after an error. type: bool - default: no + default: false jobs: description: - Specifies the number of packages to build simultaneously. - - "Since version 2.6: Value of 0 or False resets any previously added" - - --jobs setting values + - 'Since version 2.6: Value of V(0) or V(false) resets any previously added C(--jobs) setting values.' type: int loadavg: description: - - Specifies that no new builds should be started if there are - - other builds running and the load average is at least LOAD - - "Since version 2.6: Value of 0 or False resets any previously added" - - --load-average setting values + - Specifies that no new builds should be started if there are other builds running and the load average is at least + LOAD. + - 'Since version 2.6: Value of 0 or False resets any previously added C(--load-average) setting values.' type: float + withbdeps: + description: + - Specifies that build time dependencies should be installed. + type: bool + version_added: 5.8.0 + quietbuild: description: - - Redirect all build output to logs alone, and do not display it - - on stdout (--quiet-build) + - Redirect all build output to logs alone, and do not display it on stdout (C(--quiet-build)). type: bool - default: no + default: false quietfail: description: - - Suppresses display of the build log on stdout (--quiet-fail) - - Only the die message and the path of the build log will be - - displayed on stdout. + - Suppresses display of the build log on stdout (--quiet-fail). + - Only the die message and the path of the build log are displayed on stdout. 
type: bool - default: no + default: false -requirements: [ gentoolkit ] author: - - "William L Thomson Jr (@wltjr)" - - "Yap Sok Ann (@sayap)" - - "Andrew Udvare (@Tatsh)" -''' + - "William L Thomson Jr (@wltjr)" + - "Yap Sok Ann (@sayap)" + - "Andrew Udvare (@Tatsh)" +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: Make sure package foo is installed community.general.portage: package: foo @@ -195,43 +215,56 @@ EXAMPLES = ''' - name: Update package foo to the latest version (os specific alternative to latest) community.general.portage: package: foo - update: yes + update: true - name: Install package foo using PORTAGE_BINHOST setup community.general.portage: package: foo - getbinpkg: yes + getbinpkg: true - name: Re-install world from binary packages only and do not allow any compiling community.general.portage: package: '@world' - usepkgonly: yes + usepkgonly: true - name: Sync repositories and update world community.general.portage: package: '@world' - update: yes - deep: yes - sync: yes + update: true + deep: true + sync: true - name: Remove unneeded packages community.general.portage: - depclean: yes + depclean: true - name: Remove package foo if it is not explicitly needed community.general.portage: package: foo state: absent - depclean: yes -''' + depclean: true +""" import os import re +import sys +import traceback -from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.basic import AnsibleModule, missing_required_lib +from ansible.module_utils.common.respawn import has_respawned, respawn_module from ansible.module_utils.common.text.converters import to_native +try: + from portage.dbapi import vartree + from portage.exception import InvalidAtom + HAS_PORTAGE = True + PORTAGE_IMPORT_ERROR = None +except ImportError: + HAS_PORTAGE = False + PORTAGE_IMPORT_ERROR = traceback.format_exc() + + def query_package(module, package, action): if package.startswith('@'): return query_set(module, package, action) @@ -239,13 +272,15 @@ def 
query_package(module, package, action): def query_atom(module, atom, action): - cmd = '%s list %s' % (module.equery_path, atom) - - rc, out, err = module.run_command(cmd) - return rc == 0 + vdb = vartree.vardbapi() + try: + exists = vdb.match(atom) + except InvalidAtom: + return False + return bool(exists) -def query_set(module, set, action): +def query_set(module, set_, action): system_sets = [ '@live-rebuild', '@module-rebuild', @@ -257,16 +292,16 @@ def query_set(module, set, action): '@x11-module-rebuild', ] - if set in system_sets: + if set_ in system_sets: if action == 'unmerge': - module.fail_json(msg='set %s cannot be removed' % set) + module.fail_json(msg='set %s cannot be removed' % set_) return False world_sets_path = '/var/lib/portage/world_sets' if not os.path.exists(world_sets_path): return False - cmd = 'grep %s %s' % (set, world_sets_path) + cmd = ['grep', set_, world_sets_path] rc, out, err = module.run_command(cmd) return rc == 0 @@ -278,27 +313,28 @@ def sync_repositories(module, webrsync=False): if webrsync: webrsync_path = module.get_bin_path('emerge-webrsync', required=True) - cmd = '%s --quiet' % webrsync_path + cmd = [webrsync_path, '--quiet'] else: - cmd = '%s --sync --quiet --ask=n' % module.emerge_path + cmd = [module.emerge_path, '--sync', '--quiet', '--ask=n'] rc, out, err = module.run_command(cmd) if rc != 0: module.fail_json(msg='could not sync package repositories') -# Note: In the 3 functions below, equery is done one-by-one, but emerge is done -# in one go. If that is not desirable, split the packages into multiple tasks -# instead of joining them together with comma. +# Note: In the 3 functions below, package querying is done one-by-one, +# but emerge is done in one go. If that is not desirable, split the +# packages into multiple tasks instead of joining them together with +# comma. 
def emerge_packages(module, packages): """Run emerge command against given list of atoms.""" p = module.params - if p['noreplace'] and not (p['update'] or p['state'] == 'latest'): + if p['noreplace'] and not p['changed_use'] and not p['newuse'] and not (p['update'] or p['state'] == 'latest'): for package in packages: - if p['noreplace'] and not query_package(module, package, 'emerge'): + if p['noreplace'] and not p['changed_use'] and not p['newuse'] and not query_package(module, package, 'emerge'): break else: module.exit_json(changed=False, msg='Packages already present.') @@ -335,6 +371,9 @@ def emerge_packages(module, packages): emerge_flags = { 'jobs': '--jobs', 'loadavg': '--load-average', + 'backtrack': '--backtrack', + 'withbdeps': '--with-bdeps', + 'select': '--select', } for flag, arg in emerge_flags.items(): @@ -344,13 +383,14 @@ def emerge_packages(module, packages): """Fallback to default: don't use this argument at all.""" continue - if not flag_val: + """Add the --flag=value pair.""" + if isinstance(flag_val, bool): + args.extend((arg, to_native('y' if flag_val else 'n'))) + elif not flag_val: """If the value is 0 or 0.0: add the flag, but not the value.""" args.append(arg) - continue - - """Add the --flag=value pair.""" - args.extend((arg, to_native(flag_val))) + else: + args.extend((arg, to_native(flag_val))) cmd, (rc, out, err) = run_emerge(module, packages, *args) if rc != 0: @@ -468,12 +508,13 @@ portage_absent_states = ['absent', 'unmerged', 'removed'] def main(): module = AnsibleModule( argument_spec=dict( - package=dict(type='list', elements='str', default=None, aliases=['name']), + package=dict(type='list', elements='str', aliases=['name']), state=dict( default=portage_present_states[0], choices=portage_present_states + portage_absent_states, ), update=dict(default=False, type='bool'), + backtrack=dict(type='int'), deep=dict(default=False, type='bool'), newuse=dict(default=False, type='bool'), changed_use=dict(default=False, type='bool'), @@ 
-482,16 +523,18 @@ def main(): nodeps=dict(default=False, type='bool'), onlydeps=dict(default=False, type='bool'), depclean=dict(default=False, type='bool'), + select=dict(type='bool'), quiet=dict(default=False, type='bool'), verbose=dict(default=False, type='bool'), - sync=dict(default=None, choices=['yes', 'web', 'no']), + sync=dict(choices=['yes', 'web', 'no']), getbinpkgonly=dict(default=False, type='bool'), getbinpkg=dict(default=False, type='bool'), usepkgonly=dict(default=False, type='bool'), usepkg=dict(default=False, type='bool'), keepgoing=dict(default=False, type='bool'), - jobs=dict(default=None, type='int'), - loadavg=dict(default=None, type='float'), + jobs=dict(type='int'), + loadavg=dict(type='float'), + withbdeps=dict(type='bool'), quietbuild=dict(default=False, type='bool'), quietfail=dict(default=False, type='bool'), ), @@ -501,12 +544,19 @@ def main(): ['quiet', 'verbose'], ['quietbuild', 'verbose'], ['quietfail', 'verbose'], + ['oneshot', 'select'], ], supports_check_mode=True, ) + if not HAS_PORTAGE: + if sys.executable != '/usr/bin/python' and not has_respawned(): + respawn_module('/usr/bin/python') + else: + module.fail_json(msg=missing_required_lib('portage'), + exception=PORTAGE_IMPORT_ERROR) + module.emerge_path = module.get_bin_path('emerge', required=True) - module.equery_path = module.get_bin_path('equery', required=True) p = module.params diff --git a/plugins/modules/packaging/os/portinstall.py b/plugins/modules/portinstall.py similarity index 73% rename from plugins/modules/packaging/os/portinstall.py rename to plugins/modules/portinstall.py index d1c33cc5c8..8598294a68 100644 --- a/plugins/modules/packaging/os/portinstall.py +++ b/plugins/modules/portinstall.py @@ -1,46 +1,51 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- -# (c) 2013, berenddeboer +# Copyright (c) 2013, berenddeboer # Written by berenddeboer # Based on pkgng module written by bleader # -# GNU General Public License v3.0+ (see COPYING or 
https://www.gnu.org/licenses/gpl-3.0.txt) +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: portinstall short_description: Installing packages from FreeBSD's ports system description: - - Manage packages for FreeBSD using 'portinstall'. + - Manage packages for FreeBSD using C(portinstall). +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: none + diff_mode: + support: none options: - name: - description: - - name of package to install/remove - aliases: [pkg] - required: true - type: str - state: - description: - - state of the package - choices: [ 'present', 'absent' ] - required: false - default: present - type: str - use_packages: - description: - - use packages instead of ports whenever available - type: bool - required: false - default: yes + name: + description: + - Name of package to install/remove. + aliases: [pkg] + required: true + type: str + state: + description: + - State of the package. + choices: ['present', 'absent'] + required: false + default: present + type: str + use_packages: + description: + - Use packages instead of ports whenever available. 
+ type: bool + required: false + default: true author: "berenddeboer (@berenddeboer)" -''' +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: Install package foo community.general.portinstall: name: foo @@ -55,14 +60,12 @@ EXAMPLES = ''' community.general.portinstall: name: foo,bar state: absent -''' +""" -import os import re -import sys +from shlex import quote as shlex_quote from ansible.module_utils.basic import AnsibleModule -from ansible.module_utils.six.moves import shlex_quote def query_package(module, name): @@ -73,12 +76,13 @@ def query_package(module, name): if pkg_info_path: pkgng = False pkg_glob_path = module.get_bin_path('pkg_glob', True) + # TODO: convert run_comand() argument to list! rc, out, err = module.run_command("%s -e `pkg_glob %s`" % (pkg_info_path, shlex_quote(name)), use_unsafe_shell=True) + pkg_info_path = [pkg_info_path] else: pkgng = True - pkg_info_path = module.get_bin_path('pkg', True) - pkg_info_path = pkg_info_path + " info" - rc, out, err = module.run_command("%s %s" % (pkg_info_path, name)) + pkg_info_path = [module.get_bin_path('pkg', True), "info"] + rc, out, err = module.run_command(pkg_info_path + [name]) found = rc == 0 @@ -88,10 +92,7 @@ def query_package(module, name): # some package is installed name_without_digits = re.sub('[0-9]', '', name) if name != name_without_digits: - if pkgng: - rc, out, err = module.run_command("%s %s" % (pkg_info_path, name_without_digits)) - else: - rc, out, err = module.run_command("%s %s" % (pkg_info_path, name_without_digits)) + rc, out, err = module.run_command(pkg_info_path + [name_without_digits]) found = rc == 0 @@ -101,13 +102,13 @@ def query_package(module, name): def matching_packages(module, name): ports_glob_path = module.get_bin_path('ports_glob', True) - rc, out, err = module.run_command("%s %s" % (ports_glob_path, name)) + rc, out, err = module.run_command([ports_glob_path, name]) # counts the number of packages found occurrences = out.count('\n') if occurrences == 0: 
name_without_digits = re.sub('[0-9]', '', name) if name != name_without_digits: - rc, out, err = module.run_command("%s %s" % (ports_glob_path, name_without_digits)) + rc, out, err = module.run_command([ports_glob_path, name_without_digits]) occurrences = out.count('\n') return occurrences @@ -129,10 +130,12 @@ def remove_packages(module, packages): if not query_package(module, package): continue + # TODO: convert run_comand() argument to list! rc, out, err = module.run_command("%s `%s %s`" % (pkg_delete_path, pkg_glob_path, shlex_quote(package)), use_unsafe_shell=True) if query_package(module, package): name_without_digits = re.sub('[0-9]', '', package) + # TODO: convert run_comand() argument to list! rc, out, err = module.run_command("%s `%s %s`" % (pkg_delete_path, pkg_glob_path, shlex_quote(name_without_digits)), use_unsafe_shell=True) @@ -157,13 +160,13 @@ def install_packages(module, packages, use_packages): if not portinstall_path: pkg_path = module.get_bin_path('pkg', False) if pkg_path: - module.run_command("pkg install -y portupgrade") + module.run_command([pkg_path, "install", "-y", "portupgrade"]) portinstall_path = module.get_bin_path('portinstall', True) if use_packages: - portinstall_params = "--use-packages" + portinstall_params = ["--use-packages"] else: - portinstall_params = "" + portinstall_params = [] for package in packages: if query_package(module, package): @@ -172,7 +175,7 @@ def install_packages(module, packages, use_packages): # TODO: check how many match matches = matching_packages(module, package) if matches == 1: - rc, out, err = module.run_command("%s --batch %s %s" % (portinstall_path, portinstall_params, package)) + rc, out, err = module.run_command([portinstall_path, "--batch"] + portinstall_params + [package]) if not query_package(module, package): module.fail_json(msg="failed to install %s: %s" % (package, out)) elif matches == 0: diff --git a/plugins/modules/net_tools/pritunl/pritunl_org.py b/plugins/modules/pritunl_org.py 
similarity index 65% rename from plugins/modules/net_tools/pritunl/pritunl_org.py rename to plugins/modules/pritunl_org.py index 35796ae361..241d0cb08f 100644 --- a/plugins/modules/net_tools/pritunl/pritunl_org.py +++ b/plugins/modules/pritunl_org.py @@ -1,54 +1,53 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- -# Copyright: (c) 2021, Florian Dambrine -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# Copyright (c) 2021, Florian Dambrine +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import absolute_import, division, print_function +from __future__ import annotations -__metaclass__ = type -DOCUMENTATION = """ ---- +DOCUMENTATION = r""" module: pritunl_org author: Florian Dambrine (@Lowess) version_added: 2.5.0 short_description: Manages Pritunl Organizations using the Pritunl API description: - - A module to manage Pritunl organizations using the Pritunl API. + - A module to manage Pritunl organizations using the Pritunl API. extends_documentation_fragment: - - community.general.pritunl + - community.general.pritunl + - community.general.attributes +attributes: + check_mode: + support: none + diff_mode: + support: none options: - name: - type: str - required: true - aliases: - - org - description: - - The name of the organization to manage in Pritunl. - - force: - type: bool - default: false - description: - - If I(force) is C(true) and I(state) is C(absent), the module - will delete the organization, no matter if it contains users - or not. By default I(force) is C(false), which will cause the - module to fail the deletion of the organization when it contains - users. - - state: - type: str - default: 'present' - choices: - - present - - absent - description: - - If C(present), the module adds organization I(name) to - Pritunl. 
If C(absent), attempt to delete the organization - from Pritunl (please read about I(force) usage). + name: + type: str + required: true + aliases: + - org + description: + - The name of the organization to manage in Pritunl. + force: + type: bool + default: false + description: + - If O(force) is V(true) and O(state) is V(absent), the module deletes the organization, no matter if it contains users + or not. By default O(force) is V(false), which causes the module to fail the deletion of the organization when it + contains users. + state: + type: str + default: 'present' + choices: + - present + - absent + description: + - If V(present), the module adds organization O(name) to Pritunl. If V(absent), attempt to delete the organization from + Pritunl (please read about O(force) usage). """ -EXAMPLES = """ +EXAMPLES = r""" - name: Ensure the organization named MyOrg exists community.general.pritunl_org: state: present @@ -60,20 +59,20 @@ EXAMPLES = """ name: MyOrg """ -RETURN = """ +RETURN = r""" response: - description: JSON representation of a Pritunl Organization. - returned: success - type: dict - sample: - { - "auth_api": False, - "name": "Foo", - "auth_token": None, - "user_count": 0, - "auth_secret": None, - "id": "csftwlu6uhralzi2dpmhekz3", - } + description: JSON representation of a Pritunl Organization. 
+ returned: success + type: dict + sample: + { + "auth_api": false, + "name": "Foo", + "auth_token": null, + "user_count": 0, + "auth_secret": null, + "id": "csftwlu6uhralzi2dpmhekz3" + } """ @@ -175,12 +174,10 @@ def main(): argument_spec.update( dict( name=dict(required=True, type="str", aliases=["org"]), - force=dict(required=False, type="bool", default=False), - state=dict( - required=False, choices=["present", "absent"], default="present" - ), + force=dict(type="bool", default=False), + state=dict(choices=["present", "absent"], default="present"), ) - ), + ) module = AnsibleModule(argument_spec=argument_spec) diff --git a/plugins/modules/net_tools/pritunl/pritunl_org_info.py b/plugins/modules/pritunl_org_info.py similarity index 51% rename from plugins/modules/net_tools/pritunl/pritunl_org_info.py rename to plugins/modules/pritunl_org_info.py index a7e65c80d1..a98fcd9f4d 100644 --- a/plugins/modules/net_tools/pritunl/pritunl_org_info.py +++ b/plugins/modules/pritunl_org_info.py @@ -1,36 +1,34 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- -# Copyright: (c) 2021, Florian Dambrine -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# Copyright (c) 2021, Florian Dambrine +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import absolute_import, division, print_function +from __future__ import annotations -__metaclass__ = type -DOCUMENTATION = """ ---- +DOCUMENTATION = r""" module: pritunl_org_info author: Florian Dambrine (@Lowess) version_added: 2.5.0 short_description: List Pritunl Organizations using the Pritunl API description: - - A module to list Pritunl organizations using the Pritunl API. + - A module to list Pritunl organizations using the Pritunl API. 
extends_documentation_fragment: - - community.general.pritunl + - community.general.pritunl + - community.general.attributes + - community.general.attributes.info_module options: - organization: - type: str - required: false - aliases: - - org - default: null - description: - - Name of the Pritunl organization to search for. - If none provided, the module will return all Pritunl - organizations. + organization: + type: str + required: false + aliases: + - org + default: null + description: + - Name of the Pritunl organization to search for. If none provided, the module returns all Pritunl organizations. """ -EXAMPLES = """ +EXAMPLES = r""" - name: List all existing Pritunl organizations community.general.pritunl_org_info: @@ -39,39 +37,39 @@ EXAMPLES = """ organization: MyOrg """ -RETURN = """ +RETURN = r""" organizations: - description: List of Pritunl organizations. - returned: success - type: list - elements: dict - sample: - [ - { - "auth_api": False, - "name": "FooOrg", - "auth_token": None, - "user_count": 0, - "auth_secret": None, - "id": "csftwlu6uhralzi2dpmhekz3", - }, - { - "auth_api": False, - "name": "MyOrg", - "auth_token": None, - "user_count": 3, - "auth_secret": None, - "id": "58070daee63f3b2e6e472c36", - }, - { - "auth_api": False, - "name": "BarOrg", - "auth_token": None, - "user_count": 0, - "auth_secret": None, - "id": "v1sncsxxybnsylc8gpqg85pg", - } - ] + description: List of Pritunl organizations. 
+ returned: success + type: list + elements: dict + sample: + [ + { + "auth_api": false, + "name": "FooOrg", + "auth_token": null, + "user_count": 0, + "auth_secret": null, + "id": "csftwlu6uhralzi2dpmhekz3" + }, + { + "auth_api": false, + "name": "MyOrg", + "auth_token": null, + "user_count": 3, + "auth_secret": null, + "id": "58070daee63f3b2e6e472c36" + }, + { + "auth_api": false, + "name": "BarOrg", + "auth_token": null, + "user_count": 0, + "auth_secret": null, + "id": "v1sncsxxybnsylc8gpqg85pg" + } + ] """ from ansible.module_utils.basic import AnsibleModule @@ -113,9 +111,9 @@ def main(): argument_spec.update( dict( - organization=dict(required=False, type="str", default=None, aliases=["org"]) + organization=dict(type="str", aliases=["org"]) ) - ), + ) module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True) diff --git a/plugins/modules/net_tools/pritunl/pritunl_user.py b/plugins/modules/pritunl_user.py similarity index 62% rename from plugins/modules/net_tools/pritunl/pritunl_user.py rename to plugins/modules/pritunl_user.py index 0beb9720b6..ff5ed479e6 100644 --- a/plugins/modules/net_tools/pritunl/pritunl_user.py +++ b/plugins/modules/pritunl_user.py @@ -1,97 +1,92 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- -# Copyright: (c) 2021, Florian Dambrine -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# Copyright (c) 2021, Florian Dambrine +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import absolute_import, division, print_function +from __future__ import annotations -__metaclass__ = type -DOCUMENTATION = """ ---- +DOCUMENTATION = r""" module: pritunl_user author: "Florian Dambrine (@Lowess)" version_added: 2.3.0 short_description: Manage Pritunl Users using the Pritunl API description: - - A module to manage Pritunl users using the Pritunl API. 
+ - A module to manage Pritunl users using the Pritunl API. extends_documentation_fragment: - - community.general.pritunl + - community.general.pritunl + - community.general.attributes +attributes: + check_mode: + support: none + diff_mode: + support: none options: - organization: - type: str - required: true - aliases: - - org - description: - - The name of the organization the user is part of. - - state: - type: str - default: 'present' - choices: - - present - - absent - description: - - If C(present), the module adds user I(user_name) to - the Pritunl I(organization). If C(absent), removes the user - I(user_name) from the Pritunl I(organization). - - user_name: - type: str - required: true - default: null - description: - - Name of the user to create or delete from Pritunl. - - user_email: - type: str - required: false - default: null - description: - - Email address associated with the user I(user_name). - - user_type: - type: str - required: false - default: client - choices: - - client - - server - description: - - Type of the user I(user_name). - - user_groups: - type: list - elements: str - required: false - default: null - description: - - List of groups associated with the user I(user_name). - - user_disabled: - type: bool - required: false - default: null - description: - - Enable/Disable the user I(user_name). - - user_gravatar: - type: bool - required: false - default: null - description: - - Enable/Disable Gravatar usage for the user I(user_name). - - user_mac_addresses: - type: list - elements: str - description: - - Allowed MAC addresses for the user I(user_name). - version_added: 5.0.0 + organization: + type: str + required: true + aliases: + - org + description: + - The name of the organization the user is part of. + state: + type: str + default: 'present' + choices: + - present + - absent + description: + - If V(present), the module adds user O(user_name) to the Pritunl O(organization). 
If V(absent), removes the user O(user_name) + from the Pritunl O(organization). + user_name: + type: str + required: true + default: + description: + - Name of the user to create or delete from Pritunl. + user_email: + type: str + required: false + default: + description: + - Email address associated with the user O(user_name). + user_type: + type: str + required: false + default: client + choices: + - client + - server + description: + - Type of the user O(user_name). + user_groups: + type: list + elements: str + required: false + default: + description: + - List of groups associated with the user O(user_name). + user_disabled: + type: bool + required: false + default: + description: + - Enable/Disable the user O(user_name). + user_gravatar: + type: bool + required: false + default: + description: + - Enable/Disable Gravatar usage for the user O(user_name). + user_mac_addresses: + type: list + elements: str + description: + - Allowed MAC addresses for the user O(user_name). + version_added: 5.0.0 """ -EXAMPLES = """ +EXAMPLES = r""" - name: Create the user Foo with email address foo@bar.com in MyOrg community.general.pritunl_user: state: present @@ -107,7 +102,7 @@ EXAMPLES = """ organization: MyOrg user_name: Foo user_email: foo@bar.com - user_disabled: yes + user_disabled: true - name: Make sure the user Foo is not part of MyOrg anymore community.general.pritunl_user: @@ -116,37 +111,38 @@ EXAMPLES = """ user_name: Foo """ -RETURN = """ +RETURN = r""" response: - description: JSON representation of Pritunl Users. 
- returned: success - type: dict - sample: - { - "audit": false, - "auth_type": "google", - "bypass_secondary": false, - "client_to_client": false, - "disabled": false, - "dns_mapping": null, - "dns_servers": null, - "dns_suffix": null, - "email": "foo@bar.com", - "gravatar": true, - "groups": [ - "foo", "bar" - ], - "id": "5d070dafe63q3b2e6s472c3b", - "name": "foo@acme.com", - "network_links": [], - "organization": "58070daee6sf342e6e4s2c36", - "organization_name": "Acme", - "otp_auth": true, - "otp_secret": "35H5EJA3XB2$4CWG", - "pin": false, - "port_forwarding": [], - "servers": [], - } + description: JSON representation of Pritunl Users. + returned: success + type: dict + sample: + { + "audit": false, + "auth_type": "google", + "bypass_secondary": false, + "client_to_client": false, + "disabled": false, + "dns_mapping": null, + "dns_servers": null, + "dns_suffix": null, + "email": "foo@bar.com", + "gravatar": true, + "groups": [ + "foo", + "bar" + ], + "id": "5d070dafe63q3b2e6s472c3b", + "name": "foo@acme.com", + "network_links": [], + "organization": "58070daee6sf342e6e4s2c36", + "organization_name": "Acme", + "otp_auth": true, + "otp_secret": "35H5EJA3XB2$4CWG", + "pin": false, + "port_forwarding": [], + "servers": [] + } """ @@ -322,20 +318,16 @@ def main(): argument_spec.update( dict( organization=dict(required=True, type="str", aliases=["org"]), - state=dict( - required=False, choices=["present", "absent"], default="present" - ), + state=dict(choices=["present", "absent"], default="present"), user_name=dict(required=True, type="str"), - user_type=dict( - required=False, choices=["client", "server"], default="client" - ), - user_email=dict(required=False, type="str", default=None), - user_groups=dict(required=False, type="list", elements="str", default=None), - user_disabled=dict(required=False, type="bool", default=None), - user_gravatar=dict(required=False, type="bool", default=None), - user_mac_addresses=dict(required=False, type="list", elements="str", 
default=None), + user_type=dict(choices=["client", "server"], default="client"), + user_email=dict(type="str"), + user_groups=dict(type="list", elements="str"), + user_disabled=dict(type="bool"), + user_gravatar=dict(type="bool"), + user_mac_addresses=dict(type="list", elements="str"), ) - ), + ) module = AnsibleModule(argument_spec=argument_spec) diff --git a/plugins/modules/net_tools/pritunl/pritunl_user_info.py b/plugins/modules/pritunl_user_info.py similarity index 52% rename from plugins/modules/net_tools/pritunl/pritunl_user_info.py rename to plugins/modules/pritunl_user_info.py index e8cf5e2955..99a91eaad3 100644 --- a/plugins/modules/net_tools/pritunl/pritunl_user_info.py +++ b/plugins/modules/pritunl_user_info.py @@ -1,49 +1,47 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- -# Copyright: (c) 2021, Florian Dambrine -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# Copyright (c) 2021, Florian Dambrine +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import absolute_import, division, print_function +from __future__ import annotations -__metaclass__ = type -DOCUMENTATION = """ ---- +DOCUMENTATION = r""" module: pritunl_user_info author: "Florian Dambrine (@Lowess)" version_added: 2.3.0 short_description: List Pritunl Users using the Pritunl API description: - - A module to list Pritunl users using the Pritunl API. + - A module to list Pritunl users using the Pritunl API. extends_documentation_fragment: - - community.general.pritunl + - community.general.pritunl + - community.general.attributes + - community.general.attributes.info_module options: - organization: - type: str - required: true - aliases: - - org - description: - - The name of the organization the user is part of. - - user_name: - type: str - required: false - description: - - Name of the user to filter on Pritunl. 
- - user_type: - type: str - required: false - default: client - choices: - - client - - server - description: - - Type of the user I(user_name). + organization: + type: str + required: true + aliases: + - org + description: + - The name of the organization the user is part of. + user_name: + type: str + required: false + description: + - Name of the user to filter on Pritunl. + user_type: + type: str + required: false + default: client + choices: + - client + - server + description: + - Type of the user O(user_name). """ -EXAMPLES = """ +EXAMPLES = r""" - name: List all existing users part of the organization MyOrg community.general.pritunl_user_info: state: list @@ -56,40 +54,41 @@ EXAMPLES = """ user_name: Florian """ -RETURN = """ +RETURN = r""" users: - description: List of Pritunl users. - returned: success - type: list - elements: dict - sample: - [ - { - "audit": false, - "auth_type": "google", - "bypass_secondary": false, - "client_to_client": false, - "disabled": false, - "dns_mapping": null, - "dns_servers": null, - "dns_suffix": null, - "email": "foo@bar.com", - "gravatar": true, - "groups": [ - "foo", "bar" - ], - "id": "5d070dafe63q3b2e6s472c3b", - "name": "foo@acme.com", - "network_links": [], - "organization": "58070daee6sf342e6e4s2c36", - "organization_name": "Acme", - "otp_auth": true, - "otp_secret": "35H5EJA3XB2$4CWG", - "pin": false, - "port_forwarding": [], - "servers": [], - } - ] + description: List of Pritunl users. 
+ returned: success + type: list + elements: dict + sample: + [ + { + "audit": false, + "auth_type": "google", + "bypass_secondary": false, + "client_to_client": false, + "disabled": false, + "dns_mapping": null, + "dns_servers": null, + "dns_suffix": null, + "email": "foo@bar.com", + "gravatar": true, + "groups": [ + "foo", + "bar" + ], + "id": "5d070dafe63q3b2e6s472c3b", + "name": "foo@acme.com", + "network_links": [], + "organization": "58070daee6sf342e6e4s2c36", + "organization_name": "Acme", + "otp_auth": true, + "otp_secret": "35H5EJA3XB2$4CWG", + "pin": false, + "port_forwarding": [], + "servers": [] + } + ] """ from ansible.module_utils.basic import AnsibleModule @@ -150,14 +149,10 @@ def main(): argument_spec.update( dict( organization=dict(required=True, type="str", aliases=["org"]), - user_name=dict(required=False, type="str", default=None), - user_type=dict( - required=False, - choices=["client", "server"], - default="client", - ), + user_name=dict(type="str"), + user_type=dict(choices=["client", "server"], default="client"), ) - ), + ) module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True) diff --git a/plugins/modules/cloud/pubnub/pubnub_blocks.py b/plugins/modules/pubnub_blocks.py similarity index 83% rename from plugins/modules/cloud/pubnub/pubnub_blocks.py rename to plugins/modules/pubnub_blocks.py index d3b76337a3..9f2135de20 100644 --- a/plugins/modules/cloud/pubnub/pubnub_blocks.py +++ b/plugins/modules/pubnub_blocks.py @@ -1,5 +1,4 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- # # PubNub Real-time Cloud-Hosted Push API and Push Notification Client # Frameworks @@ -7,61 +6,61 @@ # http://www.pubnub.com/ # http://www.pubnub.com/terms # -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import 
absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: pubnub_blocks -short_description: PubNub blocks management module. +short_description: PubNub blocks management module description: - - "This module allows Ansible to interface with the PubNub BLOCKS - infrastructure by providing the following operations: create / remove, - start / stop and rename for blocks and create / modify / remove for event - handlers" + - 'This module allows Ansible to interface with the PubNub BLOCKS infrastructure by providing the following operations: + create / remove, start / stop and rename for blocks and create / modify / remove for event handlers.' author: - PubNub (@pubnub) - Sergey Mamontov (@parfeon) requirements: - - "python >= 2.7" - "pubnub_blocks_client >= 1.0" +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: none options: email: description: - Email from account for which new session should be started. - - "Not required if C(cache) contains result of previous module call (in - same play)." + - Not required if O(cache) contains result of previous module call (in same play). required: false type: str + default: '' password: description: - - Password which match to account to which specified C(email) belong. - - "Not required if C(cache) contains result of previous module call (in - same play)." + - Password which match to account to which specified O(email) belong. + - Not required if O(cache) contains result of previous module call (in same play). required: false type: str + default: '' cache: - description: > - In case if single play use blocks management module few times it is - preferred to enabled 'caching' by making previous module to share - gathered artifacts and pass them to this parameter. 
+ description: >- + In case if single play use blocks management module few times it is preferred to enabled 'caching' by making previous + module to share gathered artifacts and pass them to this parameter. required: false type: dict default: {} account: description: - - "Name of PubNub account for from which C(application) will be used to - manage blocks." - - "User's account will be used if value not set or empty." + - Name of PubNub account for from which O(application) is used to manage blocks. + - User's account is used if value not set or empty. type: str - required: false + default: '' application: description: - - "Name of target PubNub application for which blocks configuration on - specific C(keyset) will be done." + - Name of target PubNub application for which blocks configuration on specific O(keyset) is done. type: str required: true keyset: @@ -71,68 +70,57 @@ options: required: true state: description: - - "Intended block state after event handlers creation / update process - will be completed." + - Intended block state after event handlers creation / update process is completed. required: false default: 'present' choices: ['started', 'stopped', 'present', 'absent'] type: str name: description: - - Name of managed block which will be later visible on admin.pubnub.com. + - Name of managed block which is later visible on admin.pubnub.com. required: true type: str description: description: - - Short block description which will be later visible on - admin.pubnub.com. Used only if block doesn't exists and won't change - description for existing block. + - Short block description which is later visible on U(https://admin.pubnub.com). + - Used only if block does not exists and does not change description for existing block. required: false type: str event_handlers: description: - - "List of event handlers which should be updated for specified block - C(name)." - - "Each entry for new event handler should contain: C(name), C(src), - C(channels), C(event). 
C(name) used as event handler name which can be - used later to make changes to it." + - List of event handlers which should be updated for specified block O(name). + - 'Each entry for new event handler should contain: V(name), V(src), V(channels), V(event). V(name) used as event handler + name which can be used later to make changes to it.' - C(src) is full path to file with event handler code. - - "C(channels) is name of channel from which event handler is waiting - for events." - - "C(event) is type of event which is able to trigger event handler: - I(js-before-publish), I(js-after-publish), I(js-after-presence)." - - "Each entry for existing handlers should contain C(name) (so target - handler can be identified). Rest parameters (C(src), C(channels) and - C(event)) can be added if changes required for them." - - "It is possible to rename event handler by adding C(changes) key to - event handler payload and pass dictionary, which will contain single key - C(name), where new name should be passed." - - "To remove particular event handler it is possible to set C(state) for - it to C(absent) and it will be removed." + - V(channels) is name of channel from which event handler is waiting for events. + - 'V(event) is type of event which is able to trigger event handler: V(js-before-publish), V(js-after-publish), V(js-after-presence).' + - Each entry for existing handlers should contain C(name) (so target handler can be identified). Rest parameters (C(src), + C(channels) and C(event)) can be added if changes required for them. + - It is possible to rename event handler by adding C(changes) key to event handler payload and pass dictionary, which + contains single key C(name), where new name should be passed. + - To remove particular event handler it is possible to set C(state) for it to C(absent) and it is removed. 
required: false default: [] type: list elements: dict changes: description: - - "List of fields which should be changed by block itself (doesn't - affect any event handlers)." - - "Possible options for change is: C(name)." + - List of fields which should be changed by block itself (does not affect any event handlers). + - 'Possible options for change is: O(name).' required: false default: {} type: dict validate_certs: description: - - "This key allow to try skip certificates check when performing REST API - calls. Sometimes host may have issues with certificates on it and this - will cause problems to call PubNub REST API." - - If check should be ignored C(False) should be passed to this parameter. + - This key allow to try skip certificates check when performing REST API calls. Sometimes host may have issues with + certificates on it and this causes problems to call PubNub REST API. + - If check should be ignored V(false) should be passed to this parameter. required: false default: true type: bool -''' +""" -EXAMPLES = ''' +EXAMPLES = r""" # Event handler create example. - name: Create single event handler community.general.pubnub_blocks: @@ -142,8 +130,7 @@ EXAMPLES = ''' keyset: '{{ keyset_name }}' name: '{{ block_name }}' event_handlers: - - - src: '{{ path_to_handler_source }}' + - src: '{{ path_to_handler_source }}' name: '{{ handler_name }}' event: 'js-before-publish' channels: '{{ handler_channel }}' @@ -157,8 +144,7 @@ EXAMPLES = ''' keyset: '{{ keyset_name }}' name: '{{ block_name }}' event_handlers: - - - name: '{{ handler_name }}' + - name: '{{ handler_name }}' event: 'js-after-publish' # Stop block and event handlers. 
@@ -190,8 +176,7 @@ EXAMPLES = ''' name: '{{ block_name }}' state: present event_handlers: - - - src: '{{ path_to_handler_1_source }}' + - src: '{{ path_to_handler_1_source }}' name: '{{ event_handler_1_name }}' channels: '{{ event_handler_1_channel }}' event: 'js-before-publish' @@ -204,8 +189,7 @@ EXAMPLES = ''' name: '{{ block_name }}' state: present event_handlers: - - - src: '{{ path_to_handler_2_source }}' + - src: '{{ path_to_handler_2_source }}' name: '{{ event_handler_2_name }}' channels: '{{ event_handler_2_channel }}' event: 'js-before-publish' @@ -217,22 +201,22 @@ EXAMPLES = ''' keyset: '{{ keyset_name }}' name: '{{ block_name }}' state: started -''' +""" -RETURN = ''' +RETURN = r""" module_cache: - description: "Cached account information. In case if with single play module - used few times it is better to pass cached data to next module calls to speed - up process." + description: + - Cached account information. In case if with single play module used few times it is better to pass cached data to next + module calls to speed up process. type: dict returned: always -''' +""" import copy import os try: # Import PubNub BLOCKS client. 
- from pubnub_blocks_client import User, Account, Owner, Application, Keyset + from pubnub_blocks_client import User from pubnub_blocks_client import Block, EventHandler from pubnub_blocks_client import exceptions HAS_PUBNUB_BLOCKS_CLIENT = True @@ -546,9 +530,9 @@ def _content_of_file_at_path(path): def main(): fields = dict( - email=dict(default='', required=False, type='str'), - password=dict(default='', required=False, type='str', no_log=True), - account=dict(default='', required=False, type='str'), + email=dict(default='', type='str'), + password=dict(default='', type='str', no_log=True), + account=dict(default='', type='str'), application=dict(required=True, type='str'), keyset=dict(required=True, type='str', no_log=False), state=dict(default='present', type='str', diff --git a/plugins/modules/packaging/os/pulp_repo.py b/plugins/modules/pulp_repo.py similarity index 86% rename from plugins/modules/packaging/os/pulp_repo.py rename to plugins/modules/pulp_repo.py index d14d84451b..5486c56231 100644 --- a/plugins/modules/packaging/os/pulp_repo.py +++ b/plugins/modules/pulp_repo.py @@ -1,106 +1,101 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- -# (c) 2016, Joe Adams <@sysadmind> +# Copyright (c) 2016, Joe Adams <@sysadmind> # -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: pulp_repo author: "Joe Adams (@sysadmind)" -short_description: Add or remove Pulp repos from a remote host. +short_description: Add or remove Pulp repos from a remote host description: - Add or remove Pulp repos from a remote host. - Note, this is for Pulp 2 only. 
+attributes: + check_mode: + support: full + diff_mode: + support: none options: add_export_distributor: description: - Whether or not to add the export distributor to new C(rpm) repositories. type: bool - default: no + default: false feed: description: - Upstream feed URL to receive updates from. type: str force_basic_auth: description: - - httplib2, the library used by the M(ansible.builtin.uri) module only sends - authentication information when a webservice responds to an initial - request with a 401 status. Since some basic auth services do not - properly send a 401, logins will fail. This option forces the sending of - the Basic authentication header upon initial request. + - C(httplib2), the library used by the M(ansible.builtin.uri) module only sends authentication information when a webservice + responds to an initial request with a 401 status. Since some basic auth services do not properly send a 401, logins + fail. This option forces the sending of the Basic authentication header upon initial request. type: bool - default: no + default: false generate_sqlite: description: - - Boolean flag to indicate whether sqlite files should be generated during - a repository publish. + - Boolean flag to indicate whether sqlite files should be generated during a repository publish. required: false type: bool - default: no + default: false feed_ca_cert: description: - - CA certificate string used to validate the feed source SSL certificate. - This can be the file content or the path to the file. + - CA certificate string used to validate the feed source SSL certificate. This can be the file content or the path to + the file. type: str - aliases: [ importer_ssl_ca_cert ] + aliases: [importer_ssl_ca_cert] feed_client_cert: description: - - Certificate used as the client certificate when synchronizing the - repository. This is used to communicate authentication information to - the feed source. The value to this option must be the full path to the - certificate. 
The specified file may be the certificate itself or a - single file containing both the certificate and private key. This can be - the file content or the path to the file. + - Certificate used as the client certificate when synchronizing the repository. This is used to communicate authentication + information to the feed source. The value to this option must be the full path to the certificate. The specified file + may be the certificate itself or a single file containing both the certificate and private key. This can be the file + content or the path to the file. type: str - aliases: [ importer_ssl_client_cert ] + aliases: [importer_ssl_client_cert] feed_client_key: description: - - Private key to the certificate specified in I(importer_ssl_client_cert), - assuming it is not included in the certificate file itself. This can be - the file content or the path to the file. + - Private key to the certificate specified in O(feed_client_cert), assuming it is not included in the certificate file + itself. This can be the file content or the path to the file. type: str - aliases: [ importer_ssl_client_key ] + aliases: [importer_ssl_client_key] name: description: - Name of the repo to add or remove. This correlates to repo-id in Pulp. required: true type: str - aliases: [ repo ] + aliases: [repo] proxy_host: description: - - Proxy url setting for the pulp repository importer. This is in the - format scheme://host. + - Proxy URL setting for the pulp repository importer. This is in the format V(scheme://host). required: false - default: null + default: type: str proxy_port: description: - Proxy port setting for the pulp repository importer. required: false - default: null + default: type: str proxy_username: description: - Proxy username for the pulp repository importer. required: false - default: null + default: type: str proxy_password: description: - Proxy password for the pulp repository importer. 
required: false - default: null + default: type: str publish_distributor: description: - - Distributor to use when state is C(publish). The default is to - publish all distributors. + - Distributor to use when O(state=publish). The default is to publish all distributors. type: str pulp_host: description: @@ -113,61 +108,59 @@ options: type: str repo_type: description: - - Repo plugin type to use (i.e. C(rpm), C(docker)). + - Repo plugin type to use (that is, V(rpm), V(docker)). default: rpm type: str repoview: description: - - Whether to generate repoview files for a published repository. Setting - this to "yes" automatically activates `generate_sqlite`. + - Whether to generate repoview files for a published repository. Setting this to V(true) automatically activates O(generate_sqlite). required: false type: bool - default: no + default: false serve_http: description: - Make the repo available over HTTP. type: bool - default: no + default: false serve_https: description: - Make the repo available over HTTPS. type: bool - default: yes + default: true state: description: - - The repo state. A state of C(sync) will queue a sync of the repo. - This is asynchronous but not delayed like a scheduled sync. A state of - C(publish) will use the repository's distributor to publish the content. + - The repo state. A state of V(sync) queues a sync of the repo. This is asynchronous but not delayed like a scheduled + sync. A state of V(publish) uses the repository's distributor to publish the content. default: present - choices: [ "present", "absent", "sync", "publish" ] + choices: ["present", "absent", "sync", "publish"] type: str url_password: description: - - The password for use in HTTP basic authentication to the pulp API. - If the I(url_username) parameter is not specified, the I(url_password) - parameter will not be used. + - The password for use in HTTP basic authentication to the pulp API. 
If the O(url_username) parameter is not specified, + the O(url_password) parameter is not used. url_username: description: - The username for use in HTTP basic authentication to the pulp API. validate_certs: description: - - If C(no), SSL certificates will not be validated. This should only be - used on personally controlled sites using self-signed certificates. + - If V(false), SSL certificates are not validated. This should only be used on personally controlled sites using self-signed + certificates. type: bool - default: yes + default: true wait_for_completion: description: - Wait for asynchronous tasks to complete before returning. type: bool - default: no + default: false notes: - - This module can currently only create distributors and importers on rpm - repositories. Contributions to support other repo types are welcome. + - This module can currently only create distributors and importers on rpm repositories. Contributions to support other repo + types are welcome. extends_documentation_fragment: - - url -''' + - ansible.builtin.url + - community.general.attributes +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: Create a new repo with name 'my_repo' community.general.pulp_repo: name: my_repo @@ -182,7 +175,7 @@ EXAMPLES = ''' relative_url: centos/6/updates url_username: admin url_password: admin - force_basic_auth: yes + force_basic_auth: true state: present - name: Remove a repo from the pulp server @@ -190,15 +183,15 @@ EXAMPLES = ''' name: my_old_repo repo_type: rpm state: absent -''' +""" -RETURN = ''' +RETURN = r""" repo: description: Name of the repo that the action was performed on. 
returned: success type: str sample: my_repo -''' +""" import json import os @@ -588,29 +581,20 @@ def main(): if importer_ssl_ca_cert is not None: importer_ssl_ca_cert_file_path = os.path.abspath(importer_ssl_ca_cert) if os.path.isfile(importer_ssl_ca_cert_file_path): - importer_ssl_ca_cert_file_object = open(importer_ssl_ca_cert_file_path, 'r') - try: + with open(importer_ssl_ca_cert_file_path, 'r') as importer_ssl_ca_cert_file_object: importer_ssl_ca_cert = importer_ssl_ca_cert_file_object.read() - finally: - importer_ssl_ca_cert_file_object.close() if importer_ssl_client_cert is not None: importer_ssl_client_cert_file_path = os.path.abspath(importer_ssl_client_cert) if os.path.isfile(importer_ssl_client_cert_file_path): - importer_ssl_client_cert_file_object = open(importer_ssl_client_cert_file_path, 'r') - try: + with open(importer_ssl_client_cert_file_path, 'r') as importer_ssl_client_cert_file_object: importer_ssl_client_cert = importer_ssl_client_cert_file_object.read() - finally: - importer_ssl_client_cert_file_object.close() if importer_ssl_client_key is not None: importer_ssl_client_key_file_path = os.path.abspath(importer_ssl_client_key) if os.path.isfile(importer_ssl_client_key_file_path): - importer_ssl_client_key_file_object = open(importer_ssl_client_key_file_path, 'r') - try: + with open(importer_ssl_client_key_file_path, 'r') as importer_ssl_client_key_file_object: importer_ssl_client_key = importer_ssl_client_key_file_object.read() - finally: - importer_ssl_client_key_file_object.close() server = pulp_server(module, pulp_host, repo_type, wait_for_completion=wait_for_completion) server.set_repo_list() diff --git a/plugins/modules/system/puppet.py b/plugins/modules/puppet.py similarity index 55% rename from plugins/modules/system/puppet.py rename to plugins/modules/puppet.py index ed7341cb94..60500f2831 100644 --- a/plugins/modules/system/puppet.py +++ b/plugins/modules/puppet.py @@ -1,22 +1,27 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- -# 
Copyright: (c) 2015, Hewlett-Packard Development Company, L.P. -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# Copyright (c) 2015, Hewlett-Packard Development Company, L.P. +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = r''' ---- +DOCUMENTATION = r""" module: puppet short_description: Runs puppet description: - - Runs I(puppet) agent or apply in a reliable manner. + - Runs C(puppet) agent or apply in a reliable manner. +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: none options: timeout: description: - - How long to wait for I(puppet) to finish. + - How long to wait for C(puppet) to finish. type: str default: 30m puppetmaster: @@ -34,8 +39,8 @@ options: noop: description: - Override puppet.conf noop mode. - - When C(yes), run Puppet agent with C(--noop) switch set. - - When C(no), run Puppet agent with C(--no-noop) switch set. + - When V(true), run Puppet agent with C(--noop) switch set. + - When V(false), run Puppet agent with C(--no-noop) switch set. - When unset (default), use default or puppet.conf value if defined. type: bool facts: @@ -51,13 +56,18 @@ options: description: - Puppet environment to be used. type: str + confdir: + description: + - Path to the directory containing the puppet.conf file. + type: str + version_added: 5.1.0 logdest: description: - - Where the puppet logs should go, if puppet apply is being used. - - C(all) will go to both C(console) and C(syslog). - - C(stdout) will be deprecated and replaced by C(console). + - Where the puppet logs should go, if puppet apply is being used. + - V(all) goes to both C(console) and C(syslog). 
+ - V(stdout) is deprecated and replaced by C(console). type: str - choices: [ all, stdout, syslog ] + choices: [all, stdout, syslog] default: stdout certname: description: @@ -68,6 +78,12 @@ options: - A list of puppet tags to be used. type: list elements: str + skip_tags: + description: + - A list of puppet tags to be excluded. + type: list + elements: str + version_added: 6.6.0 execute: description: - Execute a specific piece of Puppet code. @@ -75,13 +91,20 @@ options: type: str use_srv_records: description: - - Toggles use_srv_records flag + - Toggles use_srv_records flag. type: bool summarize: description: - Whether to print a transaction summary. type: bool default: false + waitforlock: + description: + - The maximum amount of time C(puppet) should wait for an already running C(puppet) agent to finish before starting. + - If a number without unit is provided, it is assumed to be a number of seconds. Allowed units are V(m) for minutes + and V(h) for hours. + type: str + version_added: 9.0.0 verbose: description: - Print extra information. @@ -94,18 +117,27 @@ options: default: false show_diff: description: - - Whether to print file changes details - - Alias C(show-diff) has been deprecated and will be removed in community.general 7.0.0. - aliases: ['show-diff'] + - Whether to print file changes details. type: bool default: false + environment_lang: + description: + - The lang environment to use when running the puppet agent. + - The default value, V(C), is supported on every system, but can lead to encoding errors if UTF-8 is used in the output. + - Use V(C.UTF-8) or V(en_US.UTF-8) or similar UTF-8 supporting locales in case of problems. You need to make sure the + selected locale is supported on the system the puppet agent runs on. + - Starting with community.general 9.1.0, you can use the value V(auto) and the module tries to determine the best parseable + locale to use. 
+ type: str + default: C + version_added: 8.6.0 requirements: -- puppet + - puppet author: -- Monty Taylor (@emonty) -''' + - Monty Taylor (@emonty) +""" -EXAMPLES = r''' +EXAMPLES = r""" - name: Run puppet agent and fail if anything goes wrong community.general.puppet: @@ -128,33 +160,37 @@ EXAMPLES = r''' - name: Run puppet using a specific tags community.general.puppet: tags: - - update - - nginx + - update + - nginx + skip_tags: + - service + +- name: Wait 30 seconds for any current puppet runs to finish + community.general.puppet: + waitforlock: 30 + +- name: Wait 5 minutes for any current puppet runs to finish + community.general.puppet: + waitforlock: 5m - name: Run puppet agent in noop mode community.general.puppet: - noop: yes + noop: true - name: Run a manifest with debug, log to both syslog and console, specify module path community.general.puppet: modulepath: /etc/puppet/modules:/opt/stack/puppet-modules:/usr/share/openstack-puppet/modules logdest: all manifest: /var/lib/example/puppet_step_config.pp -''' +""" import json import os import stat +import ansible_collections.community.general.plugins.module_utils.puppet as puppet_utils + from ansible.module_utils.basic import AnsibleModule -from ansible.module_utils.six.moves import shlex_quote - - -def _get_facter_dir(): - if os.getuid() == 0: - return '/etc/facter/facts.d' - else: - return os.path.expanduser('~/.facter/facts.d') def _write_structured_data(basedir, basename, data): @@ -179,22 +215,24 @@ def main(): puppetmaster=dict(type='str'), modulepath=dict(type='str'), manifest=dict(type='str'), + confdir=dict(type='str'), noop=dict(type='bool'), logdest=dict(type='str', default='stdout', choices=['all', 'stdout', 'syslog']), # The following is not related to Ansible's diff; see https://github.com/ansible-collections/community.general/pull/3980#issuecomment-1005666154 - show_diff=dict( - type='bool', default=False, aliases=['show-diff'], - deprecated_aliases=[dict(name='show-diff', version='7.0.0', 
collection_name='community.general')]), + show_diff=dict(type='bool', default=False), facts=dict(type='dict'), facter_basename=dict(type='str', default='ansible'), environment=dict(type='str'), certname=dict(type='str'), tags=dict(type='list', elements='str'), + skip_tags=dict(type='list', elements='str'), execute=dict(type='str'), summarize=dict(type='bool', default=False), + waitforlock=dict(type='str'), debug=dict(type='bool', default=False), verbose=dict(type='bool', default=False), use_srv_records=dict(type='bool'), + environment_lang=dict(type='str', default='C'), ), supports_check_mode=True, mutually_exclusive=[ @@ -205,16 +243,6 @@ def main(): ) p = module.params - global PUPPET_CMD - PUPPET_CMD = module.get_bin_path("puppet", False, ['/opt/puppetlabs/bin']) - - if not PUPPET_CMD: - module.fail_json( - msg="Could not find puppet. Please ensure it is installed.") - - global TIMEOUT_CMD - TIMEOUT_CMD = module.get_bin_path("timeout", False) - if p['manifest']: if not os.path.exists(p['manifest']): module.fail_json( @@ -223,94 +251,30 @@ def main(): # Check if puppet is disabled here if not p['manifest']: - rc, stdout, stderr = module.run_command( - PUPPET_CMD + " config print agent_disabled_lockfile") - if os.path.exists(stdout.strip()): - module.fail_json( - msg="Puppet agent is administratively disabled.", - disabled=True) - elif rc != 0: - module.fail_json( - msg="Puppet agent state could not be determined.") + puppet_utils.ensure_agent_enabled(module) if module.params['facts'] and not module.check_mode: _write_structured_data( - _get_facter_dir(), + puppet_utils.get_facter_dir(), module.params['facter_basename'], module.params['facts']) - if TIMEOUT_CMD: - base_cmd = "%(timeout_cmd)s -s 9 %(timeout)s %(puppet_cmd)s" % dict( - timeout_cmd=TIMEOUT_CMD, - timeout=shlex_quote(p['timeout']), - puppet_cmd=PUPPET_CMD) - else: - base_cmd = PUPPET_CMD + runner = puppet_utils.puppet_runner(module) if not p['manifest'] and not p['execute']: - cmd = ("%(base_cmd)s 
agent --onetime" - " --no-daemonize --no-usecacheonfailure --no-splay" - " --detailed-exitcodes --verbose --color 0") % dict(base_cmd=base_cmd) - if p['puppetmaster']: - cmd += " --server %s" % shlex_quote(p['puppetmaster']) - if p['show_diff']: - cmd += " --show_diff" - if p['environment']: - cmd += " --environment '%s'" % p['environment'] - if p['tags']: - cmd += " --tags '%s'" % ','.join(p['tags']) - if p['certname']: - cmd += " --certname='%s'" % p['certname'] - if module.check_mode: - cmd += " --noop" - elif 'noop' in p: - if p['noop']: - cmd += " --noop" - else: - cmd += " --no-noop" - if p['use_srv_records'] is not None: - if not p['use_srv_records']: - cmd += " --no-use_srv_records" - else: - cmd += " --use_srv_records" + args_order = "_agent_fixed puppetmaster show_diff confdir environment tags skip_tags certname noop use_srv_records waitforlock" + with runner(args_order) as ctx: + rc, stdout, stderr = ctx.run() else: - cmd = "%s apply --detailed-exitcodes " % base_cmd - if p['logdest'] == 'syslog': - cmd += "--logdest syslog " - if p['logdest'] == 'all': - cmd += " --logdest syslog --logdest console" - if p['modulepath']: - cmd += "--modulepath='%s'" % p['modulepath'] - if p['environment']: - cmd += "--environment '%s' " % p['environment'] - if p['certname']: - cmd += " --certname='%s'" % p['certname'] - if p['tags']: - cmd += " --tags '%s'" % ','.join(p['tags']) - if module.check_mode: - cmd += "--noop " - elif 'noop' in p: - if p['noop']: - cmd += " --noop" - else: - cmd += " --no-noop" - if p['execute']: - cmd += " --execute '%s'" % p['execute'] - else: - cmd += " %s" % shlex_quote(p['manifest']) - if p['summarize']: - cmd += " --summarize" - if p['debug']: - cmd += " --debug" - if p['verbose']: - cmd += " --verbose" - rc, stdout, stderr = module.run_command(cmd) + args_order = "_apply_fixed logdest modulepath environment certname tags skip_tags noop _execute summarize debug verbose waitforlock" + with runner(args_order) as ctx: + rc, stdout, stderr = 
ctx.run(_execute=[p['execute'], p['manifest']]) if rc == 0: # success module.exit_json(rc=rc, changed=False, stdout=stdout, stderr=stderr) elif rc == 1: - # rc==1 could be because it's disabled + # rc==1 could be because it is disabled # rc==1 could also mean there was a compilation failure disabled = "administratively disabled" in stdout if disabled: @@ -326,11 +290,11 @@ def main(): elif rc == 124: # timeout module.exit_json( - rc=rc, msg="%s timed out" % cmd, stdout=stdout, stderr=stderr) + rc=rc, msg="%s timed out" % ctx.cmd, stdout=stdout, stderr=stderr) else: # failure module.fail_json( - rc=rc, msg="%s failed with return code: %d" % (cmd, rc), + rc=rc, msg="%s failed with return code: %d" % (ctx.cmd, rc), stdout=stdout, stderr=stderr) diff --git a/plugins/modules/notification/pushbullet.py b/plugins/modules/pushbullet.py similarity index 67% rename from plugins/modules/notification/pushbullet.py rename to plugins/modules/pushbullet.py index 435fcf2fcb..6c0d0d8770 100644 --- a/plugins/modules/notification/pushbullet.py +++ b/plugins/modules/pushbullet.py @@ -1,65 +1,66 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- # -# Copyright: Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# Copyright Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" author: "Willy Barro (@willybarro)" -requirements: [ pushbullet.py ] +requirements: [pushbullet.py] module: pushbullet short_description: Sends notifications to Pushbullet description: - - This module sends push notifications via Pushbullet to channels or devices. + - This module sends push notifications through Pushbullet to channels or devices. 
+extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: none options: - api_key: - type: str - description: - - Push bullet API token - required: true - channel: - type: str - description: - - The channel TAG you wish to broadcast a push notification, - as seen on the "My Channels" > "Edit your channel" at - Pushbullet page. - device: - type: str - description: - - The device NAME you wish to send a push notification, - as seen on the Pushbullet main page. - push_type: - type: str - description: - - Thing you wish to push. - default: note - choices: [ "note", "link" ] - title: - type: str - description: - - Title of the notification. - required: true - body: - type: str - description: - - Body of the notification, e.g. Details of the fault you're alerting. - url: - type: str - description: - - URL field, used when I(push_type) is C(link). - + api_key: + type: str + description: + - Push bullet API token. + required: true + channel: + type: str + description: + - The channel TAG you wish to broadcast a push notification, as seen on the "My Channels" > "Edit your channel" at Pushbullet + page. + device: + type: str + description: + - The device NAME you wish to send a push notification, as seen on the Pushbullet main page. + push_type: + type: str + description: + - Thing you wish to push. + default: note + choices: ["note", "link"] + title: + type: str + description: + - Title of the notification. + required: true + body: + type: str + description: + - Body of the notification, for example details of the fault you are alerting. + url: + type: str + description: + - URL field, used when O(push_type=link). notes: - - Requires pushbullet.py Python package on the remote host. - You can install it via pip with ($ pip install pushbullet.py). - See U(https://github.com/randomchars/pushbullet.py) -''' + - Requires C(pushbullet.py) Python package on the remote host. 
You can install it through C(pip) with C(pip install pushbullet.py). + - See U(https://github.com/randomchars/pushbullet.py). +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: Sends a push notification to a device community.general.pushbullet: api_key: "ABC123abc123ABC123abc123ABC123ab" @@ -78,7 +79,7 @@ EXAMPLES = ''' community.general.pushbullet: api_key: ABC123abc123ABC123abc123ABC123ab channel: my-awesome-channel - title: Broadcasting a message to the #my-awesome-channel folks + title: "Broadcasting a message to the #my-awesome-channel folks" - name: Sends a push notification with title and body to a channel community.general.pushbullet: @@ -86,7 +87,7 @@ EXAMPLES = ''' channel: my-awesome-channel title: ALERT! Signup service is down body: Error rate on signup service is over 90% for more than 2 minutes -''' +""" import traceback @@ -111,12 +112,12 @@ def main(): module = AnsibleModule( argument_spec=dict( api_key=dict(type='str', required=True, no_log=True), - channel=dict(type='str', default=None), - device=dict(type='str', default=None), + channel=dict(type='str'), + device=dict(type='str'), push_type=dict(type='str', default="note", choices=['note', 'link']), title=dict(type='str', required=True), - body=dict(type='str', default=None), - url=dict(type='str', default=None), + body=dict(type='str'), + url=dict(type='str'), ), mutually_exclusive=( ['channel', 'device'], diff --git a/plugins/modules/notification/pushover.py b/plugins/modules/pushover.py similarity index 81% rename from plugins/modules/notification/pushover.py rename to plugins/modules/pushover.py index 7f73592a36..483eeae863 100644 --- a/plugins/modules/notification/pushover.py +++ b/plugins/modules/pushover.py @@ -1,23 +1,26 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- # Copyright (c) 2012, Jim Richardson # Copyright (c) 2019, Bernd Arnold -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# GNU General Public License v3.0+ (see 
LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: pushover -short_description: Send notifications via U(https://pushover.net) +short_description: Send notifications through U(https://pushover.net) description: - - Send notifications via pushover, to subscriber list of devices, and email - addresses. Requires pushover app on devices. + - Send notifications through pushover to subscriber list of devices and email addresses. Requires pushover app on devices. notes: - - You will require a pushover.net account to use this module. But no account - is required to receive messages. + - You need a pushover.net account to use this module. But no account is required to receive messages. +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: none + diff_mode: + support: none options: msg: type: str @@ -45,7 +48,7 @@ options: - Message priority (see U(https://pushover.net) for details). required: false default: '0' - choices: [ '-2', '-1', '0', '1', '2' ] + choices: ['-2', '-1', '0', '1', '2'] device: type: str description: @@ -56,9 +59,9 @@ options: author: - "Jim Richardson (@weaselkeeper)" - "Bernd Arnold (@wopfel)" -''' +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: Send notifications via pushover.net community.general.pushover: msg: '{{ inventory_hostname }} is acting strange ...' 
@@ -82,10 +85,10 @@ EXAMPLES = ''' user_key: baa5fe97f2c5ab3ca8f0bb59 device: admins-iPhone delegate_to: localhost -''' +""" +from urllib.parse import urlencode from ansible.module_utils.basic import AnsibleModule -from ansible.module_utils.six.moves.urllib.parse import urlencode from ansible.module_utils.urls import fetch_url @@ -135,7 +138,7 @@ def main(): msg=dict(required=True), app_token=dict(required=True, no_log=True), user_key=dict(required=True, no_log=True), - pri=dict(required=False, default='0', choices=['-2', '-1', '0', '1', '2']), + pri=dict(default='0', choices=['-2', '-1', '0', '1', '2']), device=dict(type='str'), ), ) diff --git a/plugins/modules/system/python_requirements_info.py b/plugins/modules/python_requirements_info.py similarity index 83% rename from plugins/modules/system/python_requirements_info.py rename to plugins/modules/python_requirements_info.py index dc0e0a44cc..5409b848e4 100644 --- a/plugins/modules/system/python_requirements_info.py +++ b/plugins/modules/python_requirements_info.py @@ -1,33 +1,34 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- # Copyright (c) 2018 Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = ''' +DOCUMENTATION = r""" module: python_requirements_info short_description: Show python path and assert dependency versions description: - Get info about available Python requirements on the target host, including listing required libraries and gathering versions. - - This module was called C(python_requirements_facts) before Ansible 2.9. The usage did not change. 
+extends_documentation_fragment: + - community.general.attributes + - community.general.attributes.info_module options: dependencies: type: list elements: str - description: > - A list of version-likes or module names to check for installation. - Supported operators: <, >, <=, >=, or ==. The bare module name like - I(ansible), the module with a specific version like I(boto3==1.6.1), or a - partial version like I(requests>2) are all valid specifications. + description: + - 'A list of version-likes or module names to check for installation. Supported operators: C(<), C(>), C(<=), C(>=), + or C(==).' + - The bare module name like V(ansible), the module with a specific version like V(boto3==1.6.1), or a partial version + like V(requests>2) are all valid specifications. default: [] author: - Will Thames (@willthames) - Ryan Scott Brown (@ryansb) -''' +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: Show python lib/site paths community.general.python_requirements_info: @@ -36,21 +37,21 @@ EXAMPLES = ''' dependencies: - boto3>1.6 - botocore<2 -''' +""" -RETURN = ''' +RETURN = r""" python: - description: path to python version used + description: Path to the Python interpreter used. returned: always type: str sample: /usr/local/opt/python@2/bin/python2.7 python_version: - description: version of python + description: Version of Python. returned: always type: str sample: "2.7.15 (default, May 1 2018, 16:44:08)\n[GCC 4.2.1 Compatible Apple LLVM 9.1.0 (clang-902.0.39.1)]" python_version_info: - description: breakdown version of python + description: Breakdown version of Python. returned: always type: dict contains: @@ -81,25 +82,26 @@ python_version_info: sample: 0 version_added: 4.2.0 python_system_path: - description: List of paths python is looking for modules in + description: List of paths Python is looking for modules in. 
returned: always type: list sample: - /usr/local/opt/python@2/site-packages/ - /usr/lib/python/site-packages/ valid: - description: A dictionary of dependencies that matched their desired versions. If no version was specified, then I(desired) will be null + description: A dictionary of dependencies that matched their desired versions. If no version was specified, then RV(ignore:desired) + is V(null). returned: always type: dict sample: boto3: - desired: null + desired: installed: 1.7.60 botocore: desired: botocore<2 installed: 1.10.60 mismatched: - description: A dictionary of dependencies that did not satisfy the desired version + description: A dictionary of dependencies that did not satisfy the desired version. returned: always type: dict sample: @@ -107,13 +109,13 @@ mismatched: desired: botocore>2 installed: 1.10.60 not_found: - description: A list of packages that could not be imported at all, and are not installed + description: A list of packages that could not be imported at all, and are not installed. 
returned: always type: list sample: - boto4 - requests -''' +""" import re import sys diff --git a/plugins/modules/files/read_csv.py b/plugins/modules/read_csv.py similarity index 69% rename from plugins/modules/files/read_csv.py rename to plugins/modules/read_csv.py index 484a365e4c..e195029d03 100644 --- a/plugins/modules/files/read_csv.py +++ b/plugins/modules/read_csv.py @@ -1,73 +1,79 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- -# Copyright: (c) 2018, Dag Wieers (@dagwieers) -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# Copyright (c) 2018, Dag Wieers (@dagwieers) +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = r''' ---- +DOCUMENTATION = r""" module: read_csv short_description: Read a CSV file description: -- Read a CSV file and return a list or a dictionary, containing one dictionary per row. + - Read a CSV file and return a list or a dictionary, containing one dictionary per row. author: -- Dag Wieers (@dagwieers) + - Dag Wieers (@dagwieers) +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: none options: path: description: - - The CSV filename to read data from. + - The CSV filename to read data from. type: path - required: yes - aliases: [ filename ] + required: true + aliases: [filename] key: description: - - The column name used as a key for the resulting dictionary. - - If C(key) is unset, the module returns a list of dictionaries, - where each dictionary is a row in the CSV file. + - The column name used as a key for the resulting dictionary. + - If O(key) is unset, the module returns a list of dictionaries, where each dictionary is a row in the CSV file. 
type: str dialect: description: - - The CSV dialect to use when parsing the CSV file. - - Possible values include C(excel), C(excel-tab) or C(unix). + - The CSV dialect to use when parsing the CSV file. + - Possible values include V(excel), V(excel-tab) or V(unix). type: str default: excel fieldnames: description: - - A list of field names for every column. - - This is needed if the CSV does not have a header. + - A list of field names for every column. + - This is needed if the CSV does not have a header. type: list elements: str unique: description: - - Whether the C(key) used is expected to be unique. + - Whether the O(key) used is expected to be unique. type: bool - default: yes + default: true delimiter: description: - - A one-character string used to separate fields. - - When using this parameter, you change the default value used by I(dialect). - - The default value depends on the dialect used. + - A one-character string used to separate fields. + - When using this parameter, you change the default value used by O(dialect). + - The default value depends on the dialect used. type: str skipinitialspace: description: - - Whether to ignore any whitespaces immediately following the delimiter. - - When using this parameter, you change the default value used by I(dialect). - - The default value depends on the dialect used. + - Whether to ignore any whitespaces immediately following the delimiter. + - When using this parameter, you change the default value used by O(dialect). + - The default value depends on the dialect used. type: bool strict: description: - - Whether to raise an exception on bad CSV input. - - When using this parameter, you change the default value used by I(dialect). - - The default value depends on the dialect used. + - Whether to raise an exception on bad CSV input. + - When using this parameter, you change the default value used by O(dialect). + - The default value depends on the dialect used. 
type: bool -notes: -- Ansible also ships with the C(csvfile) lookup plugin, which can be used to do selective lookups in CSV files from Jinja. -''' +seealso: + - plugin: ansible.builtin.csvfile + plugin_type: lookup + description: Can be used to do selective lookups in CSV files from Jinja. +""" -EXAMPLES = r''' +EXAMPLES = r""" # Example CSV file with header # # name,uid,gid @@ -108,9 +114,9 @@ EXAMPLES = r''' delimiter: ';' register: users delegate_to: localhost -''' +""" -RETURN = r''' +RETURN = r""" dict: description: The CSV content as a dictionary. returned: success @@ -129,13 +135,13 @@ list: returned: success type: list sample: - - name: dag - uid: 500 - gid: 500 - - name: jeroen - uid: 501 - gid: 500 -''' + - name: dag + uid: 500 + gid: 500 + - name: jeroen + uid: 501 + gid: 500 +""" from ansible.module_utils.basic import AnsibleModule from ansible.module_utils.common.text.converters import to_native diff --git a/plugins/modules/redfish_command.py b/plugins/modules/redfish_command.py new file mode 100644 index 0000000000..736d38d6c4 --- /dev/null +++ b/plugins/modules/redfish_command.py @@ -0,0 +1,1159 @@ +#!/usr/bin/python + +# Copyright (c) 2017-2018 Dell EMC Inc. +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + +DOCUMENTATION = r""" +module: redfish_command +short_description: Manages Out-Of-Band controllers using Redfish APIs +description: + - Builds Redfish URIs locally and sends them to remote OOB controllers to perform an action. + - Manages OOB controller ex. reboot, log management. + - Manages OOB controller users ex. add, remove, update. + - Manages system power ex. on, off, graceful and forced reboot. 
+extends_documentation_fragment: + - community.general.attributes + - community.general.redfish +attributes: + check_mode: + support: none + diff_mode: + support: none +options: + category: + required: true + description: + - Category to execute on OOB controller. + type: str + command: + required: true + description: + - List of commands to execute on OOB controller. + type: list + elements: str + baseuri: + required: true + description: + - Base URI of OOB controller. + type: str + username: + description: + - Username for authenticating to OOB controller. + type: str + password: + description: + - Password for authenticating to OOB controller. + type: str + auth_token: + description: + - Security token for authenticating to OOB controller. + type: str + version_added: 2.3.0 + session_uri: + description: + - URI of the session resource. + type: str + version_added: 2.3.0 + id: + required: false + aliases: [account_id] + description: + - ID of account to delete/modify. + - Can also be used in account creation to work around vendor issues where the ID of the new user is required in the + POST request. + type: str + new_username: + required: false + aliases: [account_username] + description: + - Username of account to add/delete/modify. + type: str + new_password: + required: false + aliases: [account_password] + description: + - New password of account to add/modify. + type: str + roleid: + required: false + aliases: [account_roleid] + description: + - Role of account to add/modify. + type: str + account_types: + required: false + aliases: [account_accounttypes] + description: + - Array of account types to apply to a user account. + type: list + elements: str + version_added: '7.2.0' + oem_account_types: + required: false + aliases: [account_oemaccounttypes] + description: + - Array of OEM account types to apply to a user account. 
+ type: list + elements: str + version_added: '7.2.0' + bootdevice: + required: false + description: + - Boot device when setting boot configuration. + type: str + timeout: + description: + - Timeout in seconds for HTTP requests to OOB controller. + - The default value for this parameter changed from V(10) to V(60) in community.general 9.0.0. + type: int + default: 60 + boot_override_mode: + description: + - Boot mode when using an override. + type: str + choices: [Legacy, UEFI] + version_added: 3.5.0 + uefi_target: + required: false + description: + - UEFI boot target when bootdevice is "UefiTarget". + type: str + boot_next: + required: false + description: + - BootNext target when bootdevice is "UefiBootNext". + type: str + update_username: + required: false + aliases: [account_updatename] + description: + - New user name for updating account_username. + type: str + version_added: '0.2.0' + account_properties: + required: false + description: + - Properties of account service to update. + type: dict + default: {} + version_added: '0.2.0' + resource_id: + required: false + description: + - ID of the System, Manager or Chassis to modify. + type: str + version_added: '0.2.0' + update_image_uri: + required: false + description: + - URI of the image for the update. + type: str + version_added: '0.2.0' + update_image_file: + required: false + description: + - Filename, with optional path, of the image for the update. + type: path + version_added: '7.1.0' + update_protocol: + required: false + description: + - Protocol for the update. + type: str + version_added: '0.2.0' + update_targets: + required: false + description: + - List of target resource URIs to apply the update to. + type: list + elements: str + default: [] + version_added: '0.2.0' + update_creds: + required: false + description: + - Credentials for retrieving the update image. 
+ type: dict + version_added: '0.2.0' + suboptions: + username: + required: false + description: + - Username for retrieving the update image. + type: str + password: + required: false + description: + - Password for retrieving the update image. + type: str + update_apply_time: + required: false + description: + - Time when to apply the update. + type: str + choices: + - Immediate + - OnReset + - AtMaintenanceWindowStart + - InMaintenanceWindowOnReset + - OnStartUpdateRequest + version_added: '6.1.0' + update_oem_params: + required: false + description: + - Properties for HTTP Multipart Push Updates. + type: dict + version_added: '7.5.0' + update_handle: + required: false + description: + - Handle to check the status of an update in progress. + type: str + version_added: '6.1.0' + update_custom_oem_header: + required: false + description: + - Optional OEM header, sent as separate form-data for the Multipart HTTP push update. + - The header shall start with "Oem" according to DMTF Redfish spec 12.6.2.2. + - For more details, see U(https://www.dmtf.org/sites/default/files/standards/documents/DSP0266_1.21.0.html). + - If set, then O(update_custom_oem_params) is required too. + type: str + version_added: '10.1.0' + update_custom_oem_params: + required: false + description: + - Custom OEM properties for HTTP Multipart Push updates. + - If set, then O(update_custom_oem_header) is required too. + - The properties are passed raw without any validation or conversion by Ansible. This means the content can be a file, + a string, or any other data. If the content is a dictionary that should be converted to JSON, then the content must + be converted to JSON before passing it to this module using the P(ansible.builtin.to_json#filter) filter. + type: raw + version_added: '10.1.0' + update_custom_oem_mime_type: + required: false + description: + - MIME Type for custom OEM properties for HTTP Multipart Push updates. 
+ type: str + version_added: '10.1.0' + virtual_media: + required: false + description: + - Options for VirtualMedia commands. + type: dict + version_added: '0.2.0' + suboptions: + media_types: + required: false + description: + - List of media types appropriate for the image. + type: list + elements: str + default: [] + image_url: + required: false + description: + - URL of the image to insert or eject. + type: str + inserted: + required: false + description: + - Indicates that the image is treated as inserted on command completion. + type: bool + default: true + write_protected: + required: false + description: + - Indicates that the media is treated as write-protected. + type: bool + default: true + username: + required: false + description: + - Username for accessing the image URL. + type: str + password: + required: false + description: + - Password for accessing the image URL. + type: str + transfer_protocol_type: + required: false + description: + - Network protocol to use with the image. + type: str + transfer_method: + required: false + description: + - Transfer method to use with the image. + type: str + strip_etag_quotes: + description: + - Removes surrounding quotes of etag used in C(If-Match) header of C(PATCH) requests. + - Only use this option to resolve bad vendor implementation where C(If-Match) only matches the unquoted etag string. + type: bool + default: false + version_added: 3.7.0 + bios_attributes: + required: false + description: + - BIOS attributes that needs to be verified in the given server. + type: dict + version_added: 6.4.0 + reset_to_defaults_mode: + description: + - Mode to apply when reseting to default. + type: str + choices: [ResetAll, PreserveNetworkAndUsers, PreserveNetwork] + version_added: 8.6.0 + wait: + required: false + description: + - Block until the service is ready again. 
+ type: bool + default: false + version_added: 9.1.0 + wait_timeout: + required: false + description: + - How long to block until the service is ready again before giving up. + type: int + default: 120 + version_added: 9.1.0 + ciphers: + version_added: 9.2.0 + validate_certs: + version_added: 10.6.0 + ca_path: + version_added: 10.6.0 + +author: + - "Jose Delarosa (@jose-delarosa)" + - "T S Kushal (@TSKushal)" +""" + +EXAMPLES = r""" +- name: Restart system power gracefully + community.general.redfish_command: + category: Systems + command: PowerGracefulRestart + resource_id: 437XR1138R2 + baseuri: "{{ baseuri }}" + username: "{{ username }}" + password: "{{ password }}" + +- name: Turn system power off + community.general.redfish_command: + category: Systems + command: PowerForceOff + resource_id: 437XR1138R2 + +- name: Restart system power forcefully + community.general.redfish_command: + category: Systems + command: PowerForceRestart + resource_id: 437XR1138R2 + +- name: Shutdown system power gracefully + community.general.redfish_command: + category: Systems + command: PowerGracefulShutdown + resource_id: 437XR1138R2 + +- name: Turn system power on + community.general.redfish_command: + category: Systems + command: PowerOn + resource_id: 437XR1138R2 + +- name: Reboot system power + community.general.redfish_command: + category: Systems + command: PowerReboot + resource_id: 437XR1138R2 + +- name: Set one-time boot device to {{ bootdevice }} + community.general.redfish_command: + category: Systems + command: SetOneTimeBoot + resource_id: 437XR1138R2 + bootdevice: "{{ bootdevice }}" + baseuri: "{{ baseuri }}" + username: "{{ username }}" + password: "{{ password }}" + +- name: Set one-time boot device to UefiTarget of "/0x31/0x33/0x01/0x01" + community.general.redfish_command: + category: Systems + command: SetOneTimeBoot + resource_id: 437XR1138R2 + bootdevice: "UefiTarget" + uefi_target: "/0x31/0x33/0x01/0x01" + baseuri: "{{ baseuri }}" + username: "{{ username 
}}" + password: "{{ password }}" + +- name: Set one-time boot device to BootNext target of "Boot0001" + community.general.redfish_command: + category: Systems + command: SetOneTimeBoot + resource_id: 437XR1138R2 + bootdevice: "UefiBootNext" + boot_next: "Boot0001" + baseuri: "{{ baseuri }}" + username: "{{ username }}" + password: "{{ password }}" + +- name: Set persistent boot device override + community.general.redfish_command: + category: Systems + command: EnableContinuousBootOverride + resource_id: 437XR1138R2 + bootdevice: "{{ bootdevice }}" + baseuri: "{{ baseuri }}" + username: "{{ username }}" + password: "{{ password }}" + +- name: Set one-time boot to BiosSetup + community.general.redfish_command: + category: Systems + command: SetOneTimeBoot + boot_next: BiosSetup + boot_override_mode: Legacy + baseuri: "{{ baseuri }}" + username: "{{ username }}" + password: "{{ password }}" + +- name: Disable persistent boot device override + community.general.redfish_command: + category: Systems + command: DisableBootOverride + +- name: Set system indicator LED to blink using security token for auth + community.general.redfish_command: + category: Systems + command: IndicatorLedBlink + resource_id: 437XR1138R2 + baseuri: "{{ baseuri }}" + auth_token: "{{ result.session.token }}" + +- name: Add user + community.general.redfish_command: + category: Accounts + command: AddUser + baseuri: "{{ baseuri }}" + username: "{{ username }}" + password: "{{ password }}" + new_username: "{{ new_username }}" + new_password: "{{ new_password }}" + roleid: "{{ roleid }}" + +- name: Add user with specified account types + community.general.redfish_command: + category: Accounts + command: AddUser + baseuri: "{{ baseuri }}" + username: "{{ username }}" + password: "{{ password }}" + new_username: "{{ new_username }}" + new_password: "{{ new_password }}" + roleid: "{{ roleid }}" + account_types: + - Redfish + - WebUI + +- name: Add user using new option aliases + 
community.general.redfish_command: + category: Accounts + command: AddUser + baseuri: "{{ baseuri }}" + username: "{{ username }}" + password: "{{ password }}" + account_username: "{{ account_username }}" + account_password: "{{ account_password }}" + account_roleid: "{{ account_roleid }}" + +- name: Delete user + community.general.redfish_command: + category: Accounts + command: DeleteUser + baseuri: "{{ baseuri }}" + username: "{{ username }}" + password: "{{ password }}" + account_username: "{{ account_username }}" + +- name: Disable user + community.general.redfish_command: + category: Accounts + command: DisableUser + baseuri: "{{ baseuri }}" + username: "{{ username }}" + password: "{{ password }}" + account_username: "{{ account_username }}" + +- name: Enable user + community.general.redfish_command: + category: Accounts + command: EnableUser + baseuri: "{{ baseuri }}" + username: "{{ username }}" + password: "{{ password }}" + account_username: "{{ account_username }}" + +- name: Add and enable user + community.general.redfish_command: + category: Accounts + command: AddUser,EnableUser + baseuri: "{{ baseuri }}" + username: "{{ username }}" + password: "{{ password }}" + new_username: "{{ new_username }}" + new_password: "{{ new_password }}" + roleid: "{{ roleid }}" + +- name: Update user password + community.general.redfish_command: + category: Accounts + command: UpdateUserPassword + baseuri: "{{ baseuri }}" + username: "{{ username }}" + password: "{{ password }}" + account_username: "{{ account_username }}" + account_password: "{{ account_password }}" + +- name: Update user role + community.general.redfish_command: + category: Accounts + command: UpdateUserRole + baseuri: "{{ baseuri }}" + username: "{{ username }}" + password: "{{ password }}" + account_username: "{{ account_username }}" + roleid: "{{ roleid }}" + +- name: Update user name + community.general.redfish_command: + category: Accounts + command: UpdateUserName + baseuri: "{{ baseuri }}" + 
username: "{{ username }}" + password: "{{ password }}" + account_username: "{{ account_username }}" + account_updatename: "{{ account_updatename }}" + +- name: Update user name + community.general.redfish_command: + category: Accounts + command: UpdateUserName + baseuri: "{{ baseuri }}" + username: "{{ username }}" + password: "{{ password }}" + account_username: "{{ account_username }}" + update_username: "{{ update_username }}" + +- name: Update AccountService properties + community.general.redfish_command: + category: Accounts + command: UpdateAccountServiceProperties + baseuri: "{{ baseuri }}" + username: "{{ username }}" + password: "{{ password }}" + account_properties: + AccountLockoutThreshold: 5 + AccountLockoutDuration: 600 + +- name: Update user AccountTypes + community.general.redfish_command: + category: Accounts + command: UpdateUserAccountTypes + baseuri: "{{ baseuri }}" + username: "{{ username }}" + password: "{{ password }}" + account_username: "{{ account_username }}" + account_types: + - Redfish + - WebUI + +- name: Clear Manager Logs with a timeout of 20 seconds + community.general.redfish_command: + category: Manager + command: ClearLogs + resource_id: BMC + baseuri: "{{ baseuri }}" + username: "{{ username }}" + password: "{{ password }}" + timeout: 20 + +- name: Create session + community.general.redfish_command: + category: Sessions + command: CreateSession + baseuri: "{{ baseuri }}" + username: "{{ username }}" + password: "{{ password }}" + register: result + +- name: Set chassis indicator LED to blink using security token for auth + community.general.redfish_command: + category: Chassis + command: IndicatorLedBlink + resource_id: 1U + baseuri: "{{ baseuri }}" + auth_token: "{{ result.session.token }}" + +- name: Delete session using security token created by CreateSesssion above + community.general.redfish_command: + category: Sessions + command: DeleteSession + baseuri: "{{ baseuri }}" + auth_token: "{{ result.session.token }}" + 
session_uri: "{{ result.session.uri }}" + +- name: Clear Sessions + community.general.redfish_command: + category: Sessions + command: ClearSessions + baseuri: "{{ baseuri }}" + username: "{{ username }}" + password: "{{ password }}" + +- name: Simple update + community.general.redfish_command: + category: Update + command: SimpleUpdate + baseuri: "{{ baseuri }}" + username: "{{ username }}" + password: "{{ password }}" + update_image_uri: https://example.com/myupdate.img + +- name: Simple update with additional options + community.general.redfish_command: + category: Update + command: SimpleUpdate + baseuri: "{{ baseuri }}" + username: "{{ username }}" + password: "{{ password }}" + update_image_uri: //example.com/myupdate.img + update_protocol: FTP + update_targets: + - /redfish/v1/UpdateService/FirmwareInventory/BMC + update_creds: + username: operator + password: supersecretpwd + +- name: Multipart HTTP push update; timeout is 600 seconds to allow for a large image transfer + community.general.redfish_command: + category: Update + command: MultipartHTTPPushUpdate + baseuri: "{{ baseuri }}" + username: "{{ username }}" + password: "{{ password }}" + timeout: 600 + update_image_file: ~/images/myupdate.img + +- name: Multipart HTTP push with additional options; timeout is 600 seconds to allow for a large image transfer + community.general.redfish_command: + category: Update + command: MultipartHTTPPushUpdate + baseuri: "{{ baseuri }}" + username: "{{ username }}" + password: "{{ password }}" + timeout: 600 + update_image_file: ~/images/myupdate.img + update_targets: + - /redfish/v1/UpdateService/FirmwareInventory/BMC + update_oem_params: + PreserveConfiguration: false + +- name: Multipart HTTP push with custom OEM options + vars: + oem_payload: + ImageType: BMC + community.general.redfish_command: + category: Update + command: MultipartHTTPPushUpdate + baseuri: "{{ baseuri }}" + username: "{{ username }}" + password: "{{ password }}" + update_image_file: 
~/images/myupdate.img + update_targets: + - /redfish/v1/UpdateService/FirmwareInventory/BMC + update_custom_oem_header: OemParameters + update_custom_oem_mime_type: "application/json" + update_custom_oem_params: "{{ oem_payload | to_json }}" + +- name: Perform requested operations to continue the update + community.general.redfish_command: + category: Update + command: PerformRequestedOperations + baseuri: "{{ baseuri }}" + username: "{{ username }}" + password: "{{ password }}" + update_handle: /redfish/v1/TaskService/TaskMonitors/735 + +- name: Insert Virtual Media + community.general.redfish_command: + category: Systems + command: VirtualMediaInsert + baseuri: "{{ baseuri }}" + username: "{{ username }}" + password: "{{ password }}" + virtual_media: + image_url: 'http://example.com/images/SomeLinux-current.iso' + media_types: + - CD + - DVD + resource_id: 1 + +- name: Insert Virtual Media + community.general.redfish_command: + category: Manager + command: VirtualMediaInsert + baseuri: "{{ baseuri }}" + username: "{{ username }}" + password: "{{ password }}" + virtual_media: + image_url: 'http://example.com/images/SomeLinux-current.iso' + media_types: + - CD + - DVD + resource_id: BMC + +- name: Eject Virtual Media + community.general.redfish_command: + category: Systems + command: VirtualMediaEject + baseuri: "{{ baseuri }}" + username: "{{ username }}" + password: "{{ password }}" + virtual_media: + image_url: 'http://example.com/images/SomeLinux-current.iso' + resource_id: 1 + +- name: Eject Virtual Media + community.general.redfish_command: + category: Manager + command: VirtualMediaEject + baseuri: "{{ baseuri }}" + username: "{{ username }}" + password: "{{ password }}" + virtual_media: + image_url: 'http://example.com/images/SomeLinux-current.iso' + resource_id: BMC + +- name: Restart manager power gracefully + community.general.redfish_command: + category: Manager + command: GracefulRestart + resource_id: BMC + baseuri: "{{ baseuri }}" + username: "{{ 
username }}" + password: "{{ password }}" + +- name: Restart manager power gracefully and wait for it to be available + community.general.redfish_command: + category: Manager + command: GracefulRestart + resource_id: BMC + baseuri: "{{ baseuri }}" + username: "{{ username }}" + password: "{{ password }}" + wait: true + +- name: Restart manager power gracefully + community.general.redfish_command: + category: Manager + command: PowerGracefulRestart + resource_id: BMC + +- name: Turn manager power off + community.general.redfish_command: + category: Manager + command: PowerForceOff + resource_id: BMC + +- name: Restart manager power forcefully + community.general.redfish_command: + category: Manager + command: PowerForceRestart + resource_id: BMC + +- name: Shutdown manager power gracefully + community.general.redfish_command: + category: Manager + command: PowerGracefulShutdown + resource_id: BMC + +- name: Turn manager power on + community.general.redfish_command: + category: Manager + command: PowerOn + resource_id: BMC + +- name: Reboot manager power + community.general.redfish_command: + category: Manager + command: PowerReboot + resource_id: BMC + +- name: Factory reset manager to defaults + community.general.redfish_command: + category: Manager + command: ResetToDefaults + resource_id: BMC + reset_to_defaults_mode: ResetAll + +- name: Verify BIOS attributes + community.general.redfish_command: + category: Systems + command: VerifyBiosAttributes + baseuri: "{{ baseuri }}" + username: "{{ username }}" + password: "{{ password }}" + bios_attributes: + SubNumaClustering: "Disabled" + WorkloadProfile: "Virtualization-MaxPerformance" +""" + +RETURN = r""" +msg: + description: Message with action result or error description. + returned: always + type: str + sample: "Action was successful" +return_values: + description: Dictionary containing command-specific response data from the action. 
+ returned: on success + type: dict + version_added: 6.1.0 + sample: + { + "update_status": { + "handle": "/redfish/v1/TaskService/TaskMonitors/735", + "messages": [], + "resets_requested": [], + "ret": true, + "status": "New" + } + } +""" + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.redfish_utils import RedfishUtils, REDFISH_COMMON_ARGUMENT_SPEC +from ansible.module_utils.common.text.converters import to_native + + +# More will be added as module features are expanded +CATEGORY_COMMANDS_ALL = { + "Systems": ["PowerOn", "PowerForceOff", "PowerForceRestart", "PowerGracefulRestart", + "PowerGracefulShutdown", "PowerReboot", "PowerCycle", "PowerFullPowerCycle", + "SetOneTimeBoot", "EnableContinuousBootOverride", "DisableBootOverride", + "IndicatorLedOn", "IndicatorLedOff", "IndicatorLedBlink", "VirtualMediaInsert", + "VirtualMediaEject", "VerifyBiosAttributes"], + "Chassis": ["IndicatorLedOn", "IndicatorLedOff", "IndicatorLedBlink"], + "Accounts": ["AddUser", "EnableUser", "DeleteUser", "DisableUser", + "UpdateUserRole", "UpdateUserPassword", "UpdateUserName", + "UpdateUserAccountTypes", "UpdateAccountServiceProperties"], + "Sessions": ["ClearSessions", "CreateSession", "DeleteSession"], + "Manager": ["GracefulRestart", "ClearLogs", "VirtualMediaInsert", + "ResetToDefaults", + "VirtualMediaEject", "PowerOn", "PowerForceOff", "PowerForceRestart", + "PowerGracefulRestart", "PowerGracefulShutdown", "PowerReboot"], + "Update": ["SimpleUpdate", "MultipartHTTPPushUpdate", "PerformRequestedOperations"], +} + + +def main(): + result = {} + return_values = {} + argument_spec = dict( + category=dict(required=True), + command=dict(required=True, type='list', elements='str'), + baseuri=dict(required=True), + username=dict(), + password=dict(no_log=True), + auth_token=dict(no_log=True), + session_uri=dict(), + id=dict(aliases=["account_id"]), + new_username=dict(aliases=["account_username"]), + 
new_password=dict(aliases=["account_password"], no_log=True), + roleid=dict(aliases=["account_roleid"]), + account_types=dict(type='list', elements='str', aliases=["account_accounttypes"]), + oem_account_types=dict(type='list', elements='str', aliases=["account_oemaccounttypes"]), + update_username=dict(type='str', aliases=["account_updatename"]), + account_properties=dict(type='dict', default={}), + bootdevice=dict(), + timeout=dict(type='int', default=60), + uefi_target=dict(), + boot_next=dict(), + boot_override_mode=dict(choices=['Legacy', 'UEFI']), + resource_id=dict(), + update_image_uri=dict(), + update_image_file=dict(type='path'), + update_protocol=dict(), + update_targets=dict(type='list', elements='str', default=[]), + update_oem_params=dict(type='dict'), + update_custom_oem_header=dict(type='str'), + update_custom_oem_mime_type=dict(type='str'), + update_custom_oem_params=dict(type='raw'), + update_creds=dict( + type='dict', + options=dict( + username=dict(), + password=dict(no_log=True) + ) + ), + update_apply_time=dict(choices=['Immediate', 'OnReset', 'AtMaintenanceWindowStart', + 'InMaintenanceWindowOnReset', 'OnStartUpdateRequest']), + update_handle=dict(), + virtual_media=dict( + type='dict', + options=dict( + media_types=dict(type='list', elements='str', default=[]), + image_url=dict(), + inserted=dict(type='bool', default=True), + write_protected=dict(type='bool', default=True), + username=dict(), + password=dict(no_log=True), + transfer_protocol_type=dict(), + transfer_method=dict(), + ) + ), + strip_etag_quotes=dict(type='bool', default=False), + reset_to_defaults_mode=dict(choices=['ResetAll', 'PreserveNetworkAndUsers', 'PreserveNetwork']), + bios_attributes=dict(type="dict"), + wait=dict(type='bool', default=False), + wait_timeout=dict(type='int', default=120), + ) + argument_spec.update(REDFISH_COMMON_ARGUMENT_SPEC) + module = AnsibleModule( + argument_spec, + required_together=[ + ('username', 'password'), + ('update_custom_oem_header', 
'update_custom_oem_params'), + ], + required_one_of=[ + ('username', 'auth_token'), + ], + mutually_exclusive=[ + ('username', 'auth_token'), + ], + supports_check_mode=False + ) + + category = module.params['category'] + command_list = module.params['command'] + + # admin credentials used for authentication + creds = {'user': module.params['username'], + 'pswd': module.params['password'], + 'token': module.params['auth_token']} + + # user to add/modify/delete + user = { + 'account_id': module.params['id'], + 'account_username': module.params['new_username'], + 'account_password': module.params['new_password'], + 'account_roleid': module.params['roleid'], + 'account_accounttypes': module.params['account_types'], + 'account_oemaccounttypes': module.params['oem_account_types'], + 'account_updatename': module.params['update_username'], + 'account_properties': module.params['account_properties'], + 'account_passwordchangerequired': None, + } + + # timeout + timeout = module.params['timeout'] + + # System, Manager or Chassis ID to modify + resource_id = module.params['resource_id'] + + # update options + update_opts = { + 'update_image_uri': module.params['update_image_uri'], + 'update_image_file': module.params['update_image_file'], + 'update_protocol': module.params['update_protocol'], + 'update_targets': module.params['update_targets'], + 'update_creds': module.params['update_creds'], + 'update_apply_time': module.params['update_apply_time'], + 'update_oem_params': module.params['update_oem_params'], + 'update_custom_oem_header': module.params['update_custom_oem_header'], + 'update_custom_oem_params': module.params['update_custom_oem_params'], + 'update_custom_oem_mime_type': module.params['update_custom_oem_mime_type'], + 'update_handle': module.params['update_handle'], + } + + # Boot override options + boot_opts = { + 'bootdevice': module.params['bootdevice'], + 'uefi_target': module.params['uefi_target'], + 'boot_next': module.params['boot_next'], + 
'boot_override_mode': module.params['boot_override_mode'], + } + + # VirtualMedia options + virtual_media = module.params['virtual_media'] + + # Etag options + strip_etag_quotes = module.params['strip_etag_quotes'] + + # BIOS Attributes options + bios_attributes = module.params['bios_attributes'] + + # Build root URI + root_uri = "https://" + module.params['baseuri'] + rf_utils = RedfishUtils(creds, root_uri, timeout, module, + resource_id=resource_id, data_modification=True, strip_etag_quotes=strip_etag_quotes) + + # Check that Category is valid + if category not in CATEGORY_COMMANDS_ALL: + module.fail_json(msg=to_native("Invalid Category '%s'. Valid Categories = %s" % (category, list(CATEGORY_COMMANDS_ALL.keys())))) + + # Check that all commands are valid + for cmd in command_list: + # Fail if even one command given is invalid + if cmd not in CATEGORY_COMMANDS_ALL[category]: + module.fail_json(msg=to_native("Invalid Command '%s'. Valid Commands = %s" % (cmd, CATEGORY_COMMANDS_ALL[category]))) + + # Organize by Categories / Commands + if category == "Accounts": + ACCOUNTS_COMMANDS = { + "AddUser": rf_utils.add_user, + "EnableUser": rf_utils.enable_user, + "DeleteUser": rf_utils.delete_user, + "DisableUser": rf_utils.disable_user, + "UpdateUserRole": rf_utils.update_user_role, + "UpdateUserPassword": rf_utils.update_user_password, + "UpdateUserName": rf_utils.update_user_name, + "UpdateUserAccountTypes": rf_utils.update_user_accounttypes, + "UpdateAccountServiceProperties": rf_utils.update_accountservice_properties + } + + # execute only if we find an Account service resource + result = rf_utils._find_accountservice_resource() + if result['ret'] is False: + # If a password change is required and the user is attempting to + # modify their password, try to proceed. 
+ user['account_passwordchangerequired'] = rf_utils.check_password_change_required(result) + if len(command_list) == 1 and command_list[0] == "UpdateUserPassword" and user['account_passwordchangerequired']: + result = rf_utils.update_user_password(user) + else: + module.fail_json(msg=to_native(result['msg'])) + else: + for command in command_list: + result = ACCOUNTS_COMMANDS[command](user) + + elif category == "Systems": + # execute only if we find a System resource + result = rf_utils._find_systems_resource() + if result['ret'] is False: + module.fail_json(msg=to_native(result['msg'])) + + for command in command_list: + if command.startswith('Power'): + result = rf_utils.manage_system_power(command) + elif command == "SetOneTimeBoot": + boot_opts['override_enabled'] = 'Once' + result = rf_utils.set_boot_override(boot_opts) + elif command == "EnableContinuousBootOverride": + boot_opts['override_enabled'] = 'Continuous' + result = rf_utils.set_boot_override(boot_opts) + elif command == "DisableBootOverride": + boot_opts['override_enabled'] = 'Disabled' + result = rf_utils.set_boot_override(boot_opts) + elif command.startswith('IndicatorLed'): + result = rf_utils.manage_system_indicator_led(command) + elif command == 'VirtualMediaInsert': + result = rf_utils.virtual_media_insert(virtual_media, category) + elif command == 'VirtualMediaEject': + result = rf_utils.virtual_media_eject(virtual_media, category) + elif command == 'VerifyBiosAttributes': + result = rf_utils.verify_bios_attributes(bios_attributes) + + elif category == "Chassis": + result = rf_utils._find_chassis_resource() + if result['ret'] is False: + module.fail_json(msg=to_native(result['msg'])) + + led_commands = ["IndicatorLedOn", "IndicatorLedOff", "IndicatorLedBlink"] + + # Check if more than one led_command is present + num_led_commands = sum([command in led_commands for command in command_list]) + if num_led_commands > 1: + result = {'ret': False, 'msg': "Only one IndicatorLed command should be 
sent at a time."} + else: + for command in command_list: + if command in led_commands: + result = rf_utils.manage_chassis_indicator_led(command) + + elif category == "Sessions": + # execute only if we find SessionService resources + resource = rf_utils._find_sessionservice_resource() + if resource['ret'] is False: + module.fail_json(msg=resource['msg']) + + for command in command_list: + if command == "ClearSessions": + result = rf_utils.clear_sessions() + elif command == "CreateSession": + result = rf_utils.create_session() + elif command == "DeleteSession": + result = rf_utils.delete_session(module.params['session_uri']) + + elif category == "Manager": + # execute only if we find a Manager service resource + result = rf_utils._find_managers_resource() + if result['ret'] is False: + module.fail_json(msg=to_native(result['msg'])) + + for command in command_list: + # standardize on the Power* commands, but allow the legacy + # GracefulRestart command + if command == 'GracefulRestart': + command = 'PowerGracefulRestart' + + if command.startswith('Power'): + result = rf_utils.manage_manager_power(command, module.params['wait'], module.params['wait_timeout']) + elif command == 'ClearLogs': + result = rf_utils.clear_logs() + elif command == 'VirtualMediaInsert': + result = rf_utils.virtual_media_insert(virtual_media, category) + elif command == 'VirtualMediaEject': + result = rf_utils.virtual_media_eject(virtual_media, category) + elif command == 'ResetToDefaults': + result = rf_utils.manager_reset_to_defaults(module.params['reset_to_defaults_mode']) + + elif category == "Update": + # execute only if we find UpdateService resources + resource = rf_utils._find_updateservice_resource() + if resource['ret'] is False: + module.fail_json(msg=resource['msg']) + + for command in command_list: + if command == "SimpleUpdate": + result = rf_utils.simple_update(update_opts) + if 'update_status' in result: + return_values['update_status'] = result['update_status'] + elif command == 
"MultipartHTTPPushUpdate": + result = rf_utils.multipath_http_push_update(update_opts) + if 'update_status' in result: + return_values['update_status'] = result['update_status'] + elif command == "PerformRequestedOperations": + result = rf_utils.perform_requested_update_operations(update_opts['update_handle']) + + # Return data back or fail with proper message + if result['ret'] is True: + del result['ret'] + changed = result.get('changed', True) + session = result.get('session', dict()) + module.exit_json(changed=changed, session=session, + msg='Action was successful', + return_values=return_values) + else: + module.fail_json(msg=to_native(result['msg'])) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/redfish_config.py b/plugins/modules/redfish_config.py new file mode 100644 index 0000000000..a804baab8e --- /dev/null +++ b/plugins/modules/redfish_config.py @@ -0,0 +1,588 @@ +#!/usr/bin/python + +# Copyright (c) 2017-2018 Dell EMC Inc. +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + +DOCUMENTATION = r""" +module: redfish_config +short_description: Manages Out-Of-Band controllers using Redfish APIs +description: + - Builds Redfish URIs locally and sends them to remote OOB controllers to set or update a configuration attribute. + - Manages BIOS configuration settings. + - Manages OOB controller configuration settings. +extends_documentation_fragment: + - community.general.attributes + - community.general.redfish +attributes: + check_mode: + support: none + diff_mode: + support: none +options: + category: + required: true + description: + - Category to execute on OOB controller. + type: str + command: + required: true + description: + - List of commands to execute on OOB controller. + type: list + elements: str + baseuri: + required: true + description: + - Base URI of OOB controller. 
+ type: str + username: + description: + - Username for authenticating to OOB controller. + type: str + password: + description: + - Password for authenticating to OOB controller. + type: str + auth_token: + description: + - Security token for authenticating to OOB controller. + type: str + version_added: 2.3.0 + bios_attributes: + required: false + description: + - Dictionary of BIOS attributes to update. + default: {} + type: dict + version_added: '0.2.0' + timeout: + description: + - Timeout in seconds for HTTP requests to OOB controller. + - The default value for this parameter changed from V(10) to V(60) in community.general 9.0.0. + type: int + default: 60 + boot_order: + required: false + description: + - List of BootOptionReference strings specifying the BootOrder. + default: [] + type: list + elements: str + version_added: '0.2.0' + network_protocols: + required: false + description: + - Setting dict of manager services to update. + type: dict + default: {} + version_added: '0.2.0' + resource_id: + required: false + description: + - ID of the System, Manager or Chassis to modify. + type: str + version_added: '0.2.0' + service_id: + required: false + description: + - ID of the manager to update. + type: str + version_added: '8.4.0' + nic_addr: + required: false + description: + - EthernetInterface Address string on OOB controller. + default: 'null' + type: str + version_added: '0.2.0' + nic_config: + required: false + description: + - Setting dict of EthernetInterface on OOB controller. + type: dict + default: {} + version_added: '0.2.0' + strip_etag_quotes: + description: + - Removes surrounding quotes of etag used in C(If-Match) header of C(PATCH) requests. + - Only use this option to resolve bad vendor implementation where C(If-Match) only matches the unquoted etag string. + type: bool + default: false + version_added: 3.7.0 + hostinterface_config: + required: false + description: + - Setting dict of HostInterface on OOB controller. 
+ type: dict + default: {} + version_added: '4.1.0' + hostinterface_id: + required: false + description: + - Redfish HostInterface instance ID if multiple HostInterfaces are present. + type: str + version_added: '4.1.0' + sessions_config: + required: false + description: + - Setting dict of Sessions. + type: dict + default: {} + version_added: '5.7.0' + storage_subsystem_id: + required: false + description: + - ID of the Storage Subsystem on which the volume is to be created. + type: str + default: '' + version_added: '7.3.0' + storage_none_volume_deletion: + required: false + description: + - Indicates if all non-RAID volumes are automatically deleted prior to creating the new volume. + type: bool + default: false + version_added: '9.5.0' + volume_ids: + required: false + description: + - List of IDs of volumes to be deleted. + type: list + default: [] + elements: str + version_added: '7.3.0' + secure_boot_enable: + required: false + description: + - Setting parameter to enable or disable SecureBoot. + type: bool + default: true + version_added: '7.5.0' + volume_details: + required: false + description: + - Setting dictionary of volume to be created. + - If C(CapacityBytes) key is not specified in this dictionary, the size of the volume is determined by the Redfish service. + It is possible the size is not the maximum available size. + type: dict + default: {} + version_added: '7.5.0' + power_restore_policy: + description: + - The desired power state of the system when power is restored after a power loss. 
+ type: str + choices: + - AlwaysOn + - AlwaysOff + - LastState + version_added: '10.5.0' + ciphers: + version_added: 9.2.0 + validate_certs: + version_added: 10.6.0 + ca_path: + version_added: 10.6.0 + +author: + - "Jose Delarosa (@jose-delarosa)" + - "T S Kushal (@TSKushal)" +""" + +EXAMPLES = r""" +- name: Set BootMode to UEFI + community.general.redfish_config: + category: Systems + command: SetBiosAttributes + resource_id: 437XR1138R2 + bios_attributes: + BootMode: "Uefi" + baseuri: "{{ baseuri }}" + username: "{{ username }}" + password: "{{ password }}" + +- name: Set multiple BootMode attributes + community.general.redfish_config: + category: Systems + command: SetBiosAttributes + resource_id: 437XR1138R2 + bios_attributes: + BootMode: "Bios" + OneTimeBootMode: "Enabled" + BootSeqRetry: "Enabled" + baseuri: "{{ baseuri }}" + username: "{{ username }}" + password: "{{ password }}" + +- name: Enable PXE Boot for NIC1 + community.general.redfish_config: + category: Systems + command: SetBiosAttributes + resource_id: 437XR1138R2 + bios_attributes: + PxeDev1EnDis: Enabled + baseuri: "{{ baseuri }}" + username: "{{ username }}" + password: "{{ password }}" + +- name: Set BIOS default settings with a timeout of 20 seconds + community.general.redfish_config: + category: Systems + command: SetBiosDefaultSettings + resource_id: 437XR1138R2 + baseuri: "{{ baseuri }}" + username: "{{ username }}" + password: "{{ password }}" + timeout: 20 + +- name: Set boot order + community.general.redfish_config: + category: Systems + command: SetBootOrder + boot_order: + - Boot0002 + - Boot0001 + - Boot0000 + - Boot0003 + - Boot0004 + baseuri: "{{ baseuri }}" + username: "{{ username }}" + password: "{{ password }}" + +- name: Set boot order to the default + community.general.redfish_config: + category: Systems + command: SetDefaultBootOrder + baseuri: "{{ baseuri }}" + username: "{{ username }}" + password: "{{ password }}" + +- name: Set Manager Network Protocols + 
community.general.redfish_config: + category: Manager + command: SetNetworkProtocols + network_protocols: + SNMP: + ProtocolEnabled: true + Port: 161 + HTTP: + ProtocolEnabled: false + Port: 8080 + baseuri: "{{ baseuri }}" + username: "{{ username }}" + password: "{{ password }}" + +- name: Set Manager NIC + community.general.redfish_config: + category: Manager + command: SetManagerNic + nic_config: + DHCPv4: + DHCPEnabled: false + IPv4StaticAddresses: + Address: 192.168.1.3 + Gateway: 192.168.1.1 + SubnetMask: 255.255.255.0 + baseuri: "{{ baseuri }}" + username: "{{ username }}" + password: "{{ password }}" + +- name: Disable Host Interface + community.general.redfish_config: + category: Manager + command: SetHostInterface + hostinterface_config: + InterfaceEnabled: false + baseuri: "{{ baseuri }}" + username: "{{ username }}" + password: "{{ password }}" + +- name: Enable Host Interface for HostInterface resource ID '2' + community.general.redfish_config: + category: Manager + command: SetHostInterface + hostinterface_config: + InterfaceEnabled: true + hostinterface_id: "2" + baseuri: "{{ baseuri }}" + username: "{{ username }}" + password: "{{ password }}" + +- name: Set SessionService Session Timeout to 30 minutes + community.general.redfish_config: + category: Sessions + command: SetSessionService + sessions_config: + SessionTimeout: 1800 + baseuri: "{{ baseuri }}" + username: "{{ username }}" + password: "{{ password }}" + +- name: Enable SecureBoot + community.general.redfish_config: + category: Systems + command: EnableSecureBoot + baseuri: "{{ baseuri }}" + username: "{{ username }}" + password: "{{ password }}" + +- name: Set SecureBoot + community.general.redfish_config: + category: Systems + command: SetSecureBoot + baseuri: "{{ baseuri }}" + username: "{{ username }}" + password: "{{ password }}" + secure_boot_enable: true + +- name: Delete All Volumes + community.general.redfish_config: + category: Systems + command: DeleteVolumes + baseuri: "{{ 
baseuri }}" + username: "{{ username }}" + password: "{{ password }}" + storage_subsystem_id: "DExxxxxx" + volume_ids: ["volume1", "volume2"] + +- name: Create Volume + community.general.redfish_config: + category: Systems + command: CreateVolume + baseuri: "{{ baseuri }}" + username: "{{ username }}" + password: "{{ password }}" + storage_subsystem_id: "DExxxxxx" + volume_details: + Name: "MR Volume" + RAIDType: "RAID0" + Drives: + - "/redfish/v1/Systems/1/Storage/DE00B000/Drives/1" + +- name: Set PowerRestorePolicy + community.general.redfish_config: + category: Systems + command: SetPowerRestorePolicy + baseuri: "{{ baseuri }}" + username: "{{ username }}" + password: "{{ password }}" + power_restore_policy: "AlwaysOff" + +- name: Set service identification to {{ service_id }} + community.general.redfish_config: + category: Manager + command: SetServiceIdentification + service_id: "{{ service_id }}" + baseuri: "{{ baseuri }}" + username: "{{ username }}" + password: "{{ password }}" +""" + +RETURN = r""" +msg: + description: Message with action result or error description. 
+ returned: always + type: str + sample: "Action was successful" +""" + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.redfish_utils import RedfishUtils, REDFISH_COMMON_ARGUMENT_SPEC +from ansible.module_utils.common.text.converters import to_native + + +# More will be added as module features are expanded +CATEGORY_COMMANDS_ALL = { + "Systems": ["SetBiosDefaultSettings", "SetBiosAttributes", "SetBootOrder", + "SetDefaultBootOrder", "EnableSecureBoot", "SetSecureBoot", "DeleteVolumes", "CreateVolume", + "SetPowerRestorePolicy"], + "Manager": ["SetNetworkProtocols", "SetManagerNic", "SetHostInterface", "SetServiceIdentification"], + "Sessions": ["SetSessionService"], +} + + +def main(): + result = {} + argument_spec = dict( + category=dict(required=True), + command=dict(required=True, type='list', elements='str'), + baseuri=dict(required=True), + username=dict(), + password=dict(no_log=True), + auth_token=dict(no_log=True), + bios_attributes=dict(type='dict', default={}), + timeout=dict(type='int', default=60), + boot_order=dict(type='list', elements='str', default=[]), + network_protocols=dict( + type='dict', + default={} + ), + resource_id=dict(), + service_id=dict(), + nic_addr=dict(default='null'), + nic_config=dict( + type='dict', + default={} + ), + strip_etag_quotes=dict(type='bool', default=False), + hostinterface_config=dict(type='dict', default={}), + hostinterface_id=dict(), + sessions_config=dict(type='dict', default={}), + storage_subsystem_id=dict(type='str', default=''), + storage_none_volume_deletion=dict(type='bool', default=False), + volume_ids=dict(type='list', default=[], elements='str'), + secure_boot_enable=dict(type='bool', default=True), + volume_details=dict(type='dict', default={}), + power_restore_policy=dict(choices=['AlwaysOn', 'AlwaysOff', 'LastState']), + ) + argument_spec.update(REDFISH_COMMON_ARGUMENT_SPEC) + module = AnsibleModule( + argument_spec, + 
required_together=[ + ('username', 'password'), + ], + required_one_of=[ + ('username', 'auth_token'), + ], + mutually_exclusive=[ + ('username', 'auth_token'), + ], + supports_check_mode=False + ) + + category = module.params['category'] + command_list = module.params['command'] + + # admin credentials used for authentication + creds = {'user': module.params['username'], + 'pswd': module.params['password'], + 'token': module.params['auth_token']} + + # timeout + timeout = module.params['timeout'] + + # BIOS attributes to update + bios_attributes = module.params['bios_attributes'] + + # boot order + boot_order = module.params['boot_order'] + + # System, Manager or Chassis ID to modify + resource_id = module.params['resource_id'] + + # manager nic + nic_addr = module.params['nic_addr'] + nic_config = module.params['nic_config'] + + # Etag options + strip_etag_quotes = module.params['strip_etag_quotes'] + + # HostInterface config options + hostinterface_config = module.params['hostinterface_config'] + + # HostInterface instance ID + hostinterface_id = module.params['hostinterface_id'] + + # Service Identification + service_id = module.params['service_id'] + + # Sessions config options + sessions_config = module.params['sessions_config'] + + # Volume deletion options + storage_subsystem_id = module.params['storage_subsystem_id'] + volume_ids = module.params['volume_ids'] + + # Set SecureBoot options + secure_boot_enable = module.params['secure_boot_enable'] + + # Volume creation options + volume_details = module.params['volume_details'] + storage_subsystem_id = module.params['storage_subsystem_id'] + storage_none_volume_deletion = module.params['storage_none_volume_deletion'] + + # Power Restore Policy + power_restore_policy = module.params['power_restore_policy'] + + # Build root URI + root_uri = "https://" + module.params['baseuri'] + rf_utils = RedfishUtils(creds, root_uri, timeout, module, + resource_id=resource_id, data_modification=True, 
strip_etag_quotes=strip_etag_quotes) + + # Check that Category is valid + if category not in CATEGORY_COMMANDS_ALL: + module.fail_json(msg=to_native("Invalid Category '%s'. Valid Categories = %s" % (category, list(CATEGORY_COMMANDS_ALL.keys())))) + + # Check that all commands are valid + for cmd in command_list: + # Fail if even one command given is invalid + if cmd not in CATEGORY_COMMANDS_ALL[category]: + module.fail_json(msg=to_native("Invalid Command '%s'. Valid Commands = %s" % (cmd, CATEGORY_COMMANDS_ALL[category]))) + + # Organize by Categories / Commands + if category == "Systems": + # execute only if we find a System resource + result = rf_utils._find_systems_resource() + if result['ret'] is False: + module.fail_json(msg=to_native(result['msg'])) + + for command in command_list: + if command == "SetBiosDefaultSettings": + result = rf_utils.set_bios_default_settings() + elif command == "SetBiosAttributes": + result = rf_utils.set_bios_attributes(bios_attributes) + elif command == "SetBootOrder": + result = rf_utils.set_boot_order(boot_order) + elif command == "SetDefaultBootOrder": + result = rf_utils.set_default_boot_order() + elif command == "EnableSecureBoot": + result = rf_utils.enable_secure_boot() + elif command == "SetSecureBoot": + result = rf_utils.set_secure_boot(secure_boot_enable) + elif command == "DeleteVolumes": + result = rf_utils.delete_volumes(storage_subsystem_id, volume_ids) + elif command == "CreateVolume": + result = rf_utils.create_volume(volume_details, storage_subsystem_id, storage_none_volume_deletion) + elif command == "SetPowerRestorePolicy": + result = rf_utils.set_power_restore_policy(power_restore_policy) + + elif category == "Manager": + # execute only if we find a Manager service resource + result = rf_utils._find_managers_resource() + if result['ret'] is False: + module.fail_json(msg=to_native(result['msg'])) + + for command in command_list: + if command == "SetNetworkProtocols": + result = 
rf_utils.set_network_protocols(module.params['network_protocols']) + elif command == "SetManagerNic": + result = rf_utils.set_manager_nic(nic_addr, nic_config) + elif command == "SetHostInterface": + result = rf_utils.set_hostinterface_attributes(hostinterface_config, hostinterface_id) + elif command == "SetServiceIdentification": + result = rf_utils.set_service_identification(service_id) + + elif category == "Sessions": + # execute only if we find a Sessions resource + result = rf_utils._find_sessionservice_resource() + if result['ret'] is False: + module.fail_json(msg=to_native(result['msg'])) + + for command in command_list: + if command == "SetSessionService": + result = rf_utils.set_session_service(sessions_config) + + # Return data back or fail with proper message + if result['ret'] is True: + if result.get('warning'): + module.warn(to_native(result['warning'])) + + module.exit_json(changed=result['changed'], msg=to_native(result['msg'])) + else: + module.fail_json(msg=to_native(result['msg'])) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/redfish_info.py b/plugins/modules/redfish_info.py new file mode 100644 index 0000000000..af1b3af319 --- /dev/null +++ b/plugins/modules/redfish_info.py @@ -0,0 +1,642 @@ +#!/usr/bin/python + +# Copyright (c) 2017-2018 Dell EMC Inc. +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + +DOCUMENTATION = r""" +module: redfish_info +short_description: Manages Out-Of-Band controllers using Redfish APIs +description: + - Builds Redfish URIs locally and sends them to remote OOB controllers to get information back. + - Information retrieved is placed in a location specified by the user. 
+extends_documentation_fragment: + - community.general.attributes + - community.general.attributes.info_module + - community.general.redfish +attributes: + check_mode: + version_added: 3.3.0 + # This was backported to 2.5.4 and 1.3.11 as well, since this was a bugfix +options: + category: + required: false + description: + - List of categories to execute on OOB controller. + default: ['Systems'] + type: list + elements: str + command: + required: false + description: + - List of commands to execute on OOB controller. + type: list + elements: str + baseuri: + required: true + description: + - Base URI of OOB controller. + type: str + username: + description: + - Username for authenticating to OOB controller. + type: str + password: + description: + - Password for authenticating to OOB controller. + type: str + auth_token: + description: + - Security token for authenticating to OOB controller. + type: str + version_added: 2.3.0 + manager: + description: + - Name of manager on OOB controller to target. + type: str + version_added: '8.3.0' + timeout: + description: + - Timeout in seconds for HTTP requests to OOB controller. + - The default value for this parameter changed from V(10) to V(60) in community.general 9.0.0. + type: int + default: 60 + update_handle: + required: false + description: + - Handle to check the status of an update in progress. 
+ type: str + version_added: '6.1.0' + ciphers: + version_added: 9.2.0 + validate_certs: + version_added: 10.6.0 + ca_path: + version_added: 10.6.0 + +author: "Jose Delarosa (@jose-delarosa)" +""" + +EXAMPLES = r""" +- name: Get CPU inventory + community.general.redfish_info: + category: Systems + command: GetCpuInventory + baseuri: "{{ baseuri }}" + username: "{{ username }}" + password: "{{ password }}" + register: result + +- name: Print fetched information + ansible.builtin.debug: + msg: "{{ result.redfish_facts.cpu.entries | to_nice_json }}" + +- name: Get CPU model + community.general.redfish_info: + category: Systems + command: GetCpuInventory + baseuri: "{{ baseuri }}" + username: "{{ username }}" + password: "{{ password }}" + register: result + +- name: Print fetched information + ansible.builtin.debug: + msg: "{{ result.redfish_facts.cpu.entries.0.Model }}" + +- name: Get memory inventory + community.general.redfish_info: + category: Systems + command: GetMemoryInventory + baseuri: "{{ baseuri }}" + username: "{{ username }}" + password: "{{ password }}" + register: result + +- name: Get fan inventory with a timeout of 20 seconds + community.general.redfish_info: + category: Chassis + command: GetFanInventory + baseuri: "{{ baseuri }}" + username: "{{ username }}" + password: "{{ password }}" + timeout: 20 + register: result + +- name: Get Virtual Media information + community.general.redfish_info: + category: Manager + command: GetVirtualMedia + baseuri: "{{ baseuri }}" + username: "{{ username }}" + password: "{{ password }}" + register: result + +- name: Print fetched information + ansible.builtin.debug: + msg: "{{ result.redfish_facts.virtual_media.entries | to_nice_json }}" + +- name: Get Virtual Media information from Systems + community.general.redfish_info: + category: Systems + command: GetVirtualMedia + baseuri: "{{ baseuri }}" + username: "{{ username }}" + password: "{{ password }}" + register: result + +- name: Print fetched information + 
ansible.builtin.debug: + msg: "{{ result.redfish_facts.virtual_media.entries | to_nice_json }}" + +- name: Get Volume Inventory + community.general.redfish_info: + category: Systems + command: GetVolumeInventory + baseuri: "{{ baseuri }}" + username: "{{ username }}" + password: "{{ password }}" + register: result +- name: Print fetched information + ansible.builtin.debug: + msg: "{{ result.redfish_facts.volume.entries | to_nice_json }}" + +- name: Get Session information + community.general.redfish_info: + category: Sessions + command: GetSessions + baseuri: "{{ baseuri }}" + username: "{{ username }}" + password: "{{ password }}" + register: result + +- name: Print fetched information + ansible.builtin.debug: + msg: "{{ result.redfish_facts.session.entries | to_nice_json }}" + +- name: Get default inventory information + community.general.redfish_info: + baseuri: "{{ baseuri }}" + username: "{{ username }}" + password: "{{ password }}" + register: result +- name: Print fetched information + ansible.builtin.debug: + msg: "{{ result.redfish_facts | to_nice_json }}" + +- name: Get several inventories + community.general.redfish_info: + category: Systems + command: GetNicInventory,GetBiosAttributes + baseuri: "{{ baseuri }}" + username: "{{ username }}" + password: "{{ password }}" + +- name: Get configuration of the AccountService + community.general.redfish_info: + category: Accounts + command: GetAccountServiceConfig + baseuri: "{{ baseuri }}" + username: "{{ username }}" + password: "{{ password }}" + +- name: Get default system inventory and user information + community.general.redfish_info: + category: Systems,Accounts + baseuri: "{{ baseuri }}" + username: "{{ username }}" + password: "{{ password }}" + +- name: Get default system, user and firmware information + community.general.redfish_info: + category: ["Systems", "Accounts", "Update"] + baseuri: "{{ baseuri }}" + username: "{{ username }}" + password: "{{ password }}" + +- name: Get Manager NIC inventory 
information + community.general.redfish_info: + category: Manager + command: GetManagerNicInventory + baseuri: "{{ baseuri }}" + username: "{{ username }}" + password: "{{ password }}" + +- name: Get boot override information + community.general.redfish_info: + category: Systems + command: GetBootOverride + baseuri: "{{ baseuri }}" + username: "{{ username }}" + password: "{{ password }}" + +- name: Get chassis inventory + community.general.redfish_info: + category: Chassis + command: GetChassisInventory + baseuri: "{{ baseuri }}" + username: "{{ username }}" + password: "{{ password }}" + +- name: Get all information available in the Manager category + community.general.redfish_info: + category: Manager + command: all + baseuri: "{{ baseuri }}" + username: "{{ username }}" + password: "{{ password }}" + +- name: Get firmware update capability information + community.general.redfish_info: + category: Update + command: GetFirmwareUpdateCapabilities + baseuri: "{{ baseuri }}" + username: "{{ username }}" + password: "{{ password }}" + +- name: Get firmware inventory + community.general.redfish_info: + category: Update + command: GetFirmwareInventory + baseuri: "{{ baseuri }}" + username: "{{ username }}" + password: "{{ password }}" + +- name: Get service identification + community.general.redfish_info: + category: Manager + command: GetServiceIdentification + manager: "{{ manager }}" + baseuri: "{{ baseuri }}" + username: "{{ username }}" + password: "{{ password }}" + +- name: Get software inventory + community.general.redfish_info: + category: Update + command: GetSoftwareInventory + baseuri: "{{ baseuri }}" + username: "{{ username }}" + password: "{{ password }}" + +- name: Get the status of an update operation + community.general.redfish_info: + category: Update + command: GetUpdateStatus + baseuri: "{{ baseuri }}" + username: "{{ username }}" + password: "{{ password }}" + update_handle: /redfish/v1/TaskService/TaskMonitors/735 + +- name: Get Manager Services 
+ community.general.redfish_info: + category: Manager + command: GetNetworkProtocols + baseuri: "{{ baseuri }}" + username: "{{ username }}" + password: "{{ password }}" + +- name: Get all information available in all categories + community.general.redfish_info: + category: all + command: all + baseuri: "{{ baseuri }}" + username: "{{ username }}" + password: "{{ password }}" + +- name: Get system health report + community.general.redfish_info: + category: Systems + command: GetHealthReport + baseuri: "{{ baseuri }}" + username: "{{ username }}" + password: "{{ password }}" + +- name: Get chassis health report + community.general.redfish_info: + category: Chassis + command: GetHealthReport + baseuri: "{{ baseuri }}" + username: "{{ username }}" + password: "{{ password }}" + +- name: Get manager health report + community.general.redfish_info: + category: Manager + command: GetHealthReport + baseuri: "{{ baseuri }}" + username: "{{ username }}" + password: "{{ password }}" + +- name: Get manager Redfish Host Interface inventory + community.general.redfish_info: + category: Manager + command: GetHostInterfaces + baseuri: "{{ baseuri }}" + username: "{{ username }}" + password: "{{ password }}" + +- name: Get Manager Inventory + community.general.redfish_info: + category: Manager + command: GetManagerInventory + baseuri: "{{ baseuri }}" + username: "{{ username }}" + password: "{{ password }}" + +- name: Get HPE Thermal Config + community.general.redfish_info: + category: Chassis + command: GetHPEThermalConfig + baseuri: "{{ baseuri }}" + username: "{{ username }}" + password: "{{ password }}" + +- name: Get HPE Fan Percent Minimum + community.general.redfish_info: + category: Chassis + command: GetHPEFanPercentMin + baseuri: "{{ baseuri }}" + username: "{{ username }}" + password: "{{ password }}" + +- name: Get BIOS registry + community.general.redfish_info: + category: Systems + command: GetBiosRegistries + baseuri: "{{ baseuri }}" + username: "{{ username }}" + 
password: "{{ password }}" + +- name: Get power restore policy + community.general.redfish_info: + category: Systems + command: GetPowerRestorePolicy + baseuri: "{{ baseuri }}" + username: "{{ username }}" + password: "{{ password }}" + +- name: Check the availability of the service with a timeout of 5 seconds + community.general.redfish_info: + category: Service + command: CheckAvailability + baseuri: "{{ baseuri }}" + username: "{{ username }}" + password: "{{ password }}" + timeout: 5 + register: result +""" + +RETURN = r""" +result: + description: Different results depending on task. + returned: always + type: dict + sample: List of CPUs on system +""" + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.redfish_utils import RedfishUtils, REDFISH_COMMON_ARGUMENT_SPEC + +CATEGORY_COMMANDS_ALL = { + "Systems": ["GetSystemInventory", "GetPsuInventory", "GetCpuInventory", + "GetMemoryInventory", "GetNicInventory", "GetHealthReport", + "GetStorageControllerInventory", "GetDiskInventory", "GetVolumeInventory", + "GetBiosAttributes", "GetBootOrder", "GetBootOverride", "GetVirtualMedia", "GetBiosRegistries", + "GetPowerRestorePolicy"], + "Chassis": ["GetFanInventory", "GetPsuInventory", "GetChassisPower", + "GetChassisThermals", "GetChassisInventory", "GetHealthReport", "GetHPEThermalConfig", "GetHPEFanPercentMin"], + "Accounts": ["ListUsers", "GetAccountServiceConfig"], + "Sessions": ["GetSessions"], + "Update": ["GetFirmwareInventory", "GetFirmwareUpdateCapabilities", "GetSoftwareInventory", + "GetUpdateStatus"], + "Manager": ["GetManagerNicInventory", "GetVirtualMedia", "GetLogs", "GetNetworkProtocols", + "GetHealthReport", "GetHostInterfaces", "GetManagerInventory", "GetServiceIdentification"], + "Service": ["CheckAvailability"], +} + +CATEGORY_COMMANDS_DEFAULT = { + "Systems": "GetSystemInventory", + "Chassis": "GetFanInventory", + "Accounts": "ListUsers", + "Update": "GetFirmwareInventory", + 
"Sessions": "GetSessions", + "Manager": "GetManagerNicInventory", + "Service": "CheckAvailability", +} + + +def main(): + result = {} + category_list = [] + argument_spec = dict( + category=dict(type='list', elements='str', default=['Systems']), + command=dict(type='list', elements='str'), + baseuri=dict(required=True), + username=dict(), + password=dict(no_log=True), + auth_token=dict(no_log=True), + timeout=dict(type='int', default=60), + update_handle=dict(), + manager=dict(), + ) + argument_spec.update(REDFISH_COMMON_ARGUMENT_SPEC) + module = AnsibleModule( + argument_spec, + required_together=[ + ('username', 'password'), + ], + required_one_of=[ + ('username', 'auth_token'), + ], + mutually_exclusive=[ + ('username', 'auth_token'), + ], + supports_check_mode=True, + ) + + # admin credentials used for authentication + creds = {'user': module.params['username'], + 'pswd': module.params['password'], + 'token': module.params['auth_token']} + + # timeout + timeout = module.params['timeout'] + + # update handle + update_handle = module.params['update_handle'] + + # manager + manager = module.params['manager'] + + # Build root URI + root_uri = "https://" + module.params['baseuri'] + rf_utils = RedfishUtils(creds, root_uri, timeout, module) + + # Build Category list + if "all" in module.params['category']: + for entry in CATEGORY_COMMANDS_ALL: + category_list.append(entry) + else: + # one or more categories specified + category_list = module.params['category'] + + for category in category_list: + command_list = [] + # Build Command list for each Category + if category in CATEGORY_COMMANDS_ALL: + if not module.params['command']: + # True if we don't specify a command --> use default + command_list.append(CATEGORY_COMMANDS_DEFAULT[category]) + elif "all" in module.params['command']: + for entry in range(len(CATEGORY_COMMANDS_ALL[category])): + command_list.append(CATEGORY_COMMANDS_ALL[category][entry]) + # one or more commands + else: + command_list = 
module.params['command'] + # Verify that all commands are valid + for cmd in command_list: + # Fail if even one command given is invalid + if cmd not in CATEGORY_COMMANDS_ALL[category]: + module.fail_json(msg="Invalid Command: %s" % cmd) + else: + # Fail if even one category given is invalid + module.fail_json(msg="Invalid Category: %s" % category) + + # Organize by Categories / Commands + if category == "Service": + # service-level commands are always available + for command in command_list: + if command == "CheckAvailability": + result["service"] = rf_utils.check_service_availability() + + elif category == "Systems": + # execute only if we find a Systems resource + resource = rf_utils._find_systems_resource() + if resource['ret'] is False: + module.fail_json(msg=resource['msg']) + + for command in command_list: + if command == "GetSystemInventory": + result["system"] = rf_utils.get_multi_system_inventory() + elif command == "GetCpuInventory": + result["cpu"] = rf_utils.get_multi_cpu_inventory() + elif command == "GetMemoryInventory": + result["memory"] = rf_utils.get_multi_memory_inventory() + elif command == "GetNicInventory": + result["nic"] = rf_utils.get_multi_nic_inventory(category) + elif command == "GetStorageControllerInventory": + result["storage_controller"] = rf_utils.get_multi_storage_controller_inventory() + elif command == "GetDiskInventory": + result["disk"] = rf_utils.get_multi_disk_inventory() + elif command == "GetVolumeInventory": + result["volume"] = rf_utils.get_multi_volume_inventory() + elif command == "GetBiosAttributes": + result["bios_attribute"] = rf_utils.get_multi_bios_attributes() + elif command == "GetBootOrder": + result["boot_order"] = rf_utils.get_multi_boot_order() + elif command == "GetBootOverride": + result["boot_override"] = rf_utils.get_multi_boot_override() + elif command == "GetHealthReport": + result["health_report"] = rf_utils.get_multi_system_health_report() + elif command == "GetVirtualMedia": + 
result["virtual_media"] = rf_utils.get_multi_virtualmedia(category) + elif command == "GetBiosRegistries": + result["bios_registries"] = rf_utils.get_bios_registries() + elif command == "GetPowerRestorePolicy": + result["power_restore_policy"] = rf_utils.get_multi_power_restore_policy() + + elif category == "Chassis": + # execute only if we find Chassis resource + resource = rf_utils._find_chassis_resource() + if resource['ret'] is False: + module.fail_json(msg=resource['msg']) + + for command in command_list: + if command == "GetFanInventory": + result["fan"] = rf_utils.get_fan_inventory() + elif command == "GetPsuInventory": + result["psu"] = rf_utils.get_psu_inventory() + elif command == "GetChassisThermals": + result["thermals"] = rf_utils.get_chassis_thermals() + elif command == "GetChassisPower": + result["chassis_power"] = rf_utils.get_chassis_power() + elif command == "GetChassisInventory": + result["chassis"] = rf_utils.get_chassis_inventory() + elif command == "GetHealthReport": + result["health_report"] = rf_utils.get_multi_chassis_health_report() + elif command == "GetHPEThermalConfig": + result["hpe_thermal_config"] = rf_utils.get_hpe_thermal_config() + elif command == "GetHPEFanPercentMin": + result["hpe_fan_percent_min"] = rf_utils.get_hpe_fan_percent_min() + + elif category == "Accounts": + # execute only if we find an Account service resource + resource = rf_utils._find_accountservice_resource() + if resource['ret'] is False: + module.fail_json(msg=resource['msg']) + + for command in command_list: + if command == "ListUsers": + result["user"] = rf_utils.list_users() + elif command == "GetAccountServiceConfig": + result["accountservice_config"] = rf_utils.get_accountservice_properties() + + elif category == "Update": + # execute only if we find UpdateService resources + resource = rf_utils._find_updateservice_resource() + if resource['ret'] is False: + module.fail_json(msg=resource['msg']) + + for command in command_list: + if command == 
"GetFirmwareInventory": + result["firmware"] = rf_utils.get_firmware_inventory() + elif command == "GetSoftwareInventory": + result["software"] = rf_utils.get_software_inventory() + elif command == "GetFirmwareUpdateCapabilities": + result["firmware_update_capabilities"] = rf_utils.get_firmware_update_capabilities() + elif command == "GetUpdateStatus": + result["update_status"] = rf_utils.get_update_status(update_handle) + + elif category == "Sessions": + # execute only if we find SessionService resources + resource = rf_utils._find_sessionservice_resource() + if resource['ret'] is False: + module.fail_json(msg=resource['msg']) + + for command in command_list: + if command == "GetSessions": + result["session"] = rf_utils.get_sessions() + + elif category == "Manager": + # execute only if we find a Manager service resource + resource = rf_utils._find_managers_resource() + if resource['ret'] is False: + module.fail_json(msg=resource['msg']) + + for command in command_list: + if command == "GetManagerNicInventory": + result["manager_nics"] = rf_utils.get_multi_nic_inventory(category) + elif command == "GetVirtualMedia": + result["virtual_media"] = rf_utils.get_multi_virtualmedia(category) + elif command == "GetLogs": + result["log"] = rf_utils.get_logs() + elif command == "GetNetworkProtocols": + result["network_protocols"] = rf_utils.get_network_protocols() + elif command == "GetHealthReport": + result["health_report"] = rf_utils.get_multi_manager_health_report() + elif command == "GetHostInterfaces": + result["host_interfaces"] = rf_utils.get_hostinterfaces() + elif command == "GetManagerInventory": + result["manager"] = rf_utils.get_multi_manager_inventory() + elif command == "GetServiceIdentification": + result["service_id"] = rf_utils.get_service_identification(manager) + + # Return data back + module.exit_json(redfish_facts=result) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/redhat_subscription.py b/plugins/modules/redhat_subscription.py 
new file mode 100644 index 0000000000..c2b76fe8ac --- /dev/null +++ b/plugins/modules/redhat_subscription.py @@ -0,0 +1,1167 @@ +#!/usr/bin/python + +# Copyright (c) James Laska (jlaska@redhat.com) +# +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + + +DOCUMENTATION = r""" +module: redhat_subscription +short_description: Manage registration and subscriptions to RHSM using C(subscription-manager) +description: + - Manage registration and subscription to the Red Hat Subscription Management entitlement platform using the C(subscription-manager) + command, registering using D-Bus if possible. +author: "Barnaby Court (@barnabycourt)" +notes: + - 'The module tries to use the D-Bus C(rhsm) service (part of C(subscription-manager)) to register, starting from community.general + 6.5.0: this is done so credentials (username, password, activation keys) can be passed to C(rhsm) in a secure way. C(subscription-manager) + itself gets credentials only as arguments of command line parameters, which is I(not) secure, as they can be easily stolen + by checking the process listing on the system. Due to limitations of the D-Bus interface of C(rhsm), the module does I(not) + use D-Bus for registration when trying either to register using O(token), or when specifying O(environment), or when the + system is old (typically RHEL 7 older than 7.4, RHEL 6, and older).' + - In order to register a system, subscription-manager requires either a username and password, or an activationkey and an + Organization ID. + - Since 2.5 values for O(server_hostname), O(server_insecure), O(rhsm_baseurl), O(server_proxy_hostname), O(server_proxy_port), + O(server_proxy_user) and O(server_proxy_password) are no longer taken from the C(/etc/rhsm/rhsm.conf) config file and + default to V(null). 
+ - It is possible to interact with C(subscription-manager) only as root, so root permissions are required to successfully + run this module. + - Since community.general 6.5.0, credentials (that is, O(username) and O(password), O(activationkey), or O(token)) are needed + only in case the system is not registered, or O(force_register) is specified; this makes it possible to use the module + to tweak an already registered system, for example attaching pools to it (using O(pool_ids)), and modifying the C(syspurpose) + attributes (using O(syspurpose)). +requirements: + - subscription-manager + - Optionally the C(dbus) Python library; this is usually included in the OS as it is used by C(subscription-manager). +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: none + diff_mode: + support: none +options: + state: + description: + - Whether to register and subscribe (V(present)), or unregister (V(absent)) a system. + choices: ["present", "absent"] + default: "present" + type: str + username: + description: + - Access.redhat.com or Red Hat Satellite or Katello username. + type: str + password: + description: + - Access.redhat.com or Red Hat Satellite or Katello password. + type: str + token: + description: + - Sso.redhat.com API access token. + type: str + version_added: 6.3.0 + server_hostname: + description: + - Specify an alternative Red Hat Subscription Management or Red Hat Satellite or Katello server. + type: str + server_insecure: + description: + - Enable or disable https server certificate verification when connecting to O(server_hostname). + type: str + server_prefix: + description: + - Specify the prefix when registering to the Red Hat Subscription Management or Red Hat Satellite or Katello server. + type: str + version_added: 3.3.0 + server_port: + description: + - Specify the port when registering to the Red Hat Subscription Management or Red Hat Satellite or Katello server. 
+ type: str + version_added: 3.3.0 + rhsm_baseurl: + description: + - Specify CDN baseurl. + type: str + rhsm_repo_ca_cert: + description: + - Specify an alternative location for a CA certificate for CDN. + type: str + server_proxy_hostname: + description: + - Specify an HTTP proxy hostname. + type: str + server_proxy_scheme: + description: + - Specify an HTTP proxy scheme, for example V(http) or V(https). + type: str + version_added: 6.2.0 + server_proxy_port: + description: + - Specify an HTTP proxy port. + type: str + server_proxy_user: + description: + - Specify a user for HTTP proxy with basic authentication. + type: str + server_proxy_password: + description: + - Specify a password for HTTP proxy with basic authentication. + type: str + auto_attach: + description: + - Upon successful registration, auto-consume available subscriptions. + - Please note that the alias O(ignore:autosubscribe) was removed in community.general 9.0.0. + type: bool + activationkey: + description: + - Supply an activation key for use with registration. + type: str + org_id: + description: + - Organization ID to use in conjunction with activationkey. + type: str + environment: + description: + - Register with a specific environment in the destination org. Used with Red Hat Satellite or Katello. + type: str + pool_ids: + description: + - Specify subscription pool IDs to consume. + - 'A pool ID may be specified as a C(string) - just the pool ID (for example V(0123456789abcdef0123456789abcdef)), or + as a C(dict) with the pool ID as the key, and a quantity as the value (for example V(0123456789abcdef0123456789abcdef: + 2). If the quantity is provided, it is used to consume multiple entitlements from a pool (the pool must support this).' + default: [] + type: list + elements: raw + consumer_type: + description: + - The type of unit to register, defaults to system. + type: str + consumer_name: + description: + - Name of the system to register, defaults to the hostname. 
+ type: str + consumer_id: + description: + - References an existing consumer ID to resume using a previous registration for this system. If the system's identity + certificate is lost or corrupted, this option allows it to resume using its previous identity and subscriptions. The + default is to not specify a consumer ID so a new ID is created. + type: str + force_register: + description: + - Register the system even if it is already registered. + type: bool + default: false + release: + description: + - Set a release version. + type: str + syspurpose: + description: + - Set syspurpose attributes in file C(/etc/rhsm/syspurpose/syspurpose.json) and synchronize these attributes with RHSM + server. Syspurpose attributes help attach the most appropriate subscriptions to the system automatically. When C(syspurpose.json) + file already contains some attributes, then new attributes overwrite existing attributes. When some attribute is not + listed in the new list of attributes, the existing attribute is removed from C(syspurpose.json) file. Unknown attributes + are ignored. + type: dict + suboptions: + usage: + description: Syspurpose attribute usage. + type: str + role: + description: Syspurpose attribute role. + type: str + service_level_agreement: + description: Syspurpose attribute service_level_agreement. + type: str + addons: + description: Syspurpose attribute addons. + type: list + elements: str + sync: + description: + - When this option is V(true), then syspurpose attributes are synchronized with RHSM server immediately. When this + option is V(false), then syspurpose attributes are synchronized with RHSM server by rhsmcertd daemon. + type: bool + default: false +""" + +EXAMPLES = r""" +- name: Register as user (joe_user) with password (somepass) and auto-subscribe to available content. 
+ community.general.redhat_subscription: + state: present + username: joe_user + password: somepass + auto_attach: true + +- name: Same as above but subscribe to a specific pool by ID. + community.general.redhat_subscription: + state: present + username: joe_user + password: somepass + pool_ids: 0123456789abcdef0123456789abcdef + +- name: Register and subscribe to multiple pools. + community.general.redhat_subscription: + state: present + username: joe_user + password: somepass + pool_ids: + - 0123456789abcdef0123456789abcdef + - 1123456789abcdef0123456789abcdef + +- name: Same as above but consume multiple entitlements. + community.general.redhat_subscription: + state: present + username: joe_user + password: somepass + pool_ids: + - 0123456789abcdef0123456789abcdef: 2 + - 1123456789abcdef0123456789abcdef: 4 + +- name: Register and pull existing system data. + community.general.redhat_subscription: + state: present + username: joe_user + password: somepass + consumer_id: xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx + +- name: Register as user credentials into given environment (against Red Hat Satellite or Katello), and auto-subscribe. 
+ community.general.redhat_subscription: + state: present + username: joe_user + password: somepass + environment: Library + auto_attach: true + +- name: Register as user (joe_user) with password (somepass) and a specific release + community.general.redhat_subscription: + state: present + username: joe_user + password: somepass + release: 7.4 + +- name: Register as user (joe_user) with password (somepass), set syspurpose attributes and synchronize them with server + community.general.redhat_subscription: + state: present + username: joe_user + password: somepass + auto_attach: true + syspurpose: + usage: "Production" + role: "Red Hat Enterprise Server" + service_level_agreement: "Premium" + addons: + - addon1 + - addon2 + sync: true +""" + +RETURN = r""" +subscribed_pool_ids: + description: List of pool IDs to which system is now subscribed. + returned: success + type: dict + sample: {"8a85f9815ab905d3015ab928c7005de4": "1"} +""" + +from os.path import isfile +from os import getuid, unlink +import configparser +import re +import shutil +import tempfile +import json + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.common.text.converters import to_native +from ansible.module_utils import distro + + +SUBMAN_CMD = None + + +class Rhsm(object): + + REDHAT_REPO = "/etc/yum.repos.d/redhat.repo" + + def __init__(self, module): + self.module = module + + def update_plugin_conf(self, plugin, enabled=True): + plugin_conf = '/etc/yum/pluginconf.d/%s.conf' % plugin + + if isfile(plugin_conf): + tmpfd, tmpfile = tempfile.mkstemp() + shutil.copy2(plugin_conf, tmpfile) + cfg = configparser.ConfigParser() + cfg.read([tmpfile]) + + if enabled: + cfg.set('main', 'enabled', '1') + else: + cfg.set('main', 'enabled', '0') + + with open(tmpfile, 'w+') as fd: + cfg.write(fd) + self.module.atomic_move(tmpfile, plugin_conf) + + def enable(self): + ''' + Enable the system to receive updates from subscription-manager. 
+ This involves updating affected yum plugins and removing any + conflicting yum repositories. + ''' + # Remove any existing redhat.repo + if isfile(self.REDHAT_REPO): + unlink(self.REDHAT_REPO) + self.update_plugin_conf('rhnplugin', False) + self.update_plugin_conf('subscription-manager', True) + + def configure(self, **kwargs): + ''' + Configure the system as directed for registration with RHSM + Raises: + * Exception - if error occurs while running command + ''' + + args = [SUBMAN_CMD, 'config'] + + # Pass supplied **kwargs as parameters to subscription-manager. Ignore + # non-configuration parameters and replace '_' with '.'. For example, + # 'server_hostname' becomes '--server.hostname'. + options = [] + for k, v in sorted(kwargs.items()): + if re.search(r'^(server|rhsm)_', k) and v is not None: + options.append('--%s=%s' % (k.replace('_', '.', 1), v)) + + # When there is nothing to configure, then it is not necessary + # to run config command, because it only returns current + # content of current configuration file + if len(options) == 0: + return + + args.extend(options) + + self.module.run_command(args, check_rc=True) + + @property + def is_registered(self): + ''' + Determine whether the current system + Returns: + * Boolean - whether the current system is currently registered to + RHSM. + ''' + + args = [SUBMAN_CMD, 'identity'] + rc, stdout, stderr = self.module.run_command(args, check_rc=False) + if rc == 0: + return True + else: + return False + + def _has_dbus_interface(self): + """ + Checks whether subscription-manager has a D-Bus interface. + + :returns: bool -- whether subscription-manager has a D-Bus interface. + """ + + def str2int(s, default=0): + try: + return int(s) + except ValueError: + return default + + distro_id = distro.id() + distro_version = tuple(str2int(p) for p in distro.version_parts()) + + # subscription-manager in any supported Fedora version has the interface. 
+ if distro_id == 'fedora': + return True + # Any other distro: assume it is EL; + # the D-Bus interface was added to subscription-manager in RHEL 7.4. + return (distro_version[0] == 7 and distro_version[1] >= 4) or \ + distro_version[0] >= 8 + + def _can_connect_to_dbus(self): + """ + Checks whether it is possible to connect to the system D-Bus bus. + + :returns: bool -- whether it is possible to connect to the system D-Bus bus. + """ + + try: + # Technically speaking, subscription-manager uses dbus-python + # as D-Bus library, so this ought to work; better be safe than + # sorry, I guess... + import dbus + except ImportError: + self.module.debug('dbus Python module not available, will use CLI') + return False + + try: + bus = dbus.SystemBus() + msg = dbus.lowlevel.SignalMessage('/', 'com.example', 'test') + bus.send_message(msg) + bus.flush() + + except dbus.exceptions.DBusException as e: + self.module.debug('Failed to connect to system D-Bus bus, will use CLI: %s' % e) + return False + + self.module.debug('Verified system D-Bus bus as usable') + return True + + def register(self, was_registered, username, password, token, auto_attach, activationkey, org_id, + consumer_type, consumer_name, consumer_id, force_register, environment, + release): + ''' + Register the current system to the provided RHSM or Red Hat Satellite + or Katello server + + Raises: + * Exception - if any error occurs during the registration + ''' + # There is no support for token-based registration in the D-Bus API + # of rhsm, so always use the CLI in that case; + # also, since the specified environments are names, and the D-Bus APIs + # require IDs for the environments, use the CLI also in that case + if (not token and not environment and self._has_dbus_interface() and + self._can_connect_to_dbus()): + self._register_using_dbus(was_registered, username, password, auto_attach, + activationkey, org_id, consumer_type, + consumer_name, consumer_id, + force_register, environment, release) + return 
+ self._register_using_cli(username, password, token, auto_attach, + activationkey, org_id, consumer_type, + consumer_name, consumer_id, + force_register, environment, release) + + def _register_using_cli(self, username, password, token, auto_attach, + activationkey, org_id, consumer_type, consumer_name, + consumer_id, force_register, environment, release): + ''' + Register using the 'subscription-manager' command + + Raises: + * Exception - if error occurs while running command + ''' + args = [SUBMAN_CMD, 'register'] + + # Generate command arguments + if force_register: + args.extend(['--force']) + + if org_id: + args.extend(['--org', org_id]) + + if auto_attach: + args.append('--auto-attach') + + if consumer_type: + args.extend(['--type', consumer_type]) + + if consumer_name: + args.extend(['--name', consumer_name]) + + if consumer_id: + args.extend(['--consumerid', consumer_id]) + + if environment: + args.extend(['--environment', environment]) + + if activationkey: + args.extend(['--activationkey', activationkey]) + elif token: + args.extend(['--token', token]) + else: + if username: + args.extend(['--username', username]) + if password: + args.extend(['--password', password]) + + if release: + args.extend(['--release', release]) + + rc, stderr, stdout = self.module.run_command(args, check_rc=True, expand_user_and_vars=False) + + def _register_using_dbus(self, was_registered, username, password, auto_attach, + activationkey, org_id, consumer_type, consumer_name, + consumer_id, force_register, environment, release): + ''' + Register using D-Bus (connecting to the rhsm service) + + Raises: + * Exception - if error occurs during the D-Bus communication + ''' + import dbus + + SUBSCRIPTION_MANAGER_LOCALE = 'C' + # Seconds to wait for Registration to complete over DBus; + # 10 minutes should be a pretty generous timeout. 
+ REGISTRATION_TIMEOUT = 600 + + def str2int(s, default=0): + try: + return int(s) + except ValueError: + return default + + distro_id = distro.id() + distro_version_parts = distro.version_parts() + distro_version = tuple(str2int(p) for p in distro_version_parts) + + # Stop the rhsm service when using systemd (which means Fedora or + # RHEL 7+): this is because the service may not use new configuration bits + # - with subscription-manager < 1.26.5-1 (in RHEL < 8.2); + # fixed later by https://github.com/candlepin/subscription-manager/pull/2175 + # - sporadically: https://bugzilla.redhat.com/show_bug.cgi?id=2049296 + if distro_id == 'fedora' or distro_version[0] >= 7: + cmd = ['systemctl', 'stop', 'rhsm'] + self.module.run_command(cmd, check_rc=True, expand_user_and_vars=False) + + # While there is a 'force' options for the registration, it is actually + # not implemented (and thus it does not work) + # - in RHEL 7 and earlier + # - in RHEL 8 before 8.8: https://bugzilla.redhat.com/show_bug.cgi?id=2118486 + # - in RHEL 9 before 9.2: https://bugzilla.redhat.com/show_bug.cgi?id=2121350 + # Hence, use it only when implemented, manually unregistering otherwise. + # Match it on RHEL, since we know about it; other distributions + # will need their own logic. + dbus_force_option_works = False + if (distro_id == 'rhel' and + ((distro_version[0] == 8 and distro_version[1] >= 8) or + (distro_version[0] == 9 and distro_version[1] >= 2) or + distro_version[0] > 9)): + dbus_force_option_works = True + # We need to use the 'enable_content' D-Bus option to ensure that + # content is enabled; sadly the option is available depending on the + # version of the distro, and also depending on which API/method is used + # for registration. + dbus_has_enable_content_option = False + if activationkey: + def supports_enable_content_for_activation_keys(): + # subscription-manager in Fedora >= 41 has the new option. 
+ if distro_id == 'fedora' and distro_version[0] >= 41: + return True + # Assume EL distros here. + if distro_version[0] >= 10: + return True + return False + dbus_has_enable_content_option = supports_enable_content_for_activation_keys() + else: + def supports_enable_content_for_credentials(): + # subscription-manager in any supported Fedora version + # has the new option. + if distro_id == 'fedora': + return True + # Check for RHEL 8 >= 8.6, or RHEL >= 9. + if distro_id == 'rhel' and \ + ((distro_version[0] == 8 and distro_version[1] >= 6) or + distro_version[0] >= 9): + return True + # CentOS: similar checks as for RHEL, with one extra bit: + # if the 2nd part of the version is empty, it means it is + # CentOS Stream, and thus we can assume it has the latest + # version of subscription-manager. + if distro_id == 'centos' and \ + ((distro_version[0] == 8 and + (distro_version[1] >= 6 or distro_version_parts[1] == '')) or + distro_version[0] >= 9): + return True + # Unknown or old distro: assume it does not support + # the new option. + return False + dbus_has_enable_content_option = supports_enable_content_for_credentials() + + if force_register and not dbus_force_option_works and was_registered: + self.unregister() + + register_opts = {} + if consumer_type: + # The option for the consumer type used to be 'type' in versions + # of RHEL before 9 & in RHEL 9 before 9.2, and then it changed to + # 'consumer_type'; since the Register*() D-Bus functions reject + # unknown options, we have to pass the right option depending on + # the version -- funky. + def supports_option_consumer_type(): + # subscription-manager in any supported Fedora version + # has the new option. + if distro_id == 'fedora': + return True + # Check for RHEL 9 >= 9.2, or RHEL >= 10. 
+ if distro_id == 'rhel' and \ + ((distro_version[0] == 9 and distro_version[1] >= 2) or + distro_version[0] >= 10): + return True + # CentOS: since the change was only done in EL 9, then there is + # only CentOS Stream for 9, and thus we can assume it has the + # latest version of subscription-manager. + if distro_id == 'centos' and distro_version[0] >= 9: + return True + # Unknown or old distro: assume it does not support + # the new option. + return False + + consumer_type_key = 'type' + if supports_option_consumer_type(): + consumer_type_key = 'consumer_type' + register_opts[consumer_type_key] = consumer_type + if consumer_name: + register_opts['name'] = consumer_name + if consumer_id: + register_opts['consumerid'] = consumer_id + if environment: + # The option for environments used to be 'environment' in versions + # of RHEL before 8.6, and then it changed to 'environments'; since + # the Register*() D-Bus functions reject unknown options, we have + # to pass the right option depending on the version -- funky. + def supports_option_environments(): + # subscription-manager in any supported Fedora version + # has the new option. + if distro_id == 'fedora': + return True + # Check for RHEL 8 >= 8.6, or RHEL >= 9. + if distro_id == 'rhel' and \ + ((distro_version[0] == 8 and distro_version[1] >= 6) or + distro_version[0] >= 9): + return True + # CentOS: similar checks as for RHEL, with one extra bit: + # if the 2nd part of the version is empty, it means it is + # CentOS Stream, and thus we can assume it has the latest + # version of subscription-manager. + if distro_id == 'centos' and \ + ((distro_version[0] == 8 and + (distro_version[1] >= 6 or distro_version_parts[1] == '')) or + distro_version[0] >= 9): + return True + # Unknown or old distro: assume it does not support + # the new option. 
+ return False + + environment_key = 'environment' + if supports_option_environments(): + environment_key = 'environments' + register_opts[environment_key] = environment + if force_register and dbus_force_option_works and was_registered: + register_opts['force'] = True + if dbus_has_enable_content_option: + register_opts['enable_content'] = "1" + # Wrap it as proper D-Bus dict + register_opts = dbus.Dictionary(register_opts, signature='sv', variant_level=1) + + connection_opts = {} + # Wrap it as proper D-Bus dict + connection_opts = dbus.Dictionary(connection_opts, signature='sv', variant_level=1) + + bus = dbus.SystemBus() + register_server = bus.get_object('com.redhat.RHSM1', + '/com/redhat/RHSM1/RegisterServer') + address = register_server.Start( + SUBSCRIPTION_MANAGER_LOCALE, + dbus_interface='com.redhat.RHSM1.RegisterServer', + ) + + try: + # Use the private bus to register the system + self.module.debug('Connecting to the private DBus') + private_bus = dbus.connection.Connection(address) + + try: + if activationkey: + args = ( + org_id, + [activationkey], + register_opts, + connection_opts, + SUBSCRIPTION_MANAGER_LOCALE, + ) + private_bus.call_blocking( + 'com.redhat.RHSM1', + '/com/redhat/RHSM1/Register', + 'com.redhat.RHSM1.Register', + 'RegisterWithActivationKeys', + 'sasa{sv}a{sv}s', + args, + timeout=REGISTRATION_TIMEOUT, + ) + else: + args = ( + org_id or '', + username, + password, + register_opts, + connection_opts, + SUBSCRIPTION_MANAGER_LOCALE, + ) + private_bus.call_blocking( + 'com.redhat.RHSM1', + '/com/redhat/RHSM1/Register', + 'com.redhat.RHSM1.Register', + 'Register', + 'sssa{sv}a{sv}s', + args, + timeout=REGISTRATION_TIMEOUT, + ) + + except dbus.exceptions.DBusException as e: + # Sometimes we get NoReply but the registration has succeeded. + # Check the registration status before deciding if this is an error. 
+ if e.get_dbus_name() == 'org.freedesktop.DBus.Error.NoReply': + if not self.is_registered(): + # Host is not registered so re-raise the error + raise + else: + raise + # Host was registered so continue + finally: + # Always shut down the private bus + self.module.debug('Shutting down private DBus instance') + register_server.Stop( + SUBSCRIPTION_MANAGER_LOCALE, + dbus_interface='com.redhat.RHSM1.RegisterServer', + ) + + # Make sure to refresh all the local data: this will fetch all the + # certificates, update redhat.repo, etc. + self.module.run_command([SUBMAN_CMD, 'refresh'], + check_rc=True, expand_user_and_vars=False) + + if auto_attach: + args = [SUBMAN_CMD, 'attach', '--auto'] + self.module.run_command(args, check_rc=True, expand_user_and_vars=False) + + # There is no support for setting the release via D-Bus, so invoke + # the CLI for this. + if release: + args = [SUBMAN_CMD, 'release', '--set', release] + self.module.run_command(args, check_rc=True, expand_user_and_vars=False) + + def unsubscribe(self, serials=None): + ''' + Unsubscribe a system from subscribed channels + Args: + serials(list or None): list of serials to unsubscribe. If + serials is none or an empty list, then + all subscribed channels will be removed. 
+ Raises: + * Exception - if error occurs while running command + ''' + items = [] + if serials is not None and serials: + items = ["--serial=%s" % s for s in serials] + if serials is None: + items = ["--all"] + + if items: + args = [SUBMAN_CMD, 'remove'] + items + rc, stderr, stdout = self.module.run_command(args, check_rc=True) + return serials + + def unregister(self): + ''' + Unregister a currently registered system + Raises: + * Exception - if error occurs while running command + ''' + args = [SUBMAN_CMD, 'unregister'] + rc, stderr, stdout = self.module.run_command(args, check_rc=True) + self.update_plugin_conf('rhnplugin', False) + self.update_plugin_conf('subscription-manager', False) + + def subscribe_by_pool_ids(self, pool_ids): + """ + Try to subscribe to the list of pool IDs + """ + available_pools = RhsmPools(self.module) + + available_pool_ids = [p.get_pool_id() for p in available_pools] + + for pool_id, quantity in sorted(pool_ids.items()): + if pool_id in available_pool_ids: + args = [SUBMAN_CMD, 'attach', '--pool', pool_id] + if quantity is not None: + args.extend(['--quantity', to_native(quantity)]) + rc, stderr, stdout = self.module.run_command(args, check_rc=True) + else: + self.module.fail_json(msg='Pool ID: %s not in list of available pools' % pool_id) + return pool_ids + + def update_subscriptions_by_pool_ids(self, pool_ids): + changed = False + consumed_pools = RhsmPools(self.module, consumed=True) + + existing_pools = {} + serials_to_remove = [] + for p in consumed_pools: + pool_id = p.get_pool_id() + quantity_used = p.get_quantity_used() + existing_pools[pool_id] = quantity_used + + quantity = pool_ids.get(pool_id, 0) + if quantity is not None and quantity != quantity_used: + serials_to_remove.append(p.Serial) + + serials = self.unsubscribe(serials=serials_to_remove) + + missing_pools = {} + for pool_id, quantity in sorted(pool_ids.items()): + quantity_used = existing_pools.get(pool_id, 0) + if quantity is None and quantity_used == 0 or 
quantity not in (None, 0, quantity_used): + missing_pools[pool_id] = quantity + + self.subscribe_by_pool_ids(missing_pools) + + if missing_pools or serials: + changed = True + return {'changed': changed, 'subscribed_pool_ids': list(missing_pools.keys()), + 'unsubscribed_serials': serials} + + def sync_syspurpose(self): + """ + Try to synchronize syspurpose attributes with server + """ + args = [SUBMAN_CMD, 'status'] + rc, stdout, stderr = self.module.run_command(args, check_rc=False) + + +class RhsmPool(object): + ''' + Convenience class for housing subscription information + ''' + + def __init__(self, module, **kwargs): + self.module = module + for k, v in kwargs.items(): + setattr(self, k, v) + + def __str__(self): + return str(self.__getattribute__('_name')) + + def get_pool_id(self): + return getattr(self, 'PoolId', getattr(self, 'PoolID')) + + def get_quantity_used(self): + return int(getattr(self, 'QuantityUsed')) + + def subscribe(self): + args = "subscription-manager attach --pool %s" % self.get_pool_id() + rc, stdout, stderr = self.module.run_command(args, check_rc=True) + if rc == 0: + return True + else: + return False + + +class RhsmPools(object): + """ + This class is used for manipulating pools subscriptions with RHSM + """ + + def __init__(self, module, consumed=False): + self.module = module + self.products = self._load_product_list(consumed) + + def __iter__(self): + return self.products.__iter__() + + def _load_product_list(self, consumed=False): + """ + Loads list of all available or consumed pools for system in data structure + + Args: + consumed(bool): if True list consumed pools, else list available pools (default False) + """ + args = "subscription-manager list" + if consumed: + args += " --consumed" + else: + args += " --available" + lang_env = dict(LANG='C', LC_ALL='C', LC_MESSAGES='C') + rc, stdout, stderr = self.module.run_command(args, check_rc=True, environ_update=lang_env) + + products = [] + for line in stdout.split('\n'): + # Remove 
leading+trailing whitespace + line = line.strip() + # An empty line implies the end of an output group + if len(line) == 0: + continue + # If a colon ':' is found, parse + elif ':' in line: + (key, value) = line.split(':', 1) + key = key.strip().replace(" ", "") # To unify + value = value.strip() + if key in ['ProductName', 'SubscriptionName']: + # Remember the name for later processing + products.append(RhsmPool(self.module, _name=value, key=value)) + elif products: + # Associate value with most recently recorded product + products[-1].__setattr__(key, value) + # FIXME - log some warning? + # else: + # warnings.warn("Unhandled subscription key/value: %s/%s" % (key,value)) + return products + + def filter_pools(self, regexp='^$'): + ''' + Return a list of RhsmPools whose pool id matches the provided regular expression + ''' + r = re.compile(regexp) + for product in self.products: + if r.search(product.get_pool_id()): + yield product + + def filter_products(self, regexp='^$'): + ''' + Return a list of RhsmPools whose product name matches the provided regular expression + ''' + r = re.compile(regexp) + for product in self.products: + if r.search(product._name): + yield product + + +class SysPurpose(object): + """ + This class is used for reading and writing to syspurpose.json file + """ + + SYSPURPOSE_FILE_PATH = "/etc/rhsm/syspurpose/syspurpose.json" + + ALLOWED_ATTRIBUTES = ['role', 'usage', 'service_level_agreement', 'addons'] + + def __init__(self, path=None): + """ + Initialize class used for reading syspurpose json file + """ + self.path = path or self.SYSPURPOSE_FILE_PATH + + def update_syspurpose(self, new_syspurpose): + """ + Try to update current syspurpose with new attributes from new_syspurpose + """ + syspurpose = {} + syspurpose_changed = False + for key, value in new_syspurpose.items(): + if key in self.ALLOWED_ATTRIBUTES: + if value is not None: + syspurpose[key] = value + elif key == 'sync': + pass + else: + raise KeyError("Attribute: %s not in list 
of allowed attributes: %s" % + (key, self.ALLOWED_ATTRIBUTES)) + current_syspurpose = self._read_syspurpose() + if current_syspurpose != syspurpose: + syspurpose_changed = True + # Update current syspurpose with new values + current_syspurpose.update(syspurpose) + # When some key is not listed in new syspurpose, then delete it from current syspurpose + # and ignore custom attributes created by user (e.g. "foo": "bar") + for key in list(current_syspurpose): + if key in self.ALLOWED_ATTRIBUTES and key not in syspurpose: + del current_syspurpose[key] + self._write_syspurpose(current_syspurpose) + return syspurpose_changed + + def _write_syspurpose(self, new_syspurpose): + """ + This function tries to update current new_syspurpose attributes to + json file. + """ + with open(self.path, "w") as fp: + fp.write(json.dumps(new_syspurpose, indent=2, ensure_ascii=False, sort_keys=True)) + + def _read_syspurpose(self): + """ + Read current syspurpose from json file. + """ + current_syspurpose = {} + try: + with open(self.path, "r") as fp: + content = fp.read() + except IOError: + pass + else: + current_syspurpose = json.loads(content) + return current_syspurpose + + +def main(): + + # Note: the default values for parameters are: + # 'type': 'str', 'default': None, 'required': False + # So there is no need to repeat these values for each parameter. 
+ module = AnsibleModule( + argument_spec={ + 'state': {'default': 'present', 'choices': ['present', 'absent']}, + 'username': {}, + 'password': {'no_log': True}, + 'token': {'no_log': True}, + 'server_hostname': {}, + 'server_insecure': {}, + 'server_prefix': {}, + 'server_port': {}, + 'rhsm_baseurl': {}, + 'rhsm_repo_ca_cert': {}, + 'auto_attach': {'type': 'bool'}, + 'activationkey': {'no_log': True}, + 'org_id': {}, + 'environment': {}, + 'pool_ids': {'default': [], 'type': 'list', 'elements': 'raw'}, + 'consumer_type': {}, + 'consumer_name': {}, + 'consumer_id': {}, + 'force_register': {'default': False, 'type': 'bool'}, + 'server_proxy_hostname': {}, + 'server_proxy_scheme': {}, + 'server_proxy_port': {}, + 'server_proxy_user': {}, + 'server_proxy_password': {'no_log': True}, + 'release': {}, + 'syspurpose': { + 'type': 'dict', + 'options': { + 'role': {}, + 'usage': {}, + 'service_level_agreement': {}, + 'addons': {'type': 'list', 'elements': 'str'}, + 'sync': {'type': 'bool', 'default': False} + } + } + }, + required_together=[['username', 'password'], + ['server_proxy_hostname', 'server_proxy_port'], + ['server_proxy_user', 'server_proxy_password']], + mutually_exclusive=[['activationkey', 'username'], + ['activationkey', 'token'], + ['token', 'username'], + ['activationkey', 'consumer_id'], + ['activationkey', 'environment'], + ['activationkey', 'auto_attach']], + required_if=[['force_register', True, ['username', 'activationkey', 'token'], True]], + ) + + if getuid() != 0: + module.fail_json( + msg="Interacting with subscription-manager requires root permissions ('become: true')" + ) + + # Load RHSM configuration from file + rhsm = Rhsm(module) + + state = module.params['state'] + username = module.params['username'] + password = module.params['password'] + token = module.params['token'] + server_hostname = module.params['server_hostname'] + server_insecure = module.params['server_insecure'] + server_prefix = module.params['server_prefix'] + server_port = 
module.params['server_port'] + rhsm_baseurl = module.params['rhsm_baseurl'] + rhsm_repo_ca_cert = module.params['rhsm_repo_ca_cert'] + auto_attach = module.params['auto_attach'] + activationkey = module.params['activationkey'] + org_id = module.params['org_id'] + if activationkey and not org_id: + module.fail_json(msg='org_id is required when using activationkey') + environment = module.params['environment'] + pool_ids = {} + for value in module.params['pool_ids']: + if isinstance(value, dict): + if len(value) != 1: + module.fail_json(msg='Unable to parse pool_ids option.') + pool_id, quantity = list(value.items())[0] + else: + pool_id, quantity = value, None + pool_ids[pool_id] = quantity + consumer_type = module.params["consumer_type"] + consumer_name = module.params["consumer_name"] + consumer_id = module.params["consumer_id"] + force_register = module.params["force_register"] + server_proxy_hostname = module.params['server_proxy_hostname'] + server_proxy_port = module.params['server_proxy_port'] + server_proxy_user = module.params['server_proxy_user'] + server_proxy_password = module.params['server_proxy_password'] + release = module.params['release'] + syspurpose = module.params['syspurpose'] + + global SUBMAN_CMD + SUBMAN_CMD = module.get_bin_path('subscription-manager', True) + + syspurpose_changed = False + if syspurpose is not None: + try: + syspurpose_changed = SysPurpose().update_syspurpose(syspurpose) + except Exception as err: + module.fail_json(msg="Failed to update syspurpose attributes: %s" % to_native(err)) + + # Ensure system is registered + if state == 'present': + + # Cache the status of the system before the changes + was_registered = rhsm.is_registered + + # Register system + if was_registered and not force_register: + if syspurpose and 'sync' in syspurpose and syspurpose['sync'] is True: + try: + rhsm.sync_syspurpose() + except Exception as e: + module.fail_json(msg="Failed to synchronize syspurpose attributes: %s" % to_native(e)) + if 
pool_ids: + try: + result = rhsm.update_subscriptions_by_pool_ids(pool_ids) + except Exception as e: + module.fail_json(msg="Failed to update subscriptions for '%s': %s" % (server_hostname, to_native(e))) + else: + module.exit_json(**result) + else: + if syspurpose_changed is True: + module.exit_json(changed=True, msg="Syspurpose attributes changed.") + else: + module.exit_json(changed=False, msg="System already registered.") + else: + if not username and not activationkey and not token: + module.fail_json(msg="state is present but any of the following are missing: username, activationkey, token") + try: + rhsm.enable() + rhsm.configure(**module.params) + rhsm.register(was_registered, username, password, token, auto_attach, activationkey, org_id, + consumer_type, consumer_name, consumer_id, force_register, + environment, release) + if syspurpose and 'sync' in syspurpose and syspurpose['sync'] is True: + rhsm.sync_syspurpose() + if pool_ids: + subscribed_pool_ids = rhsm.subscribe_by_pool_ids(pool_ids) + else: + subscribed_pool_ids = [] + except Exception as e: + module.fail_json(msg="Failed to register with '%s': %s" % (server_hostname, to_native(e))) + else: + module.exit_json(changed=True, + msg="System successfully registered to '%s'." % server_hostname, + subscribed_pool_ids=subscribed_pool_ids) + + # Ensure system is *not* registered + if state == 'absent': + if not rhsm.is_registered: + module.exit_json(changed=False, msg="System already unregistered.") + else: + try: + rhsm.unregister() + except Exception as e: + module.fail_json(msg="Failed to unregister: %s" % to_native(e)) + else: + module.exit_json(changed=True, msg="System successfully unregistered from %s." 
% server_hostname) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/database/misc/redis.py b/plugins/modules/redis.py similarity index 74% rename from plugins/modules/database/misc/redis.py rename to plugins/modules/redis.py index 13a1f5060b..f442599368 100644 --- a/plugins/modules/database/misc/redis.py +++ b/plugins/modules/redis.py @@ -1,91 +1,91 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- -# Copyright: Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# Copyright Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: redis short_description: Various redis commands, replica and flush description: - - Unified utility to interact with redis instances. + - Unified utility to interact with redis instances. extends_documentation_fragment: - - community.general.redis + - community.general.redis + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: none options: - command: - description: - - The selected redis command - - C(config) ensures a configuration setting on an instance. - - C(flush) flushes all the instance or a specified db. - - C(replica) sets a redis instance in replica or master mode. (C(slave) is an alias for C(replica).) 
- choices: [ config, flush, replica, slave ] - type: str - tls: - default: false - version_added: 4.6.0 - login_user: - version_added: 4.6.0 - validate_certs: - version_added: 4.6.0 - ca_certs: - version_added: 4.6.0 - master_host: - description: - - The host of the master instance [replica command] - type: str - master_port: - description: - - The port of the master instance [replica command] - type: int - replica_mode: - description: - - The mode of the redis instance [replica command] - - C(slave) is an alias for C(replica). - default: replica - choices: [ master, replica, slave ] - type: str - aliases: - - slave_mode - db: - description: - - The database to flush (used in db mode) [flush command] - type: int - flush_mode: - description: - - Type of flush (all the dbs in a redis instance or a specific one) - [flush command] - default: all - choices: [ all, db ] - type: str - name: - description: - - A redis config key. - type: str - value: - description: - - A redis config value. When memory size is needed, it is possible - to specify it in the usal form of 1KB, 2M, 400MB where the base is 1024. - Units are case insensitive i.e. 1m = 1mb = 1M = 1MB. - type: str + command: + description: + - The selected redis command. + - V(config) ensures a configuration setting on an instance. + - V(flush) flushes all the instance or a specified db. + - V(replica) sets a redis instance in replica or master mode. (V(slave) is an alias for V(replica)). + choices: [config, flush, replica, slave] + type: str + tls: + default: false + version_added: 4.6.0 + login_user: + version_added: 4.6.0 + validate_certs: + version_added: 4.6.0 + ca_certs: + version_added: 4.6.0 + master_host: + description: + - The host of the master instance [replica command]. + type: str + master_port: + description: + - The port of the master instance [replica command]. + type: int + replica_mode: + description: + - The mode of the redis instance [replica command]. + - V(slave) is an alias for V(replica). 
+ default: replica + choices: [master, replica, slave] + type: str + aliases: + - slave_mode + db: + description: + - The database to flush (used in DB mode) [flush command]. + type: int + flush_mode: + description: + - Type of flush (all the DBs in a redis instance or a specific one) [flush command]. + default: all + choices: [all, db] + type: str + name: + description: + - A redis config key. + type: str + value: + description: + - A redis config value. When memory size is needed, it is possible to specify it in the usual form of 1KB, 2M, 400MB + where the base is 1024. Units are case insensitive, in other words 1m = 1mb = 1M = 1MB. + type: str notes: - - Requires the redis-py Python package on the remote host. You can - install it with pip (pip install redis) or with a package manager. - https://github.com/andymccurdy/redis-py - - If the redis master instance we are making replica of is password protected - this needs to be in the redis.conf in the masterauth variable - + - Requires the C(redis-py) Python package on the remote host. You can install it with pip (C(pip install redis)) or with + a package manager. U(https://github.com/andymccurdy/redis-py). + - If the redis master instance you are making replica of is password protected this needs to be in the C(redis.conf) in + the C(masterauth) variable. 
seealso: - - module: community.general.redis_info -requirements: [ redis ] + - module: community.general.redis_info +requirements: [redis] author: "Xabier Larrakoetxea (@slok)" -''' +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: Set local redis instance to be a replica of melee.island on port 6377 community.general.redis: command: replica @@ -125,7 +125,17 @@ EXAMPLES = ''' command: config name: lua-time-limit value: 100 -''' + +- name: Connect using TLS and certificate authentication + community.general.redis: + command: config + name: lua-time-limit + value: 100 + tls: true + ca_certs: /etc/redis/certs/ca.crt + client_cert_file: /etc/redis/certs/redis.crt + client_key_file: /etc/redis/certs/redis.key +""" import traceback @@ -138,7 +148,7 @@ except ImportError: else: redis_found = True -from ansible.module_utils.basic import AnsibleModule, missing_required_lib +from ansible.module_utils.basic import AnsibleModule from ansible.module_utils.common.text.formatters import human_to_bytes from ansible.module_utils.common.text.converters import to_native from ansible_collections.community.general.plugins.module_utils.redis import ( diff --git a/plugins/modules/database/misc/redis_data.py b/plugins/modules/redis_data.py similarity index 74% rename from plugins/modules/database/misc/redis_data.py rename to plugins/modules/redis_data.py index 587b37d04f..dfca11c898 100644 --- a/plugins/modules/database/misc/redis_data.py +++ b/plugins/modules/redis_data.py @@ -1,71 +1,74 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- -# Copyright: (c) 2021, Andreas Botzner -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# Copyright (c) 2021, Andreas Botzner +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations 
-DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: redis_data short_description: Set key value pairs in Redis version_added: 3.7.0 description: - - Set key value pairs in Redis database. + - Set key value pairs in Redis database. author: "Andreas Botzner (@paginabianca)" +attributes: + check_mode: + support: full + diff_mode: + support: none options: - key: - description: - - Database key. - required: true - type: str - value: - description: - - Value that key should be set to. - required: false - type: str - expiration: - description: - - Expiration time in milliseconds. - Setting this flag will always result in a change in the database. - required: false - type: int - non_existing: - description: - - Only set key if it does not already exist. - required: false - type: bool - existing: - description: - - Only set key if it already exists. - required: false - type: bool - keep_ttl: - description: - - Retain the time to live associated with the key. - required: false - type: bool - state: - description: - - State of the key. - default: present - type: str - choices: - - present - - absent + key: + description: + - Database key. + required: true + type: str + value: + description: + - Value that key should be set to. + required: false + type: str + expiration: + description: + - Expiration time in milliseconds. Setting this option always results in a change in the database. + required: false + type: int + non_existing: + description: + - Only set key if it does not already exist. + required: false + type: bool + existing: + description: + - Only set key if it already exists. + required: false + type: bool + keep_ttl: + description: + - Retain the time to live associated with the key. + required: false + type: bool + state: + description: + - State of the key. 
+ default: present + type: str + choices: + - present + - absent extends_documentation_fragment: - community.general.redis.documentation + - community.general.attributes seealso: - - module: community.general.redis_data_incr - - module: community.general.redis_data_info - - module: community.general.redis -''' + - module: community.general.redis_data_incr + - module: community.general.redis_data_info + - module: community.general.redis +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: Set key foo=bar on localhost with no username community.general.redis_data: login_host: localhost @@ -109,17 +112,17 @@ EXAMPLES = ''' login_password: supersecret key: foo state: absent -''' +""" -RETURN = ''' +RETURN = r""" old_value: description: Value of key before setting. - returned: on_success if state is C(present) and key exists in database. + returned: on_success if O(state=present) and key exists in database. type: str sample: 'old_value_of_key' value: description: Value key was set to. - returned: on success if state is C(present). + returned: on success if O(state=present). 
type: str sample: 'new_value_of_key' msg: @@ -127,7 +130,7 @@ msg: returned: always type: str sample: 'Set key: foo to bar' -''' +""" from ansible.module_utils.basic import AnsibleModule from ansible_collections.community.general.plugins.module_utils.redis import ( @@ -138,11 +141,11 @@ def main(): redis_auth_args = redis_auth_argument_spec() module_args = dict( key=dict(type='str', required=True, no_log=False), - value=dict(type='str', required=False), - expiration=dict(type='int', required=False), - non_existing=dict(type='bool', required=False), - existing=dict(type='bool', required=False), - keep_ttl=dict(type='bool', required=False), + value=dict(type='str'), + expiration=dict(type='int'), + non_existing=dict(type='bool'), + existing=dict(type='bool'), + keep_ttl=dict(type='bool'), state=dict(type='str', default='present', choices=['present', 'absent']), ) diff --git a/plugins/modules/database/misc/redis_data_incr.py b/plugins/modules/redis_data_incr.py similarity index 78% rename from plugins/modules/database/misc/redis_data_incr.py rename to plugins/modules/redis_data_incr.py index e9e03941e4..f6c1b67401 100644 --- a/plugins/modules/database/misc/redis_data_incr.py +++ b/plugins/modules/redis_data_incr.py @@ -1,24 +1,29 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- -# Copyright: (c) 2021, Andreas Botzner -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# Copyright (c) 2021, Andreas Botzner +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: redis_data_incr short_description: Increment keys in Redis version_added: 4.0.0 description: - - Increment integers or float keys in Redis database and get new value. 
- - Default increment for all keys is 1. For specific increments use the - I(increment_int) and I(increment_float) options. - - When using I(check_mode) the module will try to calculate the value that - Redis would return. If the key is not present, 0.0 is used as value. + - Increment integers or float keys in Redis database and get new value. + - Default increment for all keys is V(1). For specific increments use the O(increment_int) and O(increment_float) options. author: "Andreas Botzner (@paginabianca)" +attributes: + check_mode: + support: partial + details: + - For C(check_mode) to work, the specified O(login_user) needs permission to run the C(GET) command on the key, otherwise + the module fails. + - When using C(check_mode) the module tries to calculate the value that Redis would return. If the key is not present, + V(0.0) is used as value. + diff_mode: + support: none options: key: description: @@ -33,26 +38,22 @@ options: increment_float: description: - Float amount to increment the key by. - - This only works with keys that contain float values - in their string representation. + - This only works with keys that contain float values in their string representation. type: float required: false extends_documentation_fragment: - community.general.redis.documentation - -notes: - - For C(check_mode) to work, the specified I(redis_user) needs permission to - run the C(GET) command on the key, otherwise the module will fail. 
+ - community.general.attributes seealso: - - module: community.general.redis_data - - module: community.general.redis_data_info - - module: community.general.redis -''' + - module: community.general.redis_data + - module: community.general.redis_data_info + - module: community.general.redis +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: Increment integer key foo on localhost with no username and print new value community.general.redis_data_incr: login_host: localhost @@ -71,11 +72,11 @@ EXAMPLES = ''' login_password: somepass key: foo increment_float: '20.4' -''' +""" -RETURN = ''' +RETURN = r""" value: - description: Incremented value of key + description: Incremented value of key. returned: on success type: float sample: '4039.4' @@ -84,7 +85,7 @@ msg: returned: always type: str sample: 'Incremented key: foo by 20.4 to 65.9' -''' +""" from ansible.module_utils.basic import AnsibleModule from ansible_collections.community.general.plugins.module_utils.redis import ( @@ -95,8 +96,8 @@ def main(): redis_auth_args = redis_auth_argument_spec() module_args = dict( key=dict(type='str', required=True, no_log=False), - increment_int=dict(type='int', required=False), - increment_float=dict(type='float', required=False), + increment_int=dict(type='int'), + increment_float=dict(type='float'), ) module_args.update(redis_auth_args) diff --git a/plugins/modules/database/misc/redis_data_info.py b/plugins/modules/redis_data_info.py similarity index 86% rename from plugins/modules/database/misc/redis_data_info.py rename to plugins/modules/redis_data_info.py index 7ecfd4a234..ad0ea943b4 100644 --- a/plugins/modules/database/misc/redis_data_info.py +++ b/plugins/modules/redis_data_info.py @@ -1,14 +1,12 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- -# Copyright: (c) 2021, Andreas Botzner -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# Copyright (c) 2021, Andreas Botzner +# GNU General Public License v3.0+ (see 
LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: redis_data_info short_description: Get value of key in Redis database version_added: 3.7.0 @@ -24,15 +22,17 @@ options: extends_documentation_fragment: - community.general.redis + - community.general.attributes + - community.general.attributes.info_module seealso: - module: community.general.redis_data - module: community.general.redis_data_incr - module: community.general.redis_info - module: community.general.redis -''' +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: Get key foo=bar from loalhost with no username community.general.redis_data_info: login_host: localhost @@ -47,9 +47,9 @@ EXAMPLES = ''' validate_certs: true ssl_ca_certs: /path/to/ca/certs key: foo -''' +""" -RETURN = ''' +RETURN = r""" exists: description: If they key exists in the database. 
returned: on success @@ -64,7 +64,7 @@ msg: returned: always type: str sample: 'Got key: foo with value: bar' -''' +""" from ansible.module_utils.basic import AnsibleModule from ansible_collections.community.general.plugins.module_utils.redis import ( diff --git a/plugins/modules/database/misc/redis_info.py b/plugins/modules/redis_info.py similarity index 69% rename from plugins/modules/database/misc/redis_info.py rename to plugins/modules/redis_info.py index 9762b03c98..f4327a121f 100644 --- a/plugins/modules/database/misc/redis_info.py +++ b/plugins/modules/redis_info.py @@ -1,46 +1,43 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- -# Copyright: (c) 2020, Pavlo Bashynskyi (@levonet) -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# Copyright (c) 2020, Pavlo Bashynskyi (@levonet) +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = r''' ---- +DOCUMENTATION = r""" module: redis_info short_description: Gather information about Redis servers version_added: '0.2.0' description: -- Gathers information and statistics about Redis servers. + - Gathers information and statistics about Redis servers. +extends_documentation_fragment: + - community.general.redis + - community.general.attributes + - community.general.attributes.info_module options: - login_host: - description: - - The host running the database. - type: str - default: localhost - login_port: - description: - - The port to connect to. - type: int - default: 6379 - login_password: - description: - - The password used to authenticate with, when authentication is enabled for the Redis server. - type: str -notes: -- Requires the redis-py Python package on the remote host. 
You can - install it with pip (C(pip install redis)) or with a package manager. - U(https://github.com/andymccurdy/redis-py) + login_user: + version_added: 7.5.0 + validate_certs: + version_added: 7.5.0 + tls: + default: false + version_added: 7.5.0 + ca_certs: + version_added: 7.5.0 + cluster: + default: false + description: Get informations about cluster status as RV(cluster). + type: bool + version_added: 9.1.0 seealso: -- module: community.general.redis -requirements: [ redis ] + - module: community.general.redis author: "Pavlo Bashynskyi (@levonet)" -''' +""" -EXAMPLES = r''' +EXAMPLES = r""" - name: Get server information community.general.redis_info: register: result @@ -48,14 +45,24 @@ EXAMPLES = r''' - name: Print server information ansible.builtin.debug: var: result.info -''' -RETURN = r''' +- name: Get server cluster information + community.general.redis_info: + cluster: true + register: result + +- name: Print server cluster information + ansible.builtin.debug: + var: result.cluster_info +""" + +RETURN = r""" info: description: The default set of server information sections U(https://redis.io/commands/info). returned: success type: dict - sample: { + sample: + { "active_defrag_hits": 0, "active_defrag_key_hits": 0, "active_defrag_key_misses": 0, @@ -183,7 +190,27 @@ info: "used_memory_scripts_human": "0B", "used_memory_startup": 791264 } -''' +cluster: + description: The default set of cluster information sections U(https://redis.io/commands/cluster-info). 
+ returned: success if O(cluster=true) + version_added: 9.1.0 + type: dict + sample: + { + "cluster_state": "ok", + "cluster_slots_assigned": 16384, + "cluster_slots_ok": 16384, + "cluster_slots_pfail": 0, + "cluster_slots_fail": 0, + "cluster_known_nodes": 6, + "cluster_size": 3, + "cluster_current_epoch": 6, + "cluster_my_epoch": 2, + "cluster_stats_messages_sent": 1483972, + "cluster_stats_messages_received": 1483968, + "total_cluster_links_buffer_limit_exceeded": 0 + } +""" import traceback @@ -195,8 +222,10 @@ except ImportError: REDIS_IMP_ERR = traceback.format_exc() HAS_REDIS_PACKAGE = False -from ansible.module_utils.basic import AnsibleModule, missing_required_lib +from ansible.module_utils.basic import AnsibleModule from ansible.module_utils.common.text.converters import to_native +from ansible_collections.community.general.plugins.module_utils.redis import ( + fail_imports, redis_auth_argument_spec, redis_auth_params) def redis_client(**client_params): @@ -205,31 +234,35 @@ def redis_client(**client_params): # Module execution. 
def main(): + module_args = dict( + cluster=dict(type='bool', default=False), + ) + module_args.update(redis_auth_argument_spec(tls_default=False)) module = AnsibleModule( - argument_spec=dict( - login_host=dict(type='str', default='localhost'), - login_port=dict(type='int', default=6379), - login_password=dict(type='str', no_log=True), - ), + argument_spec=module_args, supports_check_mode=True, ) - if not HAS_REDIS_PACKAGE: - module.fail_json(msg=missing_required_lib('redis'), exception=REDIS_IMP_ERR) + fail_imports(module, module.params['tls']) - login_host = module.params['login_host'] - login_port = module.params['login_port'] - login_password = module.params['login_password'] + redis_params = redis_auth_params(module) + cluster = module.params['cluster'] # Connect and check - client = redis_client(host=login_host, port=login_port, password=login_password) + client = redis_client(**redis_params) try: client.ping() except Exception as e: module.fail_json(msg="unable to connect to database: %s" % to_native(e), exception=traceback.format_exc()) info = client.info() - module.exit_json(changed=False, info=info) + + result = dict(changed=False, info=info) + + if cluster: + result['cluster_info'] = client.execute_command('CLUSTER INFO') + + module.exit_json(**result) if __name__ == '__main__': diff --git a/plugins/modules/remote_management/manageiq/manageiq_policies.py b/plugins/modules/remote_management/manageiq/manageiq_policies.py deleted file mode 100644 index 567833d7cc..0000000000 --- a/plugins/modules/remote_management/manageiq/manageiq_policies.py +++ /dev/null @@ -1,356 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# (c) 2017, Daniel Korn -# (c) 2017, Yaacov Zamir -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' - -module: manageiq_policies - -short_description: Management of resource 
policy_profiles in ManageIQ. -extends_documentation_fragment: -- community.general.manageiq - -author: Daniel Korn (@dkorn) -description: - - The manageiq_policies module supports adding and deleting policy_profiles in ManageIQ. - -options: - state: - type: str - description: - - absent - policy_profiles should not exist, - - present - policy_profiles should exist, - - list - list current policy_profiles and policies. - choices: ['absent', 'present', 'list'] - default: 'present' - policy_profiles: - type: list - elements: dict - description: - - list of dictionaries, each includes the policy_profile 'name' key. - - required if state is present or absent. - resource_type: - type: str - description: - - The type of the resource to which the profile should be [un]assigned. - required: true - choices: ['provider', 'host', 'vm', 'blueprint', 'category', 'cluster', - 'data store', 'group', 'resource pool', 'service', 'service template', - 'template', 'tenant', 'user'] - resource_name: - type: str - description: - - The name of the resource to which the profile should be [un]assigned. - - Must be specified if I(resource_id) is not set. Both options are mutually exclusive. - resource_id: - type: int - description: - - The ID of the resource to which the profile should be [un]assigned. - - Must be specified if I(resource_name) is not set. Both options are mutually exclusive. 
- version_added: 2.2.0 -''' - -EXAMPLES = ''' -- name: Assign new policy_profile for a provider in ManageIQ - community.general.manageiq_policies: - resource_name: 'EngLab' - resource_type: 'provider' - policy_profiles: - - name: openscap profile - manageiq_connection: - url: 'http://127.0.0.1:3000' - username: 'admin' - password: 'smartvm' - validate_certs: False - -- name: Unassign a policy_profile for a provider in ManageIQ - community.general.manageiq_policies: - state: absent - resource_name: 'EngLab' - resource_type: 'provider' - policy_profiles: - - name: openscap profile - manageiq_connection: - url: 'http://127.0.0.1:3000' - username: 'admin' - password: 'smartvm' - validate_certs: False - -- name: List current policy_profile and policies for a provider in ManageIQ - community.general.manageiq_policies: - state: list - resource_name: 'EngLab' - resource_type: 'provider' - manageiq_connection: - url: 'http://127.0.0.1:3000' - username: 'admin' - password: 'smartvm' - validate_certs: False -''' - -RETURN = ''' -manageiq_policies: - description: - - List current policy_profile and policies for a provider in ManageIQ - returned: always - type: dict - sample: '{ - "changed": false, - "profiles": [ - { - "policies": [ - { - "active": true, - "description": "OpenSCAP", - "name": "openscap policy" - }, - { - "active": true, - "description": "Analyse incoming container images", - "name": "analyse incoming container images" - }, - { - "active": true, - "description": "Schedule compliance after smart state analysis", - "name": "schedule compliance after smart state analysis" - } - ], - "profile_description": "OpenSCAP profile", - "profile_name": "openscap profile" - } - ] - }' -''' - -from ansible.module_utils.basic import AnsibleModule -from ansible_collections.community.general.plugins.module_utils.manageiq import ManageIQ, manageiq_argument_spec, manageiq_entities - - -class ManageIQPolicies(object): - """ - Object to execute policies management operations of 
manageiq resources. - """ - - def __init__(self, manageiq, resource_type, resource_id): - self.manageiq = manageiq - - self.module = self.manageiq.module - self.api_url = self.manageiq.api_url - self.client = self.manageiq.client - - self.resource_type = resource_type - self.resource_id = resource_id - self.resource_url = '{api_url}/{resource_type}/{resource_id}'.format( - api_url=self.api_url, - resource_type=resource_type, - resource_id=resource_id) - - def query_profile_href(self, profile): - """ Add or Update the policy_profile href field - - Example: - {name: STR, ...} => {name: STR, href: STR} - """ - resource = self.manageiq.find_collection_resource_or_fail( - "policy_profiles", **profile) - return dict(name=profile['name'], href=resource['href']) - - def query_resource_profiles(self): - """ Returns a set of the profile objects objects assigned to the resource - """ - url = '{resource_url}/policy_profiles?expand=resources' - try: - response = self.client.get(url.format(resource_url=self.resource_url)) - except Exception as e: - msg = "Failed to query {resource_type} policies: {error}".format( - resource_type=self.resource_type, - error=e) - self.module.fail_json(msg=msg) - - resources = response.get('resources', []) - - # clean the returned rest api profile object to look like: - # {profile_name: STR, profile_description: STR, policies: ARR} - profiles = [self.clean_profile_object(profile) for profile in resources] - - return profiles - - def query_profile_policies(self, profile_id): - """ Returns a set of the policy objects assigned to the resource - """ - url = '{api_url}/policy_profiles/{profile_id}?expand=policies' - try: - response = self.client.get(url.format(api_url=self.api_url, profile_id=profile_id)) - except Exception as e: - msg = "Failed to query {resource_type} policies: {error}".format( - resource_type=self.resource_type, - error=e) - self.module.fail_json(msg=msg) - - resources = response.get('policies', []) - - # clean the returned rest api 
policy object to look like: - # {name: STR, description: STR, active: BOOL} - policies = [self.clean_policy_object(policy) for policy in resources] - - return policies - - def clean_policy_object(self, policy): - """ Clean a policy object to have human readable form of: - { - name: STR, - description: STR, - active: BOOL - } - """ - name = policy.get('name') - description = policy.get('description') - active = policy.get('active') - - return dict( - name=name, - description=description, - active=active) - - def clean_profile_object(self, profile): - """ Clean a profile object to have human readable form of: - { - profile_name: STR, - profile_description: STR, - policies: ARR - } - """ - profile_id = profile['id'] - name = profile.get('name') - description = profile.get('description') - policies = self.query_profile_policies(profile_id) - - return dict( - profile_name=name, - profile_description=description, - policies=policies) - - def profiles_to_update(self, profiles, action): - """ Create a list of policies we need to update in ManageIQ. - - Returns: - Whether or not a change took place and a message describing the - operation executed. - """ - profiles_to_post = [] - assigned_profiles = self.query_resource_profiles() - - # make a list of assigned full profile names strings - # e.g. ['openscap profile', ...] 
- assigned_profiles_set = set([profile['profile_name'] for profile in assigned_profiles]) - - for profile in profiles: - assigned = profile.get('name') in assigned_profiles_set - - if (action == 'unassign' and assigned) or (action == 'assign' and not assigned): - # add/update the policy profile href field - # {name: STR, ...} => {name: STR, href: STR} - profile = self.query_profile_href(profile) - profiles_to_post.append(profile) - - return profiles_to_post - - def assign_or_unassign_profiles(self, profiles, action): - """ Perform assign/unassign action - """ - # get a list of profiles needed to be changed - profiles_to_post = self.profiles_to_update(profiles, action) - if not profiles_to_post: - return dict( - changed=False, - msg="Profiles {profiles} already {action}ed, nothing to do".format( - action=action, - profiles=profiles)) - - # try to assign or unassign profiles to resource - url = '{resource_url}/policy_profiles'.format(resource_url=self.resource_url) - try: - response = self.client.post(url, action=action, resources=profiles_to_post) - except Exception as e: - msg = "Failed to {action} profile: {error}".format( - action=action, - error=e) - self.module.fail_json(msg=msg) - - # check all entities in result to be successful - for result in response['results']: - if not result['success']: - msg = "Failed to {action}: {message}".format( - action=action, - message=result['message']) - self.module.fail_json(msg=msg) - - # successfully changed all needed profiles - return dict( - changed=True, - msg="Successfully {action}ed profiles: {profiles}".format( - action=action, - profiles=profiles)) - - -def main(): - actions = {'present': 'assign', 'absent': 'unassign', 'list': 'list'} - argument_spec = dict( - policy_profiles=dict(type='list', elements='dict'), - resource_id=dict(required=False, type='int'), - resource_name=dict(required=False, type='str'), - resource_type=dict(required=True, type='str', - choices=list(manageiq_entities().keys())), - 
state=dict(required=False, type='str', - choices=['present', 'absent', 'list'], default='present'), - ) - # add the manageiq connection arguments to the arguments - argument_spec.update(manageiq_argument_spec()) - - module = AnsibleModule( - argument_spec=argument_spec, - mutually_exclusive=[["resource_id", "resource_name"]], - required_one_of=[["resource_id", "resource_name"]], - required_if=[ - ('state', 'present', ['policy_profiles']), - ('state', 'absent', ['policy_profiles']) - ], - ) - - policy_profiles = module.params['policy_profiles'] - resource_id = module.params['resource_id'] - resource_type_key = module.params['resource_type'] - resource_name = module.params['resource_name'] - state = module.params['state'] - - # get the action and resource type - action = actions[state] - resource_type = manageiq_entities()[resource_type_key] - - manageiq = ManageIQ(module) - - # query resource id, fail if resource does not exist - if resource_id is None: - resource_id = manageiq.find_collection_resource_or_fail(resource_type, name=resource_name)['id'] - - manageiq_policies = ManageIQPolicies(manageiq, resource_type, resource_id) - - if action == 'list': - # return a list of current profiles for this object - current_profiles = manageiq_policies.query_resource_profiles() - res_args = dict(changed=False, profiles=current_profiles) - else: - # assign or unassign the profiles - res_args = manageiq_policies.assign_or_unassign_profiles(policy_profiles, action) - - module.exit_json(**res_args) - - -if __name__ == "__main__": - main() diff --git a/plugins/modules/remote_management/manageiq/manageiq_tags.py b/plugins/modules/remote_management/manageiq/manageiq_tags.py deleted file mode 100644 index 83ab60ac93..0000000000 --- a/plugins/modules/remote_management/manageiq/manageiq_tags.py +++ /dev/null @@ -1,316 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# (c) 2017, Daniel Korn -# (c) 2017, Yaacov Zamir -# GNU General Public License v3.0+ (see COPYING or 
https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' - -module: manageiq_tags - -short_description: Management of resource tags in ManageIQ. -extends_documentation_fragment: -- community.general.manageiq - -author: Daniel Korn (@dkorn) -description: - - The manageiq_tags module supports adding, updating and deleting tags in ManageIQ. - -options: - state: - type: str - description: - - absent - tags should not exist, - - present - tags should exist, - - list - list current tags. - choices: ['absent', 'present', 'list'] - default: 'present' - tags: - type: list - elements: dict - description: - - tags - list of dictionaries, each includes 'name' and 'category' keys. - - required if state is present or absent. - resource_type: - type: str - description: - - The relevant resource type in manageiq. - required: true - choices: ['provider', 'host', 'vm', 'blueprint', 'category', 'cluster', - 'data store', 'group', 'resource pool', 'service', 'service template', - 'template', 'tenant', 'user'] - resource_name: - type: str - description: - - The name of the resource at which tags will be controlled. - - Must be specified if I(resource_id) is not set. Both options are mutually exclusive. - resource_id: - description: - - The ID of the resource at which tags will be controlled. - - Must be specified if I(resource_name) is not set. Both options are mutually exclusive. 
- type: int - version_added: 2.2.0 -''' - -EXAMPLES = ''' -- name: Create new tags for a provider in ManageIQ - community.general.manageiq_tags: - resource_name: 'EngLab' - resource_type: 'provider' - tags: - - category: environment - name: prod - - category: owner - name: prod_ops - manageiq_connection: - url: 'http://127.0.0.1:3000' - username: 'admin' - password: 'smartvm' - validate_certs: False - -- name: Create new tags for a provider in ManageIQ - community.general.manageiq_tags: - resource_id: 23000000790497 - resource_type: 'provider' - tags: - - category: environment - name: prod - - category: owner - name: prod_ops - manageiq_connection: - url: 'http://127.0.0.1:3000' - username: 'admin' - password: 'smartvm' - validate_certs: False - -- name: Remove tags for a provider in ManageIQ - community.general.manageiq_tags: - state: absent - resource_name: 'EngLab' - resource_type: 'provider' - tags: - - category: environment - name: prod - - category: owner - name: prod_ops - manageiq_connection: - url: 'http://127.0.0.1:3000' - username: 'admin' - password: 'smartvm' - validate_certs: False - -- name: List current tags for a provider in ManageIQ - community.general.manageiq_tags: - state: list - resource_name: 'EngLab' - resource_type: 'provider' - manageiq_connection: - url: 'http://127.0.0.1:3000' - username: 'admin' - password: 'smartvm' - validate_certs: False -''' - -RETURN = ''' -''' - -from ansible.module_utils.basic import AnsibleModule -from ansible_collections.community.general.plugins.module_utils.manageiq import ManageIQ, manageiq_argument_spec, manageiq_entities - - -def query_resource_id(manageiq, resource_type, resource_name): - """ Query the resource name in ManageIQ. - - Returns: - the resource id if it exists in manageiq, Fail otherwise. 
- """ - resource = manageiq.find_collection_resource_by(resource_type, name=resource_name) - if resource: - return resource["id"] - else: - msg = "{resource_name} {resource_type} does not exist in manageiq".format( - resource_name=resource_name, resource_type=resource_type) - manageiq.module.fail_json(msg=msg) - - -class ManageIQTags(object): - """ - Object to execute tags management operations of manageiq resources. - """ - - def __init__(self, manageiq, resource_type, resource_id): - self.manageiq = manageiq - - self.module = self.manageiq.module - self.api_url = self.manageiq.api_url - self.client = self.manageiq.client - - self.resource_type = resource_type - self.resource_id = resource_id - self.resource_url = '{api_url}/{resource_type}/{resource_id}'.format( - api_url=self.api_url, - resource_type=resource_type, - resource_id=resource_id) - - def full_tag_name(self, tag): - """ Returns the full tag name in manageiq - """ - return '/managed/{tag_category}/{tag_name}'.format( - tag_category=tag['category'], - tag_name=tag['name']) - - def clean_tag_object(self, tag): - """ Clean a tag object to have human readable form of: - { - full_name: STR, - name: STR, - display_name: STR, - category: STR - } - """ - full_name = tag.get('name') - categorization = tag.get('categorization', {}) - - return dict( - full_name=full_name, - name=categorization.get('name'), - display_name=categorization.get('display_name'), - category=categorization.get('category', {}).get('name')) - - def query_resource_tags(self): - """ Returns a set of the tag objects assigned to the resource - """ - url = '{resource_url}/tags?expand=resources&attributes=categorization' - try: - response = self.client.get(url.format(resource_url=self.resource_url)) - except Exception as e: - msg = "Failed to query {resource_type} tags: {error}".format( - resource_type=self.resource_type, - error=e) - self.module.fail_json(msg=msg) - - resources = response.get('resources', []) - - # clean the returned rest api 
tag object to look like: - # {full_name: STR, name: STR, display_name: STR, category: STR} - tags = [self.clean_tag_object(tag) for tag in resources] - - return tags - - def tags_to_update(self, tags, action): - """ Create a list of tags we need to update in ManageIQ. - - Returns: - Whether or not a change took place and a message describing the - operation executed. - """ - tags_to_post = [] - assigned_tags = self.query_resource_tags() - - # make a list of assigned full tag names strings - # e.g. ['/managed/environment/prod', ...] - assigned_tags_set = set([tag['full_name'] for tag in assigned_tags]) - - for tag in tags: - assigned = self.full_tag_name(tag) in assigned_tags_set - - if assigned and action == 'unassign': - tags_to_post.append(tag) - elif (not assigned) and action == 'assign': - tags_to_post.append(tag) - - return tags_to_post - - def assign_or_unassign_tags(self, tags, action): - """ Perform assign/unassign action - """ - # get a list of tags needed to be changed - tags_to_post = self.tags_to_update(tags, action) - if not tags_to_post: - return dict( - changed=False, - msg="Tags already {action}ed, nothing to do".format(action=action)) - - # try to assign or unassign tags to resource - url = '{resource_url}/tags'.format(resource_url=self.resource_url) - try: - response = self.client.post(url, action=action, resources=tags) - except Exception as e: - msg = "Failed to {action} tag: {error}".format( - action=action, - error=e) - self.module.fail_json(msg=msg) - - # check all entities in result to be successful - for result in response['results']: - if not result['success']: - msg = "Failed to {action}: {message}".format( - action=action, - message=result['message']) - self.module.fail_json(msg=msg) - - # successfully changed all needed tags - return dict( - changed=True, - msg="Successfully {action}ed tags".format(action=action)) - - -def main(): - actions = {'present': 'assign', 'absent': 'unassign', 'list': 'list'} - argument_spec = dict( - 
tags=dict(type='list', elements='dict'), - resource_id=dict(required=False, type='int'), - resource_name=dict(required=False, type='str'), - resource_type=dict(required=True, type='str', - choices=list(manageiq_entities().keys())), - state=dict(required=False, type='str', - choices=['present', 'absent', 'list'], default='present'), - ) - # add the manageiq connection arguments to the arguments - argument_spec.update(manageiq_argument_spec()) - - module = AnsibleModule( - argument_spec=argument_spec, - mutually_exclusive=[["resource_id", "resource_name"]], - required_one_of=[["resource_id", "resource_name"]], - required_if=[ - ('state', 'present', ['tags']), - ('state', 'absent', ['tags']) - ], - ) - - tags = module.params['tags'] - resource_id = module.params['resource_id'] - resource_type_key = module.params['resource_type'] - resource_name = module.params['resource_name'] - state = module.params['state'] - - # get the action and resource type - action = actions[state] - resource_type = manageiq_entities()[resource_type_key] - - manageiq = ManageIQ(module) - - # query resource id, fail if resource does not exist - if resource_id is None: - resource_id = query_resource_id(manageiq, resource_type, resource_name) - - manageiq_tags = ManageIQTags(manageiq, resource_type, resource_id) - - if action == 'list': - # return a list of current tags for this object - current_tags = manageiq_tags.query_resource_tags() - res_args = dict(changed=False, tags=current_tags) - else: - # assign or unassign the tags - res_args = manageiq_tags.assign_or_unassign_tags(tags, action) - - module.exit_json(**res_args) - - -if __name__ == "__main__": - main() diff --git a/plugins/modules/remote_management/redfish/redfish_command.py b/plugins/modules/remote_management/redfish/redfish_command.py deleted file mode 100644 index 66609f97fb..0000000000 --- a/plugins/modules/remote_management/redfish/redfish_command.py +++ /dev/null @@ -1,842 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# 
Copyright (c) 2017-2018 Dell EMC Inc. -# GNU General Public License v3.0+ (see LICENSE or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - -DOCUMENTATION = ''' ---- -module: redfish_command -short_description: Manages Out-Of-Band controllers using Redfish APIs -description: - - Builds Redfish URIs locally and sends them to remote OOB controllers to - perform an action. - - Manages OOB controller ex. reboot, log management. - - Manages OOB controller users ex. add, remove, update. - - Manages system power ex. on, off, graceful and forced reboot. -options: - category: - required: true - description: - - Category to execute on OOB controller. - type: str - command: - required: true - description: - - List of commands to execute on OOB controller. - type: list - elements: str - baseuri: - required: true - description: - - Base URI of OOB controller. - type: str - username: - description: - - Username for authenticating to OOB controller. - type: str - password: - description: - - Password for authenticating to OOB controller. - type: str - auth_token: - description: - - Security token for authenticating to OOB controller. - type: str - version_added: 2.3.0 - session_uri: - description: - - URI of the session resource. - type: str - version_added: 2.3.0 - id: - required: false - aliases: [ account_id ] - description: - - ID of account to delete/modify. - - Can also be used in account creation to work around vendor issues where the ID of the new user is required in the POST request. - type: str - new_username: - required: false - aliases: [ account_username ] - description: - - Username of account to add/delete/modify. - type: str - new_password: - required: false - aliases: [ account_password ] - description: - - New password of account to add/modify. - type: str - roleid: - required: false - aliases: [ account_roleid ] - description: - - Role of account to add/modify. 
- type: str - bootdevice: - required: false - description: - - Boot device when setting boot configuration. - type: str - timeout: - description: - - Timeout in seconds for HTTP requests to OOB controller. - default: 10 - type: int - boot_override_mode: - description: - - Boot mode when using an override. - type: str - choices: [ Legacy, UEFI ] - version_added: 3.5.0 - uefi_target: - required: false - description: - - UEFI boot target when bootdevice is "UefiTarget". - type: str - boot_next: - required: false - description: - - BootNext target when bootdevice is "UefiBootNext". - type: str - update_username: - required: false - aliases: [ account_updatename ] - description: - - New user name for updating account_username. - type: str - version_added: '0.2.0' - account_properties: - required: false - description: - - Properties of account service to update. - type: dict - version_added: '0.2.0' - resource_id: - required: false - description: - - ID of the System, Manager or Chassis to modify. - type: str - version_added: '0.2.0' - update_image_uri: - required: false - description: - - URI of the image for the update. - type: str - version_added: '0.2.0' - update_protocol: - required: false - description: - - Protocol for the update. - type: str - version_added: '0.2.0' - update_targets: - required: false - description: - - List of target resource URIs to apply the update to. - type: list - elements: str - version_added: '0.2.0' - update_creds: - required: false - description: - - Credentials for retrieving the update image. - type: dict - version_added: '0.2.0' - suboptions: - username: - required: false - description: - - Username for retrieving the update image. - type: str - password: - required: false - description: - - Password for retrieving the update image. - type: str - virtual_media: - required: false - description: - - Options for VirtualMedia commands. 
- type: dict - version_added: '0.2.0' - suboptions: - media_types: - required: false - description: - - List of media types appropriate for the image. - type: list - elements: str - image_url: - required: false - description: - - URL of the image to insert or eject. - type: str - inserted: - required: false - description: - - Indicates that the image is treated as inserted on command completion. - type: bool - default: True - write_protected: - required: false - description: - - Indicates that the media is treated as write-protected. - type: bool - default: True - username: - required: false - description: - - Username for accessing the image URL. - type: str - password: - required: false - description: - - Password for accessing the image URL. - type: str - transfer_protocol_type: - required: false - description: - - Network protocol to use with the image. - type: str - transfer_method: - required: false - description: - - Transfer method to use with the image. - type: str - strip_etag_quotes: - description: - - Removes surrounding quotes of etag used in C(If-Match) header - of C(PATCH) requests. - - Only use this option to resolve bad vendor implementation where - C(If-Match) only matches the unquoted etag string. 
- type: bool - default: false - version_added: 3.7.0 - -author: "Jose Delarosa (@jose-delarosa)" -''' - -EXAMPLES = ''' - - name: Restart system power gracefully - community.general.redfish_command: - category: Systems - command: PowerGracefulRestart - resource_id: 437XR1138R2 - baseuri: "{{ baseuri }}" - username: "{{ username }}" - password: "{{ password }}" - - - name: Turn system power off - community.general.redfish_command: - category: Systems - command: PowerForceOff - resource_id: 437XR1138R2 - - - name: Restart system power forcefully - community.general.redfish_command: - category: Systems - command: PowerForceRestart - resource_id: 437XR1138R2 - - - name: Shutdown system power gracefully - community.general.redfish_command: - category: Systems - command: PowerGracefulShutdown - resource_id: 437XR1138R2 - - - name: Turn system power on - community.general.redfish_command: - category: Systems - command: PowerOn - resource_id: 437XR1138R2 - - - name: Reboot system power - community.general.redfish_command: - category: Systems - command: PowerReboot - resource_id: 437XR1138R2 - - - name: Set one-time boot device to {{ bootdevice }} - community.general.redfish_command: - category: Systems - command: SetOneTimeBoot - resource_id: 437XR1138R2 - bootdevice: "{{ bootdevice }}" - baseuri: "{{ baseuri }}" - username: "{{ username }}" - password: "{{ password }}" - - - name: Set one-time boot device to UefiTarget of "/0x31/0x33/0x01/0x01" - community.general.redfish_command: - category: Systems - command: SetOneTimeBoot - resource_id: 437XR1138R2 - bootdevice: "UefiTarget" - uefi_target: "/0x31/0x33/0x01/0x01" - baseuri: "{{ baseuri }}" - username: "{{ username }}" - password: "{{ password }}" - - - name: Set one-time boot device to BootNext target of "Boot0001" - community.general.redfish_command: - category: Systems - command: SetOneTimeBoot - resource_id: 437XR1138R2 - bootdevice: "UefiBootNext" - boot_next: "Boot0001" - baseuri: "{{ baseuri }}" - username: "{{ 
username }}" - password: "{{ password }}" - - - name: Set persistent boot device override - community.general.redfish_command: - category: Systems - command: EnableContinuousBootOverride - resource_id: 437XR1138R2 - bootdevice: "{{ bootdevice }}" - baseuri: "{{ baseuri }}" - username: "{{ username }}" - password: "{{ password }}" - - - name: Set one-time boot to BiosSetup - community.general.redfish_command: - category: Systems - command: SetOneTimeBoot - boot_next: BiosSetup - boot_override_mode: Legacy - baseuri: "{{ baseuri }}" - username: "{{ username }}" - password: "{{ password }}" - - - name: Disable persistent boot device override - community.general.redfish_command: - category: Systems - command: DisableBootOverride - - - name: Set system indicator LED to blink using security token for auth - community.general.redfish_command: - category: Systems - command: IndicatorLedBlink - resource_id: 437XR1138R2 - baseuri: "{{ baseuri }}" - auth_token: "{{ result.session.token }}" - - - name: Add user - community.general.redfish_command: - category: Accounts - command: AddUser - baseuri: "{{ baseuri }}" - username: "{{ username }}" - password: "{{ password }}" - new_username: "{{ new_username }}" - new_password: "{{ new_password }}" - roleid: "{{ roleid }}" - - - name: Add user using new option aliases - community.general.redfish_command: - category: Accounts - command: AddUser - baseuri: "{{ baseuri }}" - username: "{{ username }}" - password: "{{ password }}" - account_username: "{{ account_username }}" - account_password: "{{ account_password }}" - account_roleid: "{{ account_roleid }}" - - - name: Delete user - community.general.redfish_command: - category: Accounts - command: DeleteUser - baseuri: "{{ baseuri }}" - username: "{{ username }}" - password: "{{ password }}" - account_username: "{{ account_username }}" - - - name: Disable user - community.general.redfish_command: - category: Accounts - command: DisableUser - baseuri: "{{ baseuri }}" - username: "{{ 
username }}" - password: "{{ password }}" - account_username: "{{ account_username }}" - - - name: Enable user - community.general.redfish_command: - category: Accounts - command: EnableUser - baseuri: "{{ baseuri }}" - username: "{{ username }}" - password: "{{ password }}" - account_username: "{{ account_username }}" - - - name: Add and enable user - community.general.redfish_command: - category: Accounts - command: AddUser,EnableUser - baseuri: "{{ baseuri }}" - username: "{{ username }}" - password: "{{ password }}" - new_username: "{{ new_username }}" - new_password: "{{ new_password }}" - roleid: "{{ roleid }}" - - - name: Update user password - community.general.redfish_command: - category: Accounts - command: UpdateUserPassword - baseuri: "{{ baseuri }}" - username: "{{ username }}" - password: "{{ password }}" - account_username: "{{ account_username }}" - account_password: "{{ account_password }}" - - - name: Update user role - community.general.redfish_command: - category: Accounts - command: UpdateUserRole - baseuri: "{{ baseuri }}" - username: "{{ username }}" - password: "{{ password }}" - account_username: "{{ account_username }}" - roleid: "{{ roleid }}" - - - name: Update user name - community.general.redfish_command: - category: Accounts - command: UpdateUserName - baseuri: "{{ baseuri }}" - username: "{{ username }}" - password: "{{ password }}" - account_username: "{{ account_username }}" - account_updatename: "{{ account_updatename }}" - - - name: Update user name - community.general.redfish_command: - category: Accounts - command: UpdateUserName - baseuri: "{{ baseuri }}" - username: "{{ username }}" - password: "{{ password }}" - account_username: "{{ account_username }}" - update_username: "{{ update_username }}" - - - name: Update AccountService properties - community.general.redfish_command: - category: Accounts - command: UpdateAccountServiceProperties - baseuri: "{{ baseuri }}" - username: "{{ username }}" - password: "{{ password }}" - 
account_properties: - AccountLockoutThreshold: 5 - AccountLockoutDuration: 600 - - - name: Clear Manager Logs with a timeout of 20 seconds - community.general.redfish_command: - category: Manager - command: ClearLogs - resource_id: BMC - baseuri: "{{ baseuri }}" - username: "{{ username }}" - password: "{{ password }}" - timeout: 20 - - - name: Create session - community.general.redfish_command: - category: Sessions - command: CreateSession - baseuri: "{{ baseuri }}" - username: "{{ username }}" - password: "{{ password }}" - register: result - - - name: Set chassis indicator LED to blink using security token for auth - community.general.redfish_command: - category: Chassis - command: IndicatorLedBlink - resource_id: 1U - baseuri: "{{ baseuri }}" - auth_token: "{{ result.session.token }}" - - - name: Delete session using security token created by CreateSesssion above - community.general.redfish_command: - category: Sessions - command: DeleteSession - baseuri: "{{ baseuri }}" - auth_token: "{{ result.session.token }}" - session_uri: "{{ result.session.uri }}" - - - name: Clear Sessions - community.general.redfish_command: - category: Sessions - command: ClearSessions - baseuri: "{{ baseuri }}" - username: "{{ username }}" - password: "{{ password }}" - - - name: Simple update - community.general.redfish_command: - category: Update - command: SimpleUpdate - baseuri: "{{ baseuri }}" - username: "{{ username }}" - password: "{{ password }}" - update_image_uri: https://example.com/myupdate.img - - - name: Simple update with additional options - community.general.redfish_command: - category: Update - command: SimpleUpdate - baseuri: "{{ baseuri }}" - username: "{{ username }}" - password: "{{ password }}" - update_image_uri: //example.com/myupdate.img - update_protocol: FTP - update_targets: - - /redfish/v1/UpdateService/FirmwareInventory/BMC - update_creds: - username: operator - password: supersecretpwd - - - name: Insert Virtual Media - 
community.general.redfish_command: - category: Manager - command: VirtualMediaInsert - baseuri: "{{ baseuri }}" - username: "{{ username }}" - password: "{{ password }}" - virtual_media: - image_url: 'http://example.com/images/SomeLinux-current.iso' - media_types: - - CD - - DVD - resource_id: BMC - - - name: Eject Virtual Media - community.general.redfish_command: - category: Manager - command: VirtualMediaEject - baseuri: "{{ baseuri }}" - username: "{{ username }}" - password: "{{ password }}" - virtual_media: - image_url: 'http://example.com/images/SomeLinux-current.iso' - resource_id: BMC - - - name: Restart manager power gracefully - community.general.redfish_command: - category: Manager - command: GracefulRestart - resource_id: BMC - baseuri: "{{ baseuri }}" - username: "{{ username }}" - password: "{{ password }}" - - - name: Restart manager power gracefully - community.general.redfish_command: - category: Manager - command: PowerGracefulRestart - resource_id: BMC - - - name: Turn manager power off - community.general.redfish_command: - category: Manager - command: PowerForceOff - resource_id: BMC - - - name: Restart manager power forcefully - community.general.redfish_command: - category: Manager - command: PowerForceRestart - resource_id: BMC - - - name: Shutdown manager power gracefully - community.general.redfish_command: - category: Manager - command: PowerGracefulShutdown - resource_id: BMC - - - name: Turn manager power on - community.general.redfish_command: - category: Manager - command: PowerOn - resource_id: BMC - - - name: Reboot manager power - community.general.redfish_command: - category: Manager - command: PowerReboot - resource_id: BMC -''' - -RETURN = ''' -msg: - description: Message with action result or error description - returned: always - type: str - sample: "Action was successful" -''' - -from ansible.module_utils.basic import AnsibleModule -from ansible_collections.community.general.plugins.module_utils.redfish_utils import 
RedfishUtils -from ansible.module_utils.common.text.converters import to_native - - -# More will be added as module features are expanded -CATEGORY_COMMANDS_ALL = { - "Systems": ["PowerOn", "PowerForceOff", "PowerForceRestart", "PowerGracefulRestart", - "PowerGracefulShutdown", "PowerReboot", "SetOneTimeBoot", "EnableContinuousBootOverride", "DisableBootOverride", - "IndicatorLedOn", "IndicatorLedOff", "IndicatorLedBlink"], - "Chassis": ["IndicatorLedOn", "IndicatorLedOff", "IndicatorLedBlink"], - "Accounts": ["AddUser", "EnableUser", "DeleteUser", "DisableUser", - "UpdateUserRole", "UpdateUserPassword", "UpdateUserName", - "UpdateAccountServiceProperties"], - "Sessions": ["ClearSessions", "CreateSession", "DeleteSession"], - "Manager": ["GracefulRestart", "ClearLogs", "VirtualMediaInsert", - "VirtualMediaEject", "PowerOn", "PowerForceOff", "PowerForceRestart", - "PowerGracefulRestart", "PowerGracefulShutdown", "PowerReboot"], - "Update": ["SimpleUpdate"] -} - - -def main(): - result = {} - module = AnsibleModule( - argument_spec=dict( - category=dict(required=True), - command=dict(required=True, type='list', elements='str'), - baseuri=dict(required=True), - username=dict(), - password=dict(no_log=True), - auth_token=dict(no_log=True), - session_uri=dict(), - id=dict(aliases=["account_id"]), - new_username=dict(aliases=["account_username"]), - new_password=dict(aliases=["account_password"], no_log=True), - roleid=dict(aliases=["account_roleid"]), - update_username=dict(type='str', aliases=["account_updatename"]), - account_properties=dict(type='dict', default={}), - bootdevice=dict(), - timeout=dict(type='int', default=10), - uefi_target=dict(), - boot_next=dict(), - boot_override_mode=dict(choices=['Legacy', 'UEFI']), - resource_id=dict(), - update_image_uri=dict(), - update_protocol=dict(), - update_targets=dict(type='list', elements='str', default=[]), - update_creds=dict( - type='dict', - options=dict( - username=dict(), - password=dict(no_log=True) - ) - ), - 
virtual_media=dict( - type='dict', - options=dict( - media_types=dict(type='list', elements='str', default=[]), - image_url=dict(), - inserted=dict(type='bool', default=True), - write_protected=dict(type='bool', default=True), - username=dict(), - password=dict(no_log=True), - transfer_protocol_type=dict(), - transfer_method=dict(), - ) - ), - strip_etag_quotes=dict(type='bool', default=False), - ), - required_together=[ - ('username', 'password'), - ], - required_one_of=[ - ('username', 'auth_token'), - ], - mutually_exclusive=[ - ('username', 'auth_token'), - ], - supports_check_mode=False - ) - - category = module.params['category'] - command_list = module.params['command'] - - # admin credentials used for authentication - creds = {'user': module.params['username'], - 'pswd': module.params['password'], - 'token': module.params['auth_token']} - - # user to add/modify/delete - user = {'account_id': module.params['id'], - 'account_username': module.params['new_username'], - 'account_password': module.params['new_password'], - 'account_roleid': module.params['roleid'], - 'account_updatename': module.params['update_username'], - 'account_properties': module.params['account_properties']} - - # timeout - timeout = module.params['timeout'] - - # System, Manager or Chassis ID to modify - resource_id = module.params['resource_id'] - - # update options - update_opts = { - 'update_image_uri': module.params['update_image_uri'], - 'update_protocol': module.params['update_protocol'], - 'update_targets': module.params['update_targets'], - 'update_creds': module.params['update_creds'] - } - - # Boot override options - boot_opts = { - 'bootdevice': module.params['bootdevice'], - 'uefi_target': module.params['uefi_target'], - 'boot_next': module.params['boot_next'], - 'boot_override_mode': module.params['boot_override_mode'], - } - - # VirtualMedia options - virtual_media = module.params['virtual_media'] - - # Etag options - strip_etag_quotes = module.params['strip_etag_quotes'] - 
- # Build root URI - root_uri = "https://" + module.params['baseuri'] - rf_utils = RedfishUtils(creds, root_uri, timeout, module, - resource_id=resource_id, data_modification=True, strip_etag_quotes=strip_etag_quotes) - - # Check that Category is valid - if category not in CATEGORY_COMMANDS_ALL: - module.fail_json(msg=to_native("Invalid Category '%s'. Valid Categories = %s" % (category, list(CATEGORY_COMMANDS_ALL.keys())))) - - # Check that all commands are valid - for cmd in command_list: - # Fail if even one command given is invalid - if cmd not in CATEGORY_COMMANDS_ALL[category]: - module.fail_json(msg=to_native("Invalid Command '%s'. Valid Commands = %s" % (cmd, CATEGORY_COMMANDS_ALL[category]))) - - # Organize by Categories / Commands - if category == "Accounts": - ACCOUNTS_COMMANDS = { - "AddUser": rf_utils.add_user, - "EnableUser": rf_utils.enable_user, - "DeleteUser": rf_utils.delete_user, - "DisableUser": rf_utils.disable_user, - "UpdateUserRole": rf_utils.update_user_role, - "UpdateUserPassword": rf_utils.update_user_password, - "UpdateUserName": rf_utils.update_user_name, - "UpdateAccountServiceProperties": rf_utils.update_accountservice_properties - } - - # execute only if we find an Account service resource - result = rf_utils._find_accountservice_resource() - if result['ret'] is False: - module.fail_json(msg=to_native(result['msg'])) - - for command in command_list: - result = ACCOUNTS_COMMANDS[command](user) - - elif category == "Systems": - # execute only if we find a System resource - result = rf_utils._find_systems_resource() - if result['ret'] is False: - module.fail_json(msg=to_native(result['msg'])) - - for command in command_list: - if command.startswith('Power'): - result = rf_utils.manage_system_power(command) - elif command == "SetOneTimeBoot": - boot_opts['override_enabled'] = 'Once' - result = rf_utils.set_boot_override(boot_opts) - elif command == "EnableContinuousBootOverride": - boot_opts['override_enabled'] = 'Continuous' - result = 
rf_utils.set_boot_override(boot_opts) - elif command == "DisableBootOverride": - boot_opts['override_enabled'] = 'Disabled' - result = rf_utils.set_boot_override(boot_opts) - elif command.startswith('IndicatorLed'): - result = rf_utils.manage_system_indicator_led(command) - - elif category == "Chassis": - result = rf_utils._find_chassis_resource() - if result['ret'] is False: - module.fail_json(msg=to_native(result['msg'])) - - led_commands = ["IndicatorLedOn", "IndicatorLedOff", "IndicatorLedBlink"] - - # Check if more than one led_command is present - num_led_commands = sum([command in led_commands for command in command_list]) - if num_led_commands > 1: - result = {'ret': False, 'msg': "Only one IndicatorLed command should be sent at a time."} - else: - for command in command_list: - if command in led_commands: - result = rf_utils.manage_chassis_indicator_led(command) - - elif category == "Sessions": - # execute only if we find SessionService resources - resource = rf_utils._find_sessionservice_resource() - if resource['ret'] is False: - module.fail_json(msg=resource['msg']) - - for command in command_list: - if command == "ClearSessions": - result = rf_utils.clear_sessions() - elif command == "CreateSession": - result = rf_utils.create_session() - elif command == "DeleteSession": - result = rf_utils.delete_session(module.params['session_uri']) - - elif category == "Manager": - # execute only if we find a Manager service resource - result = rf_utils._find_managers_resource() - if result['ret'] is False: - module.fail_json(msg=to_native(result['msg'])) - - for command in command_list: - # standardize on the Power* commands, but allow the the legacy - # GracefulRestart command - if command == 'GracefulRestart': - command = 'PowerGracefulRestart' - - if command.startswith('Power'): - result = rf_utils.manage_manager_power(command) - elif command == 'ClearLogs': - result = rf_utils.clear_logs() - elif command == 'VirtualMediaInsert': - result = 
rf_utils.virtual_media_insert(virtual_media) - elif command == 'VirtualMediaEject': - result = rf_utils.virtual_media_eject(virtual_media) - - elif category == "Update": - # execute only if we find UpdateService resources - resource = rf_utils._find_updateservice_resource() - if resource['ret'] is False: - module.fail_json(msg=resource['msg']) - - for command in command_list: - if command == "SimpleUpdate": - result = rf_utils.simple_update(update_opts) - - # Return data back or fail with proper message - if result['ret'] is True: - del result['ret'] - changed = result.get('changed', True) - session = result.get('session', dict()) - module.exit_json(changed=changed, session=session, - msg='Action was successful') - else: - module.fail_json(msg=to_native(result['msg'])) - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/remote_management/redfish/redfish_config.py b/plugins/modules/remote_management/redfish/redfish_config.py deleted file mode 100644 index 39df23ab71..0000000000 --- a/plugins/modules/remote_management/redfish/redfish_config.py +++ /dev/null @@ -1,389 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Copyright (c) 2017-2018 Dell EMC Inc. -# GNU General Public License v3.0+ (see LICENSE or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - -DOCUMENTATION = ''' ---- -module: redfish_config -short_description: Manages Out-Of-Band controllers using Redfish APIs -description: - - Builds Redfish URIs locally and sends them to remote OOB controllers to - set or update a configuration attribute. - - Manages BIOS configuration settings. - - Manages OOB controller configuration settings. -options: - category: - required: true - description: - - Category to execute on OOB controller. - type: str - command: - required: true - description: - - List of commands to execute on OOB controller. 
- type: list - elements: str - baseuri: - required: true - description: - - Base URI of OOB controller. - type: str - username: - description: - - Username for authenticating to OOB controller. - type: str - password: - description: - - Password for authenticating to OOB controller. - type: str - auth_token: - description: - - Security token for authenticating to OOB controller. - type: str - version_added: 2.3.0 - bios_attributes: - required: false - description: - - Dictionary of BIOS attributes to update. - default: {} - type: dict - version_added: '0.2.0' - timeout: - description: - - Timeout in seconds for HTTP requests to OOB controller. - default: 10 - type: int - boot_order: - required: false - description: - - List of BootOptionReference strings specifying the BootOrder. - default: [] - type: list - elements: str - version_added: '0.2.0' - network_protocols: - required: false - description: - - Setting dict of manager services to update. - type: dict - version_added: '0.2.0' - resource_id: - required: false - description: - - ID of the System, Manager or Chassis to modify. - type: str - version_added: '0.2.0' - nic_addr: - required: false - description: - - EthernetInterface Address string on OOB controller. - default: 'null' - type: str - version_added: '0.2.0' - nic_config: - required: false - description: - - Setting dict of EthernetInterface on OOB controller. - type: dict - version_added: '0.2.0' - strip_etag_quotes: - description: - - Removes surrounding quotes of etag used in C(If-Match) header - of C(PATCH) requests. - - Only use this option to resolve bad vendor implementation where - C(If-Match) only matches the unquoted etag string. - type: bool - default: false - version_added: 3.7.0 - hostinterface_config: - required: false - description: - - Setting dict of HostInterface on OOB controller. 
- type: dict - version_added: '4.1.0' - hostinterface_id: - required: false - description: - - Redfish HostInterface instance ID if multiple HostInterfaces are present. - type: str - version_added: '4.1.0' - -author: "Jose Delarosa (@jose-delarosa)" -''' - -EXAMPLES = ''' - - name: Set BootMode to UEFI - community.general.redfish_config: - category: Systems - command: SetBiosAttributes - resource_id: 437XR1138R2 - bios_attributes: - BootMode: "Uefi" - baseuri: "{{ baseuri }}" - username: "{{ username }}" - password: "{{ password }}" - - - name: Set multiple BootMode attributes - community.general.redfish_config: - category: Systems - command: SetBiosAttributes - resource_id: 437XR1138R2 - bios_attributes: - BootMode: "Bios" - OneTimeBootMode: "Enabled" - BootSeqRetry: "Enabled" - baseuri: "{{ baseuri }}" - username: "{{ username }}" - password: "{{ password }}" - - - name: Enable PXE Boot for NIC1 - community.general.redfish_config: - category: Systems - command: SetBiosAttributes - resource_id: 437XR1138R2 - bios_attributes: - PxeDev1EnDis: Enabled - baseuri: "{{ baseuri }}" - username: "{{ username }}" - password: "{{ password }}" - - - name: Set BIOS default settings with a timeout of 20 seconds - community.general.redfish_config: - category: Systems - command: SetBiosDefaultSettings - resource_id: 437XR1138R2 - baseuri: "{{ baseuri }}" - username: "{{ username }}" - password: "{{ password }}" - timeout: 20 - - - name: Set boot order - community.general.redfish_config: - category: Systems - command: SetBootOrder - boot_order: - - Boot0002 - - Boot0001 - - Boot0000 - - Boot0003 - - Boot0004 - baseuri: "{{ baseuri }}" - username: "{{ username }}" - password: "{{ password }}" - - - name: Set boot order to the default - community.general.redfish_config: - category: Systems - command: SetDefaultBootOrder - baseuri: "{{ baseuri }}" - username: "{{ username }}" - password: "{{ password }}" - - - name: Set Manager Network Protocols - community.general.redfish_config: - 
category: Manager - command: SetNetworkProtocols - network_protocols: - SNMP: - ProtocolEnabled: True - Port: 161 - HTTP: - ProtocolEnabled: False - Port: 8080 - baseuri: "{{ baseuri }}" - username: "{{ username }}" - password: "{{ password }}" - - - name: Set Manager NIC - community.general.redfish_config: - category: Manager - command: SetManagerNic - nic_config: - DHCPv4: - DHCPEnabled: False - IPv4StaticAddresses: - Address: 192.168.1.3 - Gateway: 192.168.1.1 - SubnetMask: 255.255.255.0 - baseuri: "{{ baseuri }}" - username: "{{ username }}" - password: "{{ password }}" - - - name: Disable Host Interface - community.general.redfish_config: - category: Manager - command: SetHostInterface - hostinterface_config: - InterfaceEnabled: false - baseuri: "{{ baseuri }}" - username: "{{ username }}" - password: "{{ password }}" - - - name: Enable Host Interface for HostInterface resource ID '2' - community.general.redfish_config: - category: Manager - command: SetHostInterface - hostinterface_config: - InterfaceEnabled: true - hostinterface_id: "2" - baseuri: "{{ baseuri }}" - username: "{{ username }}" - password: "{{ password }}" -''' - -RETURN = ''' -msg: - description: Message with action result or error description - returned: always - type: str - sample: "Action was successful" -''' - -from ansible.module_utils.basic import AnsibleModule -from ansible_collections.community.general.plugins.module_utils.redfish_utils import RedfishUtils -from ansible.module_utils.common.text.converters import to_native - - -# More will be added as module features are expanded -CATEGORY_COMMANDS_ALL = { - "Systems": ["SetBiosDefaultSettings", "SetBiosAttributes", "SetBootOrder", - "SetDefaultBootOrder"], - "Manager": ["SetNetworkProtocols", "SetManagerNic", "SetHostInterface"] -} - - -def main(): - result = {} - module = AnsibleModule( - argument_spec=dict( - category=dict(required=True), - command=dict(required=True, type='list', elements='str'), - baseuri=dict(required=True), - 
username=dict(), - password=dict(no_log=True), - auth_token=dict(no_log=True), - bios_attributes=dict(type='dict', default={}), - timeout=dict(type='int', default=10), - boot_order=dict(type='list', elements='str', default=[]), - network_protocols=dict( - type='dict', - default={} - ), - resource_id=dict(), - nic_addr=dict(default='null'), - nic_config=dict( - type='dict', - default={} - ), - strip_etag_quotes=dict(type='bool', default=False), - hostinterface_config=dict(type='dict', default={}), - hostinterface_id=dict(), - ), - required_together=[ - ('username', 'password'), - ], - required_one_of=[ - ('username', 'auth_token'), - ], - mutually_exclusive=[ - ('username', 'auth_token'), - ], - supports_check_mode=False - ) - - category = module.params['category'] - command_list = module.params['command'] - - # admin credentials used for authentication - creds = {'user': module.params['username'], - 'pswd': module.params['password'], - 'token': module.params['auth_token']} - - # timeout - timeout = module.params['timeout'] - - # BIOS attributes to update - bios_attributes = module.params['bios_attributes'] - - # boot order - boot_order = module.params['boot_order'] - - # System, Manager or Chassis ID to modify - resource_id = module.params['resource_id'] - - # manager nic - nic_addr = module.params['nic_addr'] - nic_config = module.params['nic_config'] - - # Etag options - strip_etag_quotes = module.params['strip_etag_quotes'] - - # HostInterface config options - hostinterface_config = module.params['hostinterface_config'] - - # HostInterface instance ID - hostinterface_id = module.params['hostinterface_id'] - - # Build root URI - root_uri = "https://" + module.params['baseuri'] - rf_utils = RedfishUtils(creds, root_uri, timeout, module, - resource_id=resource_id, data_modification=True, strip_etag_quotes=strip_etag_quotes) - - # Check that Category is valid - if category not in CATEGORY_COMMANDS_ALL: - module.fail_json(msg=to_native("Invalid Category '%s'. 
Valid Categories = %s" % (category, list(CATEGORY_COMMANDS_ALL.keys())))) - - # Check that all commands are valid - for cmd in command_list: - # Fail if even one command given is invalid - if cmd not in CATEGORY_COMMANDS_ALL[category]: - module.fail_json(msg=to_native("Invalid Command '%s'. Valid Commands = %s" % (cmd, CATEGORY_COMMANDS_ALL[category]))) - - # Organize by Categories / Commands - if category == "Systems": - # execute only if we find a System resource - result = rf_utils._find_systems_resource() - if result['ret'] is False: - module.fail_json(msg=to_native(result['msg'])) - - for command in command_list: - if command == "SetBiosDefaultSettings": - result = rf_utils.set_bios_default_settings() - elif command == "SetBiosAttributes": - result = rf_utils.set_bios_attributes(bios_attributes) - elif command == "SetBootOrder": - result = rf_utils.set_boot_order(boot_order) - elif command == "SetDefaultBootOrder": - result = rf_utils.set_default_boot_order() - - elif category == "Manager": - # execute only if we find a Manager service resource - result = rf_utils._find_managers_resource() - if result['ret'] is False: - module.fail_json(msg=to_native(result['msg'])) - - for command in command_list: - if command == "SetNetworkProtocols": - result = rf_utils.set_network_protocols(module.params['network_protocols']) - elif command == "SetManagerNic": - result = rf_utils.set_manager_nic(nic_addr, nic_config) - elif command == "SetHostInterface": - result = rf_utils.set_hostinterface_attributes(hostinterface_config, hostinterface_id) - - # Return data back or fail with proper message - if result['ret'] is True: - if result.get('warning'): - module.warn(to_native(result['warning'])) - - module.exit_json(changed=result['changed'], msg=to_native(result['msg'])) - else: - module.fail_json(msg=to_native(result['msg'])) - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/remote_management/redfish/redfish_info.py 
b/plugins/modules/remote_management/redfish/redfish_info.py deleted file mode 100644 index 886b3f7da1..0000000000 --- a/plugins/modules/remote_management/redfish/redfish_info.py +++ /dev/null @@ -1,494 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Copyright (c) 2017-2018 Dell EMC Inc. -# GNU General Public License v3.0+ (see LICENSE or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - -DOCUMENTATION = ''' ---- -module: redfish_info -short_description: Manages Out-Of-Band controllers using Redfish APIs -description: - - Builds Redfish URIs locally and sends them to remote OOB controllers to - get information back. - - Information retrieved is placed in a location specified by the user. - - This module was called C(redfish_facts) before Ansible 2.9, returning C(ansible_facts). - Note that the M(community.general.redfish_info) module no longer returns C(ansible_facts)! -options: - category: - required: false - description: - - List of categories to execute on OOB controller. - default: ['Systems'] - type: list - elements: str - command: - required: false - description: - - List of commands to execute on OOB controller. - type: list - elements: str - baseuri: - required: true - description: - - Base URI of OOB controller. - type: str - username: - description: - - Username for authenticating to OOB controller. - type: str - password: - description: - - Password for authenticating to OOB controller. - type: str - auth_token: - description: - - Security token for authenticating to OOB controller. - type: str - version_added: 2.3.0 - timeout: - description: - - Timeout in seconds for HTTP requests to OOB controller. 
- default: 10 - type: int - -author: "Jose Delarosa (@jose-delarosa)" -''' - -EXAMPLES = ''' - - name: Get CPU inventory - community.general.redfish_info: - category: Systems - command: GetCpuInventory - baseuri: "{{ baseuri }}" - username: "{{ username }}" - password: "{{ password }}" - register: result - - - name: Print fetched information - ansible.builtin.debug: - msg: "{{ result.redfish_facts.cpu.entries | to_nice_json }}" - - - name: Get CPU model - community.general.redfish_info: - category: Systems - command: GetCpuInventory - baseuri: "{{ baseuri }}" - username: "{{ username }}" - password: "{{ password }}" - register: result - - - name: Print fetched information - ansible.builtin.debug: - msg: "{{ result.redfish_facts.cpu.entries.0.Model }}" - - - name: Get memory inventory - community.general.redfish_info: - category: Systems - command: GetMemoryInventory - baseuri: "{{ baseuri }}" - username: "{{ username }}" - password: "{{ password }}" - register: result - - - name: Get fan inventory with a timeout of 20 seconds - community.general.redfish_info: - category: Chassis - command: GetFanInventory - baseuri: "{{ baseuri }}" - username: "{{ username }}" - password: "{{ password }}" - timeout: 20 - register: result - - - name: Get Virtual Media information - community.general.redfish_info: - category: Manager - command: GetVirtualMedia - baseuri: "{{ baseuri }}" - username: "{{ username }}" - password: "{{ password }}" - register: result - - - name: Print fetched information - ansible.builtin.debug: - msg: "{{ result.redfish_facts.virtual_media.entries | to_nice_json }}" - - - name: Get Volume Inventory - community.general.redfish_info: - category: Systems - command: GetVolumeInventory - baseuri: "{{ baseuri }}" - username: "{{ username }}" - password: "{{ password }}" - register: result - - name: Print fetched information - ansible.builtin.debug: - msg: "{{ result.redfish_facts.volume.entries | to_nice_json }}" - - - name: Get Session information - 
community.general.redfish_info: - category: Sessions - command: GetSessions - baseuri: "{{ baseuri }}" - username: "{{ username }}" - password: "{{ password }}" - register: result - - - name: Print fetched information - ansible.builtin.debug: - msg: "{{ result.redfish_facts.session.entries | to_nice_json }}" - - - name: Get default inventory information - community.general.redfish_info: - baseuri: "{{ baseuri }}" - username: "{{ username }}" - password: "{{ password }}" - register: result - - name: Print fetched information - ansible.builtin.debug: - msg: "{{ result.redfish_facts | to_nice_json }}" - - - name: Get several inventories - community.general.redfish_info: - category: Systems - command: GetNicInventory,GetBiosAttributes - baseuri: "{{ baseuri }}" - username: "{{ username }}" - password: "{{ password }}" - - - name: Get default system inventory and user information - community.general.redfish_info: - category: Systems,Accounts - baseuri: "{{ baseuri }}" - username: "{{ username }}" - password: "{{ password }}" - - - name: Get default system, user and firmware information - community.general.redfish_info: - category: ["Systems", "Accounts", "Update"] - baseuri: "{{ baseuri }}" - username: "{{ username }}" - password: "{{ password }}" - - - name: Get Manager NIC inventory information - community.general.redfish_info: - category: Manager - command: GetManagerNicInventory - baseuri: "{{ baseuri }}" - username: "{{ username }}" - password: "{{ password }}" - - - name: Get boot override information - community.general.redfish_info: - category: Systems - command: GetBootOverride - baseuri: "{{ baseuri }}" - username: "{{ username }}" - password: "{{ password }}" - - - name: Get chassis inventory - community.general.redfish_info: - category: Chassis - command: GetChassisInventory - baseuri: "{{ baseuri }}" - username: "{{ username }}" - password: "{{ password }}" - - - name: Get all information available in the Manager category - community.general.redfish_info: - 
category: Manager - command: all - baseuri: "{{ baseuri }}" - username: "{{ username }}" - password: "{{ password }}" - - - name: Get firmware update capability information - community.general.redfish_info: - category: Update - command: GetFirmwareUpdateCapabilities - baseuri: "{{ baseuri }}" - username: "{{ username }}" - password: "{{ password }}" - - - name: Get firmware inventory - community.general.redfish_info: - category: Update - command: GetFirmwareInventory - baseuri: "{{ baseuri }}" - username: "{{ username }}" - password: "{{ password }}" - - - name: Get software inventory - community.general.redfish_info: - category: Update - command: GetSoftwareInventory - baseuri: "{{ baseuri }}" - username: "{{ username }}" - password: "{{ password }}" - - - name: Get Manager Services - community.general.redfish_info: - category: Manager - command: GetNetworkProtocols - baseuri: "{{ baseuri }}" - username: "{{ username }}" - password: "{{ password }}" - - - name: Get all information available in all categories - community.general.redfish_info: - category: all - command: all - baseuri: "{{ baseuri }}" - username: "{{ username }}" - password: "{{ password }}" - - - name: Get system health report - community.general.redfish_info: - category: Systems - command: GetHealthReport - baseuri: "{{ baseuri }}" - username: "{{ username }}" - password: "{{ password }}" - - - name: Get chassis health report - community.general.redfish_info: - category: Chassis - command: GetHealthReport - baseuri: "{{ baseuri }}" - username: "{{ username }}" - password: "{{ password }}" - - - name: Get manager health report - community.general.redfish_info: - category: Manager - command: GetHealthReport - baseuri: "{{ baseuri }}" - username: "{{ username }}" - password: "{{ password }}" - - - name: Get manager Redfish Host Interface inventory - community.general.redfish_info: - category: Manager - command: GetHostInterfaces - baseuri: "{{ baseuri }}" - username: "{{ username }}" - password: "{{ 
password }}" -''' - -RETURN = ''' -result: - description: different results depending on task - returned: always - type: dict - sample: List of CPUs on system -''' - -from ansible.module_utils.basic import AnsibleModule -from ansible_collections.community.general.plugins.module_utils.redfish_utils import RedfishUtils - -CATEGORY_COMMANDS_ALL = { - "Systems": ["GetSystemInventory", "GetPsuInventory", "GetCpuInventory", - "GetMemoryInventory", "GetNicInventory", "GetHealthReport", - "GetStorageControllerInventory", "GetDiskInventory", "GetVolumeInventory", - "GetBiosAttributes", "GetBootOrder", "GetBootOverride"], - "Chassis": ["GetFanInventory", "GetPsuInventory", "GetChassisPower", - "GetChassisThermals", "GetChassisInventory", "GetHealthReport"], - "Accounts": ["ListUsers"], - "Sessions": ["GetSessions"], - "Update": ["GetFirmwareInventory", "GetFirmwareUpdateCapabilities", "GetSoftwareInventory"], - "Manager": ["GetManagerNicInventory", "GetVirtualMedia", "GetLogs", "GetNetworkProtocols", - "GetHealthReport", "GetHostInterfaces"], -} - -CATEGORY_COMMANDS_DEFAULT = { - "Systems": "GetSystemInventory", - "Chassis": "GetFanInventory", - "Accounts": "ListUsers", - "Update": "GetFirmwareInventory", - "Sessions": "GetSessions", - "Manager": "GetManagerNicInventory" -} - - -def main(): - result = {} - category_list = [] - module = AnsibleModule( - argument_spec=dict( - category=dict(type='list', elements='str', default=['Systems']), - command=dict(type='list', elements='str'), - baseuri=dict(required=True), - username=dict(), - password=dict(no_log=True), - auth_token=dict(no_log=True), - timeout=dict(type='int', default=10) - ), - required_together=[ - ('username', 'password'), - ], - required_one_of=[ - ('username', 'auth_token'), - ], - mutually_exclusive=[ - ('username', 'auth_token'), - ], - supports_check_mode=True, - ) - - # admin credentials used for authentication - creds = {'user': module.params['username'], - 'pswd': module.params['password'], - 'token': 
module.params['auth_token']} - - # timeout - timeout = module.params['timeout'] - - # Build root URI - root_uri = "https://" + module.params['baseuri'] - rf_utils = RedfishUtils(creds, root_uri, timeout, module) - - # Build Category list - if "all" in module.params['category']: - for entry in CATEGORY_COMMANDS_ALL: - category_list.append(entry) - else: - # one or more categories specified - category_list = module.params['category'] - - for category in category_list: - command_list = [] - # Build Command list for each Category - if category in CATEGORY_COMMANDS_ALL: - if not module.params['command']: - # True if we don't specify a command --> use default - command_list.append(CATEGORY_COMMANDS_DEFAULT[category]) - elif "all" in module.params['command']: - for entry in range(len(CATEGORY_COMMANDS_ALL[category])): - command_list.append(CATEGORY_COMMANDS_ALL[category][entry]) - # one or more commands - else: - command_list = module.params['command'] - # Verify that all commands are valid - for cmd in command_list: - # Fail if even one command given is invalid - if cmd not in CATEGORY_COMMANDS_ALL[category]: - module.fail_json(msg="Invalid Command: %s" % cmd) - else: - # Fail if even one category given is invalid - module.fail_json(msg="Invalid Category: %s" % category) - - # Organize by Categories / Commands - if category == "Systems": - # execute only if we find a Systems resource - resource = rf_utils._find_systems_resource() - if resource['ret'] is False: - module.fail_json(msg=resource['msg']) - - for command in command_list: - if command == "GetSystemInventory": - result["system"] = rf_utils.get_multi_system_inventory() - elif command == "GetCpuInventory": - result["cpu"] = rf_utils.get_multi_cpu_inventory() - elif command == "GetMemoryInventory": - result["memory"] = rf_utils.get_multi_memory_inventory() - elif command == "GetNicInventory": - result["nic"] = rf_utils.get_multi_nic_inventory(category) - elif command == "GetStorageControllerInventory": - 
result["storage_controller"] = rf_utils.get_multi_storage_controller_inventory() - elif command == "GetDiskInventory": - result["disk"] = rf_utils.get_multi_disk_inventory() - elif command == "GetVolumeInventory": - result["volume"] = rf_utils.get_multi_volume_inventory() - elif command == "GetBiosAttributes": - result["bios_attribute"] = rf_utils.get_multi_bios_attributes() - elif command == "GetBootOrder": - result["boot_order"] = rf_utils.get_multi_boot_order() - elif command == "GetBootOverride": - result["boot_override"] = rf_utils.get_multi_boot_override() - elif command == "GetHealthReport": - result["health_report"] = rf_utils.get_multi_system_health_report() - - elif category == "Chassis": - # execute only if we find Chassis resource - resource = rf_utils._find_chassis_resource() - if resource['ret'] is False: - module.fail_json(msg=resource['msg']) - - for command in command_list: - if command == "GetFanInventory": - result["fan"] = rf_utils.get_fan_inventory() - elif command == "GetPsuInventory": - result["psu"] = rf_utils.get_psu_inventory() - elif command == "GetChassisThermals": - result["thermals"] = rf_utils.get_chassis_thermals() - elif command == "GetChassisPower": - result["chassis_power"] = rf_utils.get_chassis_power() - elif command == "GetChassisInventory": - result["chassis"] = rf_utils.get_chassis_inventory() - elif command == "GetHealthReport": - result["health_report"] = rf_utils.get_multi_chassis_health_report() - - elif category == "Accounts": - # execute only if we find an Account service resource - resource = rf_utils._find_accountservice_resource() - if resource['ret'] is False: - module.fail_json(msg=resource['msg']) - - for command in command_list: - if command == "ListUsers": - result["user"] = rf_utils.list_users() - - elif category == "Update": - # execute only if we find UpdateService resources - resource = rf_utils._find_updateservice_resource() - if resource['ret'] is False: - module.fail_json(msg=resource['msg']) - - for 
command in command_list: - if command == "GetFirmwareInventory": - result["firmware"] = rf_utils.get_firmware_inventory() - elif command == "GetSoftwareInventory": - result["software"] = rf_utils.get_software_inventory() - elif command == "GetFirmwareUpdateCapabilities": - result["firmware_update_capabilities"] = rf_utils.get_firmware_update_capabilities() - - elif category == "Sessions": - # execute only if we find SessionService resources - resource = rf_utils._find_sessionservice_resource() - if resource['ret'] is False: - module.fail_json(msg=resource['msg']) - - for command in command_list: - if command == "GetSessions": - result["session"] = rf_utils.get_sessions() - - elif category == "Manager": - # execute only if we find a Manager service resource - resource = rf_utils._find_managers_resource() - if resource['ret'] is False: - module.fail_json(msg=resource['msg']) - - for command in command_list: - if command == "GetManagerNicInventory": - result["manager_nics"] = rf_utils.get_multi_nic_inventory(category) - elif command == "GetVirtualMedia": - result["virtual_media"] = rf_utils.get_multi_virtualmedia() - elif command == "GetLogs": - result["log"] = rf_utils.get_logs() - elif command == "GetNetworkProtocols": - result["network_protocols"] = rf_utils.get_network_protocols() - elif command == "GetHealthReport": - result["health_report"] = rf_utils.get_multi_manager_health_report() - elif command == "GetHostInterfaces": - result["host_interfaces"] = rf_utils.get_hostinterfaces() - - # Return data back - module.exit_json(redfish_facts=result) - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/cloud/misc/rhevm.py b/plugins/modules/rhevm.py similarity index 86% rename from plugins/modules/cloud/misc/rhevm.py rename to plugins/modules/rhevm.py index 77b40248b3..422d2739d2 100644 --- a/plugins/modules/cloud/misc/rhevm.py +++ b/plugins/modules/rhevm.py @@ -1,215 +1,222 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- -# Copyright: (c) 2016, Timothy 
Vandenbrande -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# Copyright (c) 2016, Timothy Vandenbrande +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = r''' ---- +DOCUMENTATION = r""" module: rhevm short_description: RHEV/oVirt automation description: - - This module only supports oVirt/RHEV version 3. - - A newer module M(ovirt.ovirt.ovirt_vm) supports oVirt/RHV version 4. - - Allows you to create/remove/update or powermanage virtual machines on a RHEV/oVirt platform. + - This module only supports oVirt/RHEV version 3. + - A newer module M(ovirt.ovirt.ovirt_vm) supports oVirt/RHV version 4. + - Allows you to create/remove/update or powermanage virtual machines on a RHEV/oVirt platform. requirements: - - ovirtsdk + - ovirtsdk author: -- Timothy Vandenbrande (@TimothyVandenbrande) + - Timothy Vandenbrande (@TimothyVandenbrande) +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: none + diff_mode: + support: none options: - user: - description: - - The user to authenticate with. - type: str - default: admin@internal - password: - description: - - The password for user authentication. - type: str - required: true - server: - description: - - The name/IP of your RHEV-m/oVirt instance. - type: str - default: 127.0.0.1 - port: - description: - - The port on which the API is reachable. - type: int - default: 443 - insecure_api: - description: - - A boolean switch to make a secure or insecure connection to the server. - type: bool - default: no - name: - description: - - The name of the VM. - type: str - cluster: - description: - - The RHEV/oVirt cluster in which you want you VM to start. 
- type: str - datacenter: - description: - - The RHEV/oVirt datacenter in which you want you VM to start. - type: str - default: Default - state: - description: - - This serves to create/remove/update or powermanage your VM. - type: str - choices: [ absent, cd, down, info, ping, present, restarted, up ] - default: present - image: - description: - - The template to use for the VM. - type: str - type: - description: - - To define if the VM is a server or desktop. - type: str - choices: [ desktop, host, server ] - default: server - vmhost: - description: - - The host you wish your VM to run on. - type: str - vmcpu: - description: - - The number of CPUs you want in your VM. - type: int - default: 2 - cpu_share: - description: - - This parameter is used to configure the CPU share. - type: int - default: 0 - vmmem: - description: - - The amount of memory you want your VM to use (in GB). - type: int - default: 1 - osver: - description: - - The operating system option in RHEV/oVirt. - type: str - default: rhel_6x64 - mempol: - description: - - The minimum amount of memory you wish to reserve for this system. - type: int - default: 1 - vm_ha: - description: - - To make your VM High Available. - type: bool - default: yes - disks: - description: - - This option uses complex arguments and is a list of disks with the options name, size and domain. - type: list - elements: str - ifaces: - description: - - This option uses complex arguments and is a list of interfaces with the options name and vlan. - type: list - elements: str - aliases: [ interfaces, nics ] - boot_order: - description: - - This option uses complex arguments and is a list of items that specify the bootorder. - type: list - elements: str - default: [ hd, network ] - del_prot: - description: - - This option sets the delete protection checkbox. - type: bool - default: yes - cd_drive: - description: - - The CD you wish to have mounted on the VM when I(state = 'CD'). 
- type: str - timeout: - description: - - The timeout you wish to define for power actions. - - When I(state = 'up'). - - When I(state = 'down'). - - When I(state = 'restarted'). - type: int -''' + user: + description: + - The user to authenticate with. + type: str + default: admin@internal + password: + description: + - The password for user authentication. + type: str + required: true + server: + description: + - The name/IP of your RHEV-m/oVirt instance. + type: str + default: 127.0.0.1 + port: + description: + - The port on which the API is reachable. + type: int + default: 443 + insecure_api: + description: + - A boolean switch to make a secure or insecure connection to the server. + type: bool + default: false + name: + description: + - The name of the VM. + type: str + cluster: + description: + - The RHEV/oVirt cluster in which you want you VM to start. + type: str + default: '' + datacenter: + description: + - The RHEV/oVirt datacenter in which you want you VM to start. + type: str + default: Default + state: + description: + - This serves to create/remove/update or powermanage your VM. + type: str + choices: [absent, cd, down, info, ping, present, restarted, up] + default: present + image: + description: + - The template to use for the VM. + type: str + type: + description: + - To define if the VM is a server or desktop. + type: str + choices: [desktop, host, server] + default: server + vmhost: + description: + - The host you wish your VM to run on. + type: str + vmcpu: + description: + - The number of CPUs you want in your VM. + type: int + default: 2 + cpu_share: + description: + - This parameter is used to configure the CPU share. + type: int + default: 0 + vmmem: + description: + - The amount of memory you want your VM to use (in GB). + type: int + default: 1 + osver: + description: + - The operating system option in RHEV/oVirt. 
+ type: str + default: rhel_6x64 + mempol: + description: + - The minimum amount of memory you wish to reserve for this system. + type: int + default: 1 + vm_ha: + description: + - To make your VM High Available. + type: bool + default: true + disks: + description: + - This option uses complex arguments and is a list of disks with the options V(name), V(size), and V(domain). + type: list + elements: str + ifaces: + description: + - This option uses complex arguments and is a list of interfaces with the options V(name) and V(vlan). + type: list + elements: str + aliases: [interfaces, nics] + boot_order: + description: + - This option uses complex arguments and is a list of items that specify the bootorder. + type: list + elements: str + default: [hd, network] + del_prot: + description: + - This option sets the delete protection checkbox. + type: bool + default: true + cd_drive: + description: + - The CD you wish to have mounted on the VM when O(state=cd). + type: str + timeout: + description: + - The timeout you wish to define for power actions. + - When O(state=up). + - When O(state=down). + - When O(state=restarted). + type: int +""" -RETURN = r''' +RETURN = r""" vm: - description: Returns all of the VMs variables and execution. 
- returned: always - type: dict - sample: '{ - "boot_order": [ - "hd", - "network" - ], - "changed": true, - "changes": [ - "Delete Protection" - ], - "cluster": "C1", - "cpu_share": "0", - "created": false, - "datacenter": "Default", - "del_prot": true, - "disks": [ - { - "domain": "ssd-san", - "name": "OS", - "size": 40 - } - ], - "eth0": "00:00:5E:00:53:00", - "eth1": "00:00:5E:00:53:01", - "eth2": "00:00:5E:00:53:02", - "exists": true, - "failed": false, - "ifaces": [ - { - "name": "eth0", - "vlan": "Management" - }, - { - "name": "eth1", - "vlan": "Internal" - }, - { - "name": "eth2", - "vlan": "External" - } - ], - "image": false, - "mempol": "0", - "msg": [ - "VM exists", - "cpu_share was already set to 0", - "VM high availability was already set to True", - "The boot order has already been set", - "VM delete protection has been set to True", - "Disk web2_Disk0_OS already exists", - "The VM starting host was already set to host416" - ], - "name": "web2", - "type": "server", - "uuid": "4ba5a1be-e60b-4368-9533-920f156c817b", - "vm_ha": true, - "vmcpu": "4", - "vmhost": "host416", - "vmmem": "16" - }' -''' + description: Returns all of the VMs variables and execution. 
+ returned: always + type: dict + sample: + { + "boot_order": [ + "hd", + "network" + ], + "changed": true, + "changes": [ + "Delete Protection" + ], + "cluster": "C1", + "cpu_share": "0", + "created": false, + "datacenter": "Default", + "del_prot": true, + "disks": [ + { + "domain": "ssd-san", + "name": "OS", + "size": 40 + } + ], + "eth0": "00:00:5E:00:53:00", + "eth1": "00:00:5E:00:53:01", + "eth2": "00:00:5E:00:53:02", + "exists": true, + "failed": false, + "ifaces": [ + { + "name": "eth0", + "vlan": "Management" + }, + { + "name": "eth1", + "vlan": "Internal" + }, + { + "name": "eth2", + "vlan": "External" + } + ], + "image": false, + "mempol": "0", + "msg": [ + "VM exists", + "cpu_share was already set to 0", + "VM high availability was already set to True", + "The boot order has already been set", + "VM delete protection has been set to True", + "Disk web2_Disk0_OS already exists", + "The VM starting host was already set to host416" + ], + "name": "web2", + "type": "server", + "uuid": "4ba5a1be-e60b-4368-9533-920f156c817b", + "vm_ha": true, + "vmcpu": "4", + "vmhost": "host416", + "vmmem": "16" + } +""" -EXAMPLES = r''' +EXAMPLES = r""" - name: Basic get info from VM community.general.rhevm: server: rhevm01 @@ -249,33 +256,33 @@ EXAMPLES = r''' vmcpu: 4 vmmem: 2 ifaces: - - name: eth0 - vlan: vlan2202 - - name: eth1 - vlan: vlan36 - - name: eth2 - vlan: vlan38 - - name: eth3 - vlan: vlan2202 + - name: eth0 + vlan: vlan2202 + - name: eth1 + vlan: vlan36 + - name: eth2 + vlan: vlan38 + - name: eth3 + vlan: vlan2202 disks: - - name: root - size: 10 - domain: ssd-san - - name: swap - size: 10 - domain: 15kiscsi-san - - name: opt - size: 10 - domain: 15kiscsi-san - - name: var - size: 10 - domain: 10kiscsi-san - - name: home - size: 10 - domain: sata-san + - name: root + size: 10 + domain: ssd-san + - name: swap + size: 10 + domain: 15kiscsi-san + - name: opt + size: 10 + domain: 15kiscsi-san + - name: var + size: 10 + domain: 10kiscsi-san + - name: home + size: 
10 + domain: sata-san boot_order: - - network - - hd + - network + - hd state: present - name: Add a CD to the disk cd_drive @@ -293,33 +300,33 @@ EXAMPLES = r''' type: host cluster: rhevm01 ifaces: - - name: em1 - - name: em2 - - name: p3p1 - ip: 172.31.224.200 - netmask: 255.255.254.0 - - name: p3p2 - ip: 172.31.225.200 - netmask: 255.255.254.0 - - name: bond0 - bond: - - em1 - - em2 - network: rhevm - ip: 172.31.222.200 - netmask: 255.255.255.0 - management: yes - - name: bond0.36 - network: vlan36 - ip: 10.2.36.200 - netmask: 255.255.254.0 - gateway: 10.2.36.254 - - name: bond0.2202 - network: vlan2202 - - name: bond0.38 - network: vlan38 + - name: em1 + - name: em2 + - name: p3p1 + ip: 172.31.224.200 + netmask: 255.255.254.0 + - name: p3p2 + ip: 172.31.225.200 + netmask: 255.255.254.0 + - name: bond0 + bond: + - em1 + - em2 + network: rhevm + ip: 172.31.222.200 + netmask: 255.255.255.0 + management: true + - name: bond0.36 + network: vlan36 + ip: 10.2.36.200 + netmask: 255.255.254.0 + gateway: 10.2.36.254 + - name: bond0.2202 + network: vlan2202 + - name: bond0.38 + network: vlan38 state: present -''' +""" import time @@ -803,7 +810,7 @@ class RHEVConn(object): setChanged() HOST = self.get_Host(host_name) state = HOST.status.state - while (state != 'non_operational' and state != 'up'): + while state != 'non_operational' and state != 'up': HOST = self.get_Host(host_name) state = HOST.status.state time.sleep(1) @@ -1252,7 +1259,6 @@ def setChanged(): def setMsg(message): - global failed msg.append(message) diff --git a/plugins/modules/packaging/os/rhsm_release.py b/plugins/modules/rhsm_release.py similarity index 69% rename from plugins/modules/packaging/os/rhsm_release.py rename to plugins/modules/rhsm_release.py index 4b76cee274..7034713c04 100644 --- a/plugins/modules/packaging/os/rhsm_release.py +++ b/plugins/modules/rhsm_release.py @@ -1,60 +1,67 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- -# (c) 2018, Sean Myers -# GNU General Public License v3.0+ (see 
COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# Copyright (c) 2018, Sean Myers +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: rhsm_release short_description: Set or Unset RHSM Release version description: - Sets or unsets the release version used by RHSM repositories. notes: - - This module will fail on an unregistered system. - Use the C(redhat_subscription) module to register a system + - This module fails on an unregistered system. Use the M(community.general.redhat_subscription) module to register a system prior to setting the RHSM release. + - It is possible to interact with C(subscription-manager) only as root, so root permissions are required to successfully + run this module. requirements: - Red Hat Enterprise Linux 6+ with subscription-manager installed +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: none options: release: description: - - RHSM release version to use (use null to unset) - required: true + - RHSM release version to use. + - To unset either pass V(null) for this option, or omit this option. 
type: str author: - Sean Myers (@seandst) -''' +""" -EXAMPLES = ''' +EXAMPLES = r""" # Set release version to 7.1 - name: Set RHSM release version community.general.rhsm_release: - release: "7.1" + release: "7.1" # Set release version to 6Server - name: Set RHSM release version community.general.rhsm_release: - release: "6Server" + release: "6Server" # Unset release version - name: Unset RHSM release release community.general.rhsm_release: - release: null -''' + release: +""" -RETURN = ''' +RETURN = r""" current_release: - description: The current RHSM release version value + description: The current RHSM release version value. returned: success type: str -''' +""" from ansible.module_utils.basic import AnsibleModule +import os import re # Matches release-like values such as 7.2, 5.10, 6Server, 8 @@ -66,9 +73,9 @@ def _sm_release(module, *args): # pass args to s-m release, e.g. _sm_release(module, '--set', '0.1') becomes # "subscription-manager release --set 0.1" sm_bin = module.get_bin_path('subscription-manager', required=True) - cmd = '{0} release {1}'.format(sm_bin, " ".join(args)) + cmd = [sm_bin, 'release'] + list(args) # delegate nonzero rc handling to run_command - return module.run_command(cmd, check_rc=True) + return module.run_command(cmd, check_rc=True, expand_user_and_vars=False) def get_release(module): @@ -96,11 +103,16 @@ def set_release(module, release): def main(): module = AnsibleModule( argument_spec=dict( - release=dict(type='str', required=True), + release=dict(type='str'), ), supports_check_mode=True ) + if os.getuid() != 0: + module.fail_json( + msg="Interacting with subscription-manager requires root permissions ('become: true')" + ) + target_release = module.params['release'] # sanity check: the target release at least looks like a valid release diff --git a/plugins/modules/packaging/os/rhsm_repository.py b/plugins/modules/rhsm_repository.py similarity index 53% rename from plugins/modules/packaging/os/rhsm_repository.py rename to 
plugins/modules/rhsm_repository.py index b103ea621a..b5b4eab4dc 100644 --- a/plugins/modules/packaging/os/rhsm_repository.py +++ b/plugins/modules/rhsm_repository.py @@ -1,52 +1,57 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- -# Copyright: (c) 2017, Giovanni Sciortino (@giovannisciortino) -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# Copyright (c) 2017, Giovanni Sciortino (@giovannisciortino) +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: rhsm_repository short_description: Manage RHSM repositories using the subscription-manager command description: - - Manage (Enable/Disable) RHSM repositories to the Red Hat Subscription - Management entitlement platform using the C(subscription-manager) command. + - Manage (Enable/Disable) RHSM repositories to the Red Hat Subscription Management entitlement platform using the C(subscription-manager) + command. author: Giovanni Sciortino (@giovannisciortino) notes: - - In order to manage RHSM repositories the system must be already registered - to RHSM manually or using the Ansible C(redhat_subscription) module. - + - In order to manage RHSM repositories the system must be already registered to RHSM manually or using the Ansible M(community.general.redhat_subscription) + module. + - It is possible to interact with C(subscription-manager) only as root, so root permissions are required to successfully + run this module. 
requirements: - subscription-manager +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: full options: state: description: - - If state is equal to present or disabled, indicates the desired - repository state. - choices: [present, enabled, absent, disabled] + - If state is equal to present or disabled, indicates the desired repository state. + - In community.general 10.0.0 the states V(present) and V(absent) have been removed. Please use V(enabled) and V(disabled) + instead. + choices: [enabled, disabled] default: "enabled" type: str name: description: - The ID of repositories to enable. - - To operate on several repositories this can accept a comma separated - list or a YAML list. - required: True + - To operate on several repositories this can accept a comma separated list or a YAML list. + required: true type: list elements: str purge: description: - - Disable all currently enabled repositories that are not not specified in C(name). - Only set this to C(True) if passing in a list of repositories to the C(name) field. - Using this with C(loop) will most likely not have the desired result. + - Disable all currently enabled repositories that are not not specified in O(name). Only set this to V(true) if passing + in a list of repositories to the O(name) field. Using this with C(loop) is likely not to have the desired result. type: bool - default: no -''' + default: false +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: Enable a RHSM repository community.general.rhsm_repository: name: rhel-7-server-rpms @@ -64,107 +69,100 @@ EXAMPLES = ''' - name: Disable all repositories except rhel-7-server-rpms community.general.rhsm_repository: name: rhel-7-server-rpms - purge: True -''' + purge: true +""" -RETURN = ''' +RETURN = r""" repositories: description: - The list of RHSM repositories with their states. 
- When this module is used to change the repository states, this list contains the updated states after the changes. returned: success type: list -''' +""" -import re import os from fnmatch import fnmatch from copy import deepcopy from ansible.module_utils.basic import AnsibleModule -def run_subscription_manager(module, arguments): - # Execute subscription-manager with arguments and manage common errors - rhsm_bin = module.get_bin_path('subscription-manager') - if not rhsm_bin: - module.fail_json(msg='The executable file subscription-manager was not found in PATH') +class Rhsm(object): + def __init__(self, module): + self.module = module + self.rhsm_bin = self.module.get_bin_path('subscription-manager', required=True) + self.rhsm_kwargs = { + 'environ_update': dict(LANG='C', LC_ALL='C', LC_MESSAGES='C'), + 'expand_user_and_vars': False, + 'use_unsafe_shell': False, + } - lang_env = dict(LANG='C', LC_ALL='C', LC_MESSAGES='C') - rc, out, err = module.run_command("%s %s" % (rhsm_bin, " ".join(arguments)), environ_update=lang_env) + def run_repos(self, arguments): + """ + Execute `subscription-manager repos` with arguments and manage common errors + """ + rc, out, err = self.module.run_command( + [self.rhsm_bin, 'repos'] + arguments, + **self.rhsm_kwargs + ) - if rc == 1 and (err == 'The password you typed is invalid.\nPlease try again.\n' or os.getuid() != 0): - module.fail_json(msg='The executable file subscription-manager must be run using root privileges') - elif rc == 0 and out == 'This system has no repositories available through subscriptions.\n': - module.fail_json(msg='This system has no repositories available through subscriptions') - elif rc == 1: - module.fail_json(msg='subscription-manager failed with the following error: %s' % err) - else: - return rc, out, err + if rc == 0 and out == 'This system has no repositories available through subscriptions.\n': + self.module.fail_json(msg='This system has no repositories available through subscriptions') + elif 
rc == 1: + self.module.fail_json(msg='subscription-manager failed with the following error: %s' % err) + else: + return rc, out, err + + def list_repositories(self): + """ + Generate RHSM repository list and return a list of dict + """ + rc, out, err = self.run_repos(['--list']) + + repo_id = '' + repo_name = '' + repo_url = '' + repo_enabled = '' + + repo_result = [] + for line in out.splitlines(): + # ignore lines that are: + # - empty + # - "+---------[...]" -- i.e. header + # - " Available Repositories [...]" -- i.e. header + if line == '' or line[0] == '+' or line[0] == ' ': + continue + + if line.startswith('Repo ID: '): + repo_id = line[9:].lstrip() + continue + + if line.startswith('Repo Name: '): + repo_name = line[11:].lstrip() + continue + + if line.startswith('Repo URL: '): + repo_url = line[10:].lstrip() + continue + + if line.startswith('Enabled: '): + repo_enabled = line[9:].lstrip() + + repo = { + "id": repo_id, + "name": repo_name, + "url": repo_url, + "enabled": True if repo_enabled == '1' else False + } + + repo_result.append(repo) + + return repo_result -def get_repository_list(module, list_parameter): - # Generate RHSM repository list and return a list of dict - if list_parameter == 'list_enabled': - rhsm_arguments = ['repos', '--list-enabled'] - elif list_parameter == 'list_disabled': - rhsm_arguments = ['repos', '--list-disabled'] - elif list_parameter == 'list': - rhsm_arguments = ['repos', '--list'] - rc, out, err = run_subscription_manager(module, rhsm_arguments) - - skip_lines = [ - '+----------------------------------------------------------+', - ' Available Repositories in /etc/yum.repos.d/redhat.repo' - ] - repo_id_re = re.compile(r'Repo ID:\s+(.*)') - repo_name_re = re.compile(r'Repo Name:\s+(.*)') - repo_url_re = re.compile(r'Repo URL:\s+(.*)') - repo_enabled_re = re.compile(r'Enabled:\s+(.*)') - - repo_id = '' - repo_name = '' - repo_url = '' - repo_enabled = '' - - repo_result = [] - for line in out.splitlines(): - if line == '' or 
line in skip_lines: - continue - - repo_id_match = repo_id_re.match(line) - if repo_id_match: - repo_id = repo_id_match.group(1) - continue - - repo_name_match = repo_name_re.match(line) - if repo_name_match: - repo_name = repo_name_match.group(1) - continue - - repo_url_match = repo_url_re.match(line) - if repo_url_match: - repo_url = repo_url_match.group(1) - continue - - repo_enabled_match = repo_enabled_re.match(line) - if repo_enabled_match: - repo_enabled = repo_enabled_match.group(1) - - repo = { - "id": repo_id, - "name": repo_name, - "url": repo_url, - "enabled": True if repo_enabled == '1' else False - } - - repo_result.append(repo) - - return repo_result - - -def repository_modify(module, state, name, purge=False): +def repository_modify(module, rhsm, state, name, purge=False): name = set(name) - current_repo_list = get_repository_list(module, 'list') + current_repo_list = rhsm.list_repositories() updated_repo_list = deepcopy(current_repo_list) matched_existing_repo = {} for repoid in name: @@ -179,7 +177,7 @@ def repository_modify(module, state, name, purge=False): results = [] diff_before = "" diff_after = "" - rhsm_arguments = ['repos'] + rhsm_arguments = [] for repoid in matched_existing_repo: if len(matched_existing_repo[repoid]) == 0: @@ -214,6 +212,9 @@ def repository_modify(module, state, name, purge=False): diff_after.join("Repository '{repoid}' is disabled for this system\n".format(repoid=repoid)) results.append("Repository '{repoid}' is disabled for this system".format(repoid=repoid)) rhsm_arguments.extend(['--disable', repoid]) + for updated_repo in updated_repo_list: + if updated_repo['id'] in difference: + updated_repo['enabled'] = False diff = {'before': diff_before, 'after': diff_after, @@ -221,7 +222,7 @@ def repository_modify(module, state, name, purge=False): 'after_header': "RHSM repositories"} if not module.check_mode and changed: - rc, out, err = run_subscription_manager(module, rhsm_arguments) + rc, out, err = 
rhsm.run_repos(rhsm_arguments) results = out.splitlines() module.exit_json(results=results, changed=changed, repositories=updated_repo_list, diff=diff) @@ -230,16 +231,24 @@ def main(): module = AnsibleModule( argument_spec=dict( name=dict(type='list', elements='str', required=True), - state=dict(choices=['enabled', 'disabled', 'present', 'absent'], default='enabled'), + state=dict(choices=['enabled', 'disabled'], default='enabled'), purge=dict(type='bool', default=False), ), supports_check_mode=True, ) + + if os.getuid() != 0: + module.fail_json( + msg="Interacting with subscription-manager requires root permissions ('become: true')" + ) + + rhsm = Rhsm(module) + name = module.params['name'] state = module.params['state'] purge = module.params['purge'] - repository_modify(module, state, name, purge) + repository_modify(module, rhsm, state, name, purge) if __name__ == '__main__': diff --git a/plugins/modules/database/misc/riak.py b/plugins/modules/riak.py similarity index 75% rename from plugins/modules/database/misc/riak.py rename to plugins/modules/riak.py index 4ee7b5b674..4f3ac14e13 100644 --- a/plugins/modules/database/misc/riak.py +++ b/plugins/modules/riak.py @@ -1,23 +1,27 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- -# (c) 2013, James Martin , Drew Kerrigan -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# Copyright (c) 2013, James Martin , Drew Kerrigan +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: riak short_description: This module handles some common Riak operations description: - - This module can be used to join nodes to a cluster, check - the status of the cluster. 
+ - This module can be used to join nodes to a cluster, check the status of the cluster. author: - - "James Martin (@jsmartin)" - - "Drew Kerrigan (@drewkerrigan)" + - "James Martin (@jsmartin)" + - "Drew Kerrigan (@drewkerrigan)" +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: none + diff_mode: + support: none options: command: description: @@ -26,17 +30,17 @@ options: type: str config_dir: description: - - The path to the riak configuration directory + - The path to the riak configuration directory. default: /etc/riak type: path http_conn: description: - - The ip address and port that is listening for Riak HTTP queries + - The IP address and port that is listening for Riak HTTP queries. default: 127.0.0.1:8098 type: str target_node: description: - - The target node for certain operations (join, ping) + - The target node for certain operations (join, ping). default: riak@127.0.0.1 type: str wait_for_handoffs: @@ -56,13 +60,13 @@ options: type: str validate_certs: description: - - If C(no), SSL certificates will not be validated. This should only be used - on personally controlled sites using self-signed certificates. + - If V(false), SSL certificates are not validated. This should only be used on personally controlled sites using self-signed + certificates. type: bool - default: 'yes' -''' + default: true +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: "Join's a Riak node to another node" community.general.riak: command: join @@ -70,12 +74,12 @@ EXAMPLES = ''' - name: Wait for handoffs to finish. Use with async and poll. 
community.general.riak: - wait_for_handoffs: yes + wait_for_handoffs: true - name: Wait for riak_kv service to startup community.general.riak: wait_for_service: kv -''' +""" import json import time @@ -85,7 +89,7 @@ from ansible.module_utils.urls import fetch_url def ring_check(module, riak_admin_bin): - cmd = '%s ringready' % riak_admin_bin + cmd = riak_admin_bin + ['ringready'] rc, out, err = module.run_command(cmd) if rc == 0 and 'TRUE All nodes agree on the ring' in out: return True @@ -97,15 +101,13 @@ def main(): module = AnsibleModule( argument_spec=dict( - command=dict(required=False, default=None, choices=[ - 'ping', 'kv_test', 'join', 'plan', 'commit']), + command=dict(choices=['ping', 'kv_test', 'join', 'plan', 'commit']), config_dir=dict(default='/etc/riak', type='path'), - http_conn=dict(required=False, default='127.0.0.1:8098'), - target_node=dict(default='riak@127.0.0.1', required=False), + http_conn=dict(default='127.0.0.1:8098'), + target_node=dict(default='riak@127.0.0.1'), wait_for_handoffs=dict(default=0, type='int'), wait_for_ring=dict(default=0, type='int'), - wait_for_service=dict( - required=False, default=None, choices=['kv']), + wait_for_service=dict(choices=['kv']), validate_certs=dict(default=True, type='bool')) ) @@ -119,6 +121,7 @@ def main(): # make sure riak commands are on the path riak_bin = module.get_bin_path('riak') riak_admin_bin = module.get_bin_path('riak-admin') + riak_admin_bin = [riak_admin_bin] if riak_admin_bin is not None else [riak_bin, 'admin'] timeout = time.time() + 120 while True: @@ -148,7 +151,7 @@ def main(): version=version) if command == 'ping': - cmd = '%s ping %s' % (riak_bin, target_node) + cmd = [riak_bin, 'ping', target_node] rc, out, err = module.run_command(cmd) if rc == 0: result['ping'] = out @@ -156,7 +159,7 @@ def main(): module.fail_json(msg=out) elif command == 'kv_test': - cmd = '%s test' % riak_admin_bin + cmd = riak_admin_bin + ['test'] rc, out, err = module.run_command(cmd) if rc == 0: 
result['kv_test'] = out @@ -167,7 +170,7 @@ def main(): if nodes.count(node_name) == 1 and len(nodes) > 1: result['join'] = 'Node is already in cluster or staged to be in cluster.' else: - cmd = '%s cluster join %s' % (riak_admin_bin, target_node) + cmd = riak_admin_bin + ['cluster', 'join', target_node] rc, out, err = module.run_command(cmd) if rc == 0: result['join'] = out @@ -176,7 +179,7 @@ def main(): module.fail_json(msg=out) elif command == 'plan': - cmd = '%s cluster plan' % riak_admin_bin + cmd = riak_admin_bin + ['cluster', 'plan'] rc, out, err = module.run_command(cmd) if rc == 0: result['plan'] = out @@ -186,7 +189,7 @@ def main(): module.fail_json(msg=out) elif command == 'commit': - cmd = '%s cluster commit' % riak_admin_bin + cmd = riak_admin_bin + ['cluster', 'commit'] rc, out, err = module.run_command(cmd) if rc == 0: result['commit'] = out @@ -198,7 +201,7 @@ def main(): if wait_for_handoffs: timeout = time.time() + wait_for_handoffs while True: - cmd = '%s transfers' % riak_admin_bin + cmd = riak_admin_bin + ['transfers'] rc, out, err = module.run_command(cmd) if 'No transfers active' in out: result['handoffs'] = 'No transfers active.' 
@@ -208,7 +211,7 @@ def main(): module.fail_json(msg='Timeout waiting for handoffs.') if wait_for_service: - cmd = [riak_admin_bin, 'wait_for_service', 'riak_%s' % wait_for_service, node_name] + cmd = riak_admin_bin + ['wait_for_service', 'riak_%s' % wait_for_service, node_name] rc, out, err = module.run_command(cmd) result['service'] = out diff --git a/plugins/modules/notification/rocketchat.py b/plugins/modules/rocketchat.py similarity index 61% rename from plugins/modules/notification/rocketchat.py rename to plugins/modules/rocketchat.py index 500560e417..8bbc1e153b 100644 --- a/plugins/modules/notification/rocketchat.py +++ b/plugins/modules/rocketchat.py @@ -1,40 +1,44 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- -# (c) 2016, Deepak Kothandan -# (c) 2015, Stefan Berggren -# (c) 2014, Ramon de la Fuente +# Copyright (c) 2016, Deepak Kothandan +# Copyright (c) 2015, Stefan Berggren +# Copyright (c) 2014, Ramon de la Fuente # -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = ''' +DOCUMENTATION = r""" module: rocketchat short_description: Send notifications to Rocket Chat description: - - The C(rocketchat) module sends notifications to Rocket Chat via the Incoming WebHook integration + - This module sends notifications to Rocket Chat through the Incoming WebHook integration. author: "Ramon de la Fuente (@ramondelafuente)" +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: none + diff_mode: + support: none options: domain: type: str description: - - The domain for your environment without protocol. (i.e. 
- C(example.com) or C(chat.example.com)) + - The domain for your environment without protocol. (For example V(example.com) or V(chat.example.com)). required: true token: type: str description: - - Rocket Chat Incoming Webhook integration token. This provides - authentication to Rocket Chat's Incoming webhook for posting - messages. + - Rocket Chat Incoming Webhook integration token. This provides authentication to Rocket Chat's Incoming webhook for + posting messages. required: true protocol: type: str description: - - Specify the protocol used to send notification messages before the webhook url. (i.e. http or https) + - Specify the protocol used to send notification messages before the webhook URL (that is, V(http) or V(https)). default: https choices: - 'http' @@ -46,8 +50,8 @@ options: channel: type: str description: - - Channel to send the message to. If absent, the message goes to the channel selected for the I(token) - specified during the creation of webhook. + - Channel to send the message to. If absent, the message goes to the channel selected for the O(token) specified during + the creation of webhook. username: type: str description: @@ -57,30 +61,32 @@ options: type: str description: - URL for the message sender's icon. - default: "https://www.ansible.com/favicon.ico" + default: "https://docs.ansible.com/favicon.ico" icon_emoji: type: str description: - - Emoji for the message sender. The representation for the available emojis can be - got from Rocket Chat. (for example :thumbsup:) (if I(icon_emoji) is set, I(icon_url) will not be used) + - Emoji for the message sender. The representation for the available emojis can be got from Rocket Chat. + - For example V(:thumbsup:). + - If O(icon_emoji) is set, O(icon_url) is not used. link_names: type: int description: - - Automatically create links for channels and usernames in I(msg). + - Automatically create links for channels and usernames in O(msg). 
default: 1 choices: - 1 - 0 validate_certs: description: - - If C(no), SSL certificates will not be validated. This should only be used - on personally controlled sites using self-signed certificates. + - If V(false), SSL certificates are not validated. This should only be used on personally controlled sites using self-signed + certificates. type: bool - default: 'yes' + default: true color: type: str description: - - Allow text to use default colors - use the default of 'normal' to not send a custom color bar at the start of the message + - Allow text to use default colors - use the default of V(normal) to not send a custom color bar at the start of the + message. default: 'normal' choices: - 'normal' @@ -92,28 +98,37 @@ options: elements: dict description: - Define a list of attachments. -''' + is_pre740: + description: + - If V(true), the payload matches Rocket.Chat prior to 7.4.0 format. This format has been used by the module since its + inception, but is no longer supported by Rocket.Chat 7.4.0. + - The default value of the option, V(true), is B(deprecated) since community.general 11.2.0 and will change to V(false) in community.general 13.0.0. + - This parameter is going to be removed in a future release when Rocket.Chat 7.4.0 becomes the minimum supported version. 
+ type: bool + version_added: 10.5.0 +""" -EXAMPLES = """ -- name: Send notification message via Rocket Chat +EXAMPLES = r""" +- name: Send notification message through Rocket Chat community.general.rocketchat: token: thetoken/generatedby/rocketchat domain: chat.example.com msg: '{{ inventory_hostname }} completed' delegate_to: localhost -- name: Send notification message via Rocket Chat all options +- name: Send notification message through Rocket Chat all options community.general.rocketchat: domain: chat.example.com token: thetoken/generatedby/rocketchat msg: '{{ inventory_hostname }} completed' - channel: #ansible + channel: "#ansible" username: 'Ansible on {{ inventory_hostname }}' icon_url: http://www.example.com/some-image-file.png link_names: 0 delegate_to: localhost -- name: Insert a color bar in front of the message for visibility purposes and use the default webhook icon and name configured in rocketchat +- name: Insert a color bar in front of the message for visibility purposes and use the default webhook icon and name configured + in rocketchat community.general.rocketchat: token: thetoken/generatedby/rocketchat domain: chat.example.com @@ -129,25 +144,18 @@ EXAMPLES = """ domain: chat.example.com attachments: - text: Display my system load on host A and B - color: #ff00dd + color: "#ff00dd" title: System load fields: - title: System A value: 'load average: 0,74, 0,66, 0,63' - short: True + short: true - title: System B value: 'load average: 5,16, 4,64, 2,43' - short: True + short: true delegate_to: localhost """ -RETURN = """ -changed: - description: A flag indicating if any change was made or not. 
- returned: success - type: bool - sample: false -""" from ansible.module_utils.basic import AnsibleModule from ansible.module_utils.urls import fetch_url @@ -156,14 +164,14 @@ from ansible.module_utils.urls import fetch_url ROCKETCHAT_INCOMING_WEBHOOK = '%s://%s/hooks/%s' -def build_payload_for_rocketchat(module, text, channel, username, icon_url, icon_emoji, link_names, color, attachments): +def build_payload_for_rocketchat(module, text, channel, username, icon_url, icon_emoji, link_names, color, attachments, is_pre740): payload = {} if color == "normal" and text is not None: payload = dict(text=text) elif text is not None: payload = dict(attachments=[dict(text=text, color=color)]) if channel is not None: - if (channel[0] == '#') or (channel[0] == '@'): + if channel[0] == '#' or channel[0] == '@': payload['channel'] = channel else: payload['channel'] = '#' + channel @@ -186,18 +194,23 @@ def build_payload_for_rocketchat(module, text, channel, username, icon_url, icon attachment['fallback'] = attachment['text'] payload['attachments'].append(attachment) - payload = "payload=" + module.jsonify(payload) + payload = module.jsonify(payload) + if is_pre740: + payload = "payload=" + payload return payload -def do_notify_rocketchat(module, domain, token, protocol, payload): +def do_notify_rocketchat(module, domain, token, protocol, payload, is_pre740): if token.count('/') < 1: module.fail_json(msg="Invalid Token specified, provide a valid token") rocketchat_incoming_webhook = ROCKETCHAT_INCOMING_WEBHOOK % (protocol, domain, token) - response, info = fetch_url(module, rocketchat_incoming_webhook, data=payload) + headers = None + if not is_pre740: + headers = {'Content-type': 'application/json'} + response, info = fetch_url(module, rocketchat_incoming_webhook, data=payload, headers=headers) if info['status'] != 200: module.fail_json(msg="failed to send message, return status=%s" % str(info['status'])) @@ -208,15 +221,16 @@ def main(): domain=dict(type='str', required=True), 
token=dict(type='str', required=True, no_log=True), protocol=dict(type='str', default='https', choices=['http', 'https']), - msg=dict(type='str', required=False), + msg=dict(type='str'), channel=dict(type='str'), username=dict(type='str', default='Ansible'), - icon_url=dict(type='str', default='https://www.ansible.com/favicon.ico'), + icon_url=dict(type='str', default='https://docs.ansible.com/favicon.ico'), icon_emoji=dict(type='str'), link_names=dict(type='int', default=1, choices=[0, 1]), validate_certs=dict(default=True, type='bool'), color=dict(type='str', default='normal', choices=['normal', 'good', 'warning', 'danger']), - attachments=dict(type='list', elements='dict', required=False) + attachments=dict(type='list', elements='dict'), + is_pre740=dict(type='bool') ) ) @@ -231,9 +245,19 @@ def main(): link_names = module.params['link_names'] color = module.params['color'] attachments = module.params['attachments'] + is_pre740 = module.params['is_pre740'] - payload = build_payload_for_rocketchat(module, text, channel, username, icon_url, icon_emoji, link_names, color, attachments) - do_notify_rocketchat(module, domain, token, protocol, payload) + if is_pre740 is None: + module.deprecate( + "The default value 'true' for 'is_pre740' is deprecated and will change to 'false' in community.general 13.0.0." 
+ " You can explicitly set 'is_pre740' in your task to avoid this deprecation warning", + version="13.0.0", + collection_name="community.general", + ) + is_pre740 = True + + payload = build_payload_for_rocketchat(module, text, channel, username, icon_url, icon_emoji, link_names, color, attachments, is_pre740) + do_notify_rocketchat(module, domain, token, protocol, payload, is_pre740) module.exit_json(msg="OK") diff --git a/plugins/modules/monitoring/rollbar_deployment.py b/plugins/modules/rollbar_deployment.py similarity index 62% rename from plugins/modules/monitoring/rollbar_deployment.py rename to plugins/modules/rollbar_deployment.py index cea3bfdf51..383573d8c7 100644 --- a/plugins/modules/monitoring/rollbar_deployment.py +++ b/plugins/modules/rollbar_deployment.py @@ -1,21 +1,25 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- # Copyright 2014, Max Riveiro, -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: rollbar_deployment author: "Max Riveiro (@kavu)" short_description: Notify Rollbar about app deployments description: - - Notify Rollbar about app deployments - (see https://rollbar.com/docs/deploys_other/) + - Notify Rollbar about app deployments (see U(https://rollbar.com/docs/deploys_other/)). +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: none options: token: type: str @@ -25,7 +29,7 @@ options: environment: type: str description: - - Name of the environment being deployed, e.g. 'production'. + - Name of the environment being deployed, for example V(production). 
required: true revision: type: str @@ -45,7 +49,7 @@ options: comment: type: str description: - - Deploy comment (e.g. what is being deployed). + - Deploy comment (for example what is being deployed). required: false url: type: str @@ -55,35 +59,35 @@ options: default: 'https://api.rollbar.com/api/1/deploy/' validate_certs: description: - - If C(no), SSL certificates for the target url will not be validated. - This should only be used on personally controlled sites using - self-signed certificates. + - If V(false), SSL certificates for the target URL are not validated. This should only be used on personally controlled + sites using self-signed certificates. required: false - default: 'yes' + default: true type: bool -''' +""" -EXAMPLES = ''' - - name: Rollbar deployment notification - community.general.rollbar_deployment: - token: AAAAAA - environment: staging - user: ansible - revision: '4.2' - rollbar_user: admin - comment: Test Deploy +EXAMPLES = r""" +- name: Rollbar deployment notification + community.general.rollbar_deployment: + token: AAAAAA + environment: staging + user: ansible + revision: '4.2' + rollbar_user: admin + comment: Test Deploy + +- name: Notify rollbar about current git revision deployment by current user + community.general.rollbar_deployment: + token: "{{ rollbar_access_token }}" + environment: production + revision: "{{ lookup('pipe', 'git rev-parse HEAD') }}" + user: "{{ lookup('env', 'USER') }}" +""" - - name: Notify rollbar about current git revision deployment by current user - community.general.rollbar_deployment: - token: "{{ rollbar_access_token }}" - environment: production - revision: "{{ lookup('pipe', 'git rev-parse HEAD') }}" - user: "{{ lookup('env', 'USER') }}" -''' import traceback +from urllib.parse import urlencode from ansible.module_utils.basic import AnsibleModule -from ansible.module_utils.six.moves.urllib.parse import urlencode from ansible.module_utils.common.text.converters import to_native from 
ansible.module_utils.urls import fetch_url @@ -95,13 +99,10 @@ def main(): token=dict(required=True, no_log=True), environment=dict(required=True), revision=dict(required=True), - user=dict(required=False), - rollbar_user=dict(required=False), - comment=dict(required=False), - url=dict( - required=False, - default='https://api.rollbar.com/api/1/deploy/' - ), + user=dict(), + rollbar_user=dict(), + comment=dict(), + url=dict(default='https://api.rollbar.com/api/1/deploy/'), validate_certs=dict(default=True, type='bool'), ), supports_check_mode=True diff --git a/plugins/modules/rpm_ostree_pkg.py b/plugins/modules/rpm_ostree_pkg.py new file mode 100644 index 0000000000..a543986706 --- /dev/null +++ b/plugins/modules/rpm_ostree_pkg.py @@ -0,0 +1,201 @@ +#!/usr/bin/python +# Copyright (c) 2018, Dusty Mabe +# Copyright (c) 2018, Ansible Project +# Copyright (c) 2021, Abhijeet Kasurde +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + + +DOCUMENTATION = r""" +module: rpm_ostree_pkg +short_description: Install or uninstall overlay additional packages +version_added: "2.0.0" +description: + - Install or uninstall overlay additional packages using C(rpm-ostree) command. +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: none + diff_mode: + support: none +options: + name: + description: + - Name of overlay package to install or remove. + required: true + type: list + elements: str + aliases: [pkg] + state: + description: + - State of the overlay package. + - V(present) simply ensures that a desired package is installed. + - V(absent) removes the specified package. + choices: ['absent', 'present'] + default: 'present' + type: str + apply_live: + description: + - Adds the options C(--apply-live) when O(state=present). + - Option is ignored when O(state=absent). 
+ - For more information, please see U(https://coreos.github.io/rpm-ostree/apply-live/). + type: bool + default: false + version_added: 10.1.0 +author: + - Dusty Mabe (@dustymabe) + - Abhijeet Kasurde (@Akasurde) +""" + +EXAMPLES = r""" +- name: Install overlay package + community.general.rpm_ostree_pkg: + name: nfs-utils + state: present + +- name: Remove overlay package + community.general.rpm_ostree_pkg: + name: nfs-utils + state: absent + +- name: Apply the overlay package live + community.general.rpm_ostree_pkg: + name: nfs-utils + state: present + apply_live: true + +# In case a different transaction is currently running the module would fail. +# Adding a delay can help mitigate this problem: +- name: Install overlay package + community.general.rpm_ostree_pkg: + name: nfs-utils + state: present + register: rpm_ostree_pkg + until: rpm_ostree_pkg is not failed + retries: 10 + delay: 30 +""" + +RETURN = r""" +action: + description: Action performed. + returned: always + type: str + sample: 'install' +packages: + description: A list of packages specified. + returned: always + type: list + sample: ["nfs-utils"] +cmd: + description: Full command used for performed action. + returned: always + type: str + sample: 'rpm-ostree uninstall --allow-inactive --idempotent --unchanged-exit-77 nfs-utils' +needs_reboot: + description: Determine if machine needs a reboot to apply current changes. 
+ returned: success + type: bool + sample: true + version_added: 10.1.0 +""" + +from ansible.module_utils.basic import AnsibleModule + + +class RpmOstreePkg: + def __init__(self, module): + self.module = module + self.params = module.params + self.state = module.params['state'] + + def ensure(self): + results = dict( + rc=0, + changed=False, + action='', + packages=[], + stdout='', + stderr='', + cmd='', + needs_reboot=False, + ) + + # Ensure rpm-ostree command exists + cmd = [self.module.get_bin_path('rpm-ostree', required=True)] + + # Decide action to perform + if self.state == 'present': + results['action'] = 'install' + cmd.append('install') + elif self.state == 'absent': + results['action'] = 'uninstall' + cmd.append('uninstall') + + # Add the options to the command line + if self.params['apply_live'] and self.state == 'present': + cmd.extend(['--apply-live', '--assumeyes']) + + # Additional parameters + cmd.extend(['--allow-inactive', '--idempotent', '--unchanged-exit-77']) + for pkg in self.params['name']: + cmd.append(pkg) + results['packages'].append(pkg) + + rc, out, err = self.module.run_command(cmd) + + # Determine if system needs a reboot to apply change + if 'Changes queued for next boot. Run "systemctl reboot" to start a reboot' in out: + results['needs_reboot'] = True + + results.update(dict( + rc=rc, + cmd=' '.join(cmd), + stdout=out, + stderr=err, + )) + + # A few possible options: + # - rc=0 - succeeded in making a change + # - rc=77 - no change was needed + # - rc=? 
- error + if rc == 0: + results['changed'] = True + elif rc == 77: + results['changed'] = False + results['rc'] = 0 + else: + self.module.fail_json(msg='non-zero return code', **results) + + self.module.exit_json(**results) + + +def main(): + module = AnsibleModule( + argument_spec=dict( + state=dict( + default="present", + choices=['absent', 'present'] + ), + name=dict( + aliases=["pkg"], + required=True, + type='list', + elements='str', + ), + apply_live=dict( + type='bool', + default=False, + ), + ), + ) + + rpm_ostree_pkg = RpmOstreePkg(module) + rpm_ostree_pkg.ensure() + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/rundeck_acl_policy.py b/plugins/modules/rundeck_acl_policy.py new file mode 100644 index 0000000000..e93363cea2 --- /dev/null +++ b/plugins/modules/rundeck_acl_policy.py @@ -0,0 +1,243 @@ +#!/usr/bin/python + +# Copyright (c) 2017, Loic Blot +# Sponsored by Infopro Digital. http://www.infopro-digital.com/ +# Sponsored by E.T.A.I. http://www.etai.fr/ +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + + +DOCUMENTATION = r""" +module: rundeck_acl_policy + +short_description: Manage Rundeck ACL policies +description: + - Create, update and remove Rundeck ACL policies through HTTP API. +author: "Loic Blot (@nerzhul)" +attributes: + check_mode: + support: full + diff_mode: + support: none +options: + state: + type: str + description: + - Create or remove Rundeck project. + choices: ['present', 'absent'] + default: 'present' + name: + type: str + description: + - Sets the project name. + required: true + api_token: + description: + - Sets the token to authenticate against Rundeck API. + aliases: ["token"] + project: + type: str + description: + - Sets the project which receive the ACL policy. + - If unset, it is a system ACL policy. 
+ policy: + type: str + description: + - Sets the ACL policy content. + - ACL policy content is a YAML object as described in U(http://rundeck.org/docs/man5/aclpolicy.html). + - It can be a YAML string or a pure Ansible inventory YAML object. + client_cert: + version_added: '0.2.0' + client_key: + version_added: '0.2.0' + force: + version_added: '0.2.0' + force_basic_auth: + version_added: '0.2.0' + http_agent: + version_added: '0.2.0' + url_password: + version_added: '0.2.0' + url_username: + version_added: '0.2.0' + use_proxy: + version_added: '0.2.0' + validate_certs: + version_added: '0.2.0' +extends_documentation_fragment: + - ansible.builtin.url + - community.general.attributes + - community.general.rundeck +""" + +EXAMPLES = r""" +- name: Create or update a rundeck ACL policy in project Ansible + community.general.rundeck_acl_policy: + name: "Project_01" + api_version: 18 + url: "https://rundeck.example.org" + token: "mytoken" + state: present + project: "Ansible" + policy: + description: "my policy" + context: + application: rundeck + for: + project: + - allow: read + by: + group: "build" + +- name: Remove a rundeck system policy + community.general.rundeck_acl_policy: + name: "Project_01" + url: "https://rundeck.example.org" + token: "mytoken" + state: absent +""" + +RETURN = r""" +rundeck_response: + description: Rundeck response when a failure occurs. + returned: failed + type: str +before: + description: Dictionary containing ACL policy information before modification. + returned: success + type: dict +after: + description: Dictionary containing ACL policy information after modification. 
+ returned: success + type: dict +""" + +# import module snippets +import re + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.rundeck import ( + api_argument_spec, + api_request, +) + + +class RundeckACLManager: + def __init__(self, module): + self.module = module + if module.params.get("project"): + self.endpoint = "project/%s/acl/%s.aclpolicy" % ( + self.module.params["project"], + self.module.params["name"], + ) + else: + self.endpoint = "system/acl/%s.aclpolicy" % self.module.params["name"] + + def get_acl(self): + resp, info = api_request( + module=self.module, + endpoint=self.endpoint, + ) + + return resp + + def create_or_update_acl(self): + facts = self.get_acl() + if facts is None: + # If in check mode don't create project, simulate a fake project creation + if self.module.check_mode: + self.module.exit_json(changed=True, before={}, after=self.module.params["policy"]) + + resp, info = api_request( + module=self.module, + endpoint=self.endpoint, + method="POST", + data={"contents": self.module.params["policy"]}, + ) + + if info["status"] == 201: + self.module.exit_json(changed=True, before={}, after=self.get_acl()) + elif info["status"] == 400: + self.module.fail_json(msg="Unable to validate acl %s. 
Please ensure it is a valid ACL" % + self.module.params["name"]) + elif info["status"] == 409: + self.module.fail_json(msg="ACL %s already exists" % self.module.params["name"]) + else: + self.module.fail_json(msg="Unhandled HTTP status %d, please report the bug" % info["status"], + before={}, after=self.get_acl()) + else: + if facts["contents"] == self.module.params["policy"]: + self.module.exit_json(changed=False, before=facts, after=facts) + + if self.module.check_mode: + self.module.exit_json(changed=True, before=facts, after=facts) + + resp, info = api_request( + module=self.module, + endpoint=self.endpoint, + method="PUT", + data={"contents": self.module.params["policy"]}, + ) + + if info["status"] == 200: + self.module.exit_json(changed=True, before=facts, after=self.get_acl()) + elif info["status"] == 400: + self.module.fail_json(msg="Unable to validate acl %s. Please ensure it is a valid ACL" % + self.module.params["name"]) + elif info["status"] == 404: + self.module.fail_json(msg="ACL %s doesn't exists. Cannot update." 
% self.module.params["name"]) + + def remove_acl(self): + facts = self.get_acl() + + if facts is None: + self.module.exit_json(changed=False, before={}, after={}) + else: + # If not in check mode, remove the project + if not self.module.check_mode: + api_request( + module=self.module, + endpoint=self.endpoint, + method="DELETE", + ) + + self.module.exit_json(changed=True, before=facts, after={}) + + +def main(): + # Also allow the user to set values for fetch_url + argument_spec = api_argument_spec() + argument_spec.update(dict( + state=dict(type='str', choices=['present', 'absent'], default='present'), + name=dict(required=True, type='str'), + policy=dict(type='str'), + project=dict(type='str'), + )) + + argument_spec['api_token']['aliases'] = ['token'] + + module = AnsibleModule( + argument_spec=argument_spec, + required_if=[ + ['state', 'present', ['policy']], + ], + supports_check_mode=True, + ) + + if not bool(re.match("[a-zA-Z0-9,.+_-]+", module.params["name"])): + module.fail_json(msg="Name contains forbidden characters. 
The policy can contain the characters: a-zA-Z0-9,.+_-") + + if module.params["api_version"] < 14: + module.fail_json(msg="API version should be at least 14") + + rundeck = RundeckACLManager(module) + if module.params['state'] == 'present': + rundeck.create_or_update_acl() + elif module.params['state'] == 'absent': + rundeck.remove_acl() + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/rundeck_job_executions_info.py b/plugins/modules/rundeck_job_executions_info.py new file mode 100644 index 0000000000..4c4bd85d09 --- /dev/null +++ b/plugins/modules/rundeck_job_executions_info.py @@ -0,0 +1,191 @@ +#!/usr/bin/python + +# Copyright (c) 2021, Phillipe Smith +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + + +DOCUMENTATION = r""" +module: rundeck_job_executions_info +short_description: Query executions for a Rundeck job +description: + - This module gets the list of executions for a specified Rundeck job. +author: "Phillipe Smith (@phsmith)" +version_added: 3.8.0 +options: + job_id: + type: str + description: + - The job unique ID. + required: true + status: + type: str + description: + - The job status to filter. + choices: [succeeded, failed, aborted, running] + max: + type: int + description: + - Max results to return. + default: 20 + offset: + type: int + description: + - The start point to return the results. 
+    default: 0
+extends_documentation_fragment:
+  - community.general.rundeck
+  - ansible.builtin.url
+  - community.general.attributes
+  - community.general.attributes.info_module
+"""
+
+EXAMPLES = r"""
+- name: Get Rundeck job executions info
+  community.general.rundeck_job_executions_info:
+    url: "https://rundeck.example.org"
+    api_version: 39
+    api_token: "mytoken"
+    job_id: "xxxxxxxxxxxxxxxxx"
+  register: rundeck_job_executions_info
+
+- name: Show Rundeck job executions info
+  ansible.builtin.debug:
+    var: rundeck_job_executions_info.executions
+"""
+
+RETURN = r"""
+paging:
+  description: Results pagination info.
+  returned: success
+  type: dict
+  contains:
+    count:
+      description: Number of results in the response.
+      type: int
+      returned: success
+    total:
+      description: Total number of results.
+      type: int
+      returned: success
+    offset:
+      description: Offset from first of all results.
+      type: int
+      returned: success
+    max:
+      description: Maximum number of results per page.
+      type: int
+      returned: success
+  sample:
+    {
+      "count": 20,
+      "total": 100,
+      "offset": 0,
+      "max": 20
+    }
+executions:
+  description: Job executions list.
+ returned: always + type: list + elements: dict + sample: + [ + { + "id": 1, + "href": "https://rundeck.example.org/api/39/execution/1", + "permalink": "https://rundeck.example.org/project/myproject/execution/show/1", + "status": "succeeded", + "project": "myproject", + "executionType": "user", + "user": "admin", + "date-started": { + "unixtime": 1633525515026, + "date": "2021-10-06T13:05:15Z" + }, + "date-ended": { + "unixtime": 1633525518386, + "date": "2021-10-06T13:05:18Z" + }, + "job": { + "id": "697af0c4-72d3-4c15-86a3-b5bfe3c6cb6a", + "averageDuration": 6381, + "name": "Test", + "group": "", + "project": "myproject", + "description": "", + "options": { + "exit_code": "0" + }, + "href": "https://rundeck.example.org/api/39/job/697af0c4-72d3-4c15-86a3-b5bfe3c6cb6a", + "permalink": "https://rundeck.example.org/project/myproject/job/show/697af0c4-72d3-4c15-86a3-b5bfe3c6cb6a" + }, + "description": "Plugin[com.batix.rundeck.plugins.AnsiblePlaybookInlineWorkflowStep, nodeStep: false]", + "argstring": "-exit_code 0", + "serverUUID": "5b9a1438-fa3a-457e-b254-8f3d70338068" + } + ] +""" + +from urllib.parse import quote +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.rundeck import ( + api_argument_spec, + api_request +) + + +class RundeckJobExecutionsInfo(object): + def __init__(self, module): + self.module = module + self.url = self.module.params["url"] + self.api_version = self.module.params["api_version"] + self.job_id = self.module.params["job_id"] + self.offset = self.module.params["offset"] + self.max = self.module.params["max"] + self.status = self.module.params["status"] or "" + + def job_executions(self): + response, info = api_request( + module=self.module, + endpoint="job/%s/executions?offset=%s&max=%s&status=%s" + % (quote(self.job_id), self.offset, self.max, self.status), + method="GET" + ) + + if info["status"] != 200: + self.module.fail_json( + msg=info["msg"], + executions=response 
+ ) + + self.module.exit_json(msg="Executions info result", **response) + + +def main(): + argument_spec = api_argument_spec() + argument_spec.update(dict( + job_id=dict(required=True, type="str"), + offset=dict(type="int", default=0), + max=dict(type="int", default=20), + status=dict( + type="str", + choices=["succeeded", "failed", "aborted", "running"] + ) + )) + + module = AnsibleModule( + argument_spec=argument_spec, + supports_check_mode=True + ) + + if module.params["api_version"] < 14: + module.fail_json(msg="API version should be at least 14") + + rundeck = RundeckJobExecutionsInfo(module) + rundeck.job_executions() + + +if __name__ == "__main__": + main() diff --git a/plugins/modules/web_infrastructure/rundeck_job_run.py b/plugins/modules/rundeck_job_run.py similarity index 63% rename from plugins/modules/web_infrastructure/rundeck_job_run.py rename to plugins/modules/rundeck_job_run.py index 1a591ad15f..768e67967a 100644 --- a/plugins/modules/web_infrastructure/rundeck_job_run.py +++ b/plugins/modules/rundeck_job_run.py @@ -1,76 +1,80 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- -# Copyright: (c) 2021, Phillipe Smith -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# Copyright (c) 2021, Phillipe Smith +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: rundeck_job_run short_description: Run a Rundeck job description: - - This module runs a Rundeck job specified by ID. + - This module runs a Rundeck job specified by ID. author: "Phillipe Smith (@phsmith)" version_added: 3.8.0 +attributes: + check_mode: + support: none + diff_mode: + support: none options: - job_id: - type: str - description: - - The job unique ID. 
- required: true - job_options: - type: dict - description: - - The job options for the steps. - - Numeric values must be quoted. - filter_nodes: - type: str - description: - - Filter the nodes where the jobs must run. - - See U(https://docs.rundeck.com/docs/manual/11-node-filters.html#node-filter-syntax). - run_at_time: - type: str - description: - - Schedule the job execution to run at specific date and time. - - ISO-8601 date and time format like C(2021-10-05T15:45:00-03:00). - loglevel: - type: str - description: - - Log level configuration. - choices: [debug, verbose, info, warn, error] - default: info - wait_execution: - type: bool - description: - - Wait until the job finished the execution. - default: true - wait_execution_delay: - type: int - description: - - Delay, in seconds, between job execution status check requests. - default: 5 - wait_execution_timeout: - type: int - description: - - Job execution wait timeout in seconds. - - If the timeout is reached, the job will be aborted. - - Keep in mind that there is a sleep based on I(wait_execution_delay) after each job status check. - default: 120 - abort_on_timeout: - type: bool - description: - - Send a job abort request if exceeded the I(wait_execution_timeout) specified. - default: false + job_id: + type: str + description: + - The job unique ID. + required: true + job_options: + type: dict + description: + - The job options for the steps. + - Numeric values must be quoted. + filter_nodes: + type: str + description: + - Filter the nodes where the jobs must run. + - See U(https://docs.rundeck.com/docs/manual/11-node-filters.html#node-filter-syntax). + run_at_time: + type: str + description: + - Schedule the job execution to run at specific date and time. + - ISO-8601 date and time format like V(2021-10-05T15:45:00-03:00). + loglevel: + type: str + description: + - Log level configuration. 
+ choices: [debug, verbose, info, warn, error] + default: info + wait_execution: + type: bool + description: + - Wait until the job finished the execution. + default: true + wait_execution_delay: + type: int + description: + - Delay, in seconds, between job execution status check requests. + default: 5 + wait_execution_timeout: + type: int + description: + - Job execution wait timeout in seconds. + - If the timeout is reached, the job is aborted. + - Keep in mind that there is a sleep based on O(wait_execution_delay) after each job status check. + default: 120 + abort_on_timeout: + type: bool + description: + - Send a job abort request if exceeded the O(wait_execution_timeout) specified. + default: false extends_documentation_fragment: - community.general.rundeck - - url -''' + - ansible.builtin.url + - community.general.attributes +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: Run a Rundeck job community.general.rundeck_job_run: url: "https://rundeck.example.org" @@ -90,9 +94,9 @@ EXAMPLES = ''' api_token: "mytoken" job_id: "xxxxxxxxxxxxxxxxx" job_options: - option_1: "value_1" - option_2: "value_3" - option_3: "value_3" + option_1: "value_1" + option_2: "value_3" + option_3: "value_3" register: rundeck_job_run - name: Run a Rundeck job with timeout, delay between status check and abort on timeout @@ -123,63 +127,62 @@ EXAMPLES = ''' job_id: "xxxxxxxxxxxxxxxxx" wait_execution: false register: rundeck_job_run -''' +""" -RETURN = ''' +RETURN = r""" execution_info: - description: Rundeck job execution metadata. 
- returned: always - type: dict - sample: { - "msg": "Job execution succeeded!", - "execution_info": { - "id": 1, - "href": "https://rundeck.example.org/api/39/execution/1", - "permalink": "https://rundeck.example.org/project/myproject/execution/show/1", - "status": "succeeded", - "project": "myproject", - "executionType": "user", - "user": "admin", - "date-started": { - "unixtime": 1633449020784, - "date": "2021-10-05T15:50:20Z" - }, - "date-ended": { - "unixtime": 1633449026358, - "date": "2021-10-05T15:50:26Z" - }, - "job": { - "id": "697af0c4-72d3-4c15-86a3-b5bfe3c6cb6a", - "averageDuration": 4917, - "name": "Test", - "group": "", - "project": "myproject", - "description": "", - "options": { - "exit_code": "0" - }, - "href": "https://rundeck.example.org/api/39/job/697af0c4-72d3-4c15-86a3-b5bfe3c6cb6a", - "permalink": "https://rundeck.example.org/project/myproject/job/show/697af0c4-72d3-4c15-86a3-b5bfe3c6cb6a" - }, - "description": "sleep 5 && echo 'Test!' && exit ${option.exit_code}", - "argstring": "-exit_code 0", - "serverUUID": "5b9a1438-fa3a-457e-b254-8f3d70338068", - "successfulNodes": [ - "localhost" - ], - "output": "Test!" - } + description: Rundeck job execution metadata. 
+ returned: always + type: dict + sample: + { + "msg": "Job execution succeeded!", + "execution_info": { + "id": 1, + "href": "https://rundeck.example.org/api/39/execution/1", + "permalink": "https://rundeck.example.org/project/myproject/execution/show/1", + "status": "succeeded", + "project": "myproject", + "executionType": "user", + "user": "admin", + "date-started": { + "unixtime": 1633449020784, + "date": "2021-10-05T15:50:20Z" + }, + "date-ended": { + "unixtime": 1633449026358, + "date": "2021-10-05T15:50:26Z" + }, + "job": { + "id": "697af0c4-72d3-4c15-86a3-b5bfe3c6cb6a", + "averageDuration": 4917, + "name": "Test", + "group": "", + "project": "myproject", + "description": "", + "options": { + "exit_code": "0" + }, + "href": "https://rundeck.example.org/api/39/job/697af0c4-72d3-4c15-86a3-b5bfe3c6cb6a", + "permalink": "https://rundeck.example.org/project/myproject/job/show/697af0c4-72d3-4c15-86a3-b5bfe3c6cb6a" + }, + "description": "sleep 5 && echo 'Test!' && exit ${option.exit_code}", + "argstring": "-exit_code 0", + "serverUUID": "5b9a1438-fa3a-457e-b254-8f3d70338068", + "successfulNodes": [ + "localhost" + ], + "output": "Test!" + } } -''' +""" # Modules import -import json from datetime import datetime, timedelta from time import sleep +from urllib.parse import quote from ansible.module_utils.basic import AnsibleModule -from ansible.module_utils.common.text.converters import to_native -from ansible.module_utils.six.moves.urllib.parse import quote from ansible_collections.community.general.plugins.module_utils.rundeck import ( api_argument_spec, api_request diff --git a/plugins/modules/rundeck_project.py b/plugins/modules/rundeck_project.py new file mode 100644 index 0000000000..47db41a744 --- /dev/null +++ b/plugins/modules/rundeck_project.py @@ -0,0 +1,194 @@ +#!/usr/bin/python + +# Ansible module to manage rundeck projects +# Copyright (c) 2017, Loic Blot +# Sponsored by Infopro Digital. http://www.infopro-digital.com/ +# Sponsored by E.T.A.I. 
http://www.etai.fr/ +# +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + + +DOCUMENTATION = r""" +module: rundeck_project + +short_description: Manage Rundeck projects +description: + - Create and remove Rundeck projects through HTTP API. +author: "Loic Blot (@nerzhul)" +attributes: + check_mode: + support: full + diff_mode: + support: none +options: + state: + type: str + description: + - Create or remove Rundeck project. + choices: ['present', 'absent'] + default: 'present' + name: + type: str + description: + - Sets the project name. + required: true + api_token: + description: + - Sets the token to authenticate against Rundeck API. + aliases: ["token"] + client_cert: + version_added: '0.2.0' + client_key: + version_added: '0.2.0' + force: + version_added: '0.2.0' + force_basic_auth: + version_added: '0.2.0' + http_agent: + version_added: '0.2.0' + url_password: + version_added: '0.2.0' + url_username: + version_added: '0.2.0' + use_proxy: + version_added: '0.2.0' + validate_certs: + version_added: '0.2.0' +extends_documentation_fragment: + - ansible.builtin.url + - community.general.attributes + - community.general.rundeck +""" + +EXAMPLES = r""" +- name: Create a rundeck project + community.general.rundeck_project: + name: "Project_01" + label: "Project 01" + description: "My Project 01" + url: "https://rundeck.example.org" + api_version: 39 + api_token: "mytoken" + state: present + +- name: Remove a rundeck project + community.general.rundeck_project: + name: "Project_01" + url: "https://rundeck.example.org" + api_token: "mytoken" + state: absent +""" + +RETURN = r""" +rundeck_response: + description: Rundeck response when a failure occurs. + returned: failed + type: str +before: + description: Dictionary containing project information before modification. 
+ returned: success + type: dict +after: + description: Dictionary containing project information after modification. + returned: success + type: dict +""" + +# import module snippets +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.rundeck import ( + api_argument_spec, + api_request, +) + + +class RundeckProjectManager(object): + def __init__(self, module): + self.module = module + + def get_project_facts(self): + resp, info = api_request( + module=self.module, + endpoint="project/%s" % self.module.params["name"], + ) + + return resp + + def create_or_update_project(self): + facts = self.get_project_facts() + + if facts is None: + # If in check mode don't create project, simulate a fake project creation + if self.module.check_mode: + self.module.exit_json( + changed=True, + before={}, + after={ + "name": self.module.params["name"] + }, + ) + + resp, info = api_request( + module=self.module, + endpoint="projects", + method="POST", + data={ + "name": self.module.params["name"], + "config": {}, + } + ) + + if info["status"] == 201: + self.module.exit_json(changed=True, before={}, after=self.get_project_facts()) + else: + self.module.fail_json(msg="Unhandled HTTP status %d, please report the bug" % info["status"], + before={}, after=self.get_project_facts()) + else: + self.module.exit_json(changed=False, before=facts, after=facts) + + def remove_project(self): + facts = self.get_project_facts() + if facts is None: + self.module.exit_json(changed=False, before={}, after={}) + else: + # If not in check mode, remove the project + if not self.module.check_mode: + api_request( + module=self.module, + endpoint="project/%s" % self.module.params["name"], + method="DELETE", + ) + + self.module.exit_json(changed=True, before=facts, after={}) + + +def main(): + # Also allow the user to set values for fetch_url + argument_spec = api_argument_spec() + argument_spec.update(dict( + state=dict(type='str', 
choices=['present', 'absent'], default='present'), + name=dict(required=True, type='str'), + )) + + argument_spec['api_token']['aliases'] = ['token'] + + module = AnsibleModule( + argument_spec=argument_spec, + supports_check_mode=True + ) + + if module.params["api_version"] < 14: + module.fail_json(msg="API version should be at least 14") + + rundeck = RundeckProjectManager(module) + if module.params['state'] == 'present': + rundeck.create_or_update_project() + elif module.params['state'] == 'absent': + rundeck.remove_project() + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/system/runit.py b/plugins/modules/runit.py similarity index 80% rename from plugins/modules/system/runit.py rename to plugins/modules/runit.py index 811248317c..d5acba36d3 100644 --- a/plugins/modules/system/runit.py +++ b/plugins/modules/runit.py @@ -1,53 +1,56 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- -# Copyright: (c) 2015, Brian Coca -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# Copyright (c) 2015, Brian Coca +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = r''' ---- +DOCUMENTATION = r""" module: runit author: -- James Sumners (@jsumners) + - James Sumners (@jsumners) short_description: Manage runit services description: - - Controls runit services on remote hosts using the sv utility. + - Controls runit services on remote hosts using the sv utility. +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: none options: - name: - description: - - Name of the service to manage. 
- type: str - required: yes - state: - description: - - C(started)/C(stopped) are idempotent actions that will not run - commands unless necessary. C(restarted) will always bounce the - service (sv restart) and C(killed) will always bounce the service (sv force-stop). - C(reloaded) will send a HUP (sv reload). - C(once) will run a normally downed sv once (sv once), not really - an idempotent operation. - type: str - choices: [ killed, once, reloaded, restarted, started, stopped ] - enabled: - description: - - Whether the service is enabled or not, if disabled it also implies stopped. - type: bool - service_dir: - description: - - directory runsv watches for services - type: str - default: /var/service - service_src: - description: - - directory where services are defined, the source of symlinks to service_dir. - type: str - default: /etc/sv -''' + name: + description: + - Name of the service to manage. + type: str + required: true + state: + description: + - V(started)/V(stopped) are idempotent actions that do not run commands unless necessary. + - V(restarted) always bounces the service (sv restart) and V(killed) always bounces the service (sv force-stop). + - V(reloaded) always sends a HUP (sv reload). + - V(once) runs a normally downed sv once (sv once), not really an idempotent operation. + type: str + choices: [killed, once, reloaded, restarted, started, stopped] + enabled: + description: + - Whether the service is enabled or not, if disabled it also implies stopped. + type: bool + service_dir: + description: + - Directory runsv watches for services. + type: str + default: /var/service + service_src: + description: + - Directory where services are defined, the source of symlinks to O(service_dir). 
+ type: str + default: /etc/sv +""" -EXAMPLES = r''' +EXAMPLES = r""" - name: Start sv dnscache, if not running community.general.runit: name: dnscache @@ -78,7 +81,7 @@ EXAMPLES = r''' name: dnscache state: reloaded service_dir: /run/service -''' +""" import os import re diff --git a/plugins/modules/notification/say.py b/plugins/modules/say.py similarity index 57% rename from plugins/modules/notification/say.py rename to plugins/modules/say.py index 1c66adf66e..84dc65a840 100644 --- a/plugins/modules/notification/say.py +++ b/plugins/modules/say.py @@ -1,47 +1,53 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- -# (c) 2013, Michael DeHaan -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# Copyright (c) 2013, Michael DeHaan +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: say -short_description: Makes a computer to speak. +short_description: Makes a computer to speak description: - - makes a computer speak! Amuse your friends, annoy your coworkers! + - Makes a computer speak! Amuse your friends, annoy your coworkers! notes: - - In 2.5, this module has been renamed from C(osx_say) to M(community.general.say). - - If you like this module, you may also be interested in the osx_say callback plugin. - - A list of available voices, with language, can be found by running C(say -v ?) on a OSX host and C(espeak --voices) on a Linux host. + - In 2.5, this module has been renamed from C(osx_say) to M(community.general.say). + - If you like this module, you may also be interested in the osx_say callback plugin. + - A list of available voices, with language, can be found by running C(say -v ?) on a OSX host and C(espeak --voices) on + a Linux host. 
+extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: none options: msg: type: str description: - What to say + - What to say. required: true voice: type: str description: - What voice to use + - What voice to use. required: false -requirements: [ say or espeak or espeak-ng ] +requirements: [say or espeak or espeak-ng] author: - - "Ansible Core Team" - - "Michael DeHaan (@mpdehaan)" -''' + - "Ansible Core Team" + - "Michael DeHaan (@mpdehaan)" +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: Makes a computer to speak community.general.say: msg: '{{ inventory_hostname }} is all done' voice: Zarvox delegate_to: localhost -''' +""" import platform from ansible.module_utils.basic import AnsibleModule @@ -59,7 +65,7 @@ def main(): module = AnsibleModule( argument_spec=dict( msg=dict(required=True), - voice=dict(required=False), + voice=dict(), ), supports_check_mode=True ) diff --git a/plugins/modules/cloud/scaleway/scaleway_compute.py b/plugins/modules/scaleway_compute.py similarity index 91% rename from plugins/modules/cloud/scaleway/scaleway_compute.py rename to plugins/modules/scaleway_compute.py index a195d7fb93..4cc23f9571 100644 --- a/plugins/modules/cloud/scaleway/scaleway_compute.py +++ b/plugins/modules/scaleway_compute.py @@ -1,73 +1,78 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- # # Scaleway Compute management module # # Copyright (C) 2018 Online SAS. 
# https://www.scaleway.com # -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import absolute_import, division, print_function +from __future__ import annotations -__metaclass__ = type -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: scaleway_compute short_description: Scaleway compute management module author: Remy Leone (@remyleone) description: - - "This module manages compute instances on Scaleway." + - This module manages compute instances on Scaleway. extends_documentation_fragment: -- community.general.scaleway + - community.general.scaleway + - community.general.attributes + - community.general.scaleway.actiongroup_scaleway +attributes: + check_mode: + support: full + diff_mode: + support: none + action_group: + version_added: 11.3.0 options: public_ip: type: str description: - - Manage public IP on a Scaleway server - - Could be Scaleway IP address UUID - - C(dynamic) Means that IP is destroyed at the same time the host is destroyed - - C(absent) Means no public IP at all + - Manage public IP on a Scaleway server. + - Could be Scaleway IP address UUID. + - V(dynamic) Means that IP is destroyed at the same time the host is destroyed. + - V(absent) Means no public IP at all. default: absent enable_ipv6: description: - - Enable public IPv6 connectivity on the instance + - Enable public IPv6 connectivity on the instance. default: false type: bool image: type: str description: - - Image identifier used to start the instance with + - Image identifier used to start the instance with. required: true name: type: str description: - - Name of the instance - + - Name of the instance. organization: type: str description: - Organization identifier. - - Exactly one of I(project) and I(organization) must be specified. 
- + - Exactly one of O(project) and O(organization) must be specified. project: type: str description: - Project identifier. - - Exactly one of I(project) and I(organization) must be specified. + - Exactly one of O(project) and O(organization) must be specified. version_added: 4.3.0 state: type: str description: - - Indicate desired state of the instance. + - Indicate desired state of the instance. default: present choices: - present @@ -80,60 +85,65 @@ options: type: list elements: str description: - - List of tags to apply to the instance (5 max) + - List of tags to apply to the instance (5 max). required: false default: [] region: type: str description: - - Scaleway compute zone + - Scaleway compute zone. required: true choices: - ams1 - EMEA-NL-EVS + - ams2 + - ams3 - par1 - EMEA-FR-PAR1 - par2 - EMEA-FR-PAR2 + - par3 - waw1 - EMEA-PL-WAW1 + - waw2 + - waw3 commercial_type: type: str description: - - Commercial name of the compute node + - Commercial name of the compute node. required: true wait: description: - - Wait for the instance to reach its desired state before returning. + - Wait for the instance to reach its desired state before returning. type: bool - default: 'no' + default: false wait_timeout: type: int description: - - Time to wait for the server to reach the expected state + - Time to wait for the server to reach the expected state. required: false default: 300 wait_sleep_time: type: int description: - - Time to wait before every attempt to check the state of the server + - Time to wait before every attempt to check the state of the server. required: false default: 3 security_group: type: str description: - - Security group unique identifier - - If no value provided, the default security group or current security group will be used + - Security group unique identifier. + - If no value provided, the default security group or current security group is used. 
required: false -''' +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: Create a server community.general.scaleway_compute: name: foobar @@ -167,16 +177,16 @@ EXAMPLES = ''' project: 951df375-e094-4d26-97c1-ba548eeb9c42 region: ams1 commercial_type: VC1S -''' +""" -RETURN = ''' -''' +RETURN = r""" +""" import datetime import time from ansible.module_utils.basic import AnsibleModule -from ansible.module_utils.six.moves.urllib.parse import quote as urlquote +from ansible_collections.community.general.plugins.module_utils.datetime import now from ansible_collections.community.general.plugins.module_utils.scaleway import SCALEWAY_LOCATION, scaleway_argument_spec, Scaleway SCALEWAY_SERVER_STATES = ( @@ -229,9 +239,9 @@ def wait_to_complete_state_transition(compute_api, server, wait=None): wait_timeout = compute_api.module.params["wait_timeout"] wait_sleep_time = compute_api.module.params["wait_sleep_time"] - start = datetime.datetime.utcnow() + start = now() end = start + datetime.timedelta(seconds=wait_timeout) - while datetime.datetime.utcnow() < end: + while now() < end: compute_api.module.debug("We are going to wait for the server to finish its transition") if fetch_state(compute_api, server) not in SCALEWAY_TRANSITIONS_STATES: compute_api.module.debug("It seems that the server is not in transition anymore.") @@ -579,9 +589,11 @@ def server_attributes_should_be_changed(compute_api, target_server, wished_serve compute_api.module.debug("Checking if server attributes should be changed") compute_api.module.debug("Current Server: %s" % target_server) compute_api.module.debug("Wished Server: %s" % wished_server) - debug_dict = dict((x, (target_server[x], wished_server[x])) - for x in PATCH_MUTABLE_SERVER_ATTRIBUTES - if x in target_server and x in wished_server) + debug_dict = { + x: (target_server[x], wished_server[x]) + for x in PATCH_MUTABLE_SERVER_ATTRIBUTES + if x in target_server and x in wished_server + } compute_api.module.debug("Debug dict %s" % debug_dict) try: for 
key in PATCH_MUTABLE_SERVER_ATTRIBUTES: @@ -607,7 +619,7 @@ def server_change_attributes(compute_api, target_server, wished_server): # When you are working with dict, only ID matter as we ask user to put only the resource ID in the playbook if isinstance(target_server[key], dict) and "id" in target_server[key] and wished_server[key]: # Setting all key to current value except ID - key_dict = dict((x, target_server[key][x]) for x in target_server[key].keys() if x != "id") + key_dict = {x: target_server[key][x] for x in target_server[key].keys() if x != "id"} # Setting ID to the user specified ID key_dict["id"] = wished_server[key] patch_payload[key] = key_dict diff --git a/plugins/modules/scaleway_compute_private_network.py b/plugins/modules/scaleway_compute_private_network.py new file mode 100644 index 0000000000..33be950f22 --- /dev/null +++ b/plugins/modules/scaleway_compute_private_network.py @@ -0,0 +1,220 @@ +#!/usr/bin/python +# +# Scaleway VPC management module +# +# Copyright (c) Ansible project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + + +DOCUMENTATION = r""" +module: scaleway_compute_private_network +short_description: Scaleway compute - private network management +version_added: 5.2.0 +author: Pascal MANGIN (@pastral) +description: + - This module add or remove a private network to a compute instance (U(https://developer.scaleway.com)). +extends_documentation_fragment: + - community.general.scaleway + - community.general.attributes + - community.general.scaleway.actiongroup_scaleway + +attributes: + check_mode: + support: full + diff_mode: + support: none + action_group: + version_added: 11.3.0 + +options: + state: + type: str + description: + - Indicate desired state of the VPC. + default: present + choices: + - present + - absent + + project: + type: str + description: + - Project identifier. 
+ required: true + + region: + type: str + description: + - Scaleway region to use (for example V(par1)). + required: true + choices: + - ams1 + - EMEA-NL-EVS + - ams2 + - ams3 + - par1 + - EMEA-FR-PAR1 + - par2 + - EMEA-FR-PAR2 + - par3 + - waw1 + - EMEA-PL-WAW1 + - waw2 + - waw3 + + compute_id: + type: str + description: + - ID of the compute instance (see M(community.general.scaleway_compute)). + required: true + + private_network_id: + type: str + description: + - ID of the private network (see M(community.general.scaleway_private_network)). + required: true +""" + +EXAMPLES = r""" +- name: Plug a VM to a private network + community.general.scaleway_compute_private_network: + project: '{{ scw_project }}' + state: present + region: par1 + compute_id: "12345678-f1e6-40ec-83e5-12345d67ed89" + private_network_id: "22345678-f1e6-40ec-83e5-12345d67ed89" + register: nicsvpc_creation_task + +- name: Unplug a VM from a private network + community.general.scaleway_compute_private_network: + project: '{{ scw_project }}' + state: absent + region: par1 + compute_id: "12345678-f1e6-40ec-83e5-12345d67ed89" + private_network_id: "22345678-f1e6-40ec-83e5-12345d67ed89" +""" + +RETURN = r""" +scaleway_compute_private_network: + description: Information on the VPC. 
+ returned: success when O(state=present) + type: dict + sample: + { + "created_at": "2022-01-15T11:11:12.676445Z", + "id": "12345678-f1e6-40ec-83e5-12345d67ed89", + "name": "network", + "organization_id": "a123b4cd-ef5g-678h-90i1-jk2345678l90", + "project_id": "a123b4cd-ef5g-678h-90i1-jk2345678l90", + "tags": [ + "tag1", + "tag2", + "tag3", + "tag4", + "tag5" + ], + "updated_at": "2022-01-15T11:12:04.624837Z", + "zone": "fr-par-2" + } +""" + +from ansible_collections.community.general.plugins.module_utils.scaleway import SCALEWAY_LOCATION, scaleway_argument_spec, Scaleway +from ansible.module_utils.basic import AnsibleModule + + +def get_nics_info(api, compute_id, private_network_id): + + response = api.get('servers/' + compute_id + '/private_nics') + if not response.ok: + msg = "Error during get servers information: %s: '%s' (%s)" % (response.info['msg'], response.json['message'], response.json) + api.module.fail_json(msg=msg) + + i = 0 + list_nics = response.json['private_nics'] + + while i < len(list_nics): + if list_nics[i]['private_network_id'] == private_network_id: + return list_nics[i] + i += 1 + + return None + + +def present_strategy(api, compute_id, private_network_id): + + changed = False + nic = get_nics_info(api, compute_id, private_network_id) + if nic is not None: + return changed, nic + + data = {"private_network_id": private_network_id} + changed = True + if api.module.check_mode: + return changed, {"status": "a private network would be add to a server"} + + response = api.post(path='servers/' + compute_id + '/private_nics', data=data) + + if not response.ok: + api.module.fail_json(msg='Error when adding a private network to a server [{0}: {1}]'.format(response.status_code, response.json)) + + return changed, response.json + + +def absent_strategy(api, compute_id, private_network_id): + + changed = False + nic = get_nics_info(api, compute_id, private_network_id) + if nic is None: + return changed, {} + + changed = True + if api.module.check_mode: 
+ return changed, {"status": "private network would be destroyed"} + + response = api.delete('servers/' + compute_id + '/private_nics/' + nic['id']) + + if not response.ok: + api.module.fail_json(msg='Error deleting private network from server [{0}: {1}]'.format( + response.status_code, response.json)) + + return changed, response.json + + +def core(module): + + compute_id = module.params['compute_id'] + pn_id = module.params['private_network_id'] + + region = module.params["region"] + module.params['api_url'] = SCALEWAY_LOCATION[region]["api_endpoint"] + + api = Scaleway(module=module) + if module.params["state"] == "absent": + changed, summary = absent_strategy(api=api, compute_id=compute_id, private_network_id=pn_id) + else: + changed, summary = present_strategy(api=api, compute_id=compute_id, private_network_id=pn_id) + module.exit_json(changed=changed, scaleway_compute_private_network=summary) + + +def main(): + argument_spec = scaleway_argument_spec() + argument_spec.update(dict( + state=dict(default='present', choices=['absent', 'present']), + project=dict(required=True), + region=dict(required=True, choices=list(SCALEWAY_LOCATION.keys())), + compute_id=dict(required=True), + private_network_id=dict(required=True) + )) + module = AnsibleModule( + argument_spec=argument_spec, + supports_check_mode=True, + ) + + core(module) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/scaleway_container.py b/plugins/modules/scaleway_container.py new file mode 100644 index 0000000000..2281c555c1 --- /dev/null +++ b/plugins/modules/scaleway_container.py @@ -0,0 +1,424 @@ +#!/usr/bin/python +# +# Scaleway Serverless container management module +# +# Copyright (c) 2022, Guillaume MARTINEZ +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + + +DOCUMENTATION = r""" +module: scaleway_container 
+short_description: Scaleway Container management +version_added: 6.0.0 +author: Guillaume MARTINEZ (@Lunik) +description: + - This module manages container on Scaleway account. +extends_documentation_fragment: + - community.general.scaleway + - community.general.scaleway_waitable_resource + - community.general.attributes + - community.general.scaleway.actiongroup_scaleway +requirements: + - passlib[argon2] >= 1.7.4 + +attributes: + check_mode: + support: full + diff_mode: + support: none + action_group: + version_added: 11.3.0 + +options: + state: + type: str + description: + - Indicate desired state of the container. + default: present + choices: + - present + - absent + + namespace_id: + type: str + description: + - Container namespace identifier. + required: true + + region: + type: str + description: + - Scaleway region to use (for example V(fr-par)). + required: true + choices: + - fr-par + - nl-ams + - pl-waw + + name: + type: str + description: + - Name of the container namespace. + required: true + + description: + description: + - Description of the container namespace. + type: str + default: '' + + min_scale: + description: + - Minimum number of replicas for the container. + type: int + + max_scale: + description: + - Maximum number of replicas for the container. + type: int + + environment_variables: + description: + - Environment variables of the container namespace. + - Injected in container at runtime. + type: dict + default: {} + + secret_environment_variables: + description: + - Secret environment variables of the container namespace. + - Updating those values does not output a C(changed) state in Ansible. + - Injected in container at runtime. + type: dict + default: {} + + cpu_limit: + description: + - Resources define performance characteristics of your container. + - They are allocated to your container at runtime. + - Unit is 1/1000 of a VCPU. 
+ type: int + version_added: 11.3.0 + + memory_limit: + description: + - Resources define performance characteristics of your container. + - They are allocated to your container at runtime. + - Unit is MB of memory. + type: int + + container_timeout: + description: + - The length of time your handler can spend processing a request before being stopped. + type: str + + privacy: + description: + - Privacy policies define whether a container can be executed anonymously. + - Choose V(public) to enable anonymous execution, or V(private) to protect your container with an authentication mechanism + provided by the Scaleway API. + type: str + default: public + choices: + - public + - private + + registry_image: + description: + - The name of image used for the container. + type: str + required: true + + max_concurrency: + description: + - Maximum number of connections per container. + - This parameter is used to trigger autoscaling. + type: int + + protocol: + description: + - Communication protocol of the container. + type: str + default: http1 + choices: + - http1 + - h2c + + port: + description: + - Listen port used to expose the container. + type: int + + redeploy: + description: + - Redeploy the container if update is required. + type: bool + default: false +""" + +EXAMPLES = r""" +- name: Create a container + community.general.scaleway_container: + namespace_id: '{{ scw_container_namespace }}' + state: present + region: fr-par + name: my-awesome-container + registry_image: rg.fr-par.scw.cloud/funcscwtestrgy2f9zw/nginx:latest + environment_variables: + MY_VAR: my_value + secret_environment_variables: + MY_SECRET_VAR: my_secret_value + register: container_creation_task + +- name: Make sure container is deleted + community.general.scaleway_container: + namespace_id: '{{ scw_container_namespace }}' + state: absent + region: fr-par + name: my-awesome-container +""" + +RETURN = r""" +container: + description: The container information. 
+ returned: when O(state=present) + type: dict + sample: + cpu_limit: 140 + description: Container used for testing scaleway_container ansible module + domain_name: cnansibletestgfogtjod-cn-ansible-test.functions.fnc.fr-par.scw.cloud + environment_variables: + MY_VAR: my_value + error_message: null + http_option: "" + id: c9070eb0-d7a4-48dd-9af3-4fb139890721 + max_concurrency: 50 + max_scale: 5 + memory_limit: 256 + min_scale: 0 + name: cn-ansible-test + namespace_id: 75e299f1-d1e5-4e6b-bc6e-4fb51cfe1e69 + port: 80 + privacy: public + protocol: http1 + region: fr-par + registry_image: rg.fr-par.scw.cloud/namespace-ansible-ci/nginx:latest + secret_environment_variables: + - key: MY_SECRET_VAR + value: $argon2id$v=19$m=65536,t=1,p=2$tb6UwSPWx/rH5Vyxt9Ujfw$5ZlvaIjWwNDPxD9Rdght3NarJz4IETKjpvAU3mMSmFg + status: created + timeout: 300s +""" + +from copy import deepcopy + +from ansible_collections.community.general.plugins.module_utils.scaleway import ( + SCALEWAY_REGIONS, scaleway_argument_spec, Scaleway, + scaleway_waitable_resource_argument_spec, resource_attributes_should_be_changed, + SecretVariables +) +from ansible.module_utils.basic import AnsibleModule + +STABLE_STATES = ( + "ready", + "created", + "absent" +) + +MUTABLE_ATTRIBUTES = ( + "description", + "min_scale", + "max_scale", + "environment_variables", + "cpu_limit", + "memory_limit", + "timeout", + "privacy", + "registry_image", + "max_concurrency", + "protocol", + "port", + "secret_environment_variables" +) + + +def payload_from_wished_cn(wished_cn): + payload = { + "namespace_id": wished_cn["namespace_id"], + "name": wished_cn["name"], + "description": wished_cn["description"], + "min_scale": wished_cn["min_scale"], + "max_scale": wished_cn["max_scale"], + "environment_variables": wished_cn["environment_variables"], + "secret_environment_variables": SecretVariables.dict_to_list(wished_cn["secret_environment_variables"]), + "cpu_limit": wished_cn["cpu_limit"], + "memory_limit": wished_cn["memory_limit"], 
+ "timeout": wished_cn["timeout"], + "privacy": wished_cn["privacy"], + "registry_image": wished_cn["registry_image"], + "max_concurrency": wished_cn["max_concurrency"], + "protocol": wished_cn["protocol"], + "port": wished_cn["port"], + "redeploy": wished_cn["redeploy"] + } + + return payload + + +def absent_strategy(api, wished_cn): + changed = False + + cn_list = api.fetch_all_resources("containers") + cn_lookup = {cn["name"]: cn for cn in cn_list} + + if wished_cn["name"] not in cn_lookup: + return changed, {} + + target_cn = cn_lookup[wished_cn["name"]] + changed = True + if api.module.check_mode: + return changed, {"status": "Container would be destroyed"} + + api.wait_to_complete_state_transition(resource=target_cn, stable_states=STABLE_STATES, force_wait=True) + response = api.delete(path=api.api_path + "/%s" % target_cn["id"]) + if not response.ok: + api.module.fail_json(msg='Error deleting container [{0}: {1}]'.format( + response.status_code, response.json)) + + api.wait_to_complete_state_transition(resource=target_cn, stable_states=STABLE_STATES) + return changed, response.json + + +def present_strategy(api, wished_cn): + changed = False + + cn_list = api.fetch_all_resources("containers") + cn_lookup = {cn["name"]: cn for cn in cn_list} + + payload_cn = payload_from_wished_cn(wished_cn) + + if wished_cn["name"] not in cn_lookup: + changed = True + if api.module.check_mode: + return changed, {"status": "A container would be created."} + + # Creation doesn't support `redeploy` parameter + del payload_cn["redeploy"] + + # Create container + api.warn(payload_cn) + creation_response = api.post(path=api.api_path, + data=payload_cn) + + if not creation_response.ok: + msg = "Error during container creation: %s: '%s' (%s)" % (creation_response.info['msg'], + creation_response.json['message'], + creation_response.json) + api.module.fail_json(msg=msg) + + api.wait_to_complete_state_transition(resource=creation_response.json, stable_states=STABLE_STATES) + response 
= api.get(path=api.api_path + "/%s" % creation_response.json["id"]) + return changed, response.json + + target_cn = cn_lookup[wished_cn["name"]] + decoded_target_cn = deepcopy(target_cn) + decoded_target_cn["secret_environment_variables"] = SecretVariables.decode(decoded_target_cn["secret_environment_variables"], + payload_cn["secret_environment_variables"]) + patch_payload = resource_attributes_should_be_changed(target=decoded_target_cn, + wished=payload_cn, + verifiable_mutable_attributes=MUTABLE_ATTRIBUTES, + mutable_attributes=MUTABLE_ATTRIBUTES) + + if not patch_payload: + return changed, target_cn + + changed = True + if api.module.check_mode: + return changed, {"status": "Container attributes would be changed."} + + cn_patch_response = api.patch(path=api.api_path + "/%s" % target_cn["id"], + data=patch_payload) + + if not cn_patch_response.ok: + api.module.fail_json(msg='Error during container attributes update: [{0}: {1}]'.format( + cn_patch_response.status_code, cn_patch_response.json['message'])) + + api.wait_to_complete_state_transition(resource=target_cn, stable_states=STABLE_STATES) + response = api.get(path=api.api_path + "/%s" % target_cn["id"]) + return changed, response.json + + +state_strategy = { + "present": present_strategy, + "absent": absent_strategy +} + + +def core(module): + SecretVariables.ensure_scaleway_secret_package(module) + + region = module.params["region"] + wished_container = { + "state": module.params["state"], + "namespace_id": module.params["namespace_id"], + "name": module.params["name"], + "description": module.params['description'], + "min_scale": module.params["min_scale"], + "max_scale": module.params["max_scale"], + "environment_variables": module.params['environment_variables'], + "secret_environment_variables": module.params['secret_environment_variables'], + "cpu_limit": module.params["cpu_limit"], + "memory_limit": module.params["memory_limit"], + "timeout": module.params["container_timeout"], + "privacy": 
module.params["privacy"], + "registry_image": module.params["registry_image"], + "max_concurrency": module.params["max_concurrency"], + "protocol": module.params["protocol"], + "port": module.params["port"], + "redeploy": module.params["redeploy"] + } + + api = Scaleway(module=module) + api.api_path = "containers/v1beta1/regions/%s/containers" % region + + changed, summary = state_strategy[wished_container["state"]](api=api, wished_cn=wished_container) + + module.exit_json(changed=changed, container=summary) + + +def main(): + argument_spec = scaleway_argument_spec() + argument_spec.update(scaleway_waitable_resource_argument_spec()) + argument_spec.update(dict( + state=dict(type='str', default='present', choices=['absent', 'present']), + namespace_id=dict(type='str', required=True), + region=dict(type='str', required=True, choices=SCALEWAY_REGIONS), + name=dict(type='str', required=True), + description=dict(type='str', default=''), + min_scale=dict(type='int'), + max_scale=dict(type='int'), + cpu_limit=dict(type='int'), + memory_limit=dict(type='int'), + container_timeout=dict(type='str'), + privacy=dict(type='str', default='public', choices=['public', 'private']), + registry_image=dict(type='str', required=True), + max_concurrency=dict(type='int'), + protocol=dict(type='str', default='http1', choices=['http1', 'h2c']), + port=dict(type='int'), + redeploy=dict(type='bool', default=False), + environment_variables=dict(type='dict', default={}), + secret_environment_variables=dict(type='dict', default={}, no_log=True) + )) + module = AnsibleModule( + argument_spec=argument_spec, + supports_check_mode=True, + ) + + core(module) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/scaleway_container_info.py b/plugins/modules/scaleway_container_info.py new file mode 100644 index 0000000000..fd729bf336 --- /dev/null +++ b/plugins/modules/scaleway_container_info.py @@ -0,0 +1,153 @@ +#!/usr/bin/python +# +# Scaleway Serverless container info module +# +# 
Copyright (c) 2022, Guillaume MARTINEZ +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + + +DOCUMENTATION = r""" +module: scaleway_container_info +short_description: Retrieve information on Scaleway Container +version_added: 6.0.0 +author: Guillaume MARTINEZ (@Lunik) +description: + - This module return information about a container on Scaleway account. +extends_documentation_fragment: + - community.general.scaleway + - community.general.attributes + - community.general.scaleway.actiongroup_scaleway + - community.general.attributes.info_module + +attributes: + action_group: + version_added: 11.3.0 + +options: + namespace_id: + type: str + description: + - Container namespace identifier. + required: true + + region: + type: str + description: + - Scaleway region to use (for example C(fr-par)). + required: true + choices: + - fr-par + - nl-ams + - pl-waw + + name: + type: str + description: + - Name of the container. + required: true +""" + +EXAMPLES = r""" +- name: Get a container info + community.general.scaleway_container_info: + namespace_id: '{{ scw_container_namespace }}' + region: fr-par + name: my-awesome-container + register: container_info_task +""" + +RETURN = r""" +container: + description: The container information. 
+ returned: always + type: dict + sample: + cpu_limit: 140 + description: Container used for testing scaleway_container ansible module + domain_name: cnansibletestgfogtjod-cn-ansible-test.functions.fnc.fr-par.scw.cloud + environment_variables: + MY_VAR: my_value + error_message: null + http_option: "" + id: c9070eb0-d7a4-48dd-9af3-4fb139890721 + max_concurrency: 50 + max_scale: 5 + memory_limit: 256 + min_scale: 0 + name: cn-ansible-test + namespace_id: 75e299f1-d1e5-4e6b-bc6e-4fb51cfe1e69 + port: 80 + privacy: public + protocol: http1 + region: fr-par + registry_image: rg.fr-par.scw.cloud/namespace-ansible-ci/nginx:latest + secret_environment_variables: + - key: MY_SECRET_VAR + value: $argon2id$v=19$m=65536,t=1,p=2$tb6UwSPWx/rH5Vyxt9Ujfw$5ZlvaIjWwNDPxD9Rdght3NarJz4IETKjpvAU3mMSmFg + status: created + timeout: 300s +""" + +from ansible_collections.community.general.plugins.module_utils.scaleway import ( + SCALEWAY_REGIONS, scaleway_argument_spec, Scaleway, +) +from ansible.module_utils.basic import AnsibleModule + + +def info_strategy(api, wished_cn): + cn_list = api.fetch_all_resources("containers") + cn_lookup = {cn["name"]: cn for cn in cn_list} + + if wished_cn["name"] not in cn_lookup: + msg = "Error during container lookup: Unable to find container named '%s' in namespace '%s'" % (wished_cn["name"], + wished_cn["namespace_id"]) + + api.module.fail_json(msg=msg) + + target_cn = cn_lookup[wished_cn["name"]] + + response = api.get(path=api.api_path + "/%s" % target_cn["id"]) + if not response.ok: + msg = "Error during container lookup: %s: '%s' (%s)" % (response.info['msg'], + response.json['message'], + response.json) + api.module.fail_json(msg=msg) + + return response.json + + +def core(module): + region = module.params["region"] + wished_container = { + "namespace_id": module.params["namespace_id"], + "name": module.params["name"] + } + + api = Scaleway(module=module) + api.api_path = "containers/v1beta1/regions/%s/containers" % region + + summary = 
info_strategy(api=api, wished_cn=wished_container) + + module.exit_json(changed=False, container=summary) + + +def main(): + argument_spec = scaleway_argument_spec() + argument_spec.update(dict( + namespace_id=dict(type='str', required=True), + region=dict(type='str', required=True, choices=SCALEWAY_REGIONS), + name=dict(type='str', required=True) + )) + module = AnsibleModule( + argument_spec=argument_spec, + supports_check_mode=True, + ) + + core(module) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/scaleway_container_namespace.py b/plugins/modules/scaleway_container_namespace.py new file mode 100644 index 0000000000..2d76f75315 --- /dev/null +++ b/plugins/modules/scaleway_container_namespace.py @@ -0,0 +1,294 @@ +#!/usr/bin/python +# +# Scaleway Serverless container namespace management module +# +# Copyright (c) 2022, Guillaume MARTINEZ +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + + +DOCUMENTATION = r""" +module: scaleway_container_namespace +short_description: Scaleway Container namespace management +version_added: 6.0.0 +author: Guillaume MARTINEZ (@Lunik) +description: + - This module manages container namespaces on Scaleway account. +extends_documentation_fragment: + - community.general.scaleway + - community.general.scaleway_waitable_resource + - community.general.attributes + - community.general.scaleway.actiongroup_scaleway +requirements: + - passlib[argon2] >= 1.7.4 + +attributes: + check_mode: + support: full + diff_mode: + support: none + action_group: + version_added: 11.3.0 + +options: + state: + type: str + description: + - Indicate desired state of the container namespace. + default: present + choices: + - present + - absent + + project_id: + type: str + description: + - Project identifier. 
+ required: true + + region: + type: str + description: + - Scaleway region to use (for example V(fr-par)). + required: true + choices: + - fr-par + - nl-ams + - pl-waw + + name: + type: str + description: + - Name of the container namespace. + required: true + + description: + description: + - Description of the container namespace. + type: str + default: '' + + environment_variables: + description: + - Environment variables of the container namespace. + - Injected in containers at runtime. + type: dict + default: {} + + secret_environment_variables: + description: + - Secret environment variables of the container namespace. + - Updating those values does not output a C(changed) state in Ansible. + - Injected in containers at runtime. + type: dict + default: {} +""" + +EXAMPLES = r""" +- name: Create a container namespace + community.general.scaleway_container_namespace: + project_id: '{{ scw_project }}' + state: present + region: fr-par + name: my-awesome-container-namespace + environment_variables: + MY_VAR: my_value + secret_environment_variables: + MY_SECRET_VAR: my_secret_value + register: container_namespace_creation_task + +- name: Make sure container namespace is deleted + community.general.scaleway_container_namespace: + project_id: '{{ scw_project }}' + state: absent + region: fr-par + name: my-awesome-container-namespace +""" + +RETURN = r""" +container_namespace: + description: The container namespace information. 
+ returned: when O(state=present) + type: dict + sample: + description: "" + environment_variables: + MY_VAR: my_value + error_message: null + id: 531a1fd7-98d2-4a74-ad77-d398324304b8 + name: my-awesome-container-namespace + organization_id: e04e3bdc-015c-4514-afde-9389e9be24b0 + project_id: d44cea58-dcb7-4c95-bff1-1105acb60a98 + region: fr-par + registry_endpoint: "" + registry_namespace_id: "" + secret_environment_variables: + - key: MY_SECRET_VAR + value: $argon2id$v=19$m=65536,t=1,p=2$tb6UwSPWx/rH5Vyxt9Ujfw$5ZlvaIjWwNDPxD9Rdght3NarJz4IETKjpvAU3mMSmFg + status: pending +""" + +from copy import deepcopy + +from ansible_collections.community.general.plugins.module_utils.scaleway import ( + SCALEWAY_REGIONS, scaleway_argument_spec, Scaleway, + scaleway_waitable_resource_argument_spec, + resource_attributes_should_be_changed, SecretVariables +) +from ansible.module_utils.basic import AnsibleModule + +STABLE_STATES = ( + "ready", + "absent" +) + +MUTABLE_ATTRIBUTES = ( + "description", + "environment_variables", + "secret_environment_variables" +) + + +def payload_from_wished_cn(wished_cn): + payload = { + "project_id": wished_cn["project_id"], + "name": wished_cn["name"], + "description": wished_cn["description"], + "environment_variables": wished_cn["environment_variables"], + "secret_environment_variables": SecretVariables.dict_to_list(wished_cn["secret_environment_variables"]) + } + + return payload + + +def absent_strategy(api, wished_cn): + changed = False + + cn_list = api.fetch_all_resources("namespaces") + cn_lookup = {cn["name"]: cn for cn in cn_list} + + if wished_cn["name"] not in cn_lookup: + return changed, {} + + target_cn = cn_lookup[wished_cn["name"]] + changed = True + if api.module.check_mode: + return changed, {"status": "Container namespace would be destroyed"} + + api.wait_to_complete_state_transition(resource=target_cn, stable_states=STABLE_STATES, force_wait=True) + response = api.delete(path=api.api_path + "/%s" % target_cn["id"]) + if not 
response.ok: + api.module.fail_json(msg='Error deleting container namespace [{0}: {1}]'.format( + response.status_code, response.json)) + + api.wait_to_complete_state_transition(resource=target_cn, stable_states=STABLE_STATES) + return changed, response.json + + +def present_strategy(api, wished_cn): + changed = False + + cn_list = api.fetch_all_resources("namespaces") + cn_lookup = {cn["name"]: cn for cn in cn_list} + + payload_cn = payload_from_wished_cn(wished_cn) + + if wished_cn["name"] not in cn_lookup: + changed = True + if api.module.check_mode: + return changed, {"status": "A container namespace would be created."} + + # Create container namespace + api.warn(payload_cn) + creation_response = api.post(path=api.api_path, + data=payload_cn) + + if not creation_response.ok: + msg = "Error during container namespace creation: %s: '%s' (%s)" % (creation_response.info['msg'], + creation_response.json['message'], + creation_response.json) + api.module.fail_json(msg=msg) + + api.wait_to_complete_state_transition(resource=creation_response.json, stable_states=STABLE_STATES) + response = api.get(path=api.api_path + "/%s" % creation_response.json["id"]) + return changed, response.json + + target_cn = cn_lookup[wished_cn["name"]] + decoded_target_cn = deepcopy(target_cn) + decoded_target_cn["secret_environment_variables"] = SecretVariables.decode(decoded_target_cn["secret_environment_variables"], + payload_cn["secret_environment_variables"]) + patch_payload = resource_attributes_should_be_changed(target=decoded_target_cn, + wished=payload_cn, + verifiable_mutable_attributes=MUTABLE_ATTRIBUTES, + mutable_attributes=MUTABLE_ATTRIBUTES) + + if not patch_payload: + return changed, target_cn + + changed = True + if api.module.check_mode: + return changed, {"status": "Container namespace attributes would be changed."} + + cn_patch_response = api.patch(path=api.api_path + "/%s" % target_cn["id"], + data=patch_payload) + + if not cn_patch_response.ok: + 
api.module.fail_json(msg='Error during container namespace attributes update: [{0}: {1}]'.format( + cn_patch_response.status_code, cn_patch_response.json['message'])) + + api.wait_to_complete_state_transition(resource=target_cn, stable_states=STABLE_STATES) + response = api.get(path=api.api_path + "/%s" % target_cn["id"]) + return changed, cn_patch_response.json + + +state_strategy = { + "present": present_strategy, + "absent": absent_strategy +} + + +def core(module): + SecretVariables.ensure_scaleway_secret_package(module) + + region = module.params["region"] + wished_container_namespace = { + "state": module.params["state"], + "project_id": module.params["project_id"], + "name": module.params["name"], + "description": module.params['description'], + "environment_variables": module.params['environment_variables'], + "secret_environment_variables": module.params['secret_environment_variables'] + } + + api = Scaleway(module=module) + api.api_path = "containers/v1beta1/regions/%s/namespaces" % region + + changed, summary = state_strategy[wished_container_namespace["state"]](api=api, wished_cn=wished_container_namespace) + + module.exit_json(changed=changed, container_namespace=summary) + + +def main(): + argument_spec = scaleway_argument_spec() + argument_spec.update(scaleway_waitable_resource_argument_spec()) + argument_spec.update(dict( + state=dict(type='str', default='present', choices=['absent', 'present']), + project_id=dict(type='str', required=True), + region=dict(type='str', required=True, choices=SCALEWAY_REGIONS), + name=dict(type='str', required=True), + description=dict(type='str', default=''), + environment_variables=dict(type='dict', default={}), + secret_environment_variables=dict(type='dict', default={}, no_log=True) + )) + module = AnsibleModule( + argument_spec=argument_spec, + supports_check_mode=True, + ) + + core(module) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/scaleway_container_namespace_info.py 
b/plugins/modules/scaleway_container_namespace_info.py new file mode 100644 index 0000000000..efd1b3b816 --- /dev/null +++ b/plugins/modules/scaleway_container_namespace_info.py @@ -0,0 +1,144 @@ +#!/usr/bin/python +# +# Scaleway Serverless container namespace info module +# +# Copyright (c) 2022, Guillaume MARTINEZ +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + + +DOCUMENTATION = r""" +module: scaleway_container_namespace_info +short_description: Retrieve information on Scaleway Container namespace +version_added: 6.0.0 +author: Guillaume MARTINEZ (@Lunik) +description: + - This module returns information about a container namespace on Scaleway account. +extends_documentation_fragment: + - community.general.scaleway + - community.general.attributes + - community.general.scaleway.actiongroup_scaleway + - community.general.attributes.info_module + +attributes: + action_group: + version_added: 11.3.0 + +options: + project_id: + type: str + description: + - Project identifier. + required: true + + region: + type: str + description: + - Scaleway region to use (for example V(fr-par)). + required: true + choices: + - fr-par + - nl-ams + - pl-waw + + name: + type: str + description: + - Name of the container namespace. + required: true +""" + +EXAMPLES = r""" +- name: Get a container namespace info + community.general.scaleway_container_namespace_info: + project_id: '{{ scw_project }}' + region: fr-par + name: my-awesome-container-namespace + register: container_namespace_info_task +""" + +RETURN = r""" +container_namespace: + description: The container namespace information. 
+ returned: always + type: dict + sample: + description: "" + environment_variables: + MY_VAR: my_value + error_message: + id: 531a1fd7-98d2-4a74-ad77-d398324304b8 + name: my-awesome-container-namespace + organization_id: e04e3bdc-015c-4514-afde-9389e9be24b0 + project_id: d44cea58-dcb7-4c95-bff1-1105acb60a98 + region: fr-par + registry_endpoint: "" + registry_namespace_id: "" + secret_environment_variables: + - key: MY_SECRET_VAR + value: $argon2id$v=19$m=65536,t=1,p=2$tb6UwSPWx/rH5Vyxt9Ujfw$5ZlvaIjWwNDPxD9Rdght3NarJz4IETKjpvAU3mMSmFg + status: pending +""" + +from ansible_collections.community.general.plugins.module_utils.scaleway import ( + SCALEWAY_REGIONS, scaleway_argument_spec, Scaleway, +) +from ansible.module_utils.basic import AnsibleModule + + +def info_strategy(api, wished_cn): + cn_list = api.fetch_all_resources("namespaces") + cn_lookup = {cn["name"]: cn for cn in cn_list} + + if wished_cn["name"] not in cn_lookup: + msg = "Error during container namespace lookup: Unable to find container namespace named '%s' in project '%s'" % (wished_cn["name"], + wished_cn["project_id"]) + + api.module.fail_json(msg=msg) + + target_cn = cn_lookup[wished_cn["name"]] + + response = api.get(path=api.api_path + "/%s" % target_cn["id"]) + if not response.ok: + msg = "Error during container namespace lookup: %s: '%s' (%s)" % (response.info['msg'], + response.json['message'], + response.json) + api.module.fail_json(msg=msg) + + return response.json + + +def core(module): + region = module.params["region"] + wished_container_namespace = { + "project_id": module.params["project_id"], + "name": module.params["name"] + } + + api = Scaleway(module=module) + api.api_path = "containers/v1beta1/regions/%s/namespaces" % region + + summary = info_strategy(api=api, wished_cn=wished_container_namespace) + + module.exit_json(changed=False, container_namespace=summary) + + +def main(): + argument_spec = scaleway_argument_spec() + argument_spec.update(dict( + project_id=dict(type='str', 
required=True), + region=dict(type='str', required=True, choices=SCALEWAY_REGIONS), + name=dict(type='str', required=True) + )) + module = AnsibleModule( + argument_spec=argument_spec, + supports_check_mode=True, + ) + + core(module) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/scaleway_container_registry.py b/plugins/modules/scaleway_container_registry.py new file mode 100644 index 0000000000..179b9b5ff7 --- /dev/null +++ b/plugins/modules/scaleway_container_registry.py @@ -0,0 +1,270 @@ +#!/usr/bin/python +# +# Scaleway Container registry management module +# +# Copyright (c) 2022, Guillaume MARTINEZ +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + + +DOCUMENTATION = r""" +module: scaleway_container_registry +short_description: Scaleway Container registry management module +version_added: 5.8.0 +author: Guillaume MARTINEZ (@Lunik) +description: + - This module manages container registries on Scaleway account. +extends_documentation_fragment: + - community.general.scaleway + - community.general.scaleway_waitable_resource + - community.general.attributes + - community.general.scaleway.actiongroup_scaleway + +attributes: + check_mode: + support: full + diff_mode: + support: none + action_group: + version_added: 11.3.0 + +options: + state: + type: str + description: + - Indicate desired state of the container registry. + default: present + choices: + - present + - absent + + project_id: + type: str + description: + - Project identifier. + required: true + + region: + type: str + description: + - Scaleway region to use (for example V(fr-par)). + required: true + choices: + - fr-par + - nl-ams + - pl-waw + + name: + type: str + description: + - Name of the container registry. + required: true + + description: + description: + - Description of the container registry. 
+ type: str + default: '' + + privacy_policy: + type: str + description: + - Default visibility policy. + - Everyone can pull images from a V(public) registry. + choices: + - public + - private + default: private +""" + +EXAMPLES = r""" +- name: Create a container registry + community.general.scaleway_container_registry: + project_id: '{{ scw_project }}' + state: present + region: fr-par + name: my-awesome-container-registry + register: container_registry_creation_task + +- name: Make sure container registry is deleted + community.general.scaleway_container_registry: + project_id: '{{ scw_project }}' + state: absent + region: fr-par + name: my-awesome-container-registry +""" + +RETURN = r""" +container_registry: + description: The container registry information. + returned: when O(state=present) + type: dict + sample: + created_at: "2022-10-14T09:51:07.949716Z" + description: Managed by Ansible + endpoint: rg.fr-par.scw.cloud/my-awesome-registry + id: 0d7d5270-7864-49c2-920b-9fd6731f3589 + image_count: 0 + is_public: false + name: my-awesome-registry + organization_id: 10697b59-5c34-4d24-8d15-9ff2d3b89f58 + project_id: 3da4f0b2-06be-4773-8ec4-5dfa435381be + region: fr-par + size: 0 + status: ready + status_message: "" + updated_at: "2022-10-14T09:51:07.949716Z" +""" + +from ansible_collections.community.general.plugins.module_utils.scaleway import ( + SCALEWAY_REGIONS, scaleway_argument_spec, Scaleway, + scaleway_waitable_resource_argument_spec, resource_attributes_should_be_changed +) +from ansible.module_utils.basic import AnsibleModule + +STABLE_STATES = ( + "ready", + "absent" +) + +MUTABLE_ATTRIBUTES = ( + "description", + "is_public" +) + + +def payload_from_wished_cr(wished_cr): + payload = { + "project_id": wished_cr["project_id"], + "name": wished_cr["name"], + "description": wished_cr["description"], + "is_public": wished_cr["privacy_policy"] == "public" + } + + return payload + + +def absent_strategy(api, wished_cr): + changed = False + + cr_list = 
api.fetch_all_resources("namespaces") + cr_lookup = {cr["name"]: cr for cr in cr_list} + + if wished_cr["name"] not in cr_lookup: + return changed, {} + + target_cr = cr_lookup[wished_cr["name"]] + changed = True + if api.module.check_mode: + return changed, {"status": "Container registry would be destroyed"} + + api.wait_to_complete_state_transition(resource=target_cr, stable_states=STABLE_STATES, force_wait=True) + response = api.delete(path=api.api_path + "/%s" % target_cr["id"]) + if not response.ok: + api.module.fail_json(msg='Error deleting container registry [{0}: {1}]'.format( + response.status_code, response.json)) + + api.wait_to_complete_state_transition(resource=target_cr, stable_states=STABLE_STATES) + return changed, response.json + + +def present_strategy(api, wished_cr): + changed = False + + cr_list = api.fetch_all_resources("namespaces") + cr_lookup = {cr["name"]: cr for cr in cr_list} + + payload_cr = payload_from_wished_cr(wished_cr) + + if wished_cr["name"] not in cr_lookup: + changed = True + if api.module.check_mode: + return changed, {"status": "A container registry would be created."} + + # Create container registry + api.warn(payload_cr) + creation_response = api.post(path=api.api_path, + data=payload_cr) + + if not creation_response.ok: + msg = "Error during container registry creation: %s: '%s' (%s)" % (creation_response.info['msg'], + creation_response.json['message'], + creation_response.json) + api.module.fail_json(msg=msg) + + api.wait_to_complete_state_transition(resource=creation_response.json, stable_states=STABLE_STATES) + response = api.get(path=api.api_path + "/%s" % creation_response.json["id"]) + return changed, response.json + + target_cr = cr_lookup[wished_cr["name"]] + patch_payload = resource_attributes_should_be_changed(target=target_cr, + wished=payload_cr, + verifiable_mutable_attributes=MUTABLE_ATTRIBUTES, + mutable_attributes=MUTABLE_ATTRIBUTES) + + if not patch_payload: + return changed, target_cr + + changed = True 
+ if api.module.check_mode: + return changed, {"status": "Container registry attributes would be changed."} + + cr_patch_response = api.patch(path=api.api_path + "/%s" % target_cr["id"], + data=patch_payload) + + if not cr_patch_response.ok: + api.module.fail_json(msg='Error during container registry attributes update: [{0}: {1}]'.format( + cr_patch_response.status_code, cr_patch_response.json['message'])) + + api.wait_to_complete_state_transition(resource=target_cr, stable_states=STABLE_STATES) + response = api.get(path=api.api_path + "/%s" % target_cr["id"]) + return changed, response.json + + +state_strategy = { + "present": present_strategy, + "absent": absent_strategy +} + + +def core(module): + region = module.params["region"] + wished_container_registry = { + "state": module.params["state"], + "project_id": module.params["project_id"], + "name": module.params["name"], + "description": module.params['description'], + "privacy_policy": module.params['privacy_policy'] + } + + api = Scaleway(module=module) + api.api_path = "registry/v1/regions/%s/namespaces" % region + + changed, summary = state_strategy[wished_container_registry["state"]](api=api, wished_cr=wished_container_registry) + + module.exit_json(changed=changed, container_registry=summary) + + +def main(): + argument_spec = scaleway_argument_spec() + argument_spec.update(scaleway_waitable_resource_argument_spec()) + argument_spec.update(dict( + state=dict(type='str', default='present', choices=['absent', 'present']), + project_id=dict(type='str', required=True), + region=dict(type='str', required=True, choices=SCALEWAY_REGIONS), + name=dict(type='str', required=True), + description=dict(type='str', default=''), + privacy_policy=dict(type='str', default='private', choices=['public', 'private']) + )) + module = AnsibleModule( + argument_spec=argument_spec, + supports_check_mode=True, + ) + + core(module) + + +if __name__ == '__main__': + main() diff --git 
a/plugins/modules/scaleway_container_registry_info.py b/plugins/modules/scaleway_container_registry_info.py new file mode 100644 index 0000000000..6dac97234b --- /dev/null +++ b/plugins/modules/scaleway_container_registry_info.py @@ -0,0 +1,143 @@ +#!/usr/bin/python +# +# Scaleway Serverless container registry info module +# +# Copyright (c) 2022, Guillaume MARTINEZ +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + + +DOCUMENTATION = r""" +module: scaleway_container_registry_info +short_description: Scaleway Container registry info module +version_added: 5.8.0 +author: Guillaume MARTINEZ (@Lunik) +description: + - This module returns information about a container registry on Scaleway account. +extends_documentation_fragment: + - community.general.scaleway + - community.general.attributes + - community.general.scaleway.actiongroup_scaleway + - community.general.attributes.info_module + +attributes: + action_group: + version_added: 11.3.0 + +options: + project_id: + type: str + description: + - Project identifier. + required: true + + region: + type: str + description: + - Scaleway region to use (for example V(fr-par)). + required: true + choices: + - fr-par + - nl-ams + - pl-waw + + name: + type: str + description: + - Name of the container registry. + required: true +""" + +EXAMPLES = r""" +- name: Get a container registry info + community.general.scaleway_container_registry_info: + project_id: '{{ scw_project }}' + region: fr-par + name: my-awesome-container-registry + register: container_registry_info_task +""" + +RETURN = r""" +container_registry: + description: The container registry information. 
+ returned: always + type: dict + sample: + created_at: "2022-10-14T09:51:07.949716Z" + description: Managed by Ansible + endpoint: rg.fr-par.scw.cloud/my-awesome-registry + id: 0d7d5270-7864-49c2-920b-9fd6731f3589 + image_count: 0 + is_public: false + name: my-awesome-registry + organization_id: 10697b59-5c34-4d24-8d15-9ff2d3b89f58 + project_id: 3da4f0b2-06be-4773-8ec4-5dfa435381be + region: fr-par + size: 0 + status: ready + status_message: "" + updated_at: "2022-10-14T09:51:07.949716Z" +""" + +from ansible_collections.community.general.plugins.module_utils.scaleway import ( + SCALEWAY_REGIONS, scaleway_argument_spec, Scaleway, +) +from ansible.module_utils.basic import AnsibleModule + + +def info_strategy(api, wished_cn): + cn_list = api.fetch_all_resources("namespaces") + cn_lookup = {cn["name"]: cn for cn in cn_list} + + if wished_cn["name"] not in cn_lookup: + msg = "Error during container registries lookup: Unable to find container registry named '%s' in project '%s'" % (wished_cn["name"], + wished_cn["project_id"]) + + api.module.fail_json(msg=msg) + + target_cn = cn_lookup[wished_cn["name"]] + + response = api.get(path=api.api_path + "/%s" % target_cn["id"]) + if not response.ok: + msg = "Error during container registry lookup: %s: '%s' (%s)" % (response.info['msg'], + response.json['message'], + response.json) + api.module.fail_json(msg=msg) + + return response.json + + +def core(module): + region = module.params["region"] + wished_container_namespace = { + "project_id": module.params["project_id"], + "name": module.params["name"] + } + + api = Scaleway(module=module) + api.api_path = "registry/v1/regions/%s/namespaces" % region + + summary = info_strategy(api=api, wished_cn=wished_container_namespace) + + module.exit_json(changed=False, container_registry=summary) + + +def main(): + argument_spec = scaleway_argument_spec() + argument_spec.update(dict( + project_id=dict(type='str', required=True), + region=dict(type='str', required=True, 
choices=SCALEWAY_REGIONS), + name=dict(type='str', required=True) + )) + module = AnsibleModule( + argument_spec=argument_spec, + supports_check_mode=True, + ) + + core(module) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/cloud/scaleway/scaleway_database_backup.py b/plugins/modules/scaleway_database_backup.py similarity index 69% rename from plugins/modules/cloud/scaleway/scaleway_database_backup.py rename to plugins/modules/scaleway_database_backup.py index 35f35f820a..33497c41a9 100644 --- a/plugins/modules/cloud/scaleway/scaleway_database_backup.py +++ b/plugins/modules/scaleway_database_backup.py @@ -1,168 +1,180 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- # # Scaleway database backups management module # # Copyright (C) 2020 Guillaume Rodriguez (g.rodriguez@opendecide.com). # -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import absolute_import, division, print_function +from __future__ import annotations -__metaclass__ = type -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: scaleway_database_backup short_description: Scaleway database backups management module version_added: 1.2.0 author: Guillaume Rodriguez (@guillaume_ro_fr) description: - - This module manages database backups on Scaleway account U(https://developer.scaleway.com). + - This module manages database backups on Scaleway account U(https://developer.scaleway.com). extends_documentation_fragment: - - community.general.scaleway + - community.general.scaleway + - community.general.attributes + - community.general.scaleway.actiongroup_scaleway +attributes: + check_mode: + support: full + diff_mode: + support: none + action_group: + version_added: 11.3.0 + options: state: description: - - Indicate desired state of the database backup. 
- - C(present) creates a backup. - - C(absent) deletes the backup. - - C(exported) creates a download link for the backup. - - C(restored) restores the backup to a new database. + - Indicate desired state of the database backup. + - V(present) creates a backup. + - V(absent) deletes the backup. + - V(exported) creates a download link for the backup. + - V(restored) restores the backup to a new database. type: str default: present choices: - - present - - absent - - exported - - restored + - present + - absent + - exported + - restored region: description: - - Scaleway region to use (for example C(fr-par)). + - Scaleway region to use (for example V(fr-par)). type: str required: true choices: - - fr-par - - nl-ams - - pl-waw + - fr-par + - nl-ams + - pl-waw id: description: - - UUID used to identify the database backup. - - Required for C(absent), C(exported) and C(restored) states. + - UUID used to identify the database backup. + - Required for V(absent), V(exported) and V(restored) states. type: str name: description: - - Name used to identify the database backup. - - Required for C(present) state. - - Ignored when C(state=absent), C(state=exported) or C(state=restored). + - Name used to identify the database backup. + - Required for V(present) state. + - Ignored when O(state=absent), O(state=exported) or O(state=restored). type: str required: false database_name: description: - - Name used to identify the database. - - Required for C(present) and C(restored) states. - - Ignored when C(state=absent) or C(state=exported). + - Name used to identify the database. + - Required for V(present) and V(restored) states. + - Ignored when O(state=absent) or O(state=exported). type: str required: false instance_id: description: - - UUID of the instance associated to the database backup. - - Required for C(present) and C(restored) states. - - Ignored when C(state=absent) or C(state=exported). + - UUID of the instance associated to the database backup. 
+ - Required for V(present) and V(restored) states. + - Ignored when O(state=absent) or O(state=exported). type: str required: false expires_at: description: - - Expiration datetime of the database backup (ISO 8601 format). - - Ignored when C(state=absent), C(state=exported) or C(state=restored). + - Expiration datetime of the database backup (ISO 8601 format). + - Ignored when O(state=absent), O(state=exported) or O(state=restored). type: str required: false wait: description: - - Wait for the instance to reach its desired state before returning. + - Wait for the instance to reach its desired state before returning. type: bool default: false wait_timeout: description: - - Time to wait for the backup to reach the expected state. + - Time to wait for the backup to reach the expected state. type: int required: false default: 300 wait_sleep_time: description: - - Time to wait before every attempt to check the state of the backup. + - Time to wait before every attempt to check the state of the backup. 
type: int required: false default: 3 -''' +""" -EXAMPLES = ''' - - name: Create a backup - community.general.scaleway_database_backup: - name: 'my_backup' - state: present - region: 'fr-par' - database_name: 'my-database' - instance_id: '50968a80-2909-4e5c-b1af-a2e19860dddb' +EXAMPLES = r""" +- name: Create a backup + community.general.scaleway_database_backup: + name: 'my_backup' + state: present + region: 'fr-par' + database_name: 'my-database' + instance_id: '50968a80-2909-4e5c-b1af-a2e19860dddb' - - name: Export a backup - community.general.scaleway_database_backup: - id: '6ef1125a-037e-494f-a911-6d9c49a51691' - state: exported - region: 'fr-par' +- name: Export a backup + community.general.scaleway_database_backup: + id: '6ef1125a-037e-494f-a911-6d9c49a51691' + state: exported + region: 'fr-par' - - name: Restore a backup - community.general.scaleway_database_backup: - id: '6ef1125a-037e-494f-a911-6d9c49a51691' - state: restored - region: 'fr-par' - database_name: 'my-new-database' - instance_id: '50968a80-2909-4e5c-b1af-a2e19860dddb' +- name: Restore a backup + community.general.scaleway_database_backup: + id: '6ef1125a-037e-494f-a911-6d9c49a51691' + state: restored + region: 'fr-par' + database_name: 'my-new-database' + instance_id: '50968a80-2909-4e5c-b1af-a2e19860dddb' - - name: Remove a backup - community.general.scaleway_database_backup: - id: '6ef1125a-037e-494f-a911-6d9c49a51691' - state: absent - region: 'fr-par' -''' +- name: Remove a backup + community.general.scaleway_database_backup: + id: '6ef1125a-037e-494f-a911-6d9c49a51691' + state: absent + region: 'fr-par' +""" -RETURN = ''' +RETURN = r""" metadata: - description: Backup metadata. 
- returned: when C(state=present), C(state=exported) or C(state=restored) - type: dict - sample: { - "metadata": { - "created_at": "2020-08-06T12:42:05.631049Z", - "database_name": "my-database", - "download_url": null, - "download_url_expires_at": null, - "expires_at": null, - "id": "a15297bd-0c4a-4b4f-8fbb-b36a35b7eb07", - "instance_id": "617be32e-6497-4ed7-b4c7-0ee5a81edf49", - "instance_name": "my-instance", - "name": "backup_name", - "region": "fr-par", - "size": 600000, - "status": "ready", - "updated_at": "2020-08-06T12:42:10.581649Z" - } + description: Backup metadata. + returned: when O(state=present), O(state=exported), or O(state=restored) + type: dict + sample: + { + "metadata": { + "created_at": "2020-08-06T12:42:05.631049Z", + "database_name": "my-database", + "download_url": null, + "download_url_expires_at": null, + "expires_at": null, + "id": "a15297bd-0c4a-4b4f-8fbb-b36a35b7eb07", + "instance_id": "617be32e-6497-4ed7-b4c7-0ee5a81edf49", + "instance_name": "my-instance", + "name": "backup_name", + "region": "fr-par", + "size": 600000, + "status": "ready", + "updated_at": "2020-08-06T12:42:10.581649Z" + } } -''' +""" import datetime import time from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.datetime import ( + now, +) from ansible_collections.community.general.plugins.module_utils.scaleway import ( Scaleway, scaleway_argument_spec, @@ -182,9 +194,9 @@ def wait_to_complete_state_transition(module, account_api, backup=None): if backup is None or backup['status'] in stable_states: return backup - start = datetime.datetime.utcnow() + start = now() end = start + datetime.timedelta(seconds=wait_timeout) - while datetime.datetime.utcnow() < end: + while now() < end: module.debug('We are going to wait for the backup to finish its transition') response = account_api.get('/rdb/v1/regions/%s/backups/%s' % (module.params.get('region'), backup['id'])) @@ -344,8 +356,8 @@ def main(): 
region=dict(required=True, choices=SCALEWAY_REGIONS), id=dict(), name=dict(type='str'), - database_name=dict(required=False), - instance_id=dict(required=False), + database_name=dict(), + instance_id=dict(), expires_at=dict(), wait=dict(type='bool', default=False), wait_timeout=dict(type='int', default=300), diff --git a/plugins/modules/scaleway_function.py b/plugins/modules/scaleway_function.py new file mode 100644 index 0000000000..a8d0680c71 --- /dev/null +++ b/plugins/modules/scaleway_function.py @@ -0,0 +1,394 @@ +#!/usr/bin/python +# +# Scaleway Serverless function management module +# +# Copyright (c) 2022, Guillaume MARTINEZ +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + + +DOCUMENTATION = r""" +module: scaleway_function +short_description: Scaleway Function management +version_added: 6.0.0 +author: Guillaume MARTINEZ (@Lunik) +description: + - This module manages function on Scaleway account. +extends_documentation_fragment: + - community.general.scaleway + - community.general.scaleway_waitable_resource + - community.general.attributes + - community.general.scaleway.actiongroup_scaleway +requirements: + - passlib[argon2] >= 1.7.4 + +attributes: + check_mode: + support: full + diff_mode: + support: none + action_group: + version_added: 11.3.0 + +options: + state: + type: str + description: + - Indicate desired state of the function. + default: present + choices: + - present + - absent + + namespace_id: + type: str + description: + - Function namespace identifier. + required: true + + region: + type: str + description: + - Scaleway region to use (for example V(fr-par)). + required: true + choices: + - fr-par + - nl-ams + - pl-waw + + name: + type: str + description: + - Name of the function. + required: true + + description: + description: + - Description of the function. 
+ type: str + default: '' + + min_scale: + description: + - Minimum number of replicas for the function. + type: int + + max_scale: + description: + - Maximum number of replicas for the function. + type: int + + environment_variables: + description: + - Environment variables of the function. + - Injected in function at runtime. + type: dict + default: {} + + secret_environment_variables: + description: + - Secret environment variables of the function. + - Updating those values does not output a C(changed) state in Ansible. + - Injected in function at runtime. + type: dict + default: {} + + runtime: + description: + - Runtime of the function. + - See U(https://www.scaleway.com/en/docs/compute/functions/reference-content/functions-lifecycle/) for all available + runtimes. + type: str + required: true + + memory_limit: + description: + - Resources define performance characteristics of your function. + - They are allocated to your function at runtime. + type: int + + function_timeout: + description: + - The length of time your handler can spend processing a request before being stopped. + type: str + + handler: + description: + - The C(module-name.export) value in your function. + type: str + + privacy: + description: + - Privacy policies define whether a function can be executed anonymously. + - Choose V(public) to enable anonymous execution, or V(private) to protect your function with an authentication mechanism + provided by the Scaleway API. + type: str + default: public + choices: + - public + - private + + redeploy: + description: + - Redeploy the function if update is required. 
+ type: bool + default: false +""" + +EXAMPLES = r""" +- name: Create a function + community.general.scaleway_function: + namespace_id: '{{ scw_function_namespace }}' + region: fr-par + state: present + name: my-awesome-function + runtime: python3 + environment_variables: + MY_VAR: my_value + secret_environment_variables: + MY_SECRET_VAR: my_secret_value + register: function_creation_task + +- name: Make sure function is deleted + community.general.scaleway_function: + namespace_id: '{{ scw_function_namespace }}' + region: fr-par + state: absent + name: my-awesome-function +""" + +RETURN = r""" +function: + description: The function information. + returned: when O(state=present) + type: dict + sample: + cpu_limit: 140 + description: Function used for testing scaleway_function ansible module + domain_name: fnansibletestfxamabuc-fn-ansible-test.functions.fnc.fr-par.scw.cloud + environment_variables: + MY_VAR: my_value + error_message: null + handler: handler.handle + http_option: "" + id: ceb64dc4-4464-4196-8e20-ecef705475d3 + max_scale: 5 + memory_limit: 256 + min_scale: 0 + name: fn-ansible-test + namespace_id: 82737d8d-0ebb-4d89-b0ad-625876eca50d + privacy: public + region: fr-par + runtime: python310 + runtime_message: "" + secret_environment_variables: + - key: MY_SECRET_VAR + value: $argon2id$v=19$m=65536,t=1,p=2$tb6UwSPWx/rH5Vyxt9Ujfw$5ZlvaIjWwNDPxD9Rdght3NarJz4IETKjpvAU3mMSmFg + status: created + timeout: 300s +""" + +from copy import deepcopy + +from ansible_collections.community.general.plugins.module_utils.scaleway import ( + SCALEWAY_REGIONS, scaleway_argument_spec, Scaleway, + scaleway_waitable_resource_argument_spec, resource_attributes_should_be_changed, + SecretVariables +) +from ansible.module_utils.basic import AnsibleModule + +STABLE_STATES = ( + "ready", + "created", + "absent" +) + +VERIFIABLE_MUTABLE_ATTRIBUTES = ( + "description", + "min_scale", + "max_scale", + "environment_variables", + "runtime", + "memory_limit", + "timeout", + "handler", + 
"privacy", + "secret_environment_variables" +) + +MUTABLE_ATTRIBUTES = VERIFIABLE_MUTABLE_ATTRIBUTES + ( + "redeploy", +) + + +def payload_from_wished_fn(wished_fn): + payload = { + "namespace_id": wished_fn["namespace_id"], + "name": wished_fn["name"], + "description": wished_fn["description"], + "min_scale": wished_fn["min_scale"], + "max_scale": wished_fn["max_scale"], + "runtime": wished_fn["runtime"], + "memory_limit": wished_fn["memory_limit"], + "timeout": wished_fn["timeout"], + "handler": wished_fn["handler"], + "privacy": wished_fn["privacy"], + "redeploy": wished_fn["redeploy"], + "environment_variables": wished_fn["environment_variables"], + "secret_environment_variables": SecretVariables.dict_to_list(wished_fn["secret_environment_variables"]) + } + + return payload + + +def absent_strategy(api, wished_fn): + changed = False + + fn_list = api.fetch_all_resources("functions") + fn_lookup = {fn["name"]: fn for fn in fn_list} + + if wished_fn["name"] not in fn_lookup: + return changed, {} + + target_fn = fn_lookup[wished_fn["name"]] + changed = True + if api.module.check_mode: + return changed, {"status": "Function would be destroyed"} + + api.wait_to_complete_state_transition(resource=target_fn, stable_states=STABLE_STATES, force_wait=True) + response = api.delete(path=api.api_path + "/%s" % target_fn["id"]) + if not response.ok: + api.module.fail_json(msg='Error deleting function [{0}: {1}]'.format( + response.status_code, response.json)) + + api.wait_to_complete_state_transition(resource=target_fn, stable_states=STABLE_STATES) + return changed, response.json + + +def present_strategy(api, wished_fn): + changed = False + + fn_list = api.fetch_all_resources("functions") + fn_lookup = {fn["name"]: fn for fn in fn_list} + + payload_fn = payload_from_wished_fn(wished_fn) + + if wished_fn["name"] not in fn_lookup: + changed = True + if api.module.check_mode: + return changed, {"status": "A function would be created."} + + # Creation doesn't support `redeploy` 
parameter + del payload_fn["redeploy"] + + # Create function + api.warn(payload_fn) + creation_response = api.post(path=api.api_path, + data=payload_fn) + + if not creation_response.ok: + msg = "Error during function creation: %s: '%s' (%s)" % (creation_response.info['msg'], + creation_response.json['message'], + creation_response.json) + api.module.fail_json(msg=msg) + + api.wait_to_complete_state_transition(resource=creation_response.json, stable_states=STABLE_STATES) + response = api.get(path=api.api_path + "/%s" % creation_response.json["id"]) + return changed, response.json + + target_fn = fn_lookup[wished_fn["name"]] + decoded_target_fn = deepcopy(target_fn) + decoded_target_fn["secret_environment_variables"] = SecretVariables.decode(decoded_target_fn["secret_environment_variables"], + payload_fn["secret_environment_variables"]) + + patch_payload = resource_attributes_should_be_changed(target=decoded_target_fn, + wished=payload_fn, + verifiable_mutable_attributes=VERIFIABLE_MUTABLE_ATTRIBUTES, + mutable_attributes=MUTABLE_ATTRIBUTES) + + if not patch_payload: + return changed, target_fn + + changed = True + if api.module.check_mode: + return changed, {"status": "Function attributes would be changed."} + + fn_patch_response = api.patch(path=api.api_path + "/%s" % target_fn["id"], + data=patch_payload) + + if not fn_patch_response.ok: + api.module.fail_json(msg='Error during function attributes update: [{0}: {1}]'.format( + fn_patch_response.status_code, fn_patch_response.json['message'])) + + api.wait_to_complete_state_transition(resource=target_fn, stable_states=STABLE_STATES) + response = api.get(path=api.api_path + "/%s" % target_fn["id"]) + return changed, response.json + + +state_strategy = { + "present": present_strategy, + "absent": absent_strategy +} + + +def core(module): + SecretVariables.ensure_scaleway_secret_package(module) + + region = module.params["region"] + wished_function = { + "state": module.params["state"], + "namespace_id": 
module.params["namespace_id"], + "name": module.params["name"], + "description": module.params['description'], + "min_scale": module.params['min_scale'], + "max_scale": module.params['max_scale'], + "runtime": module.params["runtime"], + "memory_limit": module.params["memory_limit"], + "timeout": module.params["function_timeout"], + "handler": module.params["handler"], + "privacy": module.params["privacy"], + "redeploy": module.params["redeploy"], + "environment_variables": module.params['environment_variables'], + "secret_environment_variables": module.params['secret_environment_variables'] + } + + api = Scaleway(module=module) + api.api_path = "functions/v1beta1/regions/%s/functions" % region + + changed, summary = state_strategy[wished_function["state"]](api=api, wished_fn=wished_function) + + module.exit_json(changed=changed, function=summary) + + +def main(): + argument_spec = scaleway_argument_spec() + argument_spec.update(scaleway_waitable_resource_argument_spec()) + argument_spec.update(dict( + state=dict(type='str', default='present', choices=['absent', 'present']), + namespace_id=dict(type='str', required=True), + region=dict(type='str', required=True, choices=SCALEWAY_REGIONS), + name=dict(type='str', required=True), + description=dict(type='str', default=''), + min_scale=dict(type='int'), + max_scale=dict(type='int'), + runtime=dict(type='str', required=True), + memory_limit=dict(type='int'), + function_timeout=dict(type='str'), + handler=dict(type='str'), + privacy=dict(type='str', default='public', choices=['public', 'private']), + redeploy=dict(type='bool', default=False), + environment_variables=dict(type='dict', default={}), + secret_environment_variables=dict(type='dict', default={}, no_log=True) + )) + module = AnsibleModule( + argument_spec=argument_spec, + supports_check_mode=True, + ) + + core(module) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/scaleway_function_info.py b/plugins/modules/scaleway_function_info.py new 
file mode 100644 index 0000000000..d41e45fba2 --- /dev/null +++ b/plugins/modules/scaleway_function_info.py @@ -0,0 +1,152 @@ +#!/usr/bin/python +# +# Scaleway Serverless function info module +# +# Copyright (c) 2022, Guillaume MARTINEZ +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + + +DOCUMENTATION = r""" +module: scaleway_function_info +short_description: Retrieve information on Scaleway Function +version_added: 6.0.0 +author: Guillaume MARTINEZ (@Lunik) +description: + - This module return information about a function on Scaleway account. +extends_documentation_fragment: + - community.general.scaleway + - community.general.attributes + - community.general.scaleway.actiongroup_scaleway + - community.general.attributes.info_module + +attributes: + action_group: + version_added: 11.3.0 + +options: + namespace_id: + type: str + description: + - Container namespace identifier. + required: true + + region: + type: str + description: + - Scaleway region to use (for example C(fr-par)). + required: true + choices: + - fr-par + - nl-ams + - pl-waw + + name: + type: str + description: + - Name of the function. + required: true +""" + +EXAMPLES = r""" +- name: Get a function info + community.general.scaleway_function_info: + namespace_id: '{{ scw_function_namespace }}' + region: fr-par + name: my-awesome-function + register: function_info_task +""" + +RETURN = r""" +function: + description: The function information. 
+ returned: always + type: dict + sample: + cpu_limit: 140 + description: Function used for testing scaleway_function ansible module + domain_name: fnansibletestfxamabuc-fn-ansible-test.functions.fnc.fr-par.scw.cloud + environment_variables: + MY_VAR: my_value + error_message: + handler: handler.handle + http_option: "" + id: ceb64dc4-4464-4196-8e20-ecef705475d3 + max_scale: 5 + memory_limit: 256 + min_scale: 0 + name: fn-ansible-test + namespace_id: 82737d8d-0ebb-4d89-b0ad-625876eca50d + privacy: public + region: fr-par + runtime: python310 + runtime_message: "" + secret_environment_variables: + - key: MY_SECRET_VAR + value: $argon2id$v=19$m=65536,t=1,p=2$tb6UwSPWx/rH5Vyxt9Ujfw$5ZlvaIjWwNDPxD9Rdght3NarJz4IETKjpvAU3mMSmFg + status: created + timeout: 300s +""" + +from ansible_collections.community.general.plugins.module_utils.scaleway import ( + SCALEWAY_REGIONS, scaleway_argument_spec, Scaleway +) +from ansible.module_utils.basic import AnsibleModule + + +def info_strategy(api, wished_fn): + fn_list = api.fetch_all_resources("functions") + fn_lookup = {fn["name"]: fn for fn in fn_list} + + if wished_fn["name"] not in fn_lookup: + msg = "Error during function lookup: Unable to find function named '%s' in namespace '%s'" % (wished_fn["name"], + wished_fn["namespace_id"]) + + api.module.fail_json(msg=msg) + + target_fn = fn_lookup[wished_fn["name"]] + + response = api.get(path=api.api_path + "/%s" % target_fn["id"]) + if not response.ok: + msg = "Error during function lookup: %s: '%s' (%s)" % (response.info['msg'], + response.json['message'], + response.json) + api.module.fail_json(msg=msg) + + return response.json + + +def core(module): + region = module.params["region"] + wished_function = { + "namespace_id": module.params["namespace_id"], + "name": module.params["name"] + } + + api = Scaleway(module=module) + api.api_path = "functions/v1beta1/regions/%s/functions" % region + + summary = info_strategy(api=api, wished_fn=wished_function) + + 
module.exit_json(changed=False, function=summary) + + +def main(): + argument_spec = scaleway_argument_spec() + argument_spec.update(dict( + namespace_id=dict(type='str', required=True), + region=dict(type='str', required=True, choices=SCALEWAY_REGIONS), + name=dict(type='str', required=True) + )) + module = AnsibleModule( + argument_spec=argument_spec, + supports_check_mode=True, + ) + + core(module) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/scaleway_function_namespace.py b/plugins/modules/scaleway_function_namespace.py new file mode 100644 index 0000000000..b5600831b5 --- /dev/null +++ b/plugins/modules/scaleway_function_namespace.py @@ -0,0 +1,296 @@ +#!/usr/bin/python +# +# Scaleway Serverless function namespace management module +# +# Copyright (c) 2022, Guillaume MARTINEZ +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + + +DOCUMENTATION = r""" +module: scaleway_function_namespace +short_description: Scaleway Function namespace management +version_added: 6.0.0 +author: Guillaume MARTINEZ (@Lunik) +description: + - This module manages function namespaces on Scaleway account. +extends_documentation_fragment: + - community.general.scaleway + - community.general.scaleway_waitable_resource + - community.general.attributes + - community.general.scaleway.actiongroup_scaleway +requirements: + - passlib[argon2] >= 1.7.4 + +attributes: + check_mode: + support: full + diff_mode: + support: none + action_group: + version_added: 11.3.0 + +options: + state: + type: str + description: + - Indicate desired state of the function namespace. + default: present + choices: + - present + - absent + + project_id: + type: str + description: + - Project identifier. + required: true + + region: + type: str + description: + - Scaleway region to use (for example V(fr-par)). 
+ required: true + choices: + - fr-par + - nl-ams + - pl-waw + + name: + type: str + description: + - Name of the function namespace. + required: true + + description: + description: + - Description of the function namespace. + type: str + default: '' + + environment_variables: + description: + - Environment variables of the function namespace. + - Injected in functions at runtime. + type: dict + default: {} + + secret_environment_variables: + description: + - Secret environment variables of the function namespace. + - Updating those values does not output a C(changed) state in Ansible. + - Injected in functions at runtime. + type: dict + default: {} +""" + +EXAMPLES = r""" +- name: Create a function namespace + community.general.scaleway_function_namespace: + project_id: '{{ scw_project }}' + state: present + region: fr-par + name: my-awesome-function-namespace + environment_variables: + MY_VAR: my_value + secret_environment_variables: + MY_SECRET_VAR: my_secret_value + register: function_namespace_creation_task + +- name: Make sure function namespace is deleted + community.general.scaleway_function_namespace: + project_id: '{{ scw_project }}' + state: absent + region: fr-par + name: my-awesome-function-namespace +""" + +RETURN = r""" +function_namespace: + description: The function namespace information. 
+ returned: when O(state=present) + type: dict + sample: + description: "" + environment_variables: + MY_VAR: my_value + error_message: + id: 531a1fd7-98d2-4a74-ad77-d398324304b8 + name: my-awesome-function-namespace + organization_id: e04e3bdc-015c-4514-afde-9389e9be24b0 + project_id: d44cea58-dcb7-4c95-bff1-1105acb60a98 + region: fr-par + registry_endpoint: "" + registry_namespace_id: "" + secret_environment_variables: + - key: MY_SECRET_VAR + value: $argon2id$v=19$m=65536,t=1,p=2$tb6UwSPWx/rH5Vyxt9Ujfw$5ZlvaIjWwNDPxD9Rdght3NarJz4IETKjpvAU3mMSmFg + status: pending +""" + +from copy import deepcopy + +from ansible_collections.community.general.plugins.module_utils.scaleway import ( + SCALEWAY_REGIONS, scaleway_argument_spec, Scaleway, + scaleway_waitable_resource_argument_spec, resource_attributes_should_be_changed, + SecretVariables +) +from ansible.module_utils.basic import AnsibleModule + + +STABLE_STATES = ( + "ready", + "absent" +) + +MUTABLE_ATTRIBUTES = ( + "description", + "environment_variables", + "secret_environment_variables", +) + + +def payload_from_wished_fn(wished_fn): + payload = { + "project_id": wished_fn["project_id"], + "name": wished_fn["name"], + "description": wished_fn["description"], + "environment_variables": wished_fn["environment_variables"], + "secret_environment_variables": SecretVariables.dict_to_list(wished_fn["secret_environment_variables"]) + } + + return payload + + +def absent_strategy(api, wished_fn): + changed = False + + fn_list = api.fetch_all_resources("namespaces") + fn_lookup = {fn["name"]: fn for fn in fn_list} + + if wished_fn["name"] not in fn_lookup: + return changed, {} + + target_fn = fn_lookup[wished_fn["name"]] + changed = True + if api.module.check_mode: + return changed, {"status": "Function namespace would be destroyed"} + + api.wait_to_complete_state_transition(resource=target_fn, stable_states=STABLE_STATES, force_wait=True) + response = api.delete(path=api.api_path + "/%s" % target_fn["id"]) + if not 
response.ok: + api.module.fail_json(msg='Error deleting function namespace [{0}: {1}]'.format( + response.status_code, response.json)) + + api.wait_to_complete_state_transition(resource=target_fn, stable_states=STABLE_STATES) + return changed, response.json + + +def present_strategy(api, wished_fn): + changed = False + + fn_list = api.fetch_all_resources("namespaces") + fn_lookup = {fn["name"]: fn for fn in fn_list} + + payload_fn = payload_from_wished_fn(wished_fn) + + if wished_fn["name"] not in fn_lookup: + changed = True + if api.module.check_mode: + return changed, {"status": "A function namespace would be created."} + + # Create function namespace + api.warn(payload_fn) + creation_response = api.post(path=api.api_path, + data=payload_fn) + + if not creation_response.ok: + msg = "Error during function namespace creation: %s: '%s' (%s)" % (creation_response.info['msg'], + creation_response.json['message'], + creation_response.json) + api.module.fail_json(msg=msg) + + api.wait_to_complete_state_transition(resource=creation_response.json, stable_states=STABLE_STATES) + response = api.get(path=api.api_path + "/%s" % creation_response.json["id"]) + return changed, response.json + + target_fn = fn_lookup[wished_fn["name"]] + decoded_target_fn = deepcopy(target_fn) + decoded_target_fn["secret_environment_variables"] = SecretVariables.decode(decoded_target_fn["secret_environment_variables"], + payload_fn["secret_environment_variables"]) + + patch_payload = resource_attributes_should_be_changed(target=decoded_target_fn, + wished=payload_fn, + verifiable_mutable_attributes=MUTABLE_ATTRIBUTES, + mutable_attributes=MUTABLE_ATTRIBUTES) + + if not patch_payload: + return changed, target_fn + + changed = True + if api.module.check_mode: + return changed, {"status": "Function namespace attributes would be changed."} + + fn_patch_response = api.patch(path=api.api_path + "/%s" % target_fn["id"], + data=patch_payload) + + if not fn_patch_response.ok: + 
api.module.fail_json(msg='Error during function namespace attributes update: [{0}: {1}]'.format( + fn_patch_response.status_code, fn_patch_response.json['message'])) + + api.wait_to_complete_state_transition(resource=target_fn, stable_states=STABLE_STATES) + response = api.get(path=api.api_path + "/%s" % target_fn["id"]) + return changed, response.json + + +state_strategy = { + "present": present_strategy, + "absent": absent_strategy +} + + +def core(module): + SecretVariables.ensure_scaleway_secret_package(module) + + region = module.params["region"] + wished_function_namespace = { + "state": module.params["state"], + "project_id": module.params["project_id"], + "name": module.params["name"], + "description": module.params['description'], + "environment_variables": module.params['environment_variables'], + "secret_environment_variables": module.params['secret_environment_variables'] + } + + api = Scaleway(module=module) + api.api_path = "functions/v1beta1/regions/%s/namespaces" % region + + changed, summary = state_strategy[wished_function_namespace["state"]](api=api, wished_fn=wished_function_namespace) + + module.exit_json(changed=changed, function_namespace=summary) + + +def main(): + argument_spec = scaleway_argument_spec() + argument_spec.update(scaleway_waitable_resource_argument_spec()) + argument_spec.update(dict( + state=dict(type='str', default='present', choices=['absent', 'present']), + project_id=dict(type='str', required=True), + region=dict(type='str', required=True, choices=SCALEWAY_REGIONS), + name=dict(type='str', required=True), + description=dict(type='str', default=''), + environment_variables=dict(type='dict', default={}), + secret_environment_variables=dict(type='dict', default={}, no_log=True) + )) + module = AnsibleModule( + argument_spec=argument_spec, + supports_check_mode=True, + ) + + core(module) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/scaleway_function_namespace_info.py 
b/plugins/modules/scaleway_function_namespace_info.py new file mode 100644 index 0000000000..89c0fdfa61 --- /dev/null +++ b/plugins/modules/scaleway_function_namespace_info.py @@ -0,0 +1,144 @@ +#!/usr/bin/python +# +# Scaleway Serverless function namespace info module +# +# Copyright (c) 2022, Guillaume MARTINEZ +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + + +DOCUMENTATION = r""" +module: scaleway_function_namespace_info +short_description: Retrieve information on Scaleway Function namespace +version_added: 6.0.0 +author: Guillaume MARTINEZ (@Lunik) +description: + - This module return information about a function namespace on Scaleway account. +extends_documentation_fragment: + - community.general.scaleway + - community.general.attributes + - community.general.scaleway.actiongroup_scaleway + - community.general.attributes.info_module + +attributes: + action_group: + version_added: 11.3.0 + +options: + project_id: + type: str + description: + - Project identifier. + required: true + + region: + type: str + description: + - Scaleway region to use (for example C(fr-par)). + required: true + choices: + - fr-par + - nl-ams + - pl-waw + + name: + type: str + description: + - Name of the function namespace. + required: true +""" + +EXAMPLES = r""" +- name: Get a function namespace info + community.general.scaleway_function_namespace_info: + project_id: '{{ scw_project }}' + region: fr-par + name: my-awesome-function-namespace + register: function_namespace_info_task +""" + +RETURN = r""" +function_namespace: + description: The function namespace information. 
+ returned: always + type: dict + sample: + description: "" + environment_variables: + MY_VAR: my_value + error_message: + id: 531a1fd7-98d2-4a74-ad77-d398324304b8 + name: my-awesome-function-namespace + organization_id: e04e3bdc-015c-4514-afde-9389e9be24b0 + project_id: d44cea58-dcb7-4c95-bff1-1105acb60a98 + region: fr-par + registry_endpoint: "" + registry_namespace_id: "" + secret_environment_variables: + - key: MY_SECRET_VAR + value: $argon2id$v=19$m=65536,t=1,p=2$tb6UwSPWx/rH5Vyxt9Ujfw$5ZlvaIjWwNDPxD9Rdght3NarJz4IETKjpvAU3mMSmFg + status: pending +""" + +from ansible_collections.community.general.plugins.module_utils.scaleway import ( + SCALEWAY_REGIONS, scaleway_argument_spec, Scaleway, +) +from ansible.module_utils.basic import AnsibleModule + + +def info_strategy(api, wished_fn): + fn_list = api.fetch_all_resources("namespaces") + fn_lookup = {fn["name"]: fn for fn in fn_list} + + if wished_fn["name"] not in fn_lookup: + msg = "Error during function namespace lookup: Unable to find function namespace named '%s' in project '%s'" % (wished_fn["name"], + wished_fn["project_id"]) + + api.module.fail_json(msg=msg) + + target_fn = fn_lookup[wished_fn["name"]] + + response = api.get(path=api.api_path + "/%s" % target_fn["id"]) + if not response.ok: + msg = "Error during function namespace lookup: %s: '%s' (%s)" % (response.info['msg'], + response.json['message'], + response.json) + api.module.fail_json(msg=msg) + + return response.json + + +def core(module): + region = module.params["region"] + wished_function_namespace = { + "project_id": module.params["project_id"], + "name": module.params["name"] + } + + api = Scaleway(module=module) + api.api_path = "functions/v1beta1/regions/%s/namespaces" % region + + summary = info_strategy(api=api, wished_fn=wished_function_namespace) + + module.exit_json(changed=False, function_namespace=summary) + + +def main(): + argument_spec = scaleway_argument_spec() + argument_spec.update(dict( + project_id=dict(type='str', 
required=True), + region=dict(type='str', required=True, choices=SCALEWAY_REGIONS), + name=dict(type='str', required=True) + )) + module = AnsibleModule( + argument_spec=argument_spec, + supports_check_mode=True, + ) + + core(module) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/scaleway_image_info.py b/plugins/modules/scaleway_image_info.py new file mode 100644 index 0000000000..9cffb1aca0 --- /dev/null +++ b/plugins/modules/scaleway_image_info.py @@ -0,0 +1,136 @@ +#!/usr/bin/python +# +# Copyright (c) 2018, Yanis Guenane +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + +DOCUMENTATION = r""" +module: scaleway_image_info +short_description: Gather information about the Scaleway images available +description: + - Gather information about the Scaleway images available. +author: + - "Yanis Guenane (@Spredzy)" + - "Remy Leone (@remyleone)" +extends_documentation_fragment: + - community.general.scaleway + - community.general.attributes + - community.general.scaleway.actiongroup_scaleway + - community.general.attributes.info_module + +attributes: + action_group: + version_added: 11.3.0 + +options: + region: + type: str + description: + - Scaleway compute zone. + required: true + choices: + - ams1 + - EMEA-NL-EVS + - ams2 + - ams3 + - par1 + - EMEA-FR-PAR1 + - par2 + - EMEA-FR-PAR2 + - par3 + - waw1 + - EMEA-PL-WAW1 + - waw2 + - waw3 +""" + +EXAMPLES = r""" +- name: Gather Scaleway images information + community.general.scaleway_image_info: + region: par1 + register: result + +- ansible.builtin.debug: + msg: "{{ result.scaleway_image_info }}" +""" + +RETURN = r""" +scaleway_image_info: + description: + - Response from Scaleway API. + - 'For more details please refer to: U(https://developers.scaleway.com/en/products/instance/api/).' 
+ returned: success + type: list + elements: dict + sample: + [ + { + "arch": "x86_64", + "creation_date": "2018-07-17T16:18:49.276456+00:00", + "default_bootscript": { + "architecture": "x86_64", + "bootcmdargs": "LINUX_COMMON scaleway boot=local nbd.max_part=16", + "default": false, + "dtb": "", + "id": "15fbd2f7-a0f9-412b-8502-6a44da8d98b8", + "initrd": "http://169.254.42.24/initrd/initrd-Linux-x86_64-v3.14.5.gz", + "kernel": "http://169.254.42.24/kernel/x86_64-mainline-lts-4.9-4.9.93-rev1/vmlinuz-4.9.93", + "organization": "11111111-1111-4111-8111-111111111111", + "public": true, + "title": "x86_64 mainline 4.9.93 rev1" + }, + "extra_volumes": [], + "from_server": null, + "id": "00ae4a88-3252-4eda-9feb-5f6b56bf5ef0", + "modification_date": "2018-07-17T16:42:06.319315+00:00", + "name": "Debian Stretch", + "organization": "51b656e3-4865-41e8-adbc-0c45bdd780db", + "public": true, + "root_volume": { + "id": "da32dfbb-c5ff-476d-ae2d-c297dd09b7dd", + "name": "snapshot-2a7229dc-d431-4dc5-b66e-95db08b773af-2018-07-17_16:18", + "size": 25000000000, + "volume_type": "l_ssd" + }, + "state": "available" + } + ] +""" + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.scaleway import ( + Scaleway, ScalewayException, scaleway_argument_spec, SCALEWAY_LOCATION) + + +class ScalewayImageInfo(Scaleway): + + def __init__(self, module): + super(ScalewayImageInfo, self).__init__(module) + self.name = 'images' + + region = module.params["region"] + self.module.params['api_url'] = SCALEWAY_LOCATION[region]["api_endpoint"] + + +def main(): + argument_spec = scaleway_argument_spec() + argument_spec.update(dict( + region=dict(required=True, choices=list(SCALEWAY_LOCATION.keys())), + )) + module = AnsibleModule( + argument_spec=argument_spec, + supports_check_mode=True, + ) + + try: + module.exit_json( + scaleway_image_info=ScalewayImageInfo(module).get_resources() + ) + except ScalewayException as exc: + 
module.fail_json(msg=exc.message) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/cloud/scaleway/scaleway_ip.py b/plugins/modules/scaleway_ip.py similarity index 78% rename from plugins/modules/cloud/scaleway/scaleway_ip.py rename to plugins/modules/scaleway_ip.py index 7901aaade4..0edf8f3d31 100644 --- a/plugins/modules/cloud/scaleway/scaleway_ip.py +++ b/plugins/modules/scaleway_ip.py @@ -1,31 +1,38 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- # # Scaleway IP management module # -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# Copyright (c) Ansible project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import absolute_import, division, print_function +from __future__ import annotations -__metaclass__ = type -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: scaleway_ip short_description: Scaleway IP management module author: Remy Leone (@remyleone) description: - - This module manages IP on Scaleway account - U(https://developer.scaleway.com) + - This module manages IP on Scaleway account U(https://developer.scaleway.com). extends_documentation_fragment: -- community.general.scaleway + - community.general.scaleway + - community.general.attributes + - community.general.scaleway.actiongroup_scaleway +attributes: + check_mode: + support: full + diff_mode: + support: none + action_group: + version_added: 11.3.0 options: state: type: str description: - - Indicate desired state of the IP. + - Indicate desired state of the IP. default: present choices: - present @@ -34,42 +41,45 @@ options: organization: type: str description: - - Scaleway organization identifier + - Scaleway organization identifier. required: true region: type: str description: - - Scaleway region to use (for example par1). + - Scaleway region to use (for example par1). 
required: true choices: - ams1 - EMEA-NL-EVS + - ams2 + - ams3 - par1 - EMEA-FR-PAR1 - par2 - EMEA-FR-PAR2 + - par3 - waw1 - EMEA-PL-WAW1 + - waw2 + - waw3 id: type: str description: - - id of the Scaleway IP (UUID) - + - ID of the Scaleway IP (UUID). server: type: str description: - - id of the server you want to attach an IP to. - - To unattach an IP don't specify this option - + - ID of the server you want to attach an IP to. + - To unattach an IP do not specify this option. reverse: type: str description: - - Reverse to assign to the IP -''' + - Reverse to assign to the IP. +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: Create an IP community.general.scaleway_ip: organization: '{{ scw_org }}' @@ -82,28 +92,29 @@ EXAMPLES = ''' id: '{{ ip_creation_task.scaleway_ip.id }}' state: absent region: par1 -''' +""" -RETURN = ''' +RETURN = r""" data: - description: This is only present when C(state=present) - returned: when C(state=present) - type: dict - sample: { + description: This is only present when O(state=present). 
+ returned: when O(state=present) + type: dict + sample: + { "ips": [ { - "organization": "951df375-e094-4d26-97c1-ba548eeb9c42", - "reverse": null, - "id": "dd9e8df6-6775-4863-b517-e0b0ee3d7477", - "server": { - "id": "3f1568ca-b1a2-4e98-b6f7-31a0588157f1", - "name": "ansible_tuto-1" - }, - "address": "212.47.232.136" + "organization": "951df375-e094-4d26-97c1-ba548eeb9c42", + "reverse": null, + "id": "dd9e8df6-6775-4863-b517-e0b0ee3d7477", + "server": { + "id": "3f1568ca-b1a2-4e98-b6f7-31a0588157f1", + "name": "ansible_tuto-1" + }, + "address": "212.47.232.136" } - ] + ] } -''' +""" from ansible_collections.community.general.plugins.module_utils.scaleway import SCALEWAY_LOCATION, scaleway_argument_spec, Scaleway from ansible.module_utils.basic import AnsibleModule @@ -137,11 +148,11 @@ def ip_attributes_should_be_changed(api, target_ip, wished_ip): def payload_from_wished_ip(wished_ip): - return dict( - (k, v) + return { + k: v for k, v in wished_ip.items() if k != 'id' and v is not None - ) + } def present_strategy(api, wished_ip): @@ -153,8 +164,7 @@ def present_strategy(api, wished_ip): response.status_code, response.json['message'])) ips_list = response.json["ips"] - ip_lookup = dict((ip["id"], ip) - for ip in ips_list) + ip_lookup = {ip["id"]: ip for ip in ips_list} if wished_ip["id"] not in ip_lookup.keys(): changed = True @@ -204,8 +214,7 @@ def absent_strategy(api, wished_ip): api.module.fail_json(msg='Error getting IPs [{0}: {1}]'.format( status_code, response.json['message'])) - ip_lookup = dict((ip["id"], ip) - for ip in ips_list) + ip_lookup = {ip["id"]: ip for ip in ips_list} if wished_ip["id"] not in ip_lookup.keys(): return changed, {} diff --git a/plugins/modules/cloud/scaleway/scaleway_ip_info.py b/plugins/modules/scaleway_ip_info.py similarity index 59% rename from plugins/modules/cloud/scaleway/scaleway_ip_info.py rename to plugins/modules/scaleway_ip_info.py index 189ee1cf05..36196583cf 100644 --- 
a/plugins/modules/cloud/scaleway/scaleway_ip_info.py +++ b/plugins/modules/scaleway_ip_info.py @@ -1,23 +1,28 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- # -# (c) 2018, Yanis Guenane -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# Copyright (c) 2018, Yanis Guenane +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = r''' ---- +DOCUMENTATION = r""" module: scaleway_ip_info -short_description: Gather information about the Scaleway ips available. +short_description: Gather information about the Scaleway IPs available description: - - Gather information about the Scaleway ips available. + - Gather information about the Scaleway IPs available. author: - "Yanis Guenane (@Spredzy)" - "Remy Leone (@remyleone)" extends_documentation_fragment: -- community.general.scaleway + - community.general.scaleway + - community.general.attributes + - community.general.scaleway.actiongroup_scaleway + - community.general.attributes.info_module + +attributes: + action_group: + version_added: 11.3.0 options: region: @@ -28,47 +33,51 @@ options: choices: - ams1 - EMEA-NL-EVS + - ams2 + - ams3 - par1 - EMEA-FR-PAR1 - par2 - EMEA-FR-PAR2 + - par3 - waw1 - EMEA-PL-WAW1 -''' + - waw2 + - waw3 +""" -EXAMPLES = r''' -- name: Gather Scaleway ips information +EXAMPLES = r""" +- name: Gather Scaleway IPs information community.general.scaleway_ip_info: region: par1 register: result - ansible.builtin.debug: msg: "{{ result.scaleway_ip_info }}" -''' +""" -RETURN = r''' ---- +RETURN = r""" scaleway_ip_info: description: - Response from Scaleway API. - - "For more details please refer to: U(https://developers.scaleway.com/en/products/instance/api/)." 
+ - For more details please refer to U(https://developers.scaleway.com/en/products/instance/api/). returned: success type: list elements: dict sample: - "scaleway_ip_info": [ - { - "address": "163.172.170.243", - "id": "ea081794-a581-8899-8451-386ddaf0a451", - "organization": "3f709602-5e6c-4619-b80c-e324324324af", - "reverse": null, - "server": { - "id": "12f19bc7-109c-4517-954c-e6b3d0311363", - "name": "scw-e0d158" - } + [ + { + "address": "163.172.170.243", + "id": "ea081794-a581-8899-8451-386ddaf0a451", + "organization": "3f709602-5e6c-4619-b80c-e324324324af", + "reverse": null, + "server": { + "id": "12f19bc7-109c-4517-954c-e6b3d0311363", + "name": "scw-e0d158" } + } ] -''' +""" from ansible.module_utils.basic import AnsibleModule from ansible_collections.community.general.plugins.module_utils.scaleway import ( diff --git a/plugins/modules/cloud/scaleway/scaleway_lb.py b/plugins/modules/scaleway_lb.py similarity index 79% rename from plugins/modules/cloud/scaleway/scaleway_lb.py rename to plugins/modules/scaleway_lb.py index 2112ae4411..6e0aaa91f1 100644 --- a/plugins/modules/cloud/scaleway/scaleway_lb.py +++ b/plugins/modules/scaleway_lb.py @@ -1,52 +1,59 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- # # Scaleway Load-balancer management module # # Copyright (C) 2018 Online SAS. # https://www.scaleway.com # -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import absolute_import, division, print_function +from __future__ import annotations -__metaclass__ = type -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: scaleway_lb short_description: Scaleway load-balancer management module author: Remy Leone (@remyleone) description: - - "This module manages load-balancers on Scaleway." + - This module manages load-balancers on Scaleway. 
extends_documentation_fragment: -- community.general.scaleway + - community.general.scaleway + - community.general.attributes + - community.general.scaleway.actiongroup_scaleway +attributes: + check_mode: + support: full + diff_mode: + support: none + action_group: + version_added: 11.3.0 options: name: type: str description: - - Name of the load-balancer + - Name of the load-balancer. required: true description: type: str description: - - Description of the load-balancer + - Description of the load-balancer. required: true organization_id: type: str description: - - Organization identifier + - Organization identifier. required: true state: type: str description: - - Indicate desired state of the instance. + - Indicate desired state of the instance. default: present choices: - present @@ -55,7 +62,7 @@ options: region: type: str description: - - Scaleway zone + - Scaleway zone. required: true choices: - nl-ams @@ -65,31 +72,31 @@ options: tags: type: list elements: str + default: [] description: - - List of tags to apply to the load-balancer - + - List of tags to apply to the load-balancer. wait: description: - - Wait for the load-balancer to reach its desired state before returning. + - Wait for the load-balancer to reach its desired state before returning. type: bool - default: 'no' + default: false wait_timeout: type: int description: - - Time to wait for the load-balancer to reach the expected state + - Time to wait for the load-balancer to reach the expected state. required: false default: 300 wait_sleep_time: type: int description: - - Time to wait before every attempt to check the state of the load-balancer + - Time to wait before every attempt to check the state of the load-balancer. 
required: false default: 3 -''' +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: Create a load-balancer community.general.scaleway_lb: name: foobar @@ -105,38 +112,42 @@ EXAMPLES = ''' state: absent organization_id: 951df375-e094-4d26-97c1-ba548eeb9c42 region: fr-par -''' +""" -RETURNS = ''' -{ - "scaleway_lb": { +RETURN = r""" +scaleway_lb: + description: The load-balancer object. + returned: success + type: dict + sample: + { "backend_count": 0, "frontend_count": 0, "description": "Description of my load-balancer", "id": "00000000-0000-0000-0000-000000000000", "instances": [ - { - "id": "00000000-0000-0000-0000-000000000000", - "ip_address": "10.0.0.1", - "region": "fr-par", - "status": "ready" - }, - { - "id": "00000000-0000-0000-0000-000000000000", - "ip_address": "10.0.0.2", - "region": "fr-par", - "status": "ready" - } + { + "id": "00000000-0000-0000-0000-000000000000", + "ip_address": "10.0.0.1", + "region": "fr-par", + "status": "ready" + }, + { + "id": "00000000-0000-0000-0000-000000000000", + "ip_address": "10.0.0.2", + "region": "fr-par", + "status": "ready" + } ], "ip": [ - { - "id": "00000000-0000-0000-0000-000000000000", - "ip_address": "192.168.0.1", - "lb_id": "00000000-0000-0000-0000-000000000000", - "region": "fr-par", - "organization_id": "00000000-0000-0000-0000-000000000000", - "reverse": "" - } + { + "id": "00000000-0000-0000-0000-000000000000", + "ip_address": "192.168.0.1", + "lb_id": "00000000-0000-0000-0000-000000000000", + "region": "fr-par", + "organization_id": "00000000-0000-0000-0000-000000000000", + "reverse": "" + } ], "name": "lb_ansible_test", "organization_id": "00000000-0000-0000-0000-000000000000", @@ -146,13 +157,13 @@ RETURNS = ''' "first_tag", "second_tag" ] - } -} -''' + } +""" import datetime import time from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.datetime import now from ansible_collections.community.general.plugins.module_utils.scaleway import 
SCALEWAY_REGIONS, SCALEWAY_ENDPOINT, scaleway_argument_spec, Scaleway STABLE_STATES = ( @@ -200,9 +211,9 @@ def wait_to_complete_state_transition(api, lb, force_wait=False): wait_timeout = api.module.params["wait_timeout"] wait_sleep_time = api.module.params["wait_sleep_time"] - start = datetime.datetime.utcnow() + start = now() end = start + datetime.timedelta(seconds=wait_timeout) - while datetime.datetime.utcnow() < end: + while now() < end: api.module.debug("We are going to wait for the load-balancer to finish its transition") state = fetch_state(api, lb) if state in STABLE_STATES: @@ -215,12 +226,12 @@ def wait_to_complete_state_transition(api, lb, force_wait=False): def lb_attributes_should_be_changed(target_lb, wished_lb): - diff = dict((attr, wished_lb[attr]) for attr in MUTABLE_ATTRIBUTES if target_lb[attr] != wished_lb[attr]) + diff = {attr: wished_lb[attr] for attr in MUTABLE_ATTRIBUTES if target_lb[attr] != wished_lb[attr]} if diff: - return dict((attr, wished_lb[attr]) for attr in MUTABLE_ATTRIBUTES) - else: - return diff + return {attr: wished_lb[attr] for attr in MUTABLE_ATTRIBUTES} + + return {} def present_strategy(api, wished_lb): @@ -232,8 +243,7 @@ def present_strategy(api, wished_lb): response.status_code, response.json['message'])) lbs_list = response.json["lbs"] - lb_lookup = dict((lb["name"], lb) - for lb in lbs_list) + lb_lookup = {lb["name"]: lb for lb in lbs_list} if wished_lb["name"] not in lb_lookup.keys(): changed = True @@ -289,8 +299,7 @@ def absent_strategy(api, wished_lb): api.module.fail_json(msg='Error getting load-balancers [{0}: {1}]'.format( status_code, response.json['message'])) - lb_lookup = dict((lb["name"], lb) - for lb in lbs_list) + lb_lookup = {lb["name"]: lb for lb in lbs_list} if wished_lb["name"] not in lb_lookup.keys(): return changed, {} diff --git a/plugins/modules/cloud/scaleway/scaleway_organization_info.py b/plugins/modules/scaleway_organization_info.py similarity index 54% rename from 
plugins/modules/cloud/scaleway/scaleway_organization_info.py rename to plugins/modules/scaleway_organization_info.py index a09d1bb5d8..873d15b794 100644 --- a/plugins/modules/cloud/scaleway/scaleway_organization_info.py +++ b/plugins/modules/scaleway_organization_info.py @@ -1,72 +1,77 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- # -# (c) 2018, Yanis Guenane -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# Copyright (c) 2018, Yanis Guenane +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = r''' ---- +DOCUMENTATION = r""" module: scaleway_organization_info -short_description: Gather information about the Scaleway organizations available. +short_description: Gather information about the Scaleway organizations available description: - Gather information about the Scaleway organizations available. author: - "Yanis Guenane (@Spredzy)" - "Remy Leone (@remyleone)" + +attributes: + action_group: + version_added: 11.3.0 + options: api_url: description: - - Scaleway API URL + - Scaleway API URL. default: 'https://account.scaleway.com' aliases: ['base_url'] extends_documentation_fragment: -- community.general.scaleway + - community.general.scaleway + - community.general.attributes + - community.general.scaleway.actiongroup_scaleway + - community.general.attributes.info_module +""" -''' - -EXAMPLES = r''' +EXAMPLES = r""" - name: Gather Scaleway organizations information community.general.scaleway_organization_info: register: result - ansible.builtin.debug: msg: "{{ result.scaleway_organization_info }}" -''' +""" -RETURN = r''' ---- +RETURN = r""" scaleway_organization_info: - description: Response from Scaleway API + description: Response from Scaleway API. 
returned: success - type: complex + type: list + elements: dict sample: - "scaleway_organization_info": [ - { - "address_city_name": "Paris", - "address_country_code": "FR", - "address_line1": "42 Rue de l'univers", - "address_line2": null, - "address_postal_code": "75042", - "address_subdivision_code": "FR-75", - "creation_date": "2018-08-06T13:43:28.508575+00:00", - "currency": "EUR", - "customer_class": "individual", - "id": "3f709602-5e6c-4619-b80c-e8432ferewtr", - "locale": "fr_FR", - "modification_date": "2018-08-06T14:56:41.401685+00:00", - "name": "James Bond", - "support_id": "694324", - "support_level": "basic", - "support_pin": "9324", - "users": [], - "vat_number": null, - "warnings": [] - } + [ + { + "address_city_name": "Paris", + "address_country_code": "FR", + "address_line1": "42 Rue de l'univers", + "address_line2": null, + "address_postal_code": "75042", + "address_subdivision_code": "FR-75", + "creation_date": "2018-08-06T13:43:28.508575+00:00", + "currency": "EUR", + "customer_class": "individual", + "id": "3f709602-5e6c-4619-b80c-e8432ferewtr", + "locale": "fr_FR", + "modification_date": "2018-08-06T14:56:41.401685+00:00", + "name": "James Bond", + "support_id": "694324", + "support_level": "basic", + "support_pin": "9324", + "users": [], + "vat_number": null, + "warnings": [] + } ] -''' +""" from ansible.module_utils.basic import AnsibleModule, env_fallback from ansible_collections.community.general.plugins.module_utils.scaleway import ( diff --git a/plugins/modules/cloud/scaleway/scaleway_private_network.py b/plugins/modules/scaleway_private_network.py similarity index 79% rename from plugins/modules/cloud/scaleway/scaleway_private_network.py rename to plugins/modules/scaleway_private_network.py index 996a3cce27..040477a246 100644 --- a/plugins/modules/cloud/scaleway/scaleway_private_network.py +++ b/plugins/modules/scaleway_private_network.py @@ -1,32 +1,39 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- # # Scaleway VPC management module # 
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# Copyright (c) Ansible project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import absolute_import, division, print_function +from __future__ import annotations -__metaclass__ = type -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: scaleway_private_network short_description: Scaleway private network management version_added: 4.5.0 author: Pascal MANGIN (@pastral) description: - - This module manages private network on Scaleway account - (U(https://developer.scaleway.com)). + - This module manages private network on Scaleway account (U(https://developer.scaleway.com)). extends_documentation_fragment: -- community.general.scaleway + - community.general.scaleway + - community.general.attributes + - community.general.scaleway.actiongroup_scaleway +attributes: + check_mode: + support: full + diff_mode: + support: none + action_group: + version_added: 11.3.0 options: state: type: str description: - - Indicate desired state of the VPC. + - Indicate desired state of the VPC. default: present choices: - present @@ -41,33 +48,36 @@ options: region: type: str description: - - Scaleway region to use (for example C(par1)). + - Scaleway region to use (for example V(par1)). required: true choices: - ams1 - EMEA-NL-EVS + - ams2 + - ams3 - par1 - EMEA-FR-PAR1 - par2 - EMEA-FR-PAR2 + - par3 - waw1 - EMEA-PL-WAW1 + - waw2 + - waw3 name: type: str description: - - Name of the VPC. - + - Name of the VPC. tags: type: list elements: str description: - - List of tags to apply to the instance. + - List of tags to apply to the instance. 
default: [] +""" -''' - -EXAMPLES = ''' +EXAMPLES = r""" - name: Create an private network community.general.scaleway_vpc: project: '{{ scw_project }}' @@ -81,31 +91,31 @@ EXAMPLES = ''' name: 'foo' state: absent region: par1 -''' +""" -RETURN = ''' +RETURN = r""" scaleway_private_network: - description: Information on the VPC. - returned: success when C(state=present) - type: dict - sample: - { - "created_at": "2022-01-15T11:11:12.676445Z", - "id": "12345678-f1e6-40ec-83e5-12345d67ed89", - "name": "network", - "organization_id": "a123b4cd-ef5g-678h-90i1-jk2345678l90", - "project_id": "a123b4cd-ef5g-678h-90i1-jk2345678l90", - "tags": [ - "tag1", - "tag2", - "tag3", - "tag4", - "tag5" - ], - "updated_at": "2022-01-15T11:12:04.624837Z", - "zone": "fr-par-2" - } -''' + description: Information on the VPC. + returned: success when O(state=present) + type: dict + sample: + { + "created_at": "2022-01-15T11:11:12.676445Z", + "id": "12345678-f1e6-40ec-83e5-12345d67ed89", + "name": "network", + "organization_id": "a123b4cd-ef5g-678h-90i1-jk2345678l90", + "project_id": "a123b4cd-ef5g-678h-90i1-jk2345678l90", + "tags": [ + "tag1", + "tag2", + "tag3", + "tag4", + "tag5" + ], + "updated_at": "2022-01-15T11:12:04.624837Z", + "zone": "fr-par-2" + } +""" from ansible_collections.community.general.plugins.module_utils.scaleway import SCALEWAY_LOCATION, scaleway_argument_spec, Scaleway from ansible.module_utils.basic import AnsibleModule diff --git a/plugins/modules/cloud/scaleway/scaleway_security_group.py b/plugins/modules/scaleway_security_group.py similarity index 78% rename from plugins/modules/cloud/scaleway/scaleway_security_group.py rename to plugins/modules/scaleway_security_group.py index f9faee6104..ea25234588 100644 --- a/plugins/modules/cloud/scaleway/scaleway_security_group.py +++ b/plugins/modules/scaleway_security_group.py @@ -1,34 +1,40 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- # # Scaleway Security Group management module # # Copyright (C) 2018 Antoine Barbare 
(antoinebarbare@gmail.com). # -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import absolute_import, division, print_function +from __future__ import annotations -__metaclass__ = type -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: scaleway_security_group short_description: Scaleway Security Group management module author: Antoine Barbare (@abarbare) description: - - This module manages Security Group on Scaleway account - U(https://developer.scaleway.com). + - This module manages Security Group on Scaleway account U(https://developer.scaleway.com). extends_documentation_fragment: -- community.general.scaleway + - community.general.scaleway + - community.general.attributes + - community.general.scaleway.actiongroup_scaleway +attributes: + check_mode: + support: full + diff_mode: + support: none + action_group: + version_added: 11.3.0 options: state: description: - Indicate desired state of the Security Group. type: str - choices: [ absent, present ] + choices: [absent, present] default: present organization: @@ -39,18 +45,23 @@ options: region: description: - - Scaleway region to use (for example C(par1)). + - Scaleway region to use (for example V(par1)). type: str required: true choices: - ams1 - EMEA-NL-EVS + - ams2 + - ams3 - par1 - EMEA-FR-PAR1 - par2 - EMEA-FR-PAR2 + - par3 - waw1 - EMEA-PL-WAW1 + - waw2 + - waw3 name: description: @@ -73,21 +84,21 @@ options: description: - Default policy for incoming traffic. type: str - choices: [ accept, drop ] + choices: [accept, drop] outbound_default_policy: description: - Default policy for outcoming traffic. type: str - choices: [ accept, drop ] + choices: [accept, drop] organization_default: description: - Create security group to be the default one. 
type: bool -''' +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: Create a Security Group community.general.scaleway_security_group: state: present @@ -100,28 +111,29 @@ EXAMPLES = ''' outbound_default_policy: accept organization_default: false register: security_group_creation_task -''' +""" -RETURN = ''' +RETURN = r""" data: - description: This is only present when C(state=present) - returned: when C(state=present) - type: dict - sample: { - "scaleway_security_group": { - "description": "my security group description", - "enable_default_security": true, - "id": "0168fb1f-cc46-4f69-b4be-c95d2a19bcae", - "inbound_default_policy": "accept", - "name": "security_group", - "organization": "43a3b6c8-916f-477b-b7ec-ff1898f5fdd9", - "organization_default": false, - "outbound_default_policy": "accept", - "servers": [], - "stateful": false - } + description: This is only present when O(state=present). + returned: when O(state=present) + type: dict + sample: + { + "scaleway_security_group": { + "description": "my security group description", + "enable_default_security": true, + "id": "0168fb1f-cc46-4f69-b4be-c95d2a19bcae", + "inbound_default_policy": "accept", + "name": "security_group", + "organization": "43a3b6c8-916f-477b-b7ec-ff1898f5fdd9", + "organization_default": false, + "outbound_default_policy": "accept", + "servers": [], + "stateful": false + } } -''' +""" from ansible_collections.community.general.plugins.module_utils.scaleway import SCALEWAY_LOCATION, scaleway_argument_spec, Scaleway from ansible.module_utils.basic import AnsibleModule @@ -129,11 +141,11 @@ from uuid import uuid4 def payload_from_security_group(security_group): - return dict( - (k, v) + return { + k: v for k, v in security_group.items() if k != 'id' and v is not None - ) + } def present_strategy(api, security_group): @@ -143,8 +155,7 @@ def present_strategy(api, security_group): if not response.ok: api.module.fail_json(msg='Error getting security groups "%s": "%s" (%s)' % (response.info['msg'], 
response.json['message'], response.json)) - security_group_lookup = dict((sg['name'], sg) - for sg in response.json['security_groups']) + security_group_lookup = {sg['name']: sg for sg in response.json['security_groups']} if security_group['name'] not in security_group_lookup.keys(): ret['changed'] = True @@ -175,8 +186,7 @@ def absent_strategy(api, security_group): if not response.ok: api.module.fail_json(msg='Error getting security groups "%s": "%s" (%s)' % (response.info['msg'], response.json['message'], response.json)) - security_group_lookup = dict((sg['name'], sg) - for sg in response.json['security_groups']) + security_group_lookup = {sg['name']: sg for sg in response.json['security_groups']} if security_group['name'] not in security_group_lookup.keys(): return ret diff --git a/plugins/modules/cloud/scaleway/scaleway_security_group_info.py b/plugins/modules/scaleway_security_group_info.py similarity index 63% rename from plugins/modules/cloud/scaleway/scaleway_security_group_info.py rename to plugins/modules/scaleway_security_group_info.py index a15044e6a4..7ec1fe5b3f 100644 --- a/plugins/modules/cloud/scaleway/scaleway_security_group_info.py +++ b/plugins/modules/scaleway_security_group_info.py @@ -1,21 +1,24 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- # -# (c) 2018, Yanis Guenane -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# Copyright (c) 2018, Yanis Guenane +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = r''' ---- +DOCUMENTATION = r""" module: scaleway_security_group_info -short_description: Gather information about the Scaleway security groups available. 
+short_description: Gather information about the Scaleway security groups available description: - Gather information about the Scaleway security groups available. author: - "Yanis Guenane (@Spredzy)" - "Remy Leone (@remyleone)" + +attributes: + action_group: + version_added: 11.3.0 + options: region: type: str @@ -25,18 +28,25 @@ options: choices: - ams1 - EMEA-NL-EVS + - ams2 + - ams3 - par1 - EMEA-FR-PAR1 - par2 - EMEA-FR-PAR2 + - par3 - waw1 - EMEA-PL-WAW1 + - waw2 + - waw3 extends_documentation_fragment: -- community.general.scaleway + - community.general.scaleway + - community.general.attributes + - community.general.scaleway.actiongroup_scaleway + - community.general.attributes.info_module +""" -''' - -EXAMPLES = r''' +EXAMPLES = r""" - name: Gather Scaleway security groups information community.general.scaleway_security_group_info: region: par1 @@ -44,35 +54,34 @@ EXAMPLES = r''' - ansible.builtin.debug: msg: "{{ result.scaleway_security_group_info }}" -''' +""" -RETURN = r''' ---- +RETURN = r""" scaleway_security_group_info: description: - Response from Scaleway API. - - "For more details please refer to: U(https://developers.scaleway.com/en/products/instance/api/)." + - 'For more details please refer to: U(https://developers.scaleway.com/en/products/instance/api/).' 
returned: success type: list elements: dict sample: - "scaleway_security_group_info": [ - { - "description": "test-ams", - "enable_default_security": true, - "id": "7fcde327-8bed-43a6-95c4-6dfbc56d8b51", - "name": "test-ams", - "organization": "3f709602-5e6c-4619-b80c-e841c89734af", - "organization_default": false, - "servers": [ - { - "id": "12f19bc7-108c-4517-954c-e6b3d0311363", - "name": "scw-e0d158" - } - ] - } + [ + { + "description": "test-ams", + "enable_default_security": true, + "id": "7fcde327-8bed-43a6-95c4-6dfbc56d8b51", + "name": "test-ams", + "organization": "3f709602-5e6c-4619-b80c-e841c89734af", + "organization_default": false, + "servers": [ + { + "id": "12f19bc7-108c-4517-954c-e6b3d0311363", + "name": "scw-e0d158" + } + ] + } ] -''' +""" from ansible.module_utils.basic import AnsibleModule from ansible_collections.community.general.plugins.module_utils.scaleway import ( diff --git a/plugins/modules/cloud/scaleway/scaleway_security_group_rule.py b/plugins/modules/scaleway_security_group_rule.py similarity index 74% rename from plugins/modules/cloud/scaleway/scaleway_security_group_rule.py rename to plugins/modules/scaleway_security_group_rule.py index 9f95921202..edfb6a3565 100644 --- a/plugins/modules/cloud/scaleway/scaleway_security_group_rule.py +++ b/plugins/modules/scaleway_security_group_rule.py @@ -1,29 +1,33 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- # # Scaleway Security Group Rule management module # # Copyright (C) 2018 Antoine Barbare (antoinebarbare@gmail.com). 
# -# GNU General Public License v3.0+ (see COPYING or -# https://www.gnu.org/licenses/gpl-3.0.txt) +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import absolute_import, division, print_function +from __future__ import annotations -__metaclass__ = type -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: scaleway_security_group_rule short_description: Scaleway Security Group Rule management module author: Antoine Barbare (@abarbare) description: - - This module manages Security Group Rule on Scaleway account - U(https://developer.scaleway.com) + - This module manages Security Group Rule on Scaleway account U(https://developer.scaleway.com). extends_documentation_fragment: - community.general.scaleway -requirements: - - ipaddress + - community.general.attributes + - community.general.scaleway.actiongroup_scaleway + +attributes: + check_mode: + support: full + diff_mode: + support: none + action_group: + version_added: 11.3.0 options: state: @@ -38,22 +42,27 @@ options: region: type: str description: - - Scaleway region to use (for example C(par1)). + - Scaleway region to use (for example V(par1)). required: true choices: - ams1 - EMEA-NL-EVS + - ams2 + - ams3 - par1 - EMEA-FR-PAR1 - par2 - EMEA-FR-PAR2 + - par3 - waw1 - EMEA-PL-WAW1 + - waw2 + - waw3 protocol: type: str description: - - Network protocol to use + - Network protocol to use. choices: - TCP - UDP @@ -62,20 +71,20 @@ options: port: description: - - Port related to the rule, null value for all the ports + - Port related to the rule, null value for all the ports. required: true type: int ip_range: type: str description: - - IPV4 CIDR notation to apply to the rule + - IPV4 CIDR notation to apply to the rule. default: 0.0.0.0/0 direction: type: str description: - - Rule direction + - Rule direction. 
choices: - inbound - outbound @@ -84,7 +93,7 @@ options: action: type: str description: - - Rule action + - Rule action. choices: - accept - drop @@ -93,57 +102,47 @@ options: security_group: type: str description: - - Security Group unique identifier + - Security Group unique identifier. required: true -''' +""" -EXAMPLES = ''' - - name: Create a Security Group Rule - community.general.scaleway_security_group_rule: - state: present - region: par1 - protocol: TCP - port: 80 - ip_range: 0.0.0.0/0 - direction: inbound - action: accept - security_group: b57210ee-1281-4820-a6db-329f78596ecb - register: security_group_rule_creation_task -''' +EXAMPLES = r""" +- name: Create a Security Group Rule + community.general.scaleway_security_group_rule: + state: present + region: par1 + protocol: TCP + port: 80 + ip_range: 0.0.0.0/0 + direction: inbound + action: accept + security_group: b57210ee-1281-4820-a6db-329f78596ecb + register: security_group_rule_creation_task +""" -RETURN = ''' +RETURN = r""" data: - description: This is only present when C(state=present) - returned: when C(state=present) - type: dict - sample: { - "scaleway_security_group_rule": { - "direction": "inbound", - "protocol": "TCP", - "ip_range": "0.0.0.0/0", - "dest_port_from": 80, - "action": "accept", - "position": 2, - "dest_port_to": null, - "editable": null, - "id": "10cb0b9a-80f6-4830-abd7-a31cd828b5e9" - } + description: This is only present when O(state=present). 
+ returned: when O(state=present) + type: dict + sample: + { + "scaleway_security_group_rule": { + "direction": "inbound", + "protocol": "TCP", + "ip_range": "0.0.0.0/0", + "dest_port_from": 80, + "action": "accept", + "position": 2, + "dest_port_to": null, + "editable": null, + "id": "10cb0b9a-80f6-4830-abd7-a31cd828b5e9" + } } -''' - -import traceback +""" from ansible_collections.community.general.plugins.module_utils.scaleway import SCALEWAY_LOCATION, scaleway_argument_spec, Scaleway, payload_from_object -from ansible.module_utils.common.text.converters import to_text -from ansible.module_utils.basic import AnsibleModule, missing_required_lib - -try: - from ipaddress import ip_network -except ImportError: - IPADDRESS_IMP_ERR = traceback.format_exc() - HAS_IPADDRESS = False -else: - HAS_IPADDRESS = True +from ansible.module_utils.basic import AnsibleModule def get_sgr_from_api(security_group_rules, security_group_rule): @@ -266,8 +265,6 @@ def main(): argument_spec=argument_spec, supports_check_mode=True, ) - if not HAS_IPADDRESS: - module.fail_json(msg=missing_required_lib('ipaddress'), exception=IPADDRESS_IMP_ERR) core(module) diff --git a/plugins/modules/scaleway_server_info.py b/plugins/modules/scaleway_server_info.py new file mode 100644 index 0000000000..16a0fc17e3 --- /dev/null +++ b/plugins/modules/scaleway_server_info.py @@ -0,0 +1,207 @@ +#!/usr/bin/python +# +# Copyright (c) 2018, Yanis Guenane +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + +DOCUMENTATION = r""" +module: scaleway_server_info +short_description: Gather information about the Scaleway servers available +description: + - Gather information about the Scaleway servers available. 
+author: + - "Yanis Guenane (@Spredzy)" + - "Remy Leone (@remyleone)" +extends_documentation_fragment: + - community.general.scaleway + - community.general.attributes + - community.general.scaleway.actiongroup_scaleway + - community.general.attributes.info_module + +attributes: + action_group: + version_added: 11.3.0 + +options: + region: + type: str + description: + - Scaleway region to use (for example V(par1)). + required: true + choices: + - ams1 + - EMEA-NL-EVS + - ams2 + - ams3 + - par1 + - EMEA-FR-PAR1 + - par2 + - EMEA-FR-PAR2 + - par3 + - waw1 + - EMEA-PL-WAW1 + - waw2 + - waw3 +""" + +EXAMPLES = r""" +- name: Gather Scaleway servers information + community.general.scaleway_server_info: + region: par1 + register: result + +- ansible.builtin.debug: + msg: "{{ result.scaleway_server_info }}" +""" + +RETURN = r""" +scaleway_server_info: + description: + - Response from Scaleway API. + - 'For more details please refer to: U(https://developers.scaleway.com/en/products/instance/api/).' 
+ returned: success + type: list + elements: dict + sample: + [ + { + "arch": "x86_64", + "boot_type": "local", + "bootscript": { + "architecture": "x86_64", + "bootcmdargs": "LINUX_COMMON scaleway boot=local nbd.max_part=16", + "default": true, + "dtb": "", + "id": "b1e68c26-a19c-4eac-9222-498b22bd7ad9", + "initrd": "http://169.254.42.24/initrd/initrd-Linux-x86_64-v3.14.5.gz", + "kernel": "http://169.254.42.24/kernel/x86_64-mainline-lts-4.4-4.4.127-rev1/vmlinuz-4.4.127", + "organization": "11111111-1111-4111-8111-111111111111", + "public": true, + "title": "x86_64 mainline 4.4.127 rev1" + }, + "commercial_type": "START1-XS", + "creation_date": "2018-08-14T21:36:56.271545+00:00", + "dynamic_ip_required": false, + "enable_ipv6": false, + "extra_networks": [], + "hostname": "scw-e0d256", + "id": "12f19bc7-108c-4517-954c-e6b3d0311363", + "image": { + "arch": "x86_64", + "creation_date": "2018-04-26T12:42:21.619844+00:00", + "default_bootscript": { + "architecture": "x86_64", + "bootcmdargs": "LINUX_COMMON scaleway boot=local nbd.max_part=16", + "default": true, + "dtb": "", + "id": "b1e68c26-a19c-4eac-9222-498b22bd7ad9", + "initrd": "http://169.254.42.24/initrd/initrd-Linux-x86_64-v3.14.5.gz", + "kernel": "http://169.254.42.24/kernel/x86_64-mainline-lts-4.4-4.4.127-rev1/vmlinuz-4.4.127", + "organization": "11111111-1111-4111-8111-111111111111", + "public": true, + "title": "x86_64 mainline 4.4.127 rev1" + }, + "extra_volumes": [], + "from_server": null, + "id": "67375eb1-f14d-4f02-bb42-6119cecbde51", + "modification_date": "2018-04-26T12:49:07.573004+00:00", + "name": "Ubuntu Xenial", + "organization": "51b656e3-4865-41e8-adbc-0c45bdd780db", + "public": true, + "root_volume": { + "id": "020b8d61-3867-4a0e-84a4-445c5393e05d", + "name": "snapshot-87fc282d-f252-4262-adad-86979d9074cf-2018-04-26_12:42", + "size": 25000000000, + "volume_type": "l_ssd" + }, + "state": "available" + }, + "ipv6": null, + "location": { + "cluster_id": "5", + "hypervisor_id": "412", + 
"node_id": "2", + "platform_id": "13", + "zone_id": "par1" + }, + "maintenances": [], + "modification_date": "2018-08-14T21:37:28.630882+00:00", + "name": "scw-e0d256", + "organization": "3f709602-5e6c-4619-b80c-e841c89734af", + "private_ip": "10.14.222.131", + "protected": false, + "public_ip": { + "address": "163.172.170.197", + "dynamic": false, + "id": "ea081794-a581-4495-8451-386ddaf0a451" + }, + "security_group": { + "id": "a37379d2-d8b0-4668-9cfb-1233fc436f7e", + "name": "Default security group" + }, + "state": "running", + "state_detail": "booted", + "tags": [], + "volumes": { + "0": { + "creation_date": "2018-08-14T21:36:56.271545+00:00", + "export_uri": "device://dev/vda", + "id": "68386fae-4f55-4fbf-aabb-953036a85872", + "modification_date": "2018-08-14T21:36:56.271545+00:00", + "name": "snapshot-87fc282d-f252-4262-adad-86979d9074cf-2018-04-26_12:42", + "organization": "3f709602-5e6c-4619-b80c-e841c89734af", + "server": { + "id": "12f19bc7-108c-4517-954c-e6b3d0311363", + "name": "scw-e0d256" + }, + "size": 25000000000, + "state": "available", + "volume_type": "l_ssd" + } + } + } + ] +""" + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.scaleway import ( + Scaleway, + ScalewayException, + scaleway_argument_spec, + SCALEWAY_LOCATION, +) + + +class ScalewayServerInfo(Scaleway): + + def __init__(self, module): + super(ScalewayServerInfo, self).__init__(module) + self.name = 'servers' + + region = module.params["region"] + self.module.params['api_url'] = SCALEWAY_LOCATION[region]["api_endpoint"] + + +def main(): + argument_spec = scaleway_argument_spec() + argument_spec.update(dict( + region=dict(required=True, choices=list(SCALEWAY_LOCATION.keys())), + )) + + module = AnsibleModule( + argument_spec=argument_spec, + supports_check_mode=True, + ) + + try: + module.exit_json( + scaleway_server_info=ScalewayServerInfo(module).get_resources() + ) + except ScalewayException as exc: + 
module.fail_json(msg=exc.message) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/cloud/scaleway/scaleway_snapshot_info.py b/plugins/modules/scaleway_snapshot_info.py similarity index 61% rename from plugins/modules/cloud/scaleway/scaleway_snapshot_info.py rename to plugins/modules/scaleway_snapshot_info.py index 8e1d2a615d..e59f9e3262 100644 --- a/plugins/modules/cloud/scaleway/scaleway_snapshot_info.py +++ b/plugins/modules/scaleway_snapshot_info.py @@ -1,23 +1,28 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- # -# (c) 2018, Yanis Guenane -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# Copyright (c) 2018, Yanis Guenane +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = r''' ---- +DOCUMENTATION = r""" module: scaleway_snapshot_info -short_description: Gather information about the Scaleway snapshots available. +short_description: Gather information about the Scaleway snapshots available description: - Gather information about the Scaleway snapshot available. 
author: - "Yanis Guenane (@Spredzy)" - "Remy Leone (@remyleone)" extends_documentation_fragment: -- community.general.scaleway + - community.general.scaleway + - community.general.attributes + - community.general.scaleway.actiongroup_scaleway + - community.general.attributes.info_module + +attributes: + action_group: + version_added: 11.3.0 options: region: @@ -28,15 +33,20 @@ options: choices: - ams1 - EMEA-NL-EVS + - ams2 + - ams3 - par1 - EMEA-FR-PAR1 - par2 - EMEA-FR-PAR2 + - par3 - waw1 - EMEA-PL-WAW1 -''' + - waw2 + - waw3 +""" -EXAMPLES = r''' +EXAMPLES = r""" - name: Gather Scaleway snapshots information community.general.scaleway_snapshot_info: region: par1 @@ -44,35 +54,34 @@ EXAMPLES = r''' - ansible.builtin.debug: msg: "{{ result.scaleway_snapshot_info }}" -''' +""" -RETURN = r''' ---- +RETURN = r""" scaleway_snapshot_info: description: - Response from Scaleway API. - - "For more details please refer to: U(https://developers.scaleway.com/en/products/instance/api/)." + - 'For more details please refer to: U(https://developers.scaleway.com/en/products/instance/api/).' 
returned: success type: list elements: dict sample: - "scaleway_snapshot_info": [ + [ { - "base_volume": { - "id": "68386fae-4f55-4fbf-aabb-953036a85872", - "name": "snapshot-87fc282d-f252-4262-adad-86979d9074cf-2018-04-26_12:42" - }, - "creation_date": "2018-08-14T22:34:35.299461+00:00", - "id": "b61b4b03-a2e9-4da5-b5ea-e462ac0662d2", - "modification_date": "2018-08-14T22:34:54.520560+00:00", - "name": "snapshot-87fc282d-f252-4262-adad-86979d9074cf-2018-04-26_12:42 snapshot", - "organization": "3f709602-5e6c-4619-b80c-e841c89734af", - "size": 25000000000, - "state": "available", - "volume_type": "l_ssd" + "base_volume": { + "id": "68386fae-4f55-4fbf-aabb-953036a85872", + "name": "snapshot-87fc282d-f252-4262-adad-86979d9074cf-2018-04-26_12:42" + }, + "creation_date": "2018-08-14T22:34:35.299461+00:00", + "id": "b61b4b03-a2e9-4da5-b5ea-e462ac0662d2", + "modification_date": "2018-08-14T22:34:54.520560+00:00", + "name": "snapshot-87fc282d-f252-4262-adad-86979d9074cf-2018-04-26_12:42 snapshot", + "organization": "3f709602-5e6c-4619-b80c-e841c89734af", + "size": 25000000000, + "state": "available", + "volume_type": "l_ssd" } ] -''' +""" from ansible.module_utils.basic import AnsibleModule from ansible_collections.community.general.plugins.module_utils.scaleway import ( diff --git a/plugins/modules/cloud/scaleway/scaleway_sshkey.py b/plugins/modules/scaleway_sshkey.py similarity index 81% rename from plugins/modules/cloud/scaleway/scaleway_sshkey.py rename to plugins/modules/scaleway_sshkey.py index 4c55909245..213e6a2010 100644 --- a/plugins/modules/cloud/scaleway/scaleway_sshkey.py +++ b/plugins/modules/scaleway_sshkey.py @@ -1,34 +1,40 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- # # Scaleway SSH keys management module # # Copyright (C) 2018 Online SAS. 
# https://www.scaleway.com # -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import absolute_import, division, print_function +from __future__ import annotations -__metaclass__ = type -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: scaleway_sshkey short_description: Scaleway SSH keys management module author: Remy Leone (@remyleone) description: - - This module manages SSH keys on Scaleway account - U(https://developer.scaleway.com) + - This module manages SSH keys on Scaleway account (U(https://developer.scaleway.com)). extends_documentation_fragment: -- community.general.scaleway + - community.general.scaleway + - community.general.attributes + - community.general.scaleway.actiongroup_scaleway +attributes: + check_mode: + support: full + diff_mode: + support: none + action_group: + version_added: 11.3.0 options: state: type: str description: - - Indicate desired state of the SSH key. + - Indicate desired state of the SSH key. default: present choices: - present @@ -36,17 +42,17 @@ options: ssh_pub_key: type: str description: - - The public SSH key as a string to add. + - The public SSH key as a string to add. required: true api_url: type: str description: - - Scaleway API URL + - Scaleway API URL. default: 'https://account.scaleway.com' aliases: ['base_url'] -''' +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: "Add SSH key" community.general.scaleway_sshkey: ssh_pub_key: "ssh-rsa AAAA..." @@ -62,19 +68,22 @@ EXAMPLES = ''' ssh_pub_key: "ssh-rsa AAAA..." 
state: "present" oauth_token: "6ecd2c9b-6f4f-44d4-a187-61a92078d08c" -''' +""" -RETURN = ''' +RETURN = r""" data: - description: This is only present when C(state=present) - returned: when C(state=present) - type: dict - sample: { - "ssh_public_keys": [ - {"key": "ssh-rsa AAAA...."} - ] + description: This is only present when O(state=present). + returned: when O(state=present) + type: dict + sample: + { + "ssh_public_keys": [ + { + "key": "ssh-rsa AAAA...." + } + ] } -''' +""" from ansible.module_utils.basic import AnsibleModule, env_fallback from ansible_collections.community.general.plugins.module_utils.scaleway import scaleway_argument_spec, Scaleway diff --git a/plugins/modules/cloud/scaleway/scaleway_user_data.py b/plugins/modules/scaleway_user_data.py similarity index 80% rename from plugins/modules/cloud/scaleway/scaleway_user_data.py rename to plugins/modules/scaleway_user_data.py index 2848ec2c4a..2dadf4439a 100644 --- a/plugins/modules/cloud/scaleway/scaleway_user_data.py +++ b/plugins/modules/scaleway_user_data.py @@ -1,71 +1,83 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- # # Scaleway user data management module # # Copyright (C) 2018 Online SAS. # https://www.scaleway.com # -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import absolute_import, division, print_function +from __future__ import annotations -__metaclass__ = type -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: scaleway_user_data short_description: Scaleway user_data management module author: Remy Leone (@remyleone) description: - - "This module manages user_data on compute instances on Scaleway." - - "It can be used to configure cloud-init for instance" + - This module manages user_data on compute instances on Scaleway. 
+ - It can be used to configure cloud-init for instance. extends_documentation_fragment: -- community.general.scaleway + - community.general.scaleway + - community.general.attributes + - community.general.scaleway.actiongroup_scaleway +attributes: + check_mode: + support: full + diff_mode: + support: none + action_group: + version_added: 11.3.0 options: server_id: type: str description: - - Scaleway Compute instance ID of the server + - Scaleway Compute instance ID of the server. required: true user_data: type: dict description: - - User defined data. Typically used with `cloud-init`. - - Pass your cloud-init script here as a string + - User defined data. Typically used with C(cloud-init). + - Pass your C(cloud-init) script here as a string. required: false region: type: str description: - - Scaleway compute zone + - Scaleway compute zone. required: true choices: - ams1 - EMEA-NL-EVS + - ams2 + - ams3 - par1 - EMEA-FR-PAR1 - par2 - EMEA-FR-PAR2 + - par3 - waw1 - EMEA-PL-WAW1 -''' + - waw2 + - waw3 +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: Update the cloud-init community.general.scaleway_user_data: server_id: '5a33b4ab-57dd-4eb6-8b0a-d95eb63492ce' region: ams1 user_data: cloud-init: 'final_message: "Hello World!"' -''' +""" -RETURN = ''' -''' +RETURN = r""" +""" from ansible.module_utils.basic import AnsibleModule from ansible_collections.community.general.plugins.module_utils.scaleway import SCALEWAY_LOCATION, scaleway_argument_spec, Scaleway @@ -122,10 +134,10 @@ def core(module): compute_api.module.fail_json(msg=msg) present_user_data_keys = user_data_list.json["user_data"] - present_user_data = dict( - (key, get_user_data(compute_api=compute_api, server_id=server_id, key=key)) + present_user_data = { + key: get_user_data(compute_api=compute_api, server_id=server_id, key=key) for key in present_user_data_keys - ) + } if present_user_data == user_data: module.exit_json(changed=changed, msg=user_data_list.json) @@ -142,7 +154,7 @@ def core(module): # Then we 
patch keys that are different for key, value in user_data.items(): - if key not in present_user_data or user_data[key] != present_user_data[key]: + if key not in present_user_data or value != present_user_data[key]: changed = True if compute_api.module.check_mode: diff --git a/plugins/modules/cloud/scaleway/scaleway_volume.py b/plugins/modules/scaleway_volume.py similarity index 77% rename from plugins/modules/cloud/scaleway/scaleway_volume.py rename to plugins/modules/scaleway_volume.py index e68309fc31..f30856538b 100644 --- a/plugins/modules/cloud/scaleway/scaleway_volume.py +++ b/plugins/modules/scaleway_volume.py @@ -1,33 +1,39 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- # # Scaleway volumes management module # # Copyright (C) 2018 Henryk Konsek Consulting (hekonsek@gmail.com). # -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import absolute_import, division, print_function +from __future__ import annotations -__metaclass__ = type -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: scaleway_volume short_description: Scaleway volumes management module author: Henryk Konsek (@hekonsek) description: - - This module manages volumes on Scaleway account - U(https://developer.scaleway.com) + - This module manages volumes on Scaleway account U(https://developer.scaleway.com). extends_documentation_fragment: -- community.general.scaleway + - community.general.scaleway + - community.general.attributes + - community.general.scaleway.actiongroup_scaleway +attributes: + check_mode: + support: full + diff_mode: + support: none + action_group: + version_added: 11.3.0 options: state: type: str description: - - Indicate desired state of the volume. + - Indicate desired state of the volume. 
default: present choices: - present @@ -35,42 +41,47 @@ options: region: type: str description: - - Scaleway region to use (for example par1). + - Scaleway region to use (for example par1). required: true choices: - ams1 - EMEA-NL-EVS + - ams2 + - ams3 - par1 - EMEA-FR-PAR1 - par2 - EMEA-FR-PAR2 + - par3 - waw1 - EMEA-PL-WAW1 + - waw2 + - waw3 name: type: str description: - - Name used to identify the volume. + - Name used to identify the volume. required: true project: type: str description: - - Scaleway project ID to which volume belongs. + - Scaleway project ID to which volume belongs. version_added: 4.3.0 organization: type: str description: - - ScaleWay organization ID to which volume belongs. + - ScaleWay organization ID to which volume belongs. size: type: int description: - - Size of the volume in bytes. + - Size of the volume in bytes. volume_type: type: str description: - - Type of the volume (for example 'l_ssd'). -''' + - Type of the volume (for example 'l_ssd'). +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: Create 10GB volume community.general.scaleway_volume: name: my-volume @@ -86,25 +97,26 @@ EXAMPLES = ''' name: my-volume state: absent region: par1 -''' +""" -RETURN = ''' +RETURN = r""" data: - description: This is only present when C(state=present) - returned: when C(state=present) - type: dict - sample: { + description: This is only present when O(state=present). 
+ returned: when O(state=present) + type: dict + sample: + { "volume": { "export_uri": null, "id": "c675f420-cfeb-48ff-ba2a-9d2a4dbe3fcd", "name": "volume-0-3", "project": "000a115d-2852-4b0a-9ce8-47f1134ba95a", - "server": null, - "size": 10000000000, - "volume_type": "l_ssd" - } -} -''' + "server": null, + "size": 10000000000, + "volume_type": "l_ssd" + } + } +""" from ansible_collections.community.general.plugins.module_utils.scaleway import SCALEWAY_LOCATION, scaleway_argument_spec, Scaleway from ansible.module_utils.basic import AnsibleModule diff --git a/plugins/modules/cloud/scaleway/scaleway_volume_info.py b/plugins/modules/scaleway_volume_info.py similarity index 62% rename from plugins/modules/cloud/scaleway/scaleway_volume_info.py rename to plugins/modules/scaleway_volume_info.py index e8dfa41419..b5b26360c3 100644 --- a/plugins/modules/cloud/scaleway/scaleway_volume_info.py +++ b/plugins/modules/scaleway_volume_info.py @@ -1,23 +1,28 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- # -# (c) 2018, Yanis Guenane -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# Copyright (c) 2018, Yanis Guenane +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = r''' ---- +DOCUMENTATION = r""" module: scaleway_volume_info -short_description: Gather information about the Scaleway volumes available. +short_description: Gather information about the Scaleway volumes available description: - Gather information about the Scaleway volumes available. 
author: - "Yanis Guenane (@Spredzy)" - "Remy Leone (@remyleone)" extends_documentation_fragment: -- community.general.scaleway + - community.general.scaleway + - community.general.attributes + - community.general.scaleway.actiongroup_scaleway + - community.general.attributes.info_module + +attributes: + action_group: + version_added: 11.3.0 options: region: @@ -28,15 +33,20 @@ options: choices: - ams1 - EMEA-NL-EVS + - ams2 + - ams3 - par1 - EMEA-FR-PAR1 - par2 - EMEA-FR-PAR2 + - par3 - waw1 - EMEA-PL-WAW1 -''' + - waw2 + - waw3 +""" -EXAMPLES = r''' +EXAMPLES = r""" - name: Gather Scaleway volumes information community.general.scaleway_volume_info: region: par1 @@ -44,33 +54,32 @@ EXAMPLES = r''' - ansible.builtin.debug: msg: "{{ result.scaleway_volume_info }}" -''' +""" -RETURN = r''' ---- +RETURN = r""" scaleway_volume_info: description: - Response from Scaleway API. - - "For more details please refer to: U(https://developers.scaleway.com/en/products/instance/api/)." + - 'For more details please refer to: U(https://developers.scaleway.com/en/products/instance/api/).' 
returned: success type: list elements: dict sample: - "scaleway_volume_info": [ - { - "creation_date": "2018-08-14T20:56:24.949660+00:00", - "export_uri": null, - "id": "b8d51a06-daeb-4fef-9539-a8aea016c1ba", - "modification_date": "2018-08-14T20:56:24.949660+00:00", - "name": "test-volume", - "organization": "3f709602-5e6c-4619-b80c-e841c89734af", - "server": null, - "size": 50000000000, - "state": "available", - "volume_type": "l_ssd" - } + [ + { + "creation_date": "2018-08-14T20:56:24.949660+00:00", + "export_uri": null, + "id": "b8d51a06-daeb-4fef-9539-a8aea016c1ba", + "modification_date": "2018-08-14T20:56:24.949660+00:00", + "name": "test-volume", + "organization": "3f709602-5e6c-4619-b80c-e841c89734af", + "server": null, + "size": 50000000000, + "state": "available", + "volume_type": "l_ssd" + } ] -''' +""" from ansible.module_utils.basic import AnsibleModule from ansible_collections.community.general.plugins.module_utils.scaleway import ( diff --git a/plugins/modules/sefcontext.py b/plugins/modules/sefcontext.py new file mode 100644 index 0000000000..f08370ff70 --- /dev/null +++ b/plugins/modules/sefcontext.py @@ -0,0 +1,382 @@ +#!/usr/bin/python + +# Copyright (c) 2016, Dag Wieers (@dagwieers) +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + +DOCUMENTATION = r""" +module: sefcontext +short_description: Manages SELinux file context mapping definitions +description: + - Manages SELinux file context mapping definitions. + - Similar to the C(semanage fcontext) command. +extends_documentation_fragment: + - community.general.attributes + - community.general.attributes.platform +attributes: + check_mode: + support: full + diff_mode: + support: full + platform: + platforms: linux +options: + target: + description: + - Target path (expression). 
+ type: str + required: true + aliases: [path] + ftype: + description: + - The file type that should have SELinux contexts applied. + - 'The following file type options are available:' + - V(a) for all files, + - V(b) for block devices, + - V(c) for character devices, + - V(d) for directories, + - V(f) for regular files, + - V(l) for symbolic links, + - V(p) for named pipes, + - V(s) for socket files. + type: str + choices: [a, b, c, d, f, l, p, s] + default: a + setype: + description: + - SELinux type for the specified O(target). + type: str + substitute: + description: + - Path to use to substitute file context(s) for the specified O(target). The context labeling for the O(target) subtree + is made equivalent to this path. + - This is also referred to as SELinux file context equivalence and it implements the C(equal) functionality of the SELinux + management tools. + version_added: 6.4.0 + type: str + aliases: [equal] + seuser: + description: + - SELinux user for the specified O(target). + - Defaults to V(system_u) for new file contexts and to existing value when modifying file contexts. + type: str + selevel: + description: + - SELinux range for the specified O(target). + - Defaults to V(s0) for new file contexts and to existing value when modifying file contexts. + type: str + aliases: [serange] + state: + description: + - Whether the SELinux file context must be V(absent) or V(present). + - Specifying V(absent) without either O(setype) or O(substitute) deletes both SELinux type or path substitution mappings + that match O(target). + type: str + choices: [absent, present] + default: present + reload: + description: + - Reload SELinux policy after commit. + - Note that this does not apply SELinux file contexts to existing files. + type: bool + default: true + ignore_selinux_state: + description: + - Useful for scenarios (chrooted environment) that you cannot get the real SELinux state. 
+ type: bool + default: false +notes: + - The changes are persistent across reboots. + - O(setype) and O(substitute) are mutually exclusive. + - If O(state=present) then one of O(setype) or O(substitute) is mandatory. + - The M(community.general.sefcontext) module does not modify existing files to the new SELinux context(s), so it is advisable + to first create the SELinux file contexts before creating files, or run C(restorecon) manually for the existing files + that require the new SELinux file contexts. + - Not applying SELinux fcontexts to existing files is a deliberate decision as it would be unclear what reported changes + would entail to, and there is no guarantee that applying SELinux fcontext does not pick up other unrelated prior changes. +requirements: + - libselinux-python + - policycoreutils-python +author: + - Dag Wieers (@dagwieers) +""" + +EXAMPLES = r""" +- name: Allow apache to modify files in /srv/git_repos + community.general.sefcontext: + target: '/srv/git_repos(/.*)?' 
+ setype: httpd_sys_rw_content_t + state: present + +- name: Substitute file contexts for path /srv/containers with /var/lib/containers + community.general.sefcontext: + target: /srv/containers + substitute: /var/lib/containers + state: present + +- name: Delete file context path substitution for /srv/containers + community.general.sefcontext: + target: /srv/containers + substitute: /var/lib/containers + state: absent + +- name: Delete any file context mappings for path /srv/git + community.general.sefcontext: + target: /srv/git + state: absent + +- name: Apply new SELinux file context to filesystem + ansible.builtin.command: restorecon -irv /srv/git_repos +""" + +RETURN = r""" +# Default return values +""" + +import traceback + +from ansible.module_utils.basic import AnsibleModule, missing_required_lib +from ansible.module_utils.common.text.converters import to_native + +SELINUX_IMP_ERR = None +try: + import selinux + HAVE_SELINUX = True +except ImportError: + SELINUX_IMP_ERR = traceback.format_exc() + HAVE_SELINUX = False + +SEOBJECT_IMP_ERR = None +try: + import seobject + HAVE_SEOBJECT = True +except ImportError: + SEOBJECT_IMP_ERR = traceback.format_exc() + HAVE_SEOBJECT = False + +# Add missing entries (backward compatible) +if HAVE_SEOBJECT: + seobject.file_types.update( + a=seobject.SEMANAGE_FCONTEXT_ALL, + b=seobject.SEMANAGE_FCONTEXT_BLOCK, + c=seobject.SEMANAGE_FCONTEXT_CHAR, + d=seobject.SEMANAGE_FCONTEXT_DIR, + f=seobject.SEMANAGE_FCONTEXT_REG, + l=seobject.SEMANAGE_FCONTEXT_LINK, + p=seobject.SEMANAGE_FCONTEXT_PIPE, + s=seobject.SEMANAGE_FCONTEXT_SOCK, + ) + +# Make backward compatible +option_to_file_type_str = dict( + a='all files', + b='block device', + c='character device', + d='directory', + f='regular file', + l='symbolic link', + p='named pipe', + s='socket', +) + + +def get_runtime_status(ignore_selinux_state=False): + return True if ignore_selinux_state is True else selinux.is_selinux_enabled() + + +def semanage_fcontext_exists(sefcontext, 
target, ftype): + ''' Get the SELinux file context mapping definition from policy. Return None if it does not exist. ''' + + # Beware that records comprise of a string representation of the file_type + record = (target, option_to_file_type_str[ftype]) + records = sefcontext.get_all() + try: + return records[record] + except KeyError: + return None + + +def semanage_fcontext_substitute_exists(sefcontext, target): + ''' Get the SELinux file context path substitution definition from policy. Return None if it does not exist. ''' + + return sefcontext.equiv_dist.get(target, sefcontext.equiv.get(target)) + + +def semanage_fcontext_modify(module, result, target, ftype, setype, substitute, do_reload, serange, seuser, sestore=''): + ''' Add or modify SELinux file context mapping definition to the policy. ''' + + changed = False + prepared_diff = '' + + try: + sefcontext = seobject.fcontextRecords(sestore) + sefcontext.set_reload(do_reload) + if substitute is None: + exists = semanage_fcontext_exists(sefcontext, target, ftype) + if exists: + # Modify existing entry + orig_seuser, orig_serole, orig_setype, orig_serange = exists + + if seuser is None: + seuser = orig_seuser + if serange is None: + serange = orig_serange + + if setype != orig_setype or seuser != orig_seuser or serange != orig_serange: + if not module.check_mode: + sefcontext.modify(target, setype, ftype, serange, seuser) + changed = True + + if module._diff: + prepared_diff += '# Change to semanage file context mappings\n' + prepared_diff += '-%s %s %s:%s:%s:%s\n' % (target, ftype, orig_seuser, orig_serole, orig_setype, orig_serange) + prepared_diff += '+%s %s %s:%s:%s:%s\n' % (target, ftype, seuser, orig_serole, setype, serange) + else: + # Add missing entry + if seuser is None: + seuser = 'system_u' + if serange is None: + serange = 's0' + + if not module.check_mode: + sefcontext.add(target, setype, ftype, serange, seuser) + changed = True + + if module._diff: + prepared_diff += '# Addition to semanage file 
context mappings\n' + prepared_diff += '+%s %s %s:%s:%s:%s\n' % (target, ftype, seuser, 'object_r', setype, serange) + else: + exists = semanage_fcontext_substitute_exists(sefcontext, target) + if exists: + # Modify existing path substitution entry + orig_substitute = exists + + if substitute != orig_substitute: + if not module.check_mode: + sefcontext.modify_equal(target, substitute) + changed = True + + if module._diff: + prepared_diff += '# Change to semanage file context path substitutions\n' + prepared_diff += '-%s = %s\n' % (target, orig_substitute) + prepared_diff += '+%s = %s\n' % (target, substitute) + else: + # Add missing path substitution entry + if not module.check_mode: + sefcontext.add_equal(target, substitute) + changed = True + if module._diff: + prepared_diff += '# Addition to semanage file context path substitutions\n' + prepared_diff += '+%s = %s\n' % (target, substitute) + + except Exception as e: + module.fail_json(msg="%s: %s\n" % (e.__class__.__name__, to_native(e))) + + if module._diff and prepared_diff: + result['diff'] = dict(prepared=prepared_diff) + + module.exit_json(changed=changed, seuser=seuser, serange=serange, **result) + + +def semanage_fcontext_delete(module, result, target, ftype, setype, substitute, do_reload, sestore=''): + ''' Delete SELinux file context mapping definition from the policy. 
''' + + changed = False + prepared_diff = '' + + try: + sefcontext = seobject.fcontextRecords(sestore) + sefcontext.set_reload(do_reload) + exists = semanage_fcontext_exists(sefcontext, target, ftype) + substitute_exists = semanage_fcontext_substitute_exists(sefcontext, target) + if exists and substitute is None: + # Remove existing entry + orig_seuser, orig_serole, orig_setype, orig_serange = exists + + if not module.check_mode: + sefcontext.delete(target, ftype) + changed = True + + if module._diff: + prepared_diff += '# Deletion to semanage file context mappings\n' + prepared_diff += '-%s %s %s:%s:%s:%s\n' % (target, ftype, exists[0], exists[1], exists[2], exists[3]) + if substitute_exists and setype is None and ((substitute is not None and substitute_exists == substitute) or substitute is None): + # Remove existing path substitution entry + orig_substitute = substitute_exists + + if not module.check_mode: + sefcontext.delete(target, orig_substitute) + changed = True + + if module._diff: + prepared_diff += '# Deletion to semanage file context path substitutions\n' + prepared_diff += '-%s = %s\n' % (target, orig_substitute) + + except Exception as e: + module.fail_json(msg="%s: %s\n" % (e.__class__.__name__, to_native(e))) + + if module._diff and prepared_diff: + result['diff'] = dict(prepared=prepared_diff) + + module.exit_json(changed=changed, **result) + + +def main(): + module = AnsibleModule( + argument_spec=dict( + ignore_selinux_state=dict(type='bool', default=False), + target=dict(type='str', required=True, aliases=['path']), + ftype=dict(type='str', default='a', choices=list(option_to_file_type_str.keys())), + setype=dict(type='str'), + substitute=dict(type='str', aliases=['equal']), + seuser=dict(type='str'), + selevel=dict(type='str', aliases=['serange']), + state=dict(type='str', default='present', choices=['absent', 'present']), + reload=dict(type='bool', default=True), + ), + mutually_exclusive=[ + ('setype', 'substitute'), + ('substitute', 
'ftype'), + ('substitute', 'seuser'), + ('substitute', 'selevel'), + ], + required_if=[ + ('state', 'present', ('setype', 'substitute'), True), + ], + + supports_check_mode=True, + ) + if not HAVE_SELINUX: + module.fail_json(msg=missing_required_lib("libselinux-python"), exception=SELINUX_IMP_ERR) + + if not HAVE_SEOBJECT: + module.fail_json(msg=missing_required_lib("policycoreutils-python"), exception=SEOBJECT_IMP_ERR) + + ignore_selinux_state = module.params['ignore_selinux_state'] + + if not get_runtime_status(ignore_selinux_state): + module.fail_json(msg="SELinux is disabled on this host.") + + target = module.params['target'] + ftype = module.params['ftype'] + setype = module.params['setype'] + substitute = module.params['substitute'] + seuser = module.params['seuser'] + serange = module.params['selevel'] + state = module.params['state'] + do_reload = module.params['reload'] + + result = dict(target=target, ftype=ftype, setype=setype, substitute=substitute, state=state) + + if state == 'present': + semanage_fcontext_modify(module, result, target, ftype, setype, substitute, do_reload, serange, seuser) + elif state == 'absent': + semanage_fcontext_delete(module, result, target, ftype, setype, substitute, do_reload) + else: + module.fail_json(msg='Invalid value of argument "state": {0}'.format(state)) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/system/selinux_permissive.py b/plugins/modules/selinux_permissive.py similarity index 72% rename from plugins/modules/system/selinux_permissive.py rename to plugins/modules/selinux_permissive.py index fd90475712..64d77e33cf 100644 --- a/plugins/modules/system/selinux_permissive.py +++ b/plugins/modules/selinux_permissive.py @@ -1,55 +1,61 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- -# Copyright: (c) 2015, Michael Scherer +# Copyright (c) 2015, Michael Scherer # inspired by code of github.com/dandiker/ -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) 
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = r''' ---- +DOCUMENTATION = r""" module: selinux_permissive short_description: Change permissive domain in SELinux policy description: - Add and remove a domain from the list of permissive domains. +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: none options: domain: description: - - The domain that will be added or removed from the list of permissive domains. + - The domain that is added or removed from the list of permissive domains. type: str required: true - aliases: [ name ] + aliases: [name] permissive: description: - - Indicate if the domain should or should not be set as permissive. + - Indicate if the domain should or should not be set as permissive. type: bool required: true no_reload: description: - - Disable reloading of the SELinux policy after making change to a domain's permissive setting. - - The default is C(no), which causes policy to be reloaded when a domain changes state. - - Reloading the policy does not work on older versions of the C(policycoreutils-python) library, for example in EL 6." + - Disable reloading of the SELinux policy after making change to a domain's permissive setting. + - The default is V(false), which causes policy to be reloaded when a domain changes state. + - Reloading the policy does not work on older versions of the C(policycoreutils-python) library, for example in EL 6.". type: bool - default: no + default: false store: description: - Name of the SELinux policy store to use. type: str + default: '' notes: - - Requires a recent version of SELinux and C(policycoreutils-python) (EL 6 or newer). 
-requirements: [ policycoreutils-python ] + - Requires a recent version of SELinux and C(policycoreutils-python) (EL 6 or newer). +requirements: [policycoreutils-python] author: -- Michael Scherer (@mscherer) -''' + - Michael Scherer (@mscherer) +""" -EXAMPLES = r''' +EXAMPLES = r""" - name: Change the httpd_t domain to permissive community.general.selinux_permissive: name: httpd_t permissive: true -''' +""" import traceback diff --git a/plugins/modules/system/selogin.py b/plugins/modules/selogin.py similarity index 82% rename from plugins/modules/system/selogin.py rename to plugins/modules/selogin.py index 46daf1a76a..d1e0faf085 100644 --- a/plugins/modules/system/selogin.py +++ b/plugins/modules/selogin.py @@ -1,74 +1,67 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- - -# (c) 2017, Petr Lautrbach +# Copyright (c) 2017, Petr Lautrbach # Based on seport.py module (c) 2014, Dan Keder +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -# This program is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program. If not, see . 
+from __future__ import annotations -from __future__ import absolute_import, division, print_function -__metaclass__ = type - -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: selogin short_description: Manages linux user to SELinux user mapping description: - - Manages linux user to SELinux user mapping + - Manages linux user to SELinux user mapping. +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: none options: login: type: str description: - - a Linux user + - A Linux user. required: true seuser: type: str description: - - SELinux user name + - SELinux user name. selevel: type: str - aliases: [ serange ] + aliases: [serange] description: - - MLS/MCS Security Range (MLS/MCS Systems only) SELinux Range for SELinux login mapping defaults to the SELinux user record range. + - MLS/MCS Security Range (MLS/MCS Systems only) SELinux Range for SELinux login mapping defaults to the SELinux user + record range. default: s0 state: type: str description: - Desired mapping value. default: present - choices: [ 'present', 'absent' ] + choices: ['present', 'absent'] reload: description: - Reload SELinux policy after commit. type: bool - default: yes + default: true ignore_selinux_state: description: - - Run independent of selinux runtime state + - Run independent of selinux runtime state. type: bool default: false notes: - - The changes are persistent across reboots - - Not tested on any debian based system -requirements: [ 'libselinux', 'policycoreutils' ] + - The changes are persistent across reboots. + - Not tested on any debian based system. 
+requirements: ['libselinux', 'policycoreutils'] author: -- Dan Keder (@dankeder) -- Petr Lautrbach (@bachradsusi) -- James Cassell (@jamescassell) -''' + - Dan Keder (@dankeder) + - Petr Lautrbach (@bachradsusi) + - James Cassell (@jamescassell) +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: Modify the default user on the system to the guest_u user community.general.selogin: login: __default__ @@ -87,11 +80,11 @@ EXAMPLES = ''' login: '%engineering' seuser: staff_u state: present -''' +""" -RETURN = r''' +RETURN = r""" # Default return values -''' +""" import traceback diff --git a/plugins/modules/notification/sendgrid.py b/plugins/modules/sendgrid.py similarity index 76% rename from plugins/modules/notification/sendgrid.py rename to plugins/modules/sendgrid.py index 2c349064b8..26021c35c9 100644 --- a/plugins/modules/notification/sendgrid.py +++ b/plugins/modules/sendgrid.py @@ -1,42 +1,44 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- -# Copyright: (c) 2015, Matt Makai -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# Copyright (c) 2015, Matt Makai +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = r''' ---- +DOCUMENTATION = r""" module: sendgrid short_description: Sends an email with the SendGrid API description: - - "Sends an email with a SendGrid account through their API, not through - the SMTP service." + - Sends an email with a SendGrid account through their API, not through the SMTP service. notes: - - "This module is non-idempotent because it sends an email through the - external API. It is idempotent only in the case that the module fails." - - "Like the other notification modules, this one requires an external - dependency to work. 
In this case, you'll need an active SendGrid - account." - - "In order to use api_key, cc, bcc, attachments, from_name, html_body, headers - you must pip install sendgrid" - - "since 2.2 I(username) and I(password) are not required if you supply an I(api_key)" + - This module is non-idempotent because it sends an email through the external API. It is idempotent only in the case that + the module fails. + - Like the other notification modules, this one requires an external dependency to work. In this case, you need an active + SendGrid account. + - In order to use O(api_key), O(cc), O(bcc), O(attachments), O(from_name), O(html_body), and O(headers) you must C(pip install + sendgrid). requirements: - sendgrid Python library 1.6.22 or lower (Sendgrid API V2 supported) +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: none options: username: type: str description: - Username for logging into the SendGrid account. - - Since 2.2 it is only required if I(api_key) is not supplied. + - It is only required if O(api_key) is not supplied. password: type: str description: - Password that corresponds to the username. - - Since 2.2 it is only required if I(api_key) is not supplied. + - It is only required if O(api_key) is not supplied. from_address: type: str description: @@ -75,12 +77,12 @@ options: from_name: type: str description: - - The name you want to appear in the from field, i.e 'John Doe'. + - The name you want to appear in the from field, for example V(John Doe). html_body: description: - - Whether the body is html content that should be rendered. + - Whether the body is HTML content that should be rendered. type: bool - default: 'no' + default: false headers: type: dict description: @@ -89,11 +91,11 @@ options: type: str description: - The e-mail body content. 
- required: yes + required: true author: "Matt Makai (@makaimc)" -''' +""" -EXAMPLES = r''' +EXAMPLES = r""" - name: Send an email to a single recipient that the deployment was successful community.general.sendgrid: username: "{{ sendgrid_username }}" @@ -107,22 +109,23 @@ EXAMPLES = r''' - name: Send an email to more than one recipient that the build failed community.general.sendgrid: - username: "{{ sendgrid_username }}" - password: "{{ sendgrid_password }}" - from_address: "build@mycompany.com" - to_addresses: - - "ops@mycompany.com" - - "devteam@mycompany.com" - subject: "Build failure!." - body: "Unable to pull source repository from Git server." + username: "{{ sendgrid_username }}" + password: "{{ sendgrid_password }}" + from_address: "build@mycompany.com" + to_addresses: + - "ops@mycompany.com" + - "devteam@mycompany.com" + subject: "Build failure!." + body: "Unable to pull source repository from Git server." delegate_to: localhost -''' +""" # ======================================= # sendgrid module support methods # import os import traceback +from urllib.parse import urlencode from ansible_collections.community.general.plugins.module_utils.version import LooseVersion @@ -135,7 +138,6 @@ except ImportError: HAS_SENDGRID = False from ansible.module_utils.basic import AnsibleModule, missing_required_lib -from ansible.module_utils.six.moves.urllib.parse import urlencode from ansible.module_utils.common.text.converters import to_bytes from ansible.module_utils.urls import fetch_url @@ -210,19 +212,19 @@ def post_sendgrid_api(module, username, password, from_address, to_addresses, def main(): module = AnsibleModule( argument_spec=dict( - username=dict(required=False), - password=dict(required=False, no_log=True), - api_key=dict(required=False, no_log=True), - bcc=dict(required=False, type='list', elements='str'), - cc=dict(required=False, type='list', elements='str'), - headers=dict(required=False, type='dict'), + username=dict(), + password=dict(no_log=True), 
+ api_key=dict(no_log=True), + bcc=dict(type='list', elements='str'), + cc=dict(type='list', elements='str'), + headers=dict(type='dict'), from_address=dict(required=True), - from_name=dict(required=False), + from_name=dict(), to_addresses=dict(required=True, type='list', elements='str'), subject=dict(required=True), body=dict(required=True), - html_body=dict(required=False, default=False, type='bool'), - attachments=dict(required=False, type='list', elements='path') + html_body=dict(default=False, type='bool'), + attachments=dict(type='list', elements='path') ), supports_check_mode=True, mutually_exclusive=[ diff --git a/plugins/modules/monitoring/sensu/sensu_check.py b/plugins/modules/sensu_check.py similarity index 79% rename from plugins/modules/monitoring/sensu/sensu_check.py rename to plugins/modules/sensu_check.py index ec43b60abe..10763992b0 100644 --- a/plugins/modules/monitoring/sensu/sensu_check.py +++ b/plugins/modules/sensu_check.py @@ -1,158 +1,163 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- -# (c) 2014, Anders Ingemann -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# Copyright (c) 2014, Anders Ingemann +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: sensu_check short_description: Manage Sensu checks description: - Manage the checks that should be run on a machine by I(Sensu). - - Most options do not have a default and will not be added to the check definition unless specified. - - All defaults except I(path), I(state), I(backup) and I(metric) are not managed by this module, - - they are simply specified for your convenience. 
+ - Most options do not have a default and are not added to the check definition unless specified. + - All defaults except O(path), O(state), O(backup) and O(metric) are not managed by this module, they are simply specified + for your convenience. +deprecated: + removed_in: 13.0.0 + why: Sensu Core and Sensu Enterprise products have been End of Life since 2019/20. + alternative: Use Sensu Go and its accompanying collection C(sensu.sensu_go). +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: none options: name: type: str description: - - The name of the check - - This is the key that is used to determine whether a check exists + - The name of the check. + - This is the key that is used to determine whether a check exists. required: true state: type: str description: - - Whether the check should be present or not - choices: [ 'present', 'absent' ] + - Whether the check should be present or not. + choices: ['present', 'absent'] default: present path: type: str description: - - Path to the json file of the check to be added/removed. - - Will be created if it does not exist (unless I(state=absent)). - - The parent folders need to exist when I(state=present), otherwise an error will be thrown + - Path to the JSON file of the check to be added/removed. + - It is created if it does not exist (unless O(state=absent)). + - The parent folders need to exist when O(state=present), otherwise an error is thrown. default: /etc/sensu/conf.d/checks.json backup: description: - - Create a backup file (if yes), including the timestamp information so - - you can get the original file back if you somehow clobbered it incorrectly. + - Create a backup file (if yes), including the timestamp information so you can get the original file back if you somehow + clobbered it incorrectly. 
type: bool - default: 'no' + default: false command: type: str description: - - Path to the sensu check to run (not required when I(state=absent)) + - Path to the sensu check to run (not required when O(state=absent)). handlers: type: list elements: str description: - - List of handlers to notify when the check fails - default: [] + - List of handlers to notify when the check fails. subscribers: type: list elements: str description: - - List of subscribers/channels this check should run for - - See sensu_subscribers to subscribe a machine to a channel - default: [] + - List of subscribers/channels this check should run for. + - See sensu_subscribers to subscribe a machine to a channel. interval: type: int description: - - Check interval in seconds + - Check interval in seconds. timeout: type: int description: - - Timeout for the check + - Timeout for the check. - If not specified, it defaults to 10. ttl: type: int description: - - Time to live in seconds until the check is considered stale + - Time to live in seconds until the check is considered stale. handle: description: - - Whether the check should be handled or not - - Default is C(false). + - Whether the check should be handled or not. + - Default is V(false). type: bool subdue_begin: type: str description: - - When to disable handling of check failures + - When to disable handling of check failures. subdue_end: type: str description: - - When to enable handling of check failures + - When to enable handling of check failures. dependencies: type: list elements: str description: - - Other checks this check depends on, if dependencies fail handling of this check will be disabled - default: [] + - Other checks this one depends on. + - If dependencies fail handling of this check is disabled. metric: description: - - Whether the check is a metric + - Whether the check is a metric. 
type: bool - default: 'no' + default: false standalone: description: - - Whether the check should be scheduled by the sensu client or server - - This option obviates the need for specifying the I(subscribers) option - - Default is C(false). + - Whether the check should be scheduled by the sensu client or server. + - This option obviates the need for specifying the O(subscribers) option. + - Default is V(false). type: bool publish: description: - Whether the check should be scheduled at all. - - You can still issue it via the sensu api - - Default is C(false). + - You can still issue it using the sensu API. + - Default is V(false). type: bool occurrences: type: int description: - - Number of event occurrences before the handler should take action + - Number of event occurrences before the handler should take action. - If not specified, defaults to 1. refresh: type: int description: - - Number of seconds handlers should wait before taking second action + - Number of seconds handlers should wait before taking second action. aggregate: description: - - Classifies the check as an aggregate check, - - making it available via the aggregate API - - Default is C(false). + - Classifies the check as an aggregate check, making it available using the aggregate API. + - Default is V(false). type: bool low_flap_threshold: type: int description: - - The low threshold for flap detection + - The low threshold for flap detection. high_flap_threshold: type: int description: - - The high threshold for flap detection + - The high threshold for flap detection. custom: type: dict description: - A hash/dictionary of custom parameters for mixing to the configuration. - - You can't rewrite others module parameters using this - default: {} + - You cannot rewrite other module parameters using this. source: type: str description: - - The check source, used to create a JIT Sensu client for an external resource (e.g. a network switch). 
+ - The check source, used to create a JIT Sensu client for an external resource (for example a network switch). author: "Anders Ingemann (@andsens)" -''' +""" -EXAMPLES = ''' +EXAMPLES = r""" # Fetch metrics about the CPU load every 60 seconds, # the sensu server has a handler called 'relay' which forwards stats to graphite - name: Get cpu metrics community.general.sensu_check: name: cpu_load command: /etc/sensu/plugins/system/cpu-mpstat-metrics.rb - metric: yes + metric: true handlers: relay subscribers: common interval: 60 @@ -173,7 +178,7 @@ EXAMPLES = ''' community.general.sensu_check: name: check_disk_capacity state: absent -''' +""" import json import traceback @@ -327,7 +332,7 @@ def main(): arg_spec = {'name': {'type': 'str', 'required': True}, 'path': {'type': 'str', 'default': '/etc/sensu/conf.d/checks.json'}, 'state': {'type': 'str', 'default': 'present', 'choices': ['present', 'absent']}, - 'backup': {'type': 'bool', 'default': 'no'}, + 'backup': {'type': 'bool', 'default': False}, 'command': {'type': 'str'}, 'handlers': {'type': 'list', 'elements': 'str'}, 'subscribers': {'type': 'list', 'elements': 'str'}, @@ -338,7 +343,7 @@ def main(): 'subdue_begin': {'type': 'str'}, 'subdue_end': {'type': 'str'}, 'dependencies': {'type': 'list', 'elements': 'str'}, - 'metric': {'type': 'bool', 'default': 'no'}, + 'metric': {'type': 'bool', 'default': False}, 'standalone': {'type': 'bool'}, 'publish': {'type': 'bool'}, 'occurrences': {'type': 'int'}, diff --git a/plugins/modules/monitoring/sensu/sensu_client.py b/plugins/modules/sensu_client.py similarity index 83% rename from plugins/modules/monitoring/sensu/sensu_client.py rename to plugins/modules/sensu_client.py index 886c398e09..a41b5db9fe 100644 --- a/plugins/modules/monitoring/sensu/sensu_client.py +++ b/plugins/modules/sensu_client.py @@ -1,26 +1,35 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- -# (c) 2017, Red Hat Inc. 
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# Copyright (c) 2017, Red Hat Inc. +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: sensu_client author: "David Moreau Simard (@dmsimard)" short_description: Manages Sensu client configuration description: - Manages Sensu client configuration. - - 'For more information, refer to the Sensu documentation: U(https://sensuapp.org/docs/latest/reference/clients.html)' + - For more information, refer to the L(Sensu documentation, https://sensuapp.org/docs/latest/reference/clients.html). +deprecated: + removed_in: 13.0.0 + why: Sensu Core and Sensu Enterprise products have been End of Life since 2019/20. + alternative: Use Sensu Go and its accompanying collection C(sensu.sensu_go). +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: none options: state: type: str description: - - Whether the client should be present or not - choices: [ 'present', 'absent' ] + - Whether the client should be present or not. + choices: ['present', 'absent'] default: present name: type: str @@ -31,19 +40,21 @@ options: type: str description: - An address to help identify and reach the client. This is only informational, usually an IP address or hostname. - - If not specified it defaults to non-loopback IPv4 address as determined by Ruby Socket.ip_address_list (provided by Sensu). + - If not specified it defaults to non-loopback IPv4 address as determined by Ruby C(Socket.ip_address_list) (provided + by Sensu). 
subscriptions: type: list elements: str description: - - An array of client subscriptions, a list of roles and/or responsibilities assigned to the system (e.g. webserver). + - An array of client subscriptions, a list of roles and/or responsibilities assigned to the system (for example V(webserver)). - These subscriptions determine which monitoring checks are executed by the client, as check requests are sent to subscriptions. - The subscriptions array items must be strings. safe_mode: description: - - If safe mode is enabled for the client. Safe mode requires local check definitions in order to accept a check request and execute the check. + - If safe mode is enabled for the client. Safe mode requires local check definitions in order to accept a check request + and execute the check. type: bool - default: 'no' + default: false redact: type: list elements: str @@ -57,11 +68,12 @@ options: description: - If Sensu should monitor keepalives for this client. type: bool - default: 'yes' + default: true keepalive: type: dict description: - - The keepalive definition scope, used to configure Sensu client keepalives behavior (e.g. keepalive thresholds, etc). + - The keepalive definition scope, used to configure Sensu client keepalives behavior (for example keepalive thresholds + and so). registration: type: dict description: @@ -69,7 +81,7 @@ options: deregister: description: - If a deregistration event should be created upon Sensu client process stop. - - Default is C(false). + - Default is V(false). type: bool deregistration: type: dict @@ -90,12 +102,11 @@ options: servicenow: type: dict description: - - The servicenow definition scope, used to configure the Sensu Enterprise ServiceNow integration (Sensu Enterprise users only). -notes: - - Check mode is supported -''' + - The servicenow definition scope, used to configure the Sensu Enterprise ServiceNow integration (Sensu Enterprise users + only). 
+""" -EXAMPLES = ''' +EXAMPLES = r""" # Minimum possible configuration - name: Configure Sensu client community.general.sensu_client: @@ -138,20 +149,26 @@ EXAMPLES = ''' - name: Delete the Sensu client configuration community.general.sensu_client: state: "absent" -''' +""" -RETURN = ''' +RETURN = r""" config: - description: Effective client configuration, when state is present + description: Effective client configuration, when state is present. returned: success type: dict - sample: {'name': 'client', 'subscriptions': ['default']} + sample: + { + "name": "client", + "subscriptions": [ + "default" + ] + } file: - description: Path to the client configuration file + description: Path to the client configuration file. returned: success type: str sample: "/etc/sensu/conf.d/client.json" -''' +""" import json import os @@ -203,7 +220,7 @@ def main(): module.fail_json( msg=msg.format(path=path, exception=str(e))) else: - # Idempotency: it's okay if the file doesn't exist + # Idempotency: it is okay if the file doesn't exist msg = '{path} already does not exist'.format(path=path) module.exit_json(msg=msg) @@ -222,7 +239,7 @@ def main(): try: current_config = json.load(open(path, 'r')) except (IOError, ValueError): - # File either doesn't exist or it's invalid JSON + # File either doesn't exist or it is invalid JSON pass if current_config is not None and current_config == config: diff --git a/plugins/modules/monitoring/sensu/sensu_handler.py b/plugins/modules/sensu_handler.py similarity index 78% rename from plugins/modules/monitoring/sensu/sensu_handler.py rename to plugins/modules/sensu_handler.py index 6511479899..26ce01d313 100644 --- a/plugins/modules/monitoring/sensu/sensu_handler.py +++ b/plugins/modules/sensu_handler.py @@ -1,37 +1,46 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- -# (c) 2017, Red Hat Inc. -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# Copyright (c) 2017, Red Hat Inc. 
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: sensu_handler author: "David Moreau Simard (@dmsimard)" short_description: Manages Sensu handler configuration description: - - Manages Sensu handler configuration - - 'For more information, refer to the Sensu documentation: U(https://sensuapp.org/docs/latest/reference/handlers.html)' + - Manages Sensu handler configuration. + - For more information, refer to the L(Sensu documentation, https://sensuapp.org/docs/latest/reference/handlers.html). +deprecated: + removed_in: 13.0.0 + why: Sensu Core and Sensu Enterprise products have been End of Life since 2019/20. + alternative: Use Sensu Go and its accompanying collection C(sensu.sensu_go). +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: none options: state: type: str description: - - Whether the handler should be present or not - choices: [ 'present', 'absent' ] + - Whether the handler should be present or not. + choices: ['present', 'absent'] default: present name: type: str description: - A unique name for the handler. The name cannot contain special characters or spaces. - required: True + required: true type: type: str description: - - The handler type - choices: [ 'pipe', 'tcp', 'udp', 'transport', 'set' ] + - The handler type. + choices: ['pipe', 'tcp', 'udp', 'transport', 'set'] filter: type: str description: @@ -46,7 +55,7 @@ options: type: list elements: str description: - - An array of check result severities the handler will handle. + - An array of check result severities the handler handles. - 'NOTE: event resolution bypasses this filtering.' 
- "Example: [ 'warning', 'critical', 'unknown' ]." mutator: @@ -63,39 +72,38 @@ options: description: - If events matching one or more silence entries should be handled. type: bool - default: 'no' + default: false handle_flapping: description: - If events in the flapping state should be handled. type: bool - default: 'no' + default: false command: type: str description: - The handler command to be executed. - - The event data is passed to the process via STDIN. - - 'NOTE: the command attribute is only required for Pipe handlers (i.e. handlers configured with "type": "pipe").' + - The event data is passed to the process using STDIN. + - 'NOTE: the O(command) attribute is only required for Pipe handlers (that is, handlers configured with O(type=pipe)).' socket: type: dict description: - The socket definition scope, used to configure the TCP/UDP handler socket. - - 'NOTE: the socket attribute is only required for TCP/UDP handlers (i.e. handlers configured with "type": "tcp" or "type": "udp").' + - 'NOTE: the O(socket) attribute is only required for TCP/UDP handlers (that is, handlers configured with O(type=tcp) + or O(type=udp)).' pipe: type: dict description: - The pipe definition scope, used to configure the Sensu transport pipe. - - 'NOTE: the pipe attribute is only required for Transport handlers (i.e. handlers configured with "type": "transport").' + - 'NOTE: the O(pipe) attribute is only required for Transport handlers (that is, handlers configured with O(type=transport)).' handlers: type: list elements: str description: - An array of Sensu event handlers (names) to use for events using the handler set. - - 'NOTE: the handlers attribute is only required for handler sets (i.e. handlers configured with "type": "set").' -notes: - - Check mode is supported -''' + - 'NOTE: the O(handlers) attribute is only required for handler sets (that is, handlers configured with O(type=set)).' 
+""" -EXAMPLES = ''' +EXAMPLES = r""" # Configure a handler that sends event data as STDIN (pipe) - name: Configure IRC Sensu handler community.general.sensu_handler: @@ -138,25 +146,30 @@ EXAMPLES = ''' owner: "sensu" group: "sensu" mode: "0600" -''' +""" -RETURN = ''' +RETURN = r""" config: - description: Effective handler configuration, when state is present + description: Effective handler configuration, when state is present. returned: success type: dict - sample: {'name': 'irc', 'type': 'pipe', 'command': '/usr/local/bin/notify-irc.sh'} + sample: + { + "name": "irc", + "type": "pipe", + "command": "/usr/local/bin/notify-irc.sh" + } file: - description: Path to the handler configuration file + description: Path to the handler configuration file. returned: success type: str sample: "/etc/sensu/conf.d/handlers/irc.json" name: - description: Name of the handler + description: Name of the handler. returned: success type: str sample: "irc" -''' +""" import json import os @@ -212,7 +225,7 @@ def main(): module.fail_json( msg=msg.format(path=path, exception=str(e))) else: - # Idempotency: it's okay if the file doesn't exist + # Idempotency: it is okay if the file doesn't exist msg = '{path} already does not exist'.format(path=path) module.exit_json(msg=msg) @@ -231,7 +244,7 @@ def main(): try: current_config = json.load(open(path, 'r')) except (IOError, ValueError): - # File either doesn't exist or it's invalid JSON + # File either doesn't exist or it is invalid JSON pass if current_config is not None and current_config == config: diff --git a/plugins/modules/monitoring/sensu/sensu_silence.py b/plugins/modules/sensu_silence.py similarity index 82% rename from plugins/modules/monitoring/sensu/sensu_silence.py rename to plugins/modules/sensu_silence.py index 80a5216711..f3270ab506 100644 --- a/plugins/modules/monitoring/sensu/sensu_silence.py +++ b/plugins/modules/sensu_silence.py @@ -1,21 +1,29 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- -# (c) 2017, Steven Bambling 
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# Copyright (c) 2017, Steven Bambling +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: sensu_silence author: Steven Bambling (@smbambling) short_description: Manage Sensu silence entries description: - - Create and clear (delete) a silence entries via the Sensu API - for subscriptions and checks. + - Create and clear (delete) a silence entries using the Sensu API for subscriptions and checks. +deprecated: + removed_in: 13.0.0 + why: Sensu Core and Sensu Enterprise products have been End of Life since 2019/20. + alternative: Use Sensu Go and its accompanying collection C(sensu.sensu_go). +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: none options: check: type: str @@ -28,30 +36,27 @@ options: expire: type: int description: - - If specified, the silence entry will be automatically cleared - after this number of seconds. + - If specified, the silence entry is automatically cleared after this number of seconds. expire_on_resolve: description: - - If specified as true, the silence entry will be automatically - cleared once the condition it is silencing is resolved. + - If specified as true, the silence entry is automatically cleared once the condition it is silencing is resolved. type: bool reason: type: str description: - - If specified, this free-form string is used to provide context or - rationale for the reason this silence entry was created. + - If specified, this free-form string is used to provide context or rationale for the reason this silence entry was + created. 
state: type: str description: - - Specifies to create or clear (delete) a silence entry via the Sensu API + - Specifies to create or clear (delete) a silence entry using the Sensu API. default: present choices: ['present', 'absent'] subscription: type: str description: - Specifies the subscription which the silence entry applies to. - - To create a silence entry for a client prepend C(client:) to client name. - Example - C(client:server1.example.dev) + - To create a silence entry for a client prepend C(client:) to client name. Example - C(client:server1.example.dev). required: true url: type: str @@ -59,9 +64,9 @@ options: - Specifies the URL of the Sensu monitoring host server. required: false default: http://127.0.01:4567 -''' +""" -EXAMPLES = ''' +EXAMPLES = r""" # Silence ALL checks for a given client - name: Silence server1.example.dev community.general.sensu_silence: @@ -90,10 +95,10 @@ EXAMPLES = ''' reason: "{{ item.value.reason }}" creator: "{{ ansible_user_id }}" with_dict: "{{ silence }}" -''' +""" -RETURN = ''' -''' +RETURN = r""" +""" import json @@ -141,7 +146,7 @@ def clear(module, url, check, subscription): # Test if silence exists before clearing (rc, out, changed) = query(module, url, check, subscription) - d = dict((i['subscription'], i['check']) for i in out) + d = {i['subscription']: i['check'] for i in out} subscription_exists = subscription in d if check and subscription_exists: exists = (check == d[subscription]) @@ -195,7 +200,7 @@ def create( expire_on_resolve, reason, subscription): (rc, out, changed) = query(module, url, check, subscription) for i in out: - if (i['subscription'] == subscription): + if i['subscription'] == subscription: if ( (check is None or check == i['check']) and ( @@ -258,14 +263,14 @@ def create( def main(): module = AnsibleModule( argument_spec=dict( - check=dict(required=False), - creator=dict(required=False), - expire=dict(type='int', required=False), - expire_on_resolve=dict(type='bool', required=False), - 
reason=dict(required=False), + check=dict(), + creator=dict(), + expire=dict(type='int'), + expire_on_resolve=dict(type='bool'), + reason=dict(), state=dict(default='present', choices=['present', 'absent']), subscription=dict(required=True), - url=dict(required=False, default='http://127.0.01:4567'), + url=dict(default='http://127.0.01:4567'), ), supports_check_mode=True ) diff --git a/plugins/modules/monitoring/sensu/sensu_subscription.py b/plugins/modules/sensu_subscription.py similarity index 66% rename from plugins/modules/monitoring/sensu/sensu_subscription.py rename to plugins/modules/sensu_subscription.py index 947c6e0de5..7cd7668e98 100644 --- a/plugins/modules/monitoring/sensu/sensu_subscription.py +++ b/plugins/modules/sensu_subscription.py @@ -1,58 +1,67 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- -# (c) 2014, Anders Ingemann -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# Copyright (c) 2014, Anders Ingemann +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: sensu_subscription short_description: Manage Sensu subscriptions description: - - Manage which I(sensu channels) a machine should subscribe to + - Manage which I(sensu channels) a machine should subscribe to. +deprecated: + removed_in: 13.0.0 + why: Sensu Core and Sensu Enterprise products have been End of Life since 2019/20. + alternative: Use Sensu Go and its accompanying collection C(sensu.sensu_go). +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: none options: name: type: str description: - - The name of the channel + - The name of the channel. 
required: true state: type: str description: - - Whether the machine should subscribe or unsubscribe from the channel - choices: [ 'present', 'absent' ] + - Whether the machine should subscribe or unsubscribe from the channel. + choices: ['present', 'absent'] required: false default: present path: type: str description: - - Path to the subscriptions json file + - Path to the subscriptions JSON file. required: false default: /etc/sensu/conf.d/subscriptions.json backup: description: - - Create a backup file (if yes), including the timestamp information so you - - can get the original file back if you somehow clobbered it incorrectly. + - Create a backup file (if yes), including the timestamp information so you can get the original file back if you somehow + clobbered it incorrectly. type: bool required: false - default: no -requirements: [ ] + default: false +requirements: [] author: Anders Ingemann (@andsens) -''' +""" -RETURN = ''' +RETURN = r""" reasons: - description: the reasons why the module changed or did not change something - returned: success - type: list - sample: ["channel subscription was absent and state is `present'"] -''' + description: The reasons why the module changed or did not change something. 
+ returned: success + type: list + sample: ["channel subscription was absent and state is 'present'"] +""" -EXAMPLES = ''' +EXAMPLES = r""" # Subscribe to the nginx channel - name: Subscribe to nginx checks community.general.sensu_subscription: name=nginx @@ -60,7 +69,7 @@ EXAMPLES = ''' # Unsubscribe from the common checks channel - name: Unsubscribe from common checks community.general.sensu_subscription: name=common state=absent -''' +""" import json import traceback @@ -78,7 +87,7 @@ def sensu_subscription(module, path, name, state='present', backup=False): except IOError as e: if e.errno == 2: # File not found, non-fatal if state == 'absent': - reasons.append('file did not exist and state is `absent\'') + reasons.append("file did not exist and state is 'absent'") return changed, reasons config = {} else: @@ -89,32 +98,32 @@ def sensu_subscription(module, path, name, state='present', backup=False): if 'client' not in config: if state == 'absent': - reasons.append('`client\' did not exist and state is `absent\'') + reasons.append("'client' did not exist and state is 'absent'") return changed, reasons config['client'] = {} changed = True - reasons.append('`client\' did not exist') + reasons.append("'client' did not exist") if 'subscriptions' not in config['client']: if state == 'absent': - reasons.append('`client.subscriptions\' did not exist and state is `absent\'') + reasons.append("'client.subscriptions' did not exist and state is 'absent'") return changed, reasons config['client']['subscriptions'] = [] changed = True - reasons.append('`client.subscriptions\' did not exist') + reasons.append("'client.subscriptions' did not exist") if name not in config['client']['subscriptions']: if state == 'absent': - reasons.append('channel subscription was absent') + reasons.append("channel subscription was absent") return changed, reasons config['client']['subscriptions'].append(name) changed = True - reasons.append('channel subscription was absent and state is 
`present\'') + reasons.append("channel subscription was absent and state is 'present'") else: if state == 'absent': config['client']['subscriptions'].remove(name) changed = True - reasons.append('channel subscription was present and state is `absent\'') + reasons.append("channel subscription was present and state is 'absent'") if changed and not module.check_mode: if backup: @@ -132,7 +141,7 @@ def main(): arg_spec = {'name': {'type': 'str', 'required': True}, 'path': {'type': 'str', 'default': '/etc/sensu/conf.d/subscriptions.json'}, 'state': {'type': 'str', 'default': 'present', 'choices': ['present', 'absent']}, - 'backup': {'type': 'bool', 'default': 'no'}, + 'backup': {'type': 'bool', 'default': False}, } module = AnsibleModule(argument_spec=arg_spec, diff --git a/plugins/modules/system/seport.py b/plugins/modules/seport.py similarity index 83% rename from plugins/modules/system/seport.py rename to plugins/modules/seport.py index 4a734c2353..7e3a2690d2 100644 --- a/plugins/modules/system/seport.py +++ b/plugins/modules/seport.py @@ -1,18 +1,23 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- -# Copyright: (c) 2014, Dan Keder -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# Copyright (c) 2014, Dan Keder +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = r''' ---- +DOCUMENTATION = r""" module: seport short_description: Manages SELinux network port type definitions description: - - Manages SELinux network port type definitions. + - Manages SELinux network port type definitions. 
+extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: none options: ports: description: @@ -26,7 +31,7 @@ options: - Protocol for the specified port. type: str required: true - choices: [ tcp, udp ] + choices: [tcp, udp] setype: description: - SELinux type for the specified port. @@ -36,29 +41,35 @@ options: description: - Desired boolean value. type: str - choices: [ absent, present ] + choices: [absent, present] default: present reload: description: - Reload SELinux policy after commit. type: bool - default: yes + default: true ignore_selinux_state: description: - - Run independent of selinux runtime state + - Run independent of selinux runtime state. type: bool - default: no + default: false + local: + description: + - Work with local modifications only. + type: bool + default: false + version_added: 5.6.0 notes: - - The changes are persistent across reboots. - - Not tested on any debian based system. + - The changes are persistent across reboots. + - Not tested on any Debian based system. requirements: -- libselinux-python -- policycoreutils-python + - libselinux-python + - policycoreutils-python author: -- Dan Keder (@dankeder) -''' + - Dan Keder (@dankeder) +""" -EXAMPLES = r''' +EXAMPLES = r""" - name: Allow Apache to listen on tcp port 8888 community.general.seport: ports: 8888 @@ -88,7 +99,15 @@ EXAMPLES = r''' proto: tcp setype: memcache_port_t state: present -''' + +- name: Remove tcp port 22 local modification if exists + community.general.seport: + ports: 22 + protocol: tcp + setype: ssh_port_t + state: absent + local: true +""" import traceback @@ -116,7 +135,7 @@ def get_runtime_status(ignore_selinux_state=False): return ignore_selinux_state or selinux.is_selinux_enabled() -def semanage_port_get_ports(seport, setype, proto): +def semanage_port_get_ports(seport, setype, proto, local): """ Get the list of ports that have the specified type definition. 
:param community.general.seport: Instance of seobject.portRecords @@ -130,7 +149,7 @@ def semanage_port_get_ports(seport, setype, proto): :rtype: list :return: List of ports that have the specified SELinux type. """ - records = seport.get_all_by_type() + records = seport.get_all_by_type(locallist=local) if (setype, proto) in records: return records[(setype, proto)] else: @@ -164,7 +183,7 @@ def semanage_port_get_type(seport, port, proto): return records.get(key) -def semanage_port_add(module, ports, proto, setype, do_reload, serange='s0', sestore=''): +def semanage_port_add(module, ports, proto, setype, do_reload, serange='s0', sestore='', local=False): """ Add SELinux port type definition to the policy. :type module: AnsibleModule @@ -195,7 +214,7 @@ def semanage_port_add(module, ports, proto, setype, do_reload, serange='s0', ses try: seport = seobject.portRecords(sestore) seport.set_reload(do_reload) - ports_by_type = semanage_port_get_ports(seport, setype, proto) + ports_by_type = semanage_port_get_ports(seport, setype, proto, local) for port in ports: if port in ports_by_type: continue @@ -215,7 +234,7 @@ def semanage_port_add(module, ports, proto, setype, do_reload, serange='s0', ses return change -def semanage_port_del(module, ports, proto, setype, do_reload, sestore=''): +def semanage_port_del(module, ports, proto, setype, do_reload, sestore='', local=False): """ Delete SELinux port type definition from the policy. 
:type module: AnsibleModule @@ -243,7 +262,7 @@ def semanage_port_del(module, ports, proto, setype, do_reload, sestore=''): try: seport = seobject.portRecords(sestore) seport.set_reload(do_reload) - ports_by_type = semanage_port_get_ports(seport, setype, proto) + ports_by_type = semanage_port_get_ports(seport, setype, proto, local) for port in ports: if port in ports_by_type: change = True @@ -265,6 +284,7 @@ def main(): setype=dict(type='str', required=True), state=dict(type='str', default='present', choices=['absent', 'present']), reload=dict(type='bool', default=True), + local=dict(type='bool', default=False) ), supports_check_mode=True, ) @@ -285,6 +305,7 @@ def main(): setype = module.params['setype'] state = module.params['state'] do_reload = module.params['reload'] + local = module.params['local'] result = { 'ports': ports, @@ -294,9 +315,9 @@ def main(): } if state == 'present': - result['changed'] = semanage_port_add(module, ports, proto, setype, do_reload) + result['changed'] = semanage_port_add(module, ports, proto, setype, do_reload, local=local) elif state == 'absent': - result['changed'] = semanage_port_del(module, ports, proto, setype, do_reload) + result['changed'] = semanage_port_del(module, ports, proto, setype, do_reload, local=local) else: module.fail_json(msg='Invalid value of argument "state": {0}'.format(state)) diff --git a/plugins/modules/cloud/misc/serverless.py b/plugins/modules/serverless.py similarity index 82% rename from plugins/modules/cloud/misc/serverless.py rename to plugins/modules/serverless.py index fce6b77e16..0ea2eb3e1f 100644 --- a/plugins/modules/cloud/misc/serverless.py +++ b/plugins/modules/serverless.py @@ -1,28 +1,33 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- -# Copyright: (c) 2016, Ryan Scott Brown -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# Copyright (c) 2016, Ryan Scott Brown +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or 
https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = r''' ---- +DOCUMENTATION = r""" module: serverless short_description: Manages a Serverless Framework project description: - - Provides support for managing Serverless Framework (https://serverless.com/) project deployments and stacks. + - Provides support for managing Serverless Framework (U(https://serverless.com/)) project deployments and stacks. +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: none + diff_mode: + support: none options: state: description: - Goal state of given stage/project. type: str - choices: [ absent, present ] + choices: [absent, present] default: present serverless_bin_path: description: - - The path of a serverless framework binary relative to the 'service_path' eg. node_module/.bin/serverless + - The path of a serverless framework binary relative to the O(service_path), for example V(node_module/.bin/serverless). type: path service_path: description: @@ -34,39 +39,40 @@ options: - The name of the serverless framework project stage to deploy to. - This uses the serverless framework default "dev". type: str + default: '' region: description: - AWS region to deploy the service to. - - This parameter defaults to C(us-east-1). + - This parameter defaults to V(us-east-1). type: str + default: '' deploy: description: - Whether or not to deploy artifacts after building them. - - When this option is C(false) all the functions will be built, but no stack update will be run to send them out. + - When this option is V(false) all the functions are built, but no stack update is run to send them out. - This is mostly useful for generating artifacts to be stored/deployed elsewhere. 
type: bool - default: yes + default: true force: description: - Whether or not to force full deployment, equivalent to serverless C(--force) option. type: bool - default: no + default: false verbose: description: - Shows all stack events during deployment, and display any Stack Output. type: bool - default: no + default: false notes: - - Currently, the C(serverless) command must be in the path of the node executing the task. - In the future this may be a flag. + - Currently, the C(serverless) command must be in the path of the node executing the task. In the future this may be a flag. requirements: -- serverless -- yaml + - serverless + - PyYAML author: -- Ryan Scott Brown (@ryansb) -''' + - Ryan Scott Brown (@ryansb) +""" -EXAMPLES = r''' +EXAMPLES = r""" - name: Basic deploy of a service community.general.serverless: service_path: '{{ project_dir }}' @@ -93,9 +99,9 @@ EXAMPLES = r''' region: us-east-1 service_path: '{{ project_dir }}' serverless_bin_path: node_modules/.bin/serverless -''' +""" -RETURN = r''' +RETURN = r""" service_name: type: str description: The service name specified in the serverless.yml that was just deployed. @@ -107,10 +113,10 @@ state: returned: always command: type: str - description: Full `serverless` command run by this module, in case you want to re-run the command outside the module. + description: Full C(serverless) command run by this module, in case you want to re-run the command outside the module. 
returned: always sample: serverless deploy --stage production -''' +""" import os diff --git a/plugins/modules/shutdown.py b/plugins/modules/shutdown.py new file mode 100644 index 0000000000..497706d25e --- /dev/null +++ b/plugins/modules/shutdown.py @@ -0,0 +1,82 @@ +#!/usr/bin/python +# Copyright (c) 2020, Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + + +DOCUMENTATION = r""" +module: shutdown +short_description: Shut down a machine +notes: + - E(PATH) is ignored on the remote node when searching for the C(shutdown) command. Use O(search_paths) to specify locations + to search if the default paths do not work. + - The O(msg) and O(delay) options are not supported when a shutdown command is not found in O(search_paths), instead the + module attempts to shutdown the system by calling C(systemctl shutdown). +description: + - Shut downs a machine. +version_added: "1.1.0" +extends_documentation_fragment: + - community.general.attributes + - community.general.attributes.flow +attributes: + check_mode: + support: full + diff_mode: + support: none + action: + support: full + async: + support: full +options: + delay: + description: + - Seconds to wait before shutdown. Passed as a parameter to the shutdown command. + - On Linux, macOS and OpenBSD, this is converted to minutes and rounded down. If less than 60, it is set to 0. + - On Solaris and FreeBSD, this represents seconds. + type: int + default: 0 + msg: + description: + - Message to display to users before shutdown. + type: str + default: Shut down initiated by Ansible + search_paths: + description: + - Paths to search on the remote machine for the C(shutdown) command. + - I(Only) these paths are searched for the C(shutdown) command. E(PATH) is ignored in the remote node when searching + for the C(shutdown) command. 
+ type: list + elements: path + default: ['/sbin', '/usr/sbin', '/usr/local/sbin'] + +seealso: + - module: ansible.builtin.reboot +author: + - Matt Davis (@nitzmahone) + - Sam Doran (@samdoran) + - Amin Vakil (@aminvakil) +""" + +EXAMPLES = r""" +- name: Unconditionally shut down the machine with all defaults + community.general.shutdown: + +- name: Delay shutting down the remote node + community.general.shutdown: + delay: 60 + +- name: Shut down a machine with shutdown command in unusual place + community.general.shutdown: + search_paths: + - '/lib/molly-guard' +""" + +RETURN = r""" +shutdown: + description: V(true) if the machine has been shut down. + returned: always + type: bool + sample: true +""" diff --git a/plugins/modules/simpleinit_msb.py b/plugins/modules/simpleinit_msb.py new file mode 100644 index 0000000000..90e7caa308 --- /dev/null +++ b/plugins/modules/simpleinit_msb.py @@ -0,0 +1,312 @@ +#!/usr/bin/python + +# Copyright (c) 2016-2023, Vlad Glagolev +# +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + +DOCUMENTATION = r""" +module: simpleinit_msb +short_description: Manage services on Source Mage GNU/Linux +version_added: 7.5.0 +description: + - Controls services on remote hosts using C(simpleinit-msb). +author: "Vlad Glagolev (@vaygr)" +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: none +options: + name: + type: str + description: + - Name of the service. + required: true + aliases: ['service'] + state: + type: str + required: false + choices: [running, started, stopped, restarted, reloaded] + description: + - V(started)/V(stopped) are idempotent actions that do not run commands unless necessary. V(restarted) always bounces + the service. V(reloaded) always reloads. 
+ - At least one of O(state) and O(enabled) are required. + - Note that V(reloaded) starts the service if it is not already started, even if your chosen init system would not normally. + enabled: + type: bool + required: false + description: + - Whether the service should start on boot. + - At least one of O(state) and O(enabled) are required. +""" + +EXAMPLES = r""" +- name: Example action to start service httpd, if not running + community.general.simpleinit_msb: + name: httpd + state: started + +- name: Example action to stop service httpd, if running + community.general.simpleinit_msb: + name: httpd + state: stopped + +- name: Example action to restart service httpd, in all cases + community.general.simpleinit_msb: + name: httpd + state: restarted + +- name: Example action to reload service httpd, in all cases + community.general.simpleinit_msb: + name: httpd + state: reloaded + +- name: Example action to enable service httpd, and not touch the running state + community.general.simpleinit_msb: + name: httpd + enabled: true +""" + +import os +import re + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.service import daemonize + + +class SimpleinitMSB(object): + """ + Main simpleinit-msb service manipulation class + """ + + def __init__(self, module): + self.module = module + self.name = module.params['name'] + self.state = module.params['state'] + self.enable = module.params['enabled'] + self.changed = False + self.running = None + self.action = None + self.telinit_cmd = None + self.svc_change = False + + def execute_command(self, cmd, daemon=False): + if not daemon: + return self.module.run_command(cmd) + else: + return daemonize(self.module, cmd) + + def check_service_changed(self): + if self.state and self.running is None: + self.module.fail_json(msg="failed determining service state, possible typo of service name?") + # Find out if state has changed + if not self.running and self.state in ["started", "running", "reloaded"]: + 
self.svc_change = True + elif self.running and self.state in ["stopped", "reloaded"]: + self.svc_change = True + elif self.state == "restarted": + self.svc_change = True + if self.module.check_mode and self.svc_change: + self.module.exit_json(changed=True, msg='service state changed') + + def modify_service_state(self): + # Only do something if state will change + if self.svc_change: + # Control service + if self.state in ['started', 'running']: + self.action = "start" + elif not self.running and self.state == 'reloaded': + self.action = "start" + elif self.state == 'stopped': + self.action = "stop" + elif self.state == 'reloaded': + self.action = "reload" + elif self.state == 'restarted': + self.action = "restart" + + if self.module.check_mode: + self.module.exit_json(changed=True, msg='changing service state') + + return self.service_control() + else: + # If nothing needs to change just say all is well + rc = 0 + err = '' + out = '' + return rc, out, err + + def get_service_tools(self): + paths = ['/sbin', '/usr/sbin', '/bin', '/usr/bin'] + binaries = ['telinit'] + location = dict() + + for binary in binaries: + location[binary] = self.module.get_bin_path(binary, opt_dirs=paths) + + if location.get('telinit', False) and os.path.exists("/etc/init.d/smgl_init"): + self.telinit_cmd = location['telinit'] + + if self.telinit_cmd is None: + self.module.fail_json(msg='cannot find telinit script for simpleinit-msb, aborting...') + + def get_service_status(self): + self.action = "status" + rc, status_stdout, status_stderr = self.service_control() + + if self.running is None and status_stdout.count('\n') <= 1: + cleanout = status_stdout.lower().replace(self.name.lower(), '') + + if "is not running" in cleanout: + self.running = False + elif "is running" in cleanout: + self.running = True + + return self.running + + def service_enable(self): + # Check if the service is already enabled/disabled + if not self.enable ^ self.service_enabled(): + return + + action = "boot" + 
("enable" if self.enable else "disable") + + (rc, out, err) = self.execute_command("%s %s %s" % (self.telinit_cmd, action, self.name)) + + self.changed = True + + for line in err.splitlines(): + if self.enable and line.find('already enabled') != -1: + self.changed = False + break + if not self.enable and line.find('already disabled') != -1: + self.changed = False + break + + if not self.changed: + return + + return (rc, out, err) + + def service_enabled(self): + self.service_exists() + + (rc, out, err) = self.execute_command("%s %sd" % (self.telinit_cmd, self.enable)) + + service_enabled = False if self.enable else True + + rex = re.compile(r'^%s$' % self.name) + + for line in out.splitlines(): + if rex.match(line): + service_enabled = True if self.enable else False + break + + return service_enabled + + def service_exists(self): + (rc, out, err) = self.execute_command("%s list" % self.telinit_cmd) + + service_exists = False + + rex = re.compile(r'^\w+\s+%s$' % self.name) + + for line in out.splitlines(): + if rex.match(line): + service_exists = True + break + + if not service_exists: + self.module.fail_json(msg='telinit could not find the requested service: %s' % self.name) + + def service_control(self): + self.service_exists() + + svc_cmd = "%s run %s" % (self.telinit_cmd, self.name) + + rc_state, stdout, stderr = self.execute_command("%s %s" % (svc_cmd, self.action), daemon=True) + + return (rc_state, stdout, stderr) + + +def build_module(): + return AnsibleModule( + argument_spec=dict( + name=dict(required=True, aliases=['service']), + state=dict(choices=['running', 'started', 'stopped', 'restarted', 'reloaded']), + enabled=dict(type='bool'), + ), + supports_check_mode=True, + required_one_of=[['state', 'enabled']], + ) + + +def main(): + module = build_module() + + service = SimpleinitMSB(module) + + rc = 0 + out = '' + err = '' + result = {} + result['name'] = service.name + + # Find service management tools + service.get_service_tools() + + # Enable/disable 
service startup at boot if requested + if service.module.params['enabled'] is not None: + service.service_enable() + result['enabled'] = service.enable + + if module.params['state'] is None: + # Not changing the running state, so bail out now. + result['changed'] = service.changed + module.exit_json(**result) + + result['state'] = service.state + + service.get_service_status() + + # Calculate if request will change service state + service.check_service_changed() + + # Modify service state if necessary + (rc, out, err) = service.modify_service_state() + + if rc != 0: + if err: + module.fail_json(msg=err) + else: + module.fail_json(msg=out) + + result['changed'] = service.changed | service.svc_change + if service.module.params['enabled'] is not None: + result['enabled'] = service.module.params['enabled'] + + if not service.module.params['state']: + status = service.get_service_status() + if status is None: + result['state'] = 'absent' + elif status is False: + result['state'] = 'started' + else: + result['state'] = 'stopped' + else: + # as we may have just bounced the service the service command may not + # report accurate state at this moment so just show what we ran + if service.module.params['state'] in ['started', 'restarted', 'running', 'reloaded']: + result['state'] = 'started' + else: + result['state'] = 'stopped' + + module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/cloud/softlayer/sl_vm.py b/plugins/modules/sl_vm.py similarity index 73% rename from plugins/modules/cloud/softlayer/sl_vm.py rename to plugins/modules/sl_vm.py index 825d82e173..9ae0def5c4 100644 --- a/plugins/modules/cloud/softlayer/sl_vm.py +++ b/plugins/modules/sl_vm.py @@ -1,23 +1,28 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- -# Copyright: (c) 2017, Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# Copyright (c) 2017, Ansible Project +# GNU General Public License v3.0+ (see 
LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: sl_vm -short_description: create or cancel a virtual instance in SoftLayer +short_description: Create or cancel a virtual instance in SoftLayer description: - Creates or cancels SoftLayer instances. - When created, optionally waits for it to be 'running'. +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: none + diff_mode: + support: none options: instance_id: description: - - Instance Id of the virtual instance to perform action option. + - Instance ID of the virtual instance to perform action option. type: str hostname: description: @@ -79,22 +84,22 @@ options: description: - Flag to determine if the instance should be hourly billed. type: bool - default: 'yes' + default: true private: description: - Flag to determine if the instance should be private only. type: bool - default: 'no' + default: false dedicated: description: - Flag to determine if the instance should be deployed in dedicated space. type: bool - default: 'no' + default: false local_disk: description: - Flag to determine if local disk should be used for the new instance. type: bool - default: 'yes' + default: true cpus: description: - Count of cpus to be assigned to new virtual instance. @@ -113,7 +118,7 @@ options: disks: description: - List of disk sizes to be assigned to new virtual instance. - default: [ 25 ] + default: [25] type: list elements: int os_code: @@ -131,17 +136,18 @@ options: type: int public_vlan: description: - - VLAN by its Id to be assigned to the public NIC. + - VLAN by its ID to be assigned to the public NIC. type: str private_vlan: description: - - VLAN by its Id to be assigned to the private NIC. 
+ - VLAN by its ID to be assigned to the private NIC. type: str ssh_keys: description: - - List of ssh keys by their Id to be assigned to a virtual instance. + - List of ssh keys by their ID to be assigned to a virtual instance. type: list elements: str + default: [] post_uri: description: - URL of a post provisioning script to be loaded and executed on virtual instance. @@ -149,118 +155,123 @@ options: state: description: - Create, or cancel a virtual instance. - - Specify C(present) for create, C(absent) to cancel. - choices: [ absent, present ] + - Specify V(present) for create, V(absent) to cancel. + choices: [absent, present] default: present type: str wait: description: - Flag used to wait for active status before returning. type: bool - default: 'yes' + default: true wait_time: description: - Time in seconds before wait returns. default: 600 type: int requirements: - - python >= 2.6 - - softlayer >= 4.1.1 + - softlayer >= 4.1.1 +notes: + - The C(softlayer-python) library, at version 6.2.6 (from Jan 2025), only supports Python version 3.8, 3.9 and 3.10. author: -- Matt Colton (@mcltn) -''' + - Matt Colton (@mcltn) +seealso: + - name: SoftLayer API Python Client + description: The SoftLayer API Python Client is required for this module. 
+ link: https://github.com/SoftLayer/softlayer-python +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: Build instance hosts: localhost - gather_facts: no + gather_facts: false tasks: - - name: Build instance request - community.general.sl_vm: - hostname: instance-1 - domain: anydomain.com - datacenter: dal09 - tags: ansible-module-test - hourly: yes - private: no - dedicated: no - local_disk: yes - cpus: 1 - memory: 1024 - disks: [25] - os_code: UBUNTU_LATEST - wait: no + - name: Build instance request + community.general.sl_vm: + hostname: instance-1 + domain: anydomain.com + datacenter: dal09 + tags: ansible-module-test + hourly: true + private: false + dedicated: false + local_disk: true + cpus: 1 + memory: 1024 + disks: [25] + os_code: UBUNTU_LATEST + wait: false - name: Build additional instances hosts: localhost - gather_facts: no + gather_facts: false tasks: - - name: Build instances request - community.general.sl_vm: - hostname: "{{ item.hostname }}" - domain: "{{ item.domain }}" - datacenter: "{{ item.datacenter }}" - tags: "{{ item.tags }}" - hourly: "{{ item.hourly }}" - private: "{{ item.private }}" - dedicated: "{{ item.dedicated }}" - local_disk: "{{ item.local_disk }}" - cpus: "{{ item.cpus }}" - memory: "{{ item.memory }}" - disks: "{{ item.disks }}" - os_code: "{{ item.os_code }}" - ssh_keys: "{{ item.ssh_keys }}" - wait: "{{ item.wait }}" - with_items: - - hostname: instance-2 - domain: anydomain.com - datacenter: dal09 - tags: - - ansible-module-test - - ansible-module-test-replicas - hourly: yes - private: no - dedicated: no - local_disk: yes - cpus: 1 - memory: 1024 - disks: - - 25 - - 100 - os_code: UBUNTU_LATEST - ssh_keys: [] - wait: True - - hostname: instance-3 - domain: anydomain.com - datacenter: dal09 - tags: - - ansible-module-test - - ansible-module-test-replicas - hourly: yes - private: no - dedicated: no - local_disk: yes - cpus: 1 - memory: 1024 - disks: - - 25 - - 100 - os_code: UBUNTU_LATEST - ssh_keys: [] - wait: yes + - name: 
Build instances request + community.general.sl_vm: + hostname: "{{ item.hostname }}" + domain: "{{ item.domain }}" + datacenter: "{{ item.datacenter }}" + tags: "{{ item.tags }}" + hourly: "{{ item.hourly }}" + private: "{{ item.private }}" + dedicated: "{{ item.dedicated }}" + local_disk: "{{ item.local_disk }}" + cpus: "{{ item.cpus }}" + memory: "{{ item.memory }}" + disks: "{{ item.disks }}" + os_code: "{{ item.os_code }}" + ssh_keys: "{{ item.ssh_keys }}" + wait: "{{ item.wait }}" + with_items: + - hostname: instance-2 + domain: anydomain.com + datacenter: dal09 + tags: + - ansible-module-test + - ansible-module-test-replicas + hourly: true + private: false + dedicated: false + local_disk: true + cpus: 1 + memory: 1024 + disks: + - 25 + - 100 + os_code: UBUNTU_LATEST + ssh_keys: [] + wait: true + - hostname: instance-3 + domain: anydomain.com + datacenter: dal09 + tags: + - ansible-module-test + - ansible-module-test-replicas + hourly: true + private: false + dedicated: false + local_disk: true + cpus: 1 + memory: 1024 + disks: + - 25 + - 100 + os_code: UBUNTU_LATEST + ssh_keys: [] + wait: true - name: Cancel instances hosts: localhost - gather_facts: no + gather_facts: false tasks: - - name: Cancel by tag - community.general.sl_vm: - state: absent - tags: ansible-module-test -''' + - name: Cancel by tag + community.general.sl_vm: + state: absent + tags: ansible-module-test +""" # TODO: Disabled RETURN as it is breaking the build for docs. Needs to be fixed. 
-RETURN = '''# ''' +RETURN = """#""" import json import time @@ -275,7 +286,6 @@ except ImportError: HAS_SL = False from ansible.module_utils.basic import AnsibleModule -from ansible.module_utils.six import string_types # TODO: get this info from API @@ -304,9 +314,9 @@ def create_virtual_instance(module): return False, None # Check if OS or Image Template is provided (Can't be both, defaults to OS) - if (module.params.get('os_code') is not None and module.params.get('os_code') != ''): + if module.params.get('os_code') is not None and module.params.get('os_code') != '': module.params['image_id'] = '' - elif (module.params.get('image_id') is not None and module.params.get('image_id') != ''): + elif module.params.get('image_id') is not None and module.params.get('image_id') != '': module.params['os_code'] = '' module.params['disks'] = [] # Blank out disks since it will use the template else: @@ -363,7 +373,7 @@ def cancel_instance(module): canceled = True if module.params.get('instance_id') is None and (module.params.get('tags') or module.params.get('hostname') or module.params.get('domain')): tags = module.params.get('tags') - if isinstance(tags, string_types): + if isinstance(tags, str): tags = [module.params.get('tags')] instances = vsManager.list_instances(tags=tags, hostname=module.params.get('hostname'), domain=module.params.get('domain')) for instance in instances: diff --git a/plugins/modules/notification/slack.py b/plugins/modules/slack.py similarity index 64% rename from plugins/modules/notification/slack.py rename to plugins/modules/slack.py index bdc839f9a8..07cd1885f6 100644 --- a/plugins/modules/notification/slack.py +++ b/plugins/modules/slack.py @@ -1,74 +1,79 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- -# (c) 2020, Lee Goolsbee -# (c) 2020, Michal Middleton -# (c) 2017, Steve Pletcher -# (c) 2016, René Moser -# (c) 2015, Stefan Berggren -# (c) 2014, Ramon de la Fuente +# Copyright (c) 2020, Lee Goolsbee +# Copyright (c) 2020, Michal Middleton +# 
Copyright (c) 2017, Steve Pletcher +# Copyright (c) 2016, René Moser +# Copyright (c) 2015, Stefan Berggren +# Copyright (c) 2014, Ramon de la Fuente # -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = """ +DOCUMENTATION = r""" module: slack short_description: Send Slack notifications description: - - The C(slack) module sends notifications to U(http://slack.com) via the Incoming WebHook integration + - The M(community.general.slack) module sends notifications to U(http://slack.com) using the Incoming WebHook integration. author: "Ramon de la Fuente (@ramondelafuente)" +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: none options: domain: type: str description: - - Slack (sub)domain for your environment without protocol. (i.e. - C(example.slack.com)) In 1.8 and beyond, this is deprecated and may - be ignored. See token documentation for information. + - "When using new format 'Webhook token' and WebAPI tokens: this can be V(slack.com) or V(slack-gov.com) and is ignored + otherwise." + - "When using old format 'Webhook token': Slack (sub)domain for your environment without protocol. (For example V(example.slack.com).) + in Ansible 1.8 and beyond, this is deprecated and may be ignored. See token documentation for information." token: type: str description: - - Slack integration token. This authenticates you to the slack service. - Make sure to use the correct type of token, depending on what method you use. - - "Webhook token: - Prior to 1.8, a token looked like C(3Ffe373sfhRE6y42Fg3rvf4GlK). 
In - 1.8 and above, ansible adapts to the new slack API where tokens look - like C(G922VJP24/D921DW937/3Ffe373sfhRE6y42Fg3rvf4GlK). If tokens - are in the new format then slack will ignore any value of domain. If - the token is in the old format the domain is required. Ansible has no - control of when slack will get rid of the old API. When slack does - that the old format will stop working. ** Please keep in mind the tokens - are not the API tokens but are the webhook tokens. In slack these are - found in the webhook URL which are obtained under the apps and integrations. - The incoming webhooks can be added in that area. In some cases this may - be locked by your Slack admin and you must request access. It is there - that the incoming webhooks can be added. The key is on the end of the - URL given to you in that section." - - "WebAPI token: - Slack WebAPI requires a personal, bot or work application token. These tokens start with C(xoxp-), C(xoxb-) - or C(xoxa-), eg. C(xoxb-1234-56789abcdefghijklmnop). WebAPI token is required if you intend to receive thread_id. - See Slack's documentation (U(https://api.slack.com/docs/token-types)) for more information." + - Slack integration token. This authenticates you to the Slack service. Make sure to use the correct type of token, + depending on what method you use. + - 'Webhook token: Prior to Ansible 1.8, a token looked like V(3Ffe373sfhRE6y42Fg3rvf4GlK). In Ansible 1.8 and above, + Ansible adapts to the new Slack API where tokens look like V(G922VJP24/D921DW937/3Ffe373sfhRE6y42Fg3rvf4GlK). If tokens + are in the new format then Slack ignores any value of domain except V(slack.com) or V(slack-gov.com). If the token + is in the old format the domain is required. Ansible has no control of when Slack is going to remove the old API. + When Slack does that the old format is going to cease working. B(Please keep in mind the tokens are not the API tokens + but are the webhook tokens.) 
In Slack these are found in the webhook URL which are obtained under the apps and integrations. + The incoming webhooks can be added in that area. In some cases this may be locked by your Slack admin and you must + request access. It is there that the incoming webhooks can be added. The key is on the end of the URL given to you + in that section.' + - "WebAPI token: Slack WebAPI requires a personal, bot or work application token. These tokens start with V(xoxp-), + V(xoxb-) or V(xoxa-), for example V(xoxb-1234-56789abcdefghijklmnop). WebAPI token is required if you intend to receive + thread_id. See Slack's documentation (U(https://api.slack.com/docs/token-types)) for more information." required: true msg: type: str description: - - Message to send. Note that the module does not handle escaping characters. - Plain-text angle brackets and ampersands should be converted to HTML entities (e.g. & to &) before sending. - See Slack's documentation (U(https://api.slack.com/docs/message-formatting)) for more. + - Message to send. Note that the module does not handle escaping characters. Plain-text angle brackets and ampersands + should be converted to HTML entities (for example C(&) to C(&)) before sending. See Slack's documentation + (U(https://api.slack.com/docs/message-formatting)) + for more. channel: type: str description: - - Channel to send the message to. If absent, the message goes to the channel selected for the I(token). + - Channel to send the message to. If absent, the message goes to the channel selected for the O(token). thread_id: description: - - Optional. Timestamp of parent message to thread this message. https://api.slack.com/docs/message-threading + - Optional. Timestamp of parent message to thread this message, see U(https://api.slack.com/docs/message-threading). type: str message_id: description: - Optional. Message ID to edit, instead of posting a new message. - Corresponds to C(ts) in the Slack API (U(https://api.slack.com/messaging/modifying)). 
+ - If supplied, O(channel) must be in form of C(C0xxxxxxx). Use C({{ slack_response.channel }}) to get RV(ignore:channel) + from previous task run. + - The token needs history scope to get information on the message to edit (C(channels:history,groups:history,mpim:history,im:history)). + - Corresponds to C(ts) in the Slack API (U(https://api.slack.com/messaging/modifying)). type: str version_added: 1.2.0 username: @@ -79,17 +84,17 @@ options: icon_url: type: str description: - - Url for the message sender's icon (default C(https://www.ansible.com/favicon.ico)) - default: https://www.ansible.com/favicon.ico + - URL for the message sender's icon. + default: https://docs.ansible.com/favicon.ico icon_emoji: type: str description: - Emoji for the message sender. See Slack documentation for options. - (if I(icon_emoji) is set, I(icon_url) will not be used) + - If O(icon_emoji) is set, O(icon_url) is not used. link_names: type: int description: - - Automatically create links for channels and usernames in I(msg). + - Automatically create links for channels and usernames in O(msg). default: 1 choices: - 1 @@ -97,22 +102,23 @@ options: parse: type: str description: - - Setting for the message parser at Slack + - Setting for the message parser at Slack. choices: - 'full' - 'none' validate_certs: description: - - If C(no), SSL certificates will not be validated. This should only be used - on personally controlled sites using self-signed certificates. + - If V(false), SSL certificates are not validated. This should only be used on personally controlled sites using self-signed + certificates. type: bool - default: 'yes' + default: true color: type: str description: - - Allow text to use default colors - use the default of 'normal' to not send a custom color bar at the start of the message. - - Allowed values for color can be one of 'normal', 'good', 'warning', 'danger', any valid 3 digit or 6 digit hex color value. - - Specifying value in hex is supported since Ansible 2.8. 
+ - Allow text to use default colors - use the default of V(normal) to not send a custom color bar at the start of the + message. + - Allowed values for color can be one of V(normal), V(good), V(warning), V(danger), any valid 3 digit or 6 digit hex + color value. default: 'normal' attachments: type: list @@ -127,9 +133,26 @@ options: type: list elements: dict version_added: 1.0.0 + prepend_hash: + type: str + description: + - Setting for automatically prepending a V(#) symbol on the passed in O(channel). + - The V(auto) method prepends a V(#) unless O(channel) starts with one of V(#), V(@), V(C0), V(GF), V(G0), V(CP). These + prefixes only cover a small set of the prefixes that should not have a V(#) prepended. Since an exact condition which + O(channel) values must not have the V(#) prefix is not known, the value V(auto) for this option is deprecated in the + future. It is best to explicitly set O(prepend_hash=always) or O(prepend_hash=never) to obtain the needed behavior. + - Before community.general 12.0.0, the default was V(auto). It has been deprecated since community.general 10.2.0. + - Note that V(auto) will be deprecated in a future version. + # TODO: Deprecate 'auto' in community.general 13.0.0 + default: never + choices: + - 'always' + - 'never' + - 'auto' + version_added: 6.1.0 """ -EXAMPLES = """ +EXAMPLES = r""" - name: Send notification message via Slack community.general.slack: token: thetoken/generatedby/slack @@ -148,7 +171,8 @@ EXAMPLES = """ parse: 'none' delegate_to: localhost -- name: Insert a color bar in front of the message for visibility purposes and use the default webhook icon and name configured in Slack +- name: Insert a color bar in front of the message for visibility purposes and use the default webhook icon and name configured + in Slack community.general.slack: token: thetoken/generatedby/slack msg: '{{ inventory_hostname }} is alive!' 
@@ -174,10 +198,10 @@ EXAMPLES = """ fields: - title: System A value: "load average: 0,74, 0,66, 0,63" - short: True + short: true - title: System B value: 'load average: 5,16, 4,64, 2,43' - short: True + short: true - name: Use the blocks API community.general.slack: @@ -191,14 +215,14 @@ EXAMPLES = """ Display my system load on host A and B - type: context elements: - - type: mrkdwn - text: |- - *System A* - load average: 0,74, 0,66, 0,63 - - type: mrkdwn - text: |- - *System B* - load average: 5,16, 4,64, 2,43 + - type: mrkdwn + text: |- + *System A* + load average: 0,74, 0,66, 0,63 + - type: mrkdwn + text: |- + *System B* + load average: 5,16, 4,64, 2,43 - name: Send a message with a link using Slack markup community.general.slack: @@ -233,6 +257,8 @@ EXAMPLES = """ - name: Edit message community.general.slack: token: thetoken/generatedby/slack + # The 'channel' option does not accept the channel name. It must use the 'channel_id', + # which can be retrieved for example from 'slack_response' from the previous task. channel: "{{ slack_response.channel }}" msg: Deployment complete! 
message_id: "{{ slack_response.ts }}" @@ -240,14 +266,14 @@ EXAMPLES = """ import re from ansible.module_utils.basic import AnsibleModule -from ansible.module_utils.six.moves.urllib.parse import urlencode from ansible.module_utils.urls import fetch_url +from urllib.parse import urlencode OLD_SLACK_INCOMING_WEBHOOK = 'https://%s/services/hooks/incoming-webhook?token=%s' -SLACK_INCOMING_WEBHOOK = 'https://hooks.slack.com/services/%s' -SLACK_POSTMESSAGE_WEBAPI = 'https://slack.com/api/chat.postMessage' -SLACK_UPDATEMESSAGE_WEBAPI = 'https://slack.com/api/chat.update' -SLACK_CONVERSATIONS_HISTORY_WEBAPI = 'https://slack.com/api/conversations.history' +SLACK_INCOMING_WEBHOOK = 'https://hooks.%s/services/%s' +SLACK_POSTMESSAGE_WEBAPI = 'https://%s/api/chat.postMessage' +SLACK_UPDATEMESSAGE_WEBAPI = 'https://%s/api/chat.update' +SLACK_CONVERSATIONS_HISTORY_WEBAPI = 'https://%s/api/conversations.history' # Escaping quotes and apostrophes to avoid ending string prematurely in ansible call. # We do not escape other characters used as Slack metacharacters (e.g. &, <, >). @@ -285,7 +311,7 @@ def recursive_escape_quotes(obj, keys): def build_payload_for_slack(text, channel, thread_id, username, icon_url, icon_emoji, link_names, - parse, color, attachments, blocks, message_id): + parse, color, attachments, blocks, message_id, prepend_hash): payload = {} if color == "normal" and text is not None: payload = dict(text=escape_quotes(text)) @@ -293,10 +319,15 @@ def build_payload_for_slack(text, channel, thread_id, username, icon_url, icon_e # With a custom color we have to set the message as attachment, and explicitly turn markdown parsing on for it. 
payload = dict(attachments=[dict(text=escape_quotes(text), color=color, mrkdwn_in=["text"])]) if channel is not None: - if channel.startswith(('#', '@', 'C0')): - payload['channel'] = channel - else: + if prepend_hash == 'auto': + if channel.startswith(('#', '@', 'C0', 'GF', 'G0', 'CP')): + payload['channel'] = channel + else: + payload['channel'] = '#' + channel + elif prepend_hash == 'always': payload['channel'] = '#' + channel + elif prepend_hash == 'never': + payload['channel'] = channel if thread_id is not None: payload['thread_ts'] = thread_id if username is not None: @@ -344,7 +375,11 @@ def build_payload_for_slack(text, channel, thread_id, username, icon_url, icon_e return payload -def get_slack_message(module, token, channel, ts): +def validate_slack_domain(domain): + return (domain if domain in ('slack.com', 'slack-gov.com') else 'slack.com') + + +def get_slack_message(module, domain, token, channel, ts): headers = { 'Content-Type': 'application/json; charset=UTF-8', 'Accept': 'application/json', @@ -356,11 +391,14 @@ def get_slack_message(module, token, channel, ts): 'limit': 1, 'inclusive': 'true', }) - url = SLACK_CONVERSATIONS_HISTORY_WEBAPI + '?' + qs + domain = validate_slack_domain(domain) + url = (SLACK_CONVERSATIONS_HISTORY_WEBAPI % domain) + '?' 
+ qs response, info = fetch_url(module=module, url=url, headers=headers, method='GET') if info['status'] != 200: module.fail_json(msg="failed to get slack message") data = module.from_json(response.read()) + if data.get('ok') is False: + module.fail_json(msg="failed to get slack message: %s" % data) if len(data['messages']) < 1: module.fail_json(msg="no messages matching ts: %s" % ts) if len(data['messages']) > 1: @@ -372,9 +410,11 @@ def do_notify_slack(module, domain, token, payload): use_webapi = False if token.count('/') >= 2: # New style webhook token - slack_uri = SLACK_INCOMING_WEBHOOK % token + domain = validate_slack_domain(domain) + slack_uri = SLACK_INCOMING_WEBHOOK % (domain, token) elif re.match(r'^xox[abp]-\S+$', token): - slack_uri = SLACK_UPDATEMESSAGE_WEBAPI if 'ts' in payload else SLACK_POSTMESSAGE_WEBAPI + domain = validate_slack_domain(domain) + slack_uri = (SLACK_UPDATEMESSAGE_WEBAPI if 'ts' in payload else SLACK_POSTMESSAGE_WEBAPI) % domain use_webapi = True else: if not domain: @@ -396,7 +436,7 @@ def do_notify_slack(module, domain, token, payload): if use_webapi: obscured_incoming_webhook = slack_uri else: - obscured_incoming_webhook = SLACK_INCOMING_WEBHOOK % '[obscured]' + obscured_incoming_webhook = SLACK_INCOMING_WEBHOOK % (domain, '[obscured]') module.fail_json(msg=" failed to send %s to %s: %s" % (data, obscured_incoming_webhook, info['msg'])) # each API requires different handling @@ -415,7 +455,7 @@ def main(): channel=dict(type='str'), thread_id=dict(type='str'), username=dict(type='str', default='Ansible'), - icon_url=dict(type='str', default='https://www.ansible.com/favicon.ico'), + icon_url=dict(type='str', default='https://docs.ansible.com/favicon.ico'), icon_emoji=dict(type='str'), link_names=dict(type='int', default=1, choices=[0, 1]), parse=dict(type='str', choices=['none', 'full']), @@ -424,6 +464,7 @@ def main(): attachments=dict(type='list', elements='dict'), blocks=dict(type='list', elements='dict'), 
message_id=dict(type='str'), + prepend_hash=dict(type='str', choices=['always', 'never', 'auto'], default='never'), ), supports_check_mode=True, ) @@ -442,6 +483,7 @@ def main(): attachments = module.params['attachments'] blocks = module.params['blocks'] message_id = module.params['message_id'] + prepend_hash = module.params['prepend_hash'] color_choices = ['normal', 'good', 'warning', 'danger'] if color not in color_choices and not is_valid_hex_color(color): @@ -453,7 +495,7 @@ def main(): # if updating an existing message, we can check if there's anything to update if message_id is not None: changed = False - msg = get_slack_message(module, token, channel, message_id) + msg = get_slack_message(module, domain, token, channel, message_id) for key in ('icon_url', 'icon_emoji', 'link_names', 'color', 'attachments', 'blocks'): if msg.get(key) != module.params.get(key): changed = True @@ -466,7 +508,7 @@ def main(): module.exit_json(changed=changed) payload = build_payload_for_slack(text, channel, thread_id, username, icon_url, icon_emoji, link_names, - parse, color, attachments, blocks, message_id) + parse, color, attachments, blocks, message_id, prepend_hash) slack_response = do_notify_slack(module, domain, token, payload) if 'ok' in slack_response: diff --git a/plugins/modules/packaging/os/slackpkg.py b/plugins/modules/slackpkg.py similarity index 71% rename from plugins/modules/packaging/os/slackpkg.py rename to plugins/modules/slackpkg.py index e98f9a338d..a32c0048f7 100644 --- a/plugins/modules/packaging/os/slackpkg.py +++ b/plugins/modules/slackpkg.py @@ -1,55 +1,59 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- -# (c) 2014, Kim Nørgaard +# Copyright (c) 2014, Kim Nørgaard # Written by Kim Nørgaard # Based on pkgng module written by bleader # that was based on pkgin module written by Shaun Zinck # that was based on pacman module written by Afterburn # that was based on apt module written by Matthew Williams # -# GNU General Public License v3.0+ (see COPYING or 
https://www.gnu.org/licenses/gpl-3.0.txt) +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: slackpkg short_description: Package manager for Slackware >= 12.2 description: - - Manage binary packages for Slackware using 'slackpkg' which - is available in versions after 12.2. + - Manage binary packages for Slackware using C(slackpkg) which is available in versions after 12.2. +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: none options: - name: - description: - - name of package to install/remove - required: true - type: list - elements: str - aliases: [pkg] + name: + description: + - Name of package to install/remove. + required: true + type: list + elements: str + aliases: [pkg] - state: - description: - - state of the package, you can use "installed" as an alias for C(present) and removed as one for C(absent). - choices: [ 'present', 'absent', 'latest', 'installed', 'removed' ] - required: false - default: present - type: str + state: + description: + - State of the package, you can use V(installed) as an alias for V(present) and V(removed) as one for V(absent). + choices: ['present', 'absent', 'latest', 'installed', 'removed'] + required: false + default: present + type: str - update_cache: - description: - - update the package database first - required: false - default: false - type: bool + update_cache: + description: + - Update the package database first. 
+ required: false + default: false + type: bool author: Kim Nørgaard (@KimNorgaard) -requirements: [ "Slackware >= 12.2" ] -''' +requirements: ["Slackware >= 12.2"] +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: Install package foo community.general.slackpkg: name: foo @@ -64,7 +68,7 @@ EXAMPLES = ''' community.general.slackpkg: name: foo state: latest -''' +""" from ansible.module_utils.basic import AnsibleModule @@ -98,9 +102,8 @@ def remove_packages(module, slackpkg_path, packages): continue if not module.check_mode: - rc, out, err = module.run_command("%s -default_answer=y -batch=on \ - remove %s" % (slackpkg_path, - package)) + rc, out, err = module.run_command( + [slackpkg_path, "-default_answer=y", "-batch=on", "remove", package]) if not module.check_mode and query_package(module, slackpkg_path, package): @@ -124,9 +127,8 @@ def install_packages(module, slackpkg_path, packages): continue if not module.check_mode: - rc, out, err = module.run_command("%s -default_answer=y -batch=on \ - install %s" % (slackpkg_path, - package)) + rc, out, err = module.run_command( + [slackpkg_path, "-default_answer=y", "-batch=on", "install", package]) if not module.check_mode and not query_package(module, slackpkg_path, package): @@ -147,9 +149,8 @@ def upgrade_packages(module, slackpkg_path, packages): for package in packages: if not module.check_mode: - rc, out, err = module.run_command("%s -default_answer=y -batch=on \ - upgrade %s" % (slackpkg_path, - package)) + rc, out, err = module.run_command( + [slackpkg_path, "-default_answer=y", "-batch=on", "upgrade", package]) if not module.check_mode and not query_package(module, slackpkg_path, package): @@ -166,7 +167,8 @@ def upgrade_packages(module, slackpkg_path, packages): def update_cache(module, slackpkg_path): - rc, out, err = module.run_command("%s -batch=on update" % (slackpkg_path)) + rc, out, err = module.run_command( + [slackpkg_path, "-batch=on", "update"]) if rc != 0: module.fail_json(msg="Could not update package 
cache") diff --git a/plugins/modules/cloud/smartos/smartos_image_info.py b/plugins/modules/smartos_image_info.py similarity index 60% rename from plugins/modules/cloud/smartos/smartos_image_info.py rename to plugins/modules/smartos_image_info.py index 0aa9c3ac1c..0c68a4c52f 100644 --- a/plugins/modules/cloud/smartos/smartos_image_info.py +++ b/plugins/modules/smartos_image_info.py @@ -1,33 +1,35 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- -# (c) 2015, Adam Števko -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# Copyright (c) 2015, Adam Števko +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: smartos_image_info -short_description: Get SmartOS image details. +short_description: Get SmartOS image details description: - - Retrieve information about all installed images on SmartOS. - - This module was called C(smartos_image_facts) before Ansible 2.9, returning C(ansible_facts). - Note that the M(community.general.smartos_image_info) module no longer returns C(ansible_facts)! + - Retrieve information about all installed images on SmartOS. author: Adam Števko (@xen0l) +extends_documentation_fragment: + - community.general.attributes + - community.general.attributes.info_module +attributes: + check_mode: + version_added: 3.3.0 + # This was backported to 2.5.4 and 1.3.11 as well, since this was a bugfix options: - filters: - description: - - Criteria for selecting image. Can be any value from image - manifest and 'published_date', 'published', 'source', 'clones', - and 'size'. More information can be found at U(https://smartos.org/man/1m/imgadm) - under 'imgadm list'. 
- type: str -''' + filters: + description: + - Criteria for selecting image. Can be any value from image manifest and V(published_date), V(published), V(source), + V(clones), and V(size). + - More information can be found at U(https://smartos.org/man/1m/imgadm) under C(imgadm list). + type: str +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: Return information about all installed images community.general.smartos_image_info: register: result @@ -43,19 +45,25 @@ EXAMPLES = ''' - name: Print information ansible.builtin.debug: - msg: "{{ result.smartos_images[item]['name'] }}-{{ result.smartos_images[item]['version'] }} - has {{ result.smartos_images[item]['clones'] }} VM(s)" + msg: >- + {{ + result.smartos_images[item]['name'] }}-{{ result.smartos_images[item]['version'] }} + has {{ result.smartos_images[item]['clones'] + }} VM(s) with_items: "{{ result.smartos_images.keys() | list }}" - name: Print information ansible.builtin.debug: - msg: "{{ smartos_images[item]['name'] }}-{{ smartos_images[item]['version'] }} - has {{ smartos_images[item]['clones'] }} VM(s)" + msg: >- + {{ + smartos_images[item]['name'] }}-{{ smartos_images[item]['version'] }} + has {{ smartos_images[item]['clones'] + }} VM(s) with_items: "{{ smartos_images.keys() | list }}" -''' +""" -RETURN = ''' -''' +RETURN = r""" +""" import json from ansible.module_utils.basic import AnsibleModule diff --git a/plugins/modules/snap.py b/plugins/modules/snap.py new file mode 100644 index 0000000000..01599b1b3e --- /dev/null +++ b/plugins/modules/snap.py @@ -0,0 +1,513 @@ +#!/usr/bin/python + +# Copyright (c) 2024, Lincoln Wallace (locnnil) +# Copyright (c) 2021, Alexei Znamensky (russoz) +# Copyright (c) 2021, Marcus Rickert +# Copyright (c) 2018, Stanislas Lange (angristan) +# Copyright (c) 2018, Victor Carceler + +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + 
+DOCUMENTATION = r""" +module: snap +short_description: Manages snaps +description: + - Manages snap packages. +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: none +options: + name: + description: + - Name of the snaps to be installed. + - Any named snap accepted by the C(snap) command is valid. + - O(dangerous=true) may be necessary when installing C(.snap) files. See O(dangerous) for more details. + required: true + type: list + elements: str + state: + description: + - Desired state of the package. + - When O(state=present) the module uses C(snap install) if the snap is not installed, and C(snap refresh) if it is installed + but from a different channel. + default: present + choices: [absent, present, enabled, disabled] + type: str + classic: + description: + - Install a snap that has classic confinement. + - This option corresponds to the C(--classic) argument of the C(snap install) command. + - This level of confinement is permissive, granting full system access, similar to that of traditionally packaged applications + that do not use sandboxing mechanisms. This option can only be specified when the task involves a single snap. + - See U(https://snapcraft.io/docs/snap-confinement) for more details about classic confinement and confinement levels. + type: bool + required: false + default: false + channel: + description: + - Define which release of a snap is installed and tracked for updates. This option can only be specified if there is + a single snap in the task. + - If not passed, the C(snap) command defaults to V(stable). + - If the value passed does not contain the C(track), it defaults to C(latest). For example, if V(edge) is passed, the + module assumes the channel to be V(latest/edge). + - See U(https://snapcraft.io/docs/channels) for more details about snap channels. 
+ type: str + required: false + options: + description: + - Set options with pattern C(key=value) or C(snap:key=value). If a snap name is given, the option is applied to that + snap only. If the snap name is omitted, the options are applied to all snaps listed in O(name). Options are only applied + to active snaps. + - Options are only applied when C(state) is set to V(present). This is done after the necessary installation or refresh + (upgrade/downgrade) of all the snaps listed in O(name). + - See U(https://snapcraft.io/docs/configuration-in-snaps) for more details about snap configuration options. + required: false + type: list + elements: str + version_added: 4.4.0 + dangerous: + description: + - Install the snap in dangerous mode, without validating its assertions and signatures. + - This is useful when installing local snaps that are either unsigned or have signatures that have not been acknowledged. + - See U(https://snapcraft.io/docs/install-modes) for more details about installation modes. + type: bool + required: false + default: false + version_added: 7.2.0 +notes: + - Privileged operations, such as installing and configuring snaps, require root privileges. This is only the case if the + user has not logged in to the Snap Store. 
+author: + - Victor Carceler (@vcarceler) + - Stanislas Lange (@angristan) + +seealso: + - module: community.general.snap_alias +""" + +EXAMPLES = r""" +# Install "foo" and "bar" snaps +- name: Install foo + community.general.snap: + name: + - foo + - bar + +# Install "foo" snap with options par1=A and par2=B +- name: Install "foo" with options + community.general.snap: + name: + - foo + options: + - par1=A + - par2=B + +# Install "foo" and "bar" snaps with common option com=A and specific options fooPar=X and barPar=Y +- name: Install "foo" and "bar" with options + community.general.snap: + name: + - foo + - bar + options: + - com=A + - foo:fooPar=X + - bar:barPar=Y + +# Remove "foo" snap +- name: Remove foo + community.general.snap: + name: foo + state: absent + +# Install a snap with classic confinement +- name: Install "foo" with option --classic + community.general.snap: + name: foo + classic: true + +# Install a snap from a specific channel +- name: Install "foo" with option --channel=latest/edge + community.general.snap: + name: foo + channel: latest/edge +""" + +RETURN = r""" +classic: + description: Whether or not the snaps were installed with the classic confinement. + type: bool + returned: When snaps are installed +channel: + description: The channel the snaps were installed from. + type: str + returned: When snaps are installed +cmd: + description: The command that was executed on the host. + type: str + returned: When changed is true +snaps_installed: + description: The list of actually installed snaps. + type: list + returned: When any snaps have been installed +snaps_removed: + description: The list of actually removed snaps. + type: list + returned: When any snaps have been removed +options_changed: + description: The list of options set/changed in format C(snap:key=value). + type: list + returned: When any options have been changed/set + version_added: 4.4.0 +version: + description: Versions of snap components as reported by C(snap version). 
+    type: dict
+    returned: always
+    version_added: 10.3.0
+"""
+
+import re
+import json
+import numbers
+
+from ansible.module_utils.common.text.converters import to_native
+
+from ansible_collections.community.general.plugins.module_utils.module_helper import StateModuleHelper
+from ansible_collections.community.general.plugins.module_utils.snap import snap_runner, get_version
+
+
+class Snap(StateModuleHelper):
+    NOT_INSTALLED = 0
+    CHANNEL_MISMATCH = 1
+    INSTALLED = 2
+
+    __disable_re = re.compile(r'(?:\S+\s+){5}(?P<notes>\S+)')
+    __set_param_re = re.compile(r'(?P<snap_prefix>\S+:)?(?P<key>\S+)\s*=\s*(?P<value>.+)')
+    __list_re = re.compile(r'^(?P<name>\S+)\s+\S+\s+\S+\s+(?P<channel>\S+)')
+    module = dict(
+        argument_spec={
+            'name': dict(type='list', elements='str', required=True),
+            'state': dict(type='str', default='present', choices=['absent', 'present', 'enabled', 'disabled']),
+            'classic': dict(type='bool', default=False),
+            'channel': dict(type='str'),
+            'options': dict(type='list', elements='str'),
+            'dangerous': dict(type='bool', default=False),
+        },
+        supports_check_mode=True,
+    )
+
+    @staticmethod
+    def _first_non_zero(a):
+        for elem in a:
+            if elem != 0:
+                return elem
+
+        return 0
+
+    def __init_module__(self):
+        self.runner = snap_runner(self.module)
+        self.vars.version = get_version(self.runner)
+        # if state=present there might be file names passed in 'name', in
+        # which case they must be converted to their actual snap names, which
+        # is done using the names_from_snaps() method calling 'snap info'.
+ self.vars.set("snapinfo_run_info", [], output=(self.verbosity >= 4)) + self.vars.set("status_run_info", [], output=(self.verbosity >= 4)) + self.vars.set("status_out", None, output=(self.verbosity >= 4)) + self.vars.set("run_info", [], output=(self.verbosity >= 4)) + + if self.vars.state == "present": + self.vars.set("snap_names", self.names_from_snaps(self.vars.name)) + status_var = "snap_names" + else: + status_var = "name" + self.vars.set("status_var", status_var, output=False) + self.vars.set("snap_status", self.snap_status(self.vars[self.vars.status_var], self.vars.channel), output=False, change=True) + self.vars.set("snap_status_map", dict(zip(self.vars.name, self.vars.snap_status)), output=False, change=True) + + def __quit_module__(self): + self.vars.snap_status = self.snap_status(self.vars[self.vars.status_var], self.vars.channel) + if self.vars.channel is None: + self.vars.channel = "stable" + + def _run_multiple_commands(self, commands, actionable_names, bundle=True, refresh=False): + results_cmd = [] + results_rc = [] + results_out = [] + results_err = [] + results_run_info = [] + + state = "refresh" if refresh else self.vars.state + + with self.runner(commands + ["name"]) as ctx: + if bundle: + rc, out, err = ctx.run(state=state, name=actionable_names) + results_cmd.append(commands + actionable_names) + results_rc.append(rc) + results_out.append(out.strip()) + results_err.append(err.strip()) + results_run_info.append(ctx.run_info) + else: + for name in actionable_names: + rc, out, err = ctx.run(state=state, name=name) + results_cmd.append(commands + [name]) + results_rc.append(rc) + results_out.append(out.strip()) + results_err.append(err.strip()) + results_run_info.append(ctx.run_info) + + return ( + '; '.join([to_native(x) for x in results_cmd]), + self._first_non_zero(results_rc), + '\n'.join(results_out), + '\n'.join(results_err), + results_run_info, + ) + + def convert_json_subtree_to_map(self, json_subtree, prefix=None): + option_map = {} + + 
if not isinstance(json_subtree, dict): + self.do_raise("Non-dict non-leaf element encountered while parsing option map. " + "The output format of 'snap set' may have changed. Aborting!") + + for key, value in json_subtree.items(): + full_key = key if prefix is None else prefix + "." + key + + if isinstance(value, (str, float, bool, numbers.Integral)): + option_map[full_key] = str(value) + else: + option_map.update(self.convert_json_subtree_to_map(json_subtree=value, prefix=full_key)) + + return option_map + + def convert_json_to_map(self, json_string): + json_object = json.loads(json_string) + return self.convert_json_subtree_to_map(json_object) + + def retrieve_option_map(self, snap_name): + with self.runner("get name") as ctx: + rc, out, err = ctx.run(name=snap_name) + + if rc != 0: + return {} + + result = out.splitlines() + + if "has no configuration" in result[0]: + return {} + + try: + option_map = self.convert_json_to_map(out) + return option_map + except Exception as e: + self.do_raise( + msg="Parsing option map returned by 'snap get {0}' triggers exception '{1}', output:\n'{2}'".format(snap_name, str(e), out)) + + def names_from_snaps(self, snaps): + def process_one(rc, out, err): + res = [line for line in out.split("\n") if line.startswith("name:")] + name = res[0].split()[1] + return [name] + + def process_many(rc, out, err): + # This needs to be "\n---" instead of just "---" because otherwise + # if a snap uses "---" in its description then that will incorrectly + # be interpreted as a separator between snaps in the output. 
+ outputs = out.split("\n---") + res = [] + for sout in outputs: + res.extend(process_one(rc, sout, "")) + return res + + def process(rc, out, err): + if len(snaps) == 1: + check_error = err + process_ = process_one + else: + check_error = out + process_ = process_many + + if "warning: no snap found" in check_error: + self.do_raise("Snaps not found: {0}.".format([x.split()[-1] + for x in out.split('\n') + if x.startswith("warning: no snap found")])) + return process_(rc, out, err) + + names = [] + if snaps: + with self.runner("info name", output_process=process) as ctx: + try: + names = ctx.run(name=snaps) + finally: + self.vars.snapinfo_run_info.append(ctx.run_info) + return names + + def snap_status(self, snap_name, channel): + def _status_check(name, channel, installed): + match = [c for n, c in installed if n == name] + if not match: + return Snap.NOT_INSTALLED + if channel and match[0] not in (channel, "latest/{0}".format(channel)): + return Snap.CHANNEL_MISMATCH + else: + return Snap.INSTALLED + + with self.runner("_list") as ctx: + rc, out, err = ctx.run(check_rc=True) + list_out = out.split('\n')[1:] + list_out = [self.__list_re.match(x) for x in list_out] + list_out = [(m.group('name'), m.group('channel')) for m in list_out if m] + self.vars.status_out = list_out + self.vars.status_run_info = ctx.run_info + + return [_status_check(n, channel, list_out) for n in snap_name] + + def is_snap_enabled(self, snap_name): + with self.runner("_list name") as ctx: + rc, out, err = ctx.run(name=snap_name) + if rc != 0: + return None + result = out.splitlines()[1] + match = self.__disable_re.match(result) + if not match: + self.do_raise(msg="Unable to parse 'snap list {0}' output:\n{1}".format(snap_name, out)) + notes = match.group('notes') + return "disabled" not in notes.split(',') + + def _present(self, actionable_snaps, refresh=False): + self.changed = True + self.vars.snaps_installed = actionable_snaps + + if self.check_mode: + return + + params = ['state', 
'classic', 'channel', 'dangerous']  # get base cmd parts
+        has_one_pkg_params = bool(self.vars.classic) or self.vars.channel != 'stable'
+        has_multiple_snaps = len(actionable_snaps) > 1
+
+        if has_one_pkg_params and has_multiple_snaps:
+            self.vars.cmd, rc, out, err, run_info = self._run_multiple_commands(params, actionable_snaps, bundle=False, refresh=refresh)
+        else:
+            self.vars.cmd, rc, out, err, run_info = self._run_multiple_commands(params, actionable_snaps, refresh=refresh)
+        self.vars.run_info = run_info
+
+        if rc == 0:
+            return
+
+        classic_snap_pattern = re.compile(r'^error: This revision of snap "(?P<package_name>\w+)"'
+                                          r' was published using classic confinement')
+        match = classic_snap_pattern.match(err)
+        if match:
+            err_pkg = match.group('package_name')
+            msg = "Couldn't install {name} because it requires classic confinement".format(name=err_pkg)
+        else:
+            msg = "Ooops! Snap installation failed while executing '{cmd}', please examine logs and " \
+                  "error output for more details.".format(cmd=self.vars.cmd)
+        self.do_raise(msg=msg)
+
+    def state_present(self):
+
+        self.vars.set_meta('classic', output=True)
+        self.vars.set_meta('channel', output=True)
+
+        actionable_refresh = [snap for snap in self.vars.name if self.vars.snap_status_map[snap] == Snap.CHANNEL_MISMATCH]
+        if actionable_refresh:
+            self._present(actionable_refresh, refresh=True)
+        actionable_install = [snap for snap in self.vars.name if self.vars.snap_status_map[snap] == Snap.NOT_INSTALLED]
+        if actionable_install:
+            self._present(actionable_install)
+
+        self.set_options()
+
+    def set_options(self):
+        if self.vars.options is None:
+            return
+
+        actionable_snaps = [s for s in self.vars.name if self.vars.snap_status_map[s] != Snap.NOT_INSTALLED]
+        overall_options_changed = []
+
+        for snap_name in actionable_snaps:
+            option_map = self.retrieve_option_map(snap_name=snap_name)
+
+            options_changed = []
+
+            for option_string in self.vars.options:
+                match = self.__set_param_re.match(option_string)
+
+                if not match:
+ msg = "Cannot parse set option '{option_string}'".format(option_string=option_string) + self.do_raise(msg) + + snap_prefix = match.group("snap_prefix") + selected_snap_name = snap_prefix[:-1] if snap_prefix else None + + if selected_snap_name is not None and selected_snap_name not in self.vars.name: + msg = "Snap option '{option_string}' refers to snap which is not in the list of snap names".format(option_string=option_string) + self.do_raise(msg) + + if selected_snap_name is None or (snap_name is not None and snap_name == selected_snap_name): + key = match.group("key") + value = match.group("value").strip() + + if key not in option_map or key in option_map and option_map[key] != value: + option_without_prefix = key + "=" + value + option_with_prefix = option_string if selected_snap_name is not None else snap_name + ":" + option_string + options_changed.append(option_without_prefix) + overall_options_changed.append(option_with_prefix) + + if options_changed: + self.changed = True + + if not self.check_mode: + with self.runner("_set name options") as ctx: + rc, out, err = ctx.run(name=snap_name, options=options_changed) + if rc != 0: + if 'has no "configure" hook' in err: + msg = "Snap '{snap}' does not have any configurable options".format(snap=snap_name) + self.do_raise(msg) + + msg = "Cannot set options '{options}' for snap '{snap}': error={error}".format( + options=" ".join(options_changed), snap=snap_name, error=err) + self.do_raise(msg) + + if overall_options_changed: + self.vars.options_changed = overall_options_changed + + def _generic_state_action(self, actionable_func, actionable_var, params): + actionable_snaps = [s for s in self.vars.name if actionable_func(s)] + if not actionable_snaps: + return + self.changed = True + self.vars[actionable_var] = actionable_snaps + if self.check_mode: + return + self.vars.cmd, rc, out, err, run_info = self._run_multiple_commands(params, actionable_snaps) + self.vars.run_info = run_info + if rc == 0: + return + msg = 
"Ooops! Snap operation failed while executing '{cmd}', please examine logs and " \ + "error output for more details.".format(cmd=self.vars.cmd) + self.do_raise(msg=msg) + + def state_absent(self): + self._generic_state_action(lambda s: self.vars.snap_status_map[s] != Snap.NOT_INSTALLED, "snaps_removed", ['classic', 'channel', 'state']) + + def state_enabled(self): + self._generic_state_action(lambda s: not self.is_snap_enabled(s), "snaps_enabled", ['classic', 'channel', 'state']) + + def state_disabled(self): + self._generic_state_action(self.is_snap_enabled, "snaps_disabled", ['classic', 'channel', 'state']) + + +def main(): + Snap.execute() + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/snap_alias.py b/plugins/modules/snap_alias.py new file mode 100644 index 0000000000..4a68671a06 --- /dev/null +++ b/plugins/modules/snap_alias.py @@ -0,0 +1,185 @@ +#!/usr/bin/python +# +# Copyright (c) 2021, Alexei Znamensky (russoz) +# +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + +DOCUMENTATION = r""" +module: snap_alias +short_description: Manages snap aliases +version_added: 4.0.0 +description: + - Manages snaps aliases. +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: full +options: + state: + description: + - Desired state of the alias. + type: str + choices: [absent, present] + default: present + name: + description: + - Name of the snap. + type: str + alias: + description: + - Aliases to be created or removed. 
+    type: list
+    elements: str
+    aliases: [aliases]
+
+author:
+  - Alexei Znamensky (@russoz)
+
+seealso:
+  - module: community.general.snap
+"""
+
+EXAMPLES = r"""
+# Create aliases for the "hello-world" snap
+- name: Create snap alias
+  community.general.snap_alias:
+    name: hello-world
+    alias: hw
+
+- name: Create multiple aliases
+  community.general.snap_alias:
+    name: hello-world
+    aliases:
+      - hw
+      - hw2
+      - hw3
+    state: present  # optional
+
+- name: Remove one specific alias
+  community.general.snap_alias:
+    name: hw
+    state: absent
+
+- name: Remove all aliases for snap
+  community.general.snap_alias:
+    name: hello-world
+    state: absent
+"""
+
+RETURN = r"""
+snap_aliases:
+  description: The snap aliases after execution. If called in check mode, then the list represents the state before execution.
+  type: list
+  elements: str
+  returned: always
+version:
+  description: Versions of snap components as reported by C(snap version).
+  type: dict
+  returned: always
+  version_added: 10.3.0
+"""
+
+
+import re
+
+from ansible_collections.community.general.plugins.module_utils.module_helper import StateModuleHelper
+from ansible_collections.community.general.plugins.module_utils.snap import snap_runner, get_version
+
+
+class SnapAlias(StateModuleHelper):
+    _RE_ALIAS_LIST = re.compile(r"^(?P<snap>\S+)\s+(?P<alias>[\w-]+)\s+.*$")
+
+    module = dict(
+        argument_spec={
+            'state': dict(type='str', choices=['absent', 'present'], default='present'),
+            'name': dict(type='str'),
+            'alias': dict(type='list', elements='str', aliases=['aliases']),
+        },
+        required_if=[
+            ('state', 'present', ['name', 'alias']),
+            ('state', 'absent', ['name', 'alias'], True),
+        ],
+        supports_check_mode=True,
+    )
+
+    def _aliases(self):
+        n = self.vars.name
+        return {n: self._get_aliases_for(n)} if n else self._get_aliases()
+
+    def __init_module__(self):
+        self.runner = snap_runner(self.module)
+        self.vars.version = get_version(self.runner)
+        self.vars.set("snap_aliases", self._aliases(), change=True, diff=True)
+
+    def
__quit_module__(self): + self.vars.snap_aliases = self._aliases() + + def _get_aliases(self): + def process(rc, out, err): + if err: + return {} + aliases = [self._RE_ALIAS_LIST.match(a.strip()) for a in out.splitlines()[1:]] + snap_alias_list = [(entry.group("snap"), entry.group("alias")) for entry in aliases] + results = {} + for snap, alias in snap_alias_list: + results[snap] = results.get(snap, []) + [alias] + return results + + with self.runner("state_alias name", check_rc=True, output_process=process) as ctx: + aliases = ctx.run(state_alias="info") + if self.verbosity >= 4: + self.vars.get_aliases_run_info = ctx.run_info + return aliases + + def _get_aliases_for(self, name): + return self._get_aliases().get(name, []) + + def _has_alias(self, name=None, alias=None): + if name: + if name not in self.vars.snap_aliases: + return False + if alias is None: + return bool(self.vars.snap_aliases[name]) + return alias in self.vars.snap_aliases[name] + + return any(alias in aliases for aliases in self.vars.snap_aliases.values()) + + def state_present(self): + for _alias in self.vars.alias: + if not self._has_alias(self.vars.name, _alias): + self.changed = True + with self.runner("state_alias name alias", check_mode_skip=True) as ctx: + ctx.run(state_alias=self.vars.state, alias=_alias) + if self.verbosity >= 4: + self.vars.run_info = ctx.run_info + + def state_absent(self): + if not self.vars.alias: + if self._has_alias(self.vars.name): + self.changed = True + with self.runner("state_alias name", check_mode_skip=True) as ctx: + ctx.run(state_alias=self.vars.state) + if self.verbosity >= 4: + self.vars.run_info = ctx.run_info + else: + for _alias in self.vars.alias: + if self._has_alias(self.vars.name, _alias): + self.changed = True + with self.runner("state_alias alias", check_mode_skip=True) as ctx: + ctx.run(state_alias=self.vars.state, alias=_alias) + if self.verbosity >= 4: + self.vars.run_info = ctx.run_info + + +def main(): + SnapAlias.execute() + + +if __name__ 
== '__main__': + main() diff --git a/plugins/modules/net_tools/snmp_facts.py b/plugins/modules/snmp_facts.py similarity index 80% rename from plugins/modules/net_tools/snmp_facts.py rename to plugins/modules/snmp_facts.py index 37183b95f4..a0577a8be9 100644 --- a/plugins/modules/net_tools/snmp_facts.py +++ b/plugins/modules/snmp_facts.py @@ -1,85 +1,91 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- # This file is part of Networklore's snmp library for Ansible -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# Copyright (c) Ansible project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = r''' ---- +DOCUMENTATION = r""" module: snmp_facts author: -- Patrick Ogenstad (@ogenstad) + - Patrick Ogenstad (@ogenstad) short_description: Retrieve facts for a device using SNMP description: - - Retrieve facts for a device using SNMP, the facts will be - inserted to the ansible_facts key. + - Retrieve facts for a device using SNMP, the facts are inserted to the C(ansible_facts) key. requirements: - - pysnmp + - pysnmp +extends_documentation_fragment: + - community.general.attributes + - community.general.attributes.facts + - community.general.attributes.facts_module +attributes: + check_mode: + version_added: 3.3.0 + # This was backported to 2.5.4 and 1.3.11 as well, since this was a bugfix options: - host: - description: - - Set to target SNMP server (normally C({{ inventory_hostname }})). - type: str - required: true - version: - description: - - SNMP Version to use, C(v2), C(v2c) or C(v3). - type: str - required: true - choices: [ v2, v2c, v3 ] - community: - description: - - The SNMP community string, required if I(version) is C(v2) or C(v2c). 
- type: str - level: - description: - - Authentication level. - - Required if I(version) is C(v3). - type: str - choices: [ authNoPriv, authPriv ] - username: - description: - - Username for SNMPv3. - - Required if I(version) is C(v3). - type: str - integrity: - description: - - Hashing algorithm. - - Required if I(version) is C(v3). - type: str - choices: [ md5, sha ] - authkey: - description: - - Authentication key. - - Required I(version) is C(v3). - type: str - privacy: - description: - - Encryption algorithm. - - Required if I(level) is C(authPriv). - type: str - choices: [ aes, des ] - privkey: - description: - - Encryption key. - - Required if I(level) is C(authPriv). - type: str - timeout: - description: - - Response timeout in seconds. - type: int - version_added: 2.3.0 - retries: - description: - - Maximum number of request retries, 0 retries means just a single request. - type: int - version_added: 2.3.0 -''' + host: + description: + - Set to target SNMP server (normally C({{ inventory_hostname }})). + type: str + required: true + version: + description: + - SNMP Version to use, V(v2), V(v2c) or V(v3). + type: str + required: true + choices: [v2, v2c, v3] + community: + description: + - The SNMP community string, required if O(version) is V(v2) or V(v2c). + type: str + level: + description: + - Authentication level. + - Required if O(version=v3). + type: str + choices: [authNoPriv, authPriv] + username: + description: + - Username for SNMPv3. + - Required if O(version=v3). + type: str + integrity: + description: + - Hashing algorithm. + - Required if O(version=v3). + type: str + choices: [md5, sha] + authkey: + description: + - Authentication key. + - Required O(version=v3). + type: str + privacy: + description: + - Encryption algorithm. + - Required if O(level=authPriv). + type: str + choices: [aes, des] + privkey: + description: + - Encryption key. + - Required if O(level=authPriv). + type: str + timeout: + description: + - Response timeout in seconds. 
+ type: int + version_added: 2.3.0 + retries: + description: + - Maximum number of request retries, 0 retries means just a single request. + type: int + version_added: 2.3.0 +""" -EXAMPLES = r''' +EXAMPLES = r""" - name: Gather facts with SNMP version 2 community.general.snmp_facts: host: '{{ inventory_hostname }}' @@ -98,14 +104,14 @@ EXAMPLES = r''' authkey: abc12345 privkey: def6789 delegate_to: localhost -''' +""" -RETURN = r''' +RETURN = r""" ansible_sysdescr: description: A textual description of the entity. returned: success type: str - sample: Linux ubuntu-user 4.4.0-93-generic #116-Ubuntu SMP Fri Aug 11 21:17:51 UTC 2017 x86_64 + sample: "Linux ubuntu-user 4.4.0-93-generic #116-Ubuntu SMP Fri Aug 11 21:17:51 UTC 2017 x86_64" ansible_sysobjectid: description: The vendor's authoritative identification of the network management subsystem contained in the entity. returned: success @@ -117,7 +123,8 @@ ansible_sysuptime: type: int sample: 42388 ansible_syscontact: - description: The textual identification of the contact person for this managed node, together with information on how to contact this person. + description: The textual identification of the contact person for this managed node, together with information on how to + contact this person. returned: success type: str sample: Me @@ -127,7 +134,7 @@ ansible_sysname: type: str sample: ubuntu-user ansible_syslocation: - description: The physical location of this node (e.g., C(telephone closet, 3rd floor)). + description: The physical location of this node (for example, V(telephone closet, 3rd floor)). returned: success type: str sample: Sitting on the Dock of the Bay @@ -140,57 +147,52 @@ ansible_interfaces: description: Dictionary of each network interface and its metadata. 
returned: success type: dict - sample: { - "1": { + sample: + { + "1": { "adminstatus": "up", "description": "", "ifindex": "1", "ipv4": [ - { - "address": "127.0.0.1", - "netmask": "255.0.0.0" - } + { + "address": "127.0.0.1", + "netmask": "255.0.0.0" + } ], "mac": "", "mtu": "65536", "name": "lo", "operstatus": "up", "speed": "65536" - }, - "2": { + }, + "2": { "adminstatus": "up", "description": "", "ifindex": "2", "ipv4": [ - { - "address": "192.168.213.128", - "netmask": "255.255.255.0" - } + { + "address": "192.168.213.128", + "netmask": "255.255.255.0" + } ], "mac": "000a305a52a1", "mtu": "1500", "name": "Intel Corporation 82545EM Gigabit Ethernet Controller (Copper)", "operstatus": "up", "speed": "1500" + } } - } -''' +""" import binascii -import traceback from collections import defaultdict +from ansible_collections.community.general.plugins.module_utils import deps +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.common.text.converters import to_text -PYSNMP_IMP_ERR = None -try: +with deps.declare("pysnmp"): from pysnmp.entity.rfc3413.oneliner import cmdgen from pysnmp.proto.rfc1905 import EndOfMibView - HAS_PYSNMP = True -except Exception: - PYSNMP_IMP_ERR = traceback.format_exc() - HAS_PYSNMP = False - -from ansible.module_utils.basic import AnsibleModule, missing_required_lib -from ansible.module_utils.common.text.converters import to_text class DefineOid(object): @@ -293,17 +295,22 @@ def main(): m_args = module.params - if not HAS_PYSNMP: - module.fail_json(msg=missing_required_lib('pysnmp'), exception=PYSNMP_IMP_ERR) + deps.validate(module) cmdGen = cmdgen.CommandGenerator() - transport_opts = dict((k, m_args[k]) for k in ('timeout', 'retries') if m_args[k] is not None) + transport_opts = { + k: m_args[k] + for k in ('timeout', 'retries') + if m_args[k] is not None + } # Verify that we receive a community when using snmp v2 if m_args['version'] in ("v2", "v2c"): if m_args['community'] is None: 
module.fail_json(msg='Community not set when using snmp version 2') + integrity_proto = None + privacy_proto = None if m_args['version'] == "v3": if m_args['username'] is None: module.fail_json(msg='Username not set when using snmp version 3') diff --git a/plugins/modules/system/solaris_zone.py b/plugins/modules/solaris_zone.py similarity index 75% rename from plugins/modules/system/solaris_zone.py rename to plugins/modules/solaris_zone.py index 8ecdeb8dcf..8999b21393 100644 --- a/plugins/modules/system/solaris_zone.py +++ b/plugins/modules/solaris_zone.py @@ -1,86 +1,87 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- -# Copyright: (c) 2015, Paul Markham -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# Copyright (c) 2015, Paul Markham +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = r''' ---- +DOCUMENTATION = r""" module: solaris_zone short_description: Manage Solaris zones description: - Create, start, stop and delete Solaris zones. - This module does not currently allow changing of options for a zone that is already been created. author: -- Paul Markham (@pmarkham) + - Paul Markham (@pmarkham) requirements: - Solaris 10 or 11 +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: none options: state: description: - - C(present), configure and install the zone. - - C(installed), synonym for C(present). - - C(running), if the zone already exists, boot it, otherwise, configure and install - the zone first, then boot it. - - C(started), synonym for C(running). - - C(stopped), shutdown a zone. - - C(absent), destroy the zone. - - C(configured), configure the ready so that it's to be attached. 
- - C(attached), attach a zone, but do not boot it. - - C(detached), shutdown and detach a zone + - V(present), configure and install the zone. + - V(installed), synonym for V(present). + - V(running), if the zone already exists, boot it, otherwise, configure and install the zone first, then boot it. + - V(started), synonym for V(running). + - V(stopped), shutdown a zone. + - V(absent), destroy the zone. + - V(configured), configure the ready so that it is to be attached. + - V(attached), attach a zone, but do not boot it. + - V(detached), shutdown and detach a zone. type: str - choices: [ absent, attached, configured, detached, installed, present, running, started, stopped ] + choices: [absent, attached, configured, detached, installed, present, running, started, stopped] default: present name: description: - Zone name. - A zone name must be unique name. - - A zone name must begin with an alpha-numeric character. - - The name can contain alpha-numeric characters, underbars I(_), hyphens I(-), and periods I(.). + - A zone name must begin with an alphanumeric character. + - The name can contain alphanumeric characters, underscores V(_), hyphens V(-), and periods V(.). - The name cannot be longer than 64 characters. type: str required: true path: description: - - The path where the zone will be created. This is required when the zone is created, but not - used otherwise. + - The path where the zone is created. This is required when the zone is created, but not used otherwise. type: str sparse: description: - - Whether to create a sparse (C(true)) or whole root (C(false)) zone. + - Whether to create a sparse (V(true)) or whole root (V(false)) zone. type: bool - default: no + default: false root_password: description: - - The password hash for the root account. If not specified, the zone's root account - will not have a password. + - The password hash for the root account. If not specified, the zone's root account does not have a password. 
type: str config: description: - - 'The zonecfg configuration commands for this zone. See zonecfg(1M) for the valid options - and syntax. Typically this is a list of options separated by semi-colons or new lines, e.g. - "set auto-boot=true;add net;set physical=bge0;set address=10.1.1.1;end"' + - The C(zonecfg) configuration commands for this zone. See zonecfg(1M) for the valid options and syntax. Typically this + is a list of options separated by semi-colons or new lines, for example V(set auto-boot=true;add net;set physical=bge0;set + address=10.1.1.1;end). type: str default: '' create_options: description: - - 'Extra options to the zonecfg(1M) create command.' + - Extra options to the zonecfg(1M) create command. type: str default: '' install_options: description: - - 'Extra options to the zoneadm(1M) install command. To automate Solaris 11 zone creation, - use this to specify the profile XML file, e.g. install_options="-c sc_profile.xml"' + - Extra options to the zoneadm(1M) install command. To automate Solaris 11 zone creation, use this to specify the profile + XML file, for example O(install_options=-c sc_profile.xml). type: str default: '' attach_options: description: - - 'Extra options to the zoneadm attach command. For example, this can be used to specify - whether a minimum or full update of packages is required and if any packages need to - be deleted. For valid values, see zoneadm(1M)' + - Extra options to the zoneadm attach command. For example, this can be used to specify whether a minimum or full update + of packages is required and if any packages need to be deleted. For valid values, see zoneadm(1M). type: str default: '' timeout: @@ -88,15 +89,15 @@ options: - Timeout, in seconds, for zone to boot. type: int default: 600 -''' +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: Create and install a zone, but don't boot it community.general.solaris_zone: name: zone1 state: present path: /zones/zone1 - sparse: True + sparse: true root_password: Be9oX7OSwWoU. 
config: 'set autoboot=true; add net; set physical=bge0; set address=10.1.1.1; end' @@ -141,7 +142,7 @@ EXAMPLES = ''' name: zone1 state: attached attach_options: -u -''' +""" import os import platform @@ -205,7 +206,7 @@ class Zone(object): t.write('%s\n' % self.config) t.close() - cmd = '%s -z %s -f %s' % (self.zonecfg_cmd, self.name, t.name) + cmd = [self.zonecfg_cmd, '-z', self.name, '-f', t.name] (rc, out, err) = self.module.run_command(cmd) if rc != 0: self.module.fail_json(msg='Failed to create zone. %s' % (out + err)) @@ -216,7 +217,7 @@ class Zone(object): def install(self): if not self.module.check_mode: - cmd = '%s -z %s install %s' % (self.zoneadm_cmd, self.name, self.install_options) + cmd = [self.zoneadm_cmd, '-z', self.name, 'install', self.install_options] (rc, out, err) = self.module.run_command(cmd) if rc != 0: self.module.fail_json(msg='Failed to install zone. %s' % (out + err)) @@ -230,7 +231,7 @@ class Zone(object): def uninstall(self): if self.is_installed(): if not self.module.check_mode: - cmd = '%s -z %s uninstall -F' % (self.zoneadm_cmd, self.name) + cmd = [self.zoneadm_cmd, '-z', self.name, 'uninstall', '-F'] (rc, out, err) = self.module.run_command(cmd) if rc != 0: self.module.fail_json(msg='Failed to uninstall zone. 
%s' % (out + err)) @@ -243,37 +244,35 @@ class Zone(object): open('%s/root/noautoshutdown' % self.path, 'w').close() - node = open('%s/root/etc/nodename' % self.path, 'w') - node.write(self.name) - node.close() + with open('%s/root/etc/nodename' % self.path, 'w') as node: + node.write(self.name) - id = open('%s/root/etc/.sysIDtool.state' % self.path, 'w') - id.write('1 # System previously configured?\n') - id.write('1 # Bootparams succeeded?\n') - id.write('1 # System is on a network?\n') - id.write('1 # Extended network information gathered?\n') - id.write('0 # Autobinder succeeded?\n') - id.write('1 # Network has subnets?\n') - id.write('1 # root password prompted for?\n') - id.write('1 # locale and term prompted for?\n') - id.write('1 # security policy in place\n') - id.write('1 # NFSv4 domain configured\n') - id.write('0 # Auto Registration Configured\n') - id.write('vt100') - id.close() + with open('%s/root/etc/.sysIDtool.state' % self.path, 'w') as id: + id.write('1 # System previously configured?\n') + id.write('1 # Bootparams succeeded?\n') + id.write('1 # System is on a network?\n') + id.write('1 # Extended network information gathered?\n') + id.write('0 # Autobinder succeeded?\n') + id.write('1 # Network has subnets?\n') + id.write('1 # root password prompted for?\n') + id.write('1 # locale and term prompted for?\n') + id.write('1 # security policy in place\n') + id.write('1 # NFSv4 domain configured\n') + id.write('0 # Auto Registration Configured\n') + id.write('vt100') def configure_ssh_keys(self): rsa_key_file = '%s/root/etc/ssh/ssh_host_rsa_key' % self.path dsa_key_file = '%s/root/etc/ssh/ssh_host_dsa_key' % self.path if not os.path.isfile(rsa_key_file): - cmd = '%s -f %s -t rsa -N ""' % (self.ssh_keygen_cmd, rsa_key_file) + cmd = [self.ssh_keygen_cmd, '-f', rsa_key_file, '-t', 'rsa', '-N', ''] (rc, out, err) = self.module.run_command(cmd) if rc != 0: self.module.fail_json(msg='Failed to create rsa key. 
%s' % (out + err)) if not os.path.isfile(dsa_key_file): - cmd = '%s -f %s -t dsa -N ""' % (self.ssh_keygen_cmd, dsa_key_file) + cmd = [self.ssh_keygen_cmd, '-f', dsa_key_file, '-t', 'dsa', '-N', ''] (rc, out, err) = self.module.run_command(cmd) if rc != 0: self.module.fail_json(msg='Failed to create dsa key. %s' % (out + err)) @@ -281,9 +280,8 @@ class Zone(object): def configure_password(self): shadow = '%s/root/etc/shadow' % self.path if self.root_password: - f = open(shadow, 'r') - lines = f.readlines() - f.close() + with open(shadow, 'r') as f: + lines = f.readlines() for i in range(0, len(lines)): fields = lines[i].split(':') @@ -291,14 +289,13 @@ class Zone(object): fields[1] = self.root_password lines[i] = ':'.join(fields) - f = open(shadow, 'w') - for line in lines: - f.write(line) - f.close() + with open(shadow, 'w') as f: + for line in lines: + f.write(line) def boot(self): if not self.module.check_mode: - cmd = '%s -z %s boot' % (self.zoneadm_cmd, self.name) + cmd = [self.zoneadm_cmd, '-z', self.name, 'boot'] (rc, out, err) = self.module.run_command(cmd) if rc != 0: self.module.fail_json(msg='Failed to boot zone. %s' % (out + err)) @@ -328,7 +325,7 @@ class Zone(object): if self.is_installed(): self.uninstall() if not self.module.check_mode: - cmd = '%s -z %s delete -F' % (self.zonecfg_cmd, self.name) + cmd = [self.zonecfg_cmd, '-z', self.name, 'delete', '-F'] (rc, out, err) = self.module.run_command(cmd) if rc != 0: self.module.fail_json(msg='Failed to delete zone. %s' % (out + err)) @@ -337,7 +334,7 @@ class Zone(object): def stop(self): if not self.module.check_mode: - cmd = '%s -z %s halt' % (self.zoneadm_cmd, self.name) + cmd = [self.zoneadm_cmd, '-z', self.name, 'halt'] (rc, out, err) = self.module.run_command(cmd) if rc != 0: self.module.fail_json(msg='Failed to stop zone. 
%s' % (out + err)) @@ -346,7 +343,7 @@ class Zone(object): def detach(self): if not self.module.check_mode: - cmd = '%s -z %s detach' % (self.zoneadm_cmd, self.name) + cmd = [self.zoneadm_cmd, '-z', self.name, 'detach'] (rc, out, err) = self.module.run_command(cmd) if rc != 0: self.module.fail_json(msg='Failed to detach zone. %s' % (out + err)) @@ -355,7 +352,7 @@ class Zone(object): def attach(self): if not self.module.check_mode: - cmd = '%s -z %s attach %s' % (self.zoneadm_cmd, self.name, self.attach_options) + cmd = [self.zoneadm_cmd, '-z', self.name, 'attach', self.attach_options] (rc, out, err) = self.module.run_command(cmd) if rc != 0: self.module.fail_json(msg='Failed to attach zone. %s' % (out + err)) @@ -363,7 +360,7 @@ class Zone(object): self.msg.append('zone attached') def exists(self): - cmd = '%s -z %s list' % (self.zoneadm_cmd, self.name) + cmd = [self.zoneadm_cmd, '-z', self.name, 'list'] (rc, out, err) = self.module.run_command(cmd) if rc == 0: return True @@ -380,7 +377,7 @@ class Zone(object): return self.status() == 'configured' def status(self): - cmd = '%s -z %s list -p' % (self.zoneadm_cmd, self.name) + cmd = [self.zoneadm_cmd, '-z', self.name, 'list', '-p'] (rc, out, err) = self.module.run_command(cmd) if rc == 0: return out.split(':')[2] diff --git a/plugins/modules/packaging/os/sorcery.py b/plugins/modules/sorcery.py similarity index 63% rename from plugins/modules/packaging/os/sorcery.py rename to plugins/modules/sorcery.py index 347413fc9d..eabd459be7 100644 --- a/plugins/modules/packaging/os/sorcery.py +++ b/plugins/modules/sorcery.py @@ -1,84 +1,94 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- -# (c) 2015-2016, Vlad Glagolev +# Copyright (c) 2015-2023, Vlad Glagolev # -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ 
import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: sorcery short_description: Package manager for Source Mage GNU/Linux description: - - Manages "spells" on Source Mage GNU/Linux using I(sorcery) toolchain + - Manages "spells" on Source Mage GNU/Linux using I(sorcery) toolchain. author: "Vlad Glagolev (@vaygr)" notes: - - When all three components are selected, the update goes by the sequence -- - Sorcery -> Grimoire(s) -> Spell(s); you cannot override it. - - grimoire handling (i.e. add/remove, including SCM/rsync versions) is not - yet supported. + - When all three components are selected, the update goes by the sequence -- Sorcery -> Grimoire(s) -> Spell(s); you cannot + override it. + - Grimoire handling is supported since community.general 7.3.0. requirements: - - bash + - bash +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: none options: - name: - description: - - Name of the spell - - multiple names can be given, separated by commas - - special value '*' in conjunction with states C(latest) or - C(rebuild) will update or rebuild the whole system respectively - aliases: ["spell"] - type: list - elements: str + name: + description: + - Name of the spell or grimoire. + - Multiple names can be given, separated by commas. + - Special value V(*) in conjunction with states V(latest) or V(rebuild) updates or rebuilds the whole system respectively. + - The alias O(grimoire) was added in community.general 7.3.0. 
+ aliases: ["spell", "grimoire"] + type: list + elements: str - state: - description: - - Whether to cast, dispel or rebuild a package - - state C(cast) is an equivalent of C(present), not C(latest) - - state C(latest) always triggers C(update_cache=yes) - - state C(rebuild) implies cast of all specified spells, not only - those existed before - choices: ["present", "latest", "absent", "cast", "dispelled", "rebuild"] - default: "present" - type: str + repository: + description: + - Repository location. + - If specified, O(name) represents grimoire(s) instead of spell(s). + - Special value V(*) pulls grimoire from the official location. + - Only single item in O(name) in conjunction with V(*) can be used. + - O(state=absent) must be used with a special value V(*). + type: str + version_added: 7.3.0 - depends: - description: - - Comma-separated list of _optional_ dependencies to build a spell - (or make sure it is built) with; use +/- in front of dependency - to turn it on/off ('+' is optional though) - - this option is ignored if C(name) parameter is equal to '*' or - contains more than one spell - - providers must be supplied in the form recognized by Sorcery, e.g. - 'openssl(SSL)' - type: str + state: + description: + - Whether to cast, dispel or rebuild a package. + - State V(cast) is an equivalent of V(present), not V(latest). + - State V(rebuild) implies cast of all specified spells, not only those existed before. + choices: ["present", "latest", "absent", "cast", "dispelled", "rebuild"] + default: "present" + type: str - update: - description: - - Whether or not to update sorcery scripts at the very first stage - type: bool - default: no + depends: + description: + - Comma-separated list of _optional_ dependencies to build a spell (or make sure it is built) with; use V(+)/V(-) in + front of dependency to turn it on/off (V(+) is optional though). + - This option is ignored if O(name) parameter is equal to V(*) or contains more than one spell. 
+ - Providers must be supplied in the form recognized by Sorcery, for example 'V(openssl(SSL\))'. + type: str - update_cache: - description: - - Whether or not to update grimoire collection before casting spells - type: bool - default: no - aliases: ["update_codex"] + update: + description: + - Whether or not to update sorcery scripts at the very first stage. + type: bool + default: false - cache_valid_time: - description: - - Time in seconds to invalidate grimoire collection on update - - especially useful for SCM and rsync grimoires - - makes sense only in pair with C(update_cache) - type: int -''' + update_cache: + description: + - Whether or not to update grimoire collection before casting spells. + type: bool + default: false + aliases: ["update_codex"] + + cache_valid_time: + description: + - Time in seconds to invalidate grimoire collection on update. + - Especially useful for SCM and rsync grimoires. + - Makes sense only in pair with O(update_cache). + type: int + default: 0 +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: Make sure spell foo is installed community.general.sorcery: spell: foo @@ -113,9 +123,9 @@ EXAMPLES = ''' depends: "{{ item.depends | default(None) }}" state: present loop: - - { spell: 'vifm', depends: '+file,-gtk+2' } - - { spell: 'fwknop', depends: 'gpgme' } - - { spell: 'pv,tnftp,tor' } + - {spell: 'vifm', depends: '+file,-gtk+2'} + - {spell: 'fwknop', depends: 'gpgme'} + - {spell: 'pv,tnftp,tor'} - name: Install the latest version of spell foo using regular glossary community.general.sorcery: @@ -131,22 +141,46 @@ EXAMPLES = ''' community.general.sorcery: spell: '*' state: rebuild - update: yes - update_cache: yes + update: true + update_cache: true - name: Refresh the grimoire collection if it is 1 day old using native sorcerous alias community.general.sorcery: - update_codex: yes + update_codex: true cache_valid_time: 86400 +- name: Make sure stable grimoire is present + community.general.sorcery: + name: stable + repository: '*' + 
state: present + +- name: Make sure binary and stable-rc grimoires are removed + community.general.sorcery: + grimoire: binary,stable-rc + repository: '*' + state: absent + +- name: Make sure games grimoire is pulled from rsync + community.general.sorcery: + grimoire: games + repository: "rsync://download.sourcemage.org::codex/games" + state: present + +- name: Make sure a specific branch of stable grimoire is pulled from git + community.general.sorcery: + grimoire: stable.git + repository: "git://download.sourcemage.org/smgl/grimoire.git:stable.git:stable-0.62" + state: present + - name: Update only Sorcery itself community.general.sorcery: - update: yes -''' + update: true +""" -RETURN = ''' -''' +RETURN = r""" +""" import datetime @@ -156,6 +190,8 @@ import re import shutil import sys +from ansible.module_utils.basic import AnsibleModule + # auto-filled at module init SORCERY = { @@ -169,6 +205,8 @@ SORCERY = { SORCERY_LOG_DIR = "/var/log/sorcery" SORCERY_STATE_DIR = "/var/state/sorcery" +NA = "N/A" + def get_sorcery_ver(module): """ Get Sorcery version. """ @@ -209,9 +247,11 @@ def codex_fresh(codex, module): return True -def codex_list(module): +def codex_list(module, skip_new=False): """ List valid grimoire collection. 
""" + params = module.params + codex = {} cmd_scribe = "%s index" % SORCERY['scribe'] @@ -230,6 +270,10 @@ def codex_list(module): if match: codex[match.group('grim')] = match.group('ver') + # return only specified grimoires unless requested to skip new + if params['repository'] and not skip_new: + codex = {x: codex.get(x, NA) for x in params['name']} + if not codex: module.fail_json(msg="no grimoires to operate on; add at least one") @@ -247,8 +291,7 @@ def update_sorcery(module): changed = False if module.check_mode: - if not module.params['name'] and not module.params['update_cache']: - module.exit_json(changed=True, msg="would have updated Sorcery") + return (True, "would have updated Sorcery") else: sorcery_ver = get_sorcery_ver(module) @@ -262,9 +305,7 @@ def update_sorcery(module): if sorcery_ver != get_sorcery_ver(module): changed = True - if not module.params['name'] and not module.params['update_cache']: - module.exit_json(changed=changed, - msg="successfully updated Sorcery") + return (changed, "successfully updated Sorcery") def update_codex(module): @@ -283,28 +324,29 @@ def update_codex(module): fresh = codex_fresh(codex, module) if module.check_mode: - if not params['name']: - if not fresh: - changed = True - - module.exit_json(changed=changed, msg="would have updated Codex") - elif not fresh or params['name'] and params['state'] == 'latest': - # SILENT is required as a workaround for query() in libgpg - module.run_command_environ_update.update(dict(SILENT='1')) - - cmd_scribe = "%s update" % SORCERY['scribe'] - - rc, stdout, stderr = module.run_command(cmd_scribe) - - if rc != 0: - module.fail_json(msg="unable to update Codex: " + stdout) - - if codex != codex_list(module): + if not fresh: changed = True - if not params['name']: - module.exit_json(changed=changed, - msg="successfully updated Codex") + return (changed, "would have updated Codex") + else: + if not fresh: + # SILENT is required as a workaround for query() in libgpg + 
module.run_command_environ_update.update(dict(SILENT='1')) + + cmd_scribe = "%s update" % SORCERY['scribe'] + + if params['repository']: + cmd_scribe += ' %s' % ' '.join(codex.keys()) + + rc, stdout, stderr = module.run_command(cmd_scribe) + + if rc != 0: + module.fail_json(msg="unable to update Codex: " + stdout) + + if codex != codex_list(module): + changed = True + + return (changed, "successfully updated Codex") def match_depends(module): @@ -416,15 +458,11 @@ def match_depends(module): if depends_new: try: - try: - fl = open(sorcery_depends, 'a') - + with open(sorcery_depends, 'a') as fl: for k in depends_new: fl.write("%s:%s:%s:optional::\n" % (spell, k, depends[k])) - except IOError: - module.fail_json(msg="I/O error on the depends file") - finally: - fl.close() + except IOError: + module.fail_json(msg="I/O error on the depends file") depends_ok = False @@ -437,6 +475,65 @@ def match_depends(module): return depends_ok +def manage_grimoires(module): + """ Add or remove grimoires. """ + + params = module.params + grimoires = params['name'] + url = params['repository'] + + codex = codex_list(module, True) + + if url == '*': + if params['state'] in ('present', 'latest', 'absent'): + if params['state'] == 'absent': + action = "remove" + todo = set(grimoires) & set(codex) + else: + action = "add" + todo = set(grimoires) - set(codex) + + if not todo: + return (False, "all grimoire(s) are already %sed" % action[:5]) + + if module.check_mode: + return (True, "would have %sed grimoire(s)" % action[:5]) + + cmd_scribe = "%s %s %s" % (SORCERY['scribe'], action, ' '.join(todo)) + + rc, stdout, stderr = module.run_command(cmd_scribe) + + if rc != 0: + module.fail_json(msg="failed to %s one or more grimoire(s): %s" % (action, stdout)) + + return (True, "successfully %sed one or more grimoire(s)" % action[:5]) + else: + module.fail_json(msg="unsupported operation on '*' repository value") + else: + if params['state'] in ('present', 'latest'): + if len(grimoires) > 1: + 
module.fail_json(msg="using multiple items with repository is invalid") + + grimoire = grimoires[0] + + if grimoire in codex: + return (False, "grimoire %s already exists" % grimoire) + + if module.check_mode: + return (True, "would have added grimoire %s from %s" % (grimoire, url)) + + cmd_scribe = "%s add %s from %s" % (SORCERY['scribe'], grimoire, url) + + rc, stdout, stderr = module.run_command(cmd_scribe) + + if rc != 0: + module.fail_json(msg="failed to add grimoire %s from %s: %s" % (grimoire, url, stdout)) + + return (True, "successfully added grimoire %s from %s" % (grimoire, url)) + else: + module.fail_json(msg="unsupported operation on repository value") + + def manage_spells(module): """ Cast or dispel spells. @@ -462,7 +559,7 @@ def manage_spells(module): # see update_codex() module.run_command_environ_update.update(dict(SILENT='1')) - cmd_sorcery = "%s queue" + cmd_sorcery = "%s queue" % SORCERY['sorcery'] rc, stdout, stderr = module.run_command(cmd_sorcery) @@ -481,7 +578,7 @@ def manage_spells(module): except IOError: module.fail_json(msg="failed to restore the update queue") - module.exit_json(changed=True, msg="would have updated the system") + return (True, "would have updated the system") cmd_cast = "%s --queue" % SORCERY['cast'] @@ -490,12 +587,12 @@ def manage_spells(module): if rc != 0: module.fail_json(msg="failed to update the system") - module.exit_json(changed=True, msg="successfully updated the system") + return (True, "successfully updated the system") else: - module.exit_json(changed=False, msg="the system is already up to date") + return (False, "the system is already up to date") elif params['state'] == 'rebuild': if module.check_mode: - module.exit_json(changed=True, msg="would have rebuilt the system") + return (True, "would have rebuilt the system") cmd_sorcery = "%s rebuild" % SORCERY['sorcery'] @@ -504,7 +601,7 @@ def manage_spells(module): if rc != 0: module.fail_json(msg="failed to rebuild the system: " + stdout) - 
module.exit_json(changed=True, msg="successfully rebuilt the system") + return (True, "successfully rebuilt the system") else: module.fail_json(msg="unsupported operation on '*' name value") else: @@ -566,42 +663,43 @@ def manage_spells(module): if cast_queue: if module.check_mode: - module.exit_json(changed=True, msg="would have cast spell(s)") + return (True, "would have cast spell(s)") cmd_cast = "%s -c %s" % (SORCERY['cast'], ' '.join(cast_queue)) rc, stdout, stderr = module.run_command(cmd_cast) if rc != 0: - module.fail_json(msg="failed to cast spell(s): %s" + stdout) + module.fail_json(msg="failed to cast spell(s): " + stdout) - module.exit_json(changed=True, msg="successfully cast spell(s)") + return (True, "successfully cast spell(s)") elif params['state'] != 'absent': - module.exit_json(changed=False, msg="spell(s) are already cast") + return (False, "spell(s) are already cast") if dispel_queue: if module.check_mode: - module.exit_json(changed=True, msg="would have dispelled spell(s)") + return (True, "would have dispelled spell(s)") cmd_dispel = "%s %s" % (SORCERY['dispel'], ' '.join(dispel_queue)) rc, stdout, stderr = module.run_command(cmd_dispel) if rc != 0: - module.fail_json(msg="failed to dispel spell(s): %s" + stdout) + module.fail_json(msg="failed to dispel spell(s): " + stdout) - module.exit_json(changed=True, msg="successfully dispelled spell(s)") + return (True, "successfully dispelled spell(s)") else: - module.exit_json(changed=False, msg="spell(s) are already dispelled") + return (False, "spell(s) are already dispelled") def main(): module = AnsibleModule( argument_spec=dict( - name=dict(default=None, aliases=['spell'], type='list', elements='str'), + name=dict(aliases=['spell', 'grimoire'], type='list', elements='str'), + repository=dict(type='str'), state=dict(default='present', choices=['present', 'latest', 'absent', 'cast', 'dispelled', 'rebuild']), - depends=dict(default=None), + depends=dict(), update=dict(default=False, type='bool'), 
update_cache=dict(default=False, aliases=['update_codex'], type='bool'), cache_valid_time=dict(default=0, type='int') @@ -627,18 +725,34 @@ def main(): elif params['state'] in ('absent', 'dispelled'): params['state'] = 'absent' + changed = { + 'sorcery': (False, NA), + 'grimoires': (False, NA), + 'codex': (False, NA), + 'spells': (False, NA) + } + if params['update']: - update_sorcery(module) + changed['sorcery'] = update_sorcery(module) - if params['update_cache'] or params['state'] == 'latest': - update_codex(module) + if params['name'] and params['repository']: + changed['grimoires'] = manage_grimoires(module) - if params['name']: - manage_spells(module) + if params['update_cache']: + changed['codex'] = update_codex(module) + if params['name'] and not params['repository']: + changed['spells'] = manage_spells(module) + + if any(x[0] for x in changed.values()): + state_msg = "state changed" + state_changed = True + else: + state_msg = "no change in state" + state_changed = False + + module.exit_json(changed=state_changed, msg=state_msg + ": " + '; '.join(x[1] for x in changed.values())) -# import module snippets -from ansible.module_utils.basic import AnsibleModule if __name__ == '__main__': main() diff --git a/plugins/modules/source_control/git_config.py b/plugins/modules/source_control/git_config.py deleted file mode 100644 index 8651458610..0000000000 --- a/plugins/modules/source_control/git_config.py +++ /dev/null @@ -1,283 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# (c) 2015, Marius Gedminas -# (c) 2016, Matthew Gamble -# -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' ---- -module: git_config -author: - - Matthew Gamble (@djmattyg007) - - Marius Gedminas (@mgedmin) -requirements: ['git'] -short_description: Read and write git configuration -description: - - The C(git_config) module 
changes git configuration by invoking 'git config'. - This is needed if you don't want to use M(ansible.builtin.template) for the entire git - config file (e.g. because you need to change just C(user.email) in - /etc/.git/config). Solutions involving M(ansible.builtin.command) are cumbersome or - don't work correctly in check mode. -options: - list_all: - description: - - List all settings (optionally limited to a given I(scope)). - type: bool - default: 'no' - name: - description: - - The name of the setting. If no value is supplied, the value will - be read from the config if it has been set. - type: str - repo: - description: - - Path to a git repository for reading and writing values from a - specific repo. - type: path - file: - description: - - Path to an adhoc git configuration file to be managed using the C(file) scope. - type: path - version_added: 2.0.0 - scope: - description: - - Specify which scope to read/set values from. - - This is required when setting config values. - - If this is set to C(local), you must also specify the C(repo) parameter. - - If this is set to C(file), you must also specify the C(file) parameter. - - It defaults to system only when not using I(list_all)=C(yes). - choices: [ "file", "local", "global", "system" ] - type: str - state: - description: - - "Indicates the setting should be set/unset. - This parameter has higher precedence than I(value) parameter: - when I(state)=absent and I(value) is defined, I(value) is discarded." - choices: [ 'present', 'absent' ] - default: 'present' - type: str - value: - description: - - When specifying the name of a single setting, supply a value to - set that setting to the given value. 
- type: str -''' - -EXAMPLES = ''' -- name: Add a setting to ~/.gitconfig - community.general.git_config: - name: alias.ci - scope: global - value: commit - -- name: Add a setting to ~/.gitconfig - community.general.git_config: - name: alias.st - scope: global - value: status - -- name: Remove a setting from ~/.gitconfig - community.general.git_config: - name: alias.ci - scope: global - state: absent - -- name: Add a setting to ~/.gitconfig - community.general.git_config: - name: core.editor - scope: global - value: vim - -- name: Add a setting system-wide - community.general.git_config: - name: alias.remotev - scope: system - value: remote -v - -- name: Add a setting to a system scope (default) - community.general.git_config: - name: alias.diffc - value: diff --cached - -- name: Add a setting to a system scope (default) - community.general.git_config: - name: color.ui - value: auto - -- name: Make etckeeper not complaining when it is invoked by cron - community.general.git_config: - name: user.email - repo: /etc - scope: local - value: 'root@{{ ansible_fqdn }}' - -- name: Read individual values from git config - community.general.git_config: - name: alias.ci - scope: global - -- name: Scope system is also assumed when reading values, unless list_all=yes - community.general.git_config: - name: alias.diffc - -- name: Read all values from git config - community.general.git_config: - list_all: yes - scope: global - -- name: When list_all is yes and no scope is specified, you get configuration from all scopes - community.general.git_config: - list_all: yes - -- name: Specify a repository to include local settings - community.general.git_config: - list_all: yes - repo: /path/to/repo.git -''' - -RETURN = ''' ---- -config_value: - description: When list_all=no and value is not set, a string containing the value of the setting in name - returned: success - type: str - sample: "vim" - -config_values: - description: When list_all=yes, a dict containing key/value pairs of 
multiple configuration settings - returned: success - type: dict - sample: - core.editor: "vim" - color.ui: "auto" - alias.diffc: "diff --cached" - alias.remotev: "remote -v" -''' -import os - -from ansible.module_utils.basic import AnsibleModule - - -def main(): - module = AnsibleModule( - argument_spec=dict( - list_all=dict(required=False, type='bool', default=False), - name=dict(type='str'), - repo=dict(type='path'), - file=dict(type='path'), - scope=dict(required=False, type='str', choices=['file', 'local', 'global', 'system']), - state=dict(required=False, type='str', default='present', choices=['present', 'absent']), - value=dict(required=False), - ), - mutually_exclusive=[['list_all', 'name'], ['list_all', 'value'], ['list_all', 'state']], - required_if=[ - ('scope', 'local', ['repo']), - ('scope', 'file', ['file']) - ], - required_one_of=[['list_all', 'name']], - supports_check_mode=True, - ) - git_path = module.get_bin_path('git', True) - - params = module.params - # We check error message for a pattern, so we need to make sure the messages appear in the form we're expecting. - # Set the locale to C to ensure consistent messages. 
- module.run_command_environ_update = dict(LANG='C', LC_ALL='C', LC_MESSAGES='C', LC_CTYPE='C') - - if params['name']: - name = params['name'] - else: - name = None - - if params['scope']: - scope = params['scope'] - elif params['list_all']: - scope = None - else: - scope = 'system' - - if params['state'] == 'absent': - unset = 'unset' - params['value'] = None - else: - unset = None - - if params['value']: - new_value = params['value'] - else: - new_value = None - - args = [git_path, "config", "--includes"] - if params['list_all']: - args.append('-l') - if scope == 'file': - args.append('-f') - args.append(params['file']) - elif scope: - args.append("--" + scope) - if name: - args.append(name) - - if scope == 'local': - dir = params['repo'] - elif params['list_all'] and params['repo']: - # Include local settings from a specific repo when listing all available settings - dir = params['repo'] - else: - # Run from root directory to avoid accidentally picking up any local config settings - dir = "/" - - (rc, out, err) = module.run_command(args, cwd=dir, expand_user_and_vars=False) - if params['list_all'] and scope and rc == 128 and 'unable to read config file' in err: - # This just means nothing has been set at the given scope - module.exit_json(changed=False, msg='', config_values={}) - elif rc >= 2: - # If the return code is 1, it just means the option hasn't been set yet, which is fine. 
- module.fail_json(rc=rc, msg=err, cmd=' '.join(args)) - - if params['list_all']: - values = out.rstrip().splitlines() - config_values = {} - for value in values: - k, v = value.split('=', 1) - config_values[k] = v - module.exit_json(changed=False, msg='', config_values=config_values) - elif not new_value and not unset: - module.exit_json(changed=False, msg='', config_value=out.rstrip()) - elif unset and not out: - module.exit_json(changed=False, msg='no setting to unset') - else: - old_value = out.rstrip() - if old_value == new_value: - module.exit_json(changed=False, msg="") - - if not module.check_mode: - if unset: - args.insert(len(args) - 1, "--" + unset) - cmd = args - else: - cmd = args + [new_value] - (rc, out, err) = module.run_command(cmd, cwd=dir, ignore_invalid_cwd=False, expand_user_and_vars=False) - if err: - module.fail_json(rc=rc, msg=err, cmd=cmd) - - module.exit_json( - msg='setting changed', - diff=dict( - before_header=' '.join(args), - before=old_value + "\n", - after_header=' '.join(args), - after=(new_value or '') + "\n" - ), - changed=True - ) - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/source_control/gitlab/gitlab_runner.py b/plugins/modules/source_control/gitlab/gitlab_runner.py deleted file mode 100644 index c31030ab01..0000000000 --- a/plugins/modules/source_control/gitlab/gitlab_runner.py +++ /dev/null @@ -1,415 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Copyright: (c) 2021, Raphaël Droz (raphael.droz@gmail.com) -# Copyright: (c) 2019, Guillaume Martinez (lunik@tiwabbit.fr) -# Copyright: (c) 2018, Samy Coenen -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - -DOCUMENTATION = ''' ---- -module: gitlab_runner -short_description: Create, modify and delete GitLab Runners. -description: - - Register, update and delete runners with the GitLab API. 
- - All operations are performed using the GitLab API v4. - - For details, consult the full API documentation at U(https://docs.gitlab.com/ee/api/runners.html). - - A valid private API token is required for all operations. You can create as many tokens as you like using the GitLab web interface at - U(https://$GITLAB_URL/profile/personal_access_tokens). - - A valid registration token is required for registering a new runner. - To create shared runners, you need to ask your administrator to give you this token. - It can be found at U(https://$GITLAB_URL/admin/runners/). -notes: - - To create a new runner at least the C(api_token), C(description) and C(api_url) options are required. - - Runners need to have unique descriptions. -author: - - Samy Coenen (@SamyCoenen) - - Guillaume Martinez (@Lunik) -requirements: - - python >= 2.7 - - python-gitlab >= 1.5.0 -extends_documentation_fragment: - - community.general.auth_basic - - community.general.gitlab - -options: - project: - description: - - ID or full path of the project in the form of group/name. - - Mutually exclusive with I(owned) since community.general 4.5.0. - type: str - version_added: '3.7.0' - description: - description: - - The unique name of the runner. - required: True - type: str - aliases: - - name - state: - description: - - Make sure that the runner with the same name exists with the same configuration or delete the runner with the same name. - required: False - default: present - choices: ["present", "absent"] - type: str - registration_token: - description: - - The registration token is used to register new runners. - - Required if I(state) is C(present). - type: str - owned: - description: - - Searches only runners available to the user when searching for existing, when false admin token required. - - Mutually exclusive with I(project) since community.general 4.5.0. 
- default: no - type: bool - version_added: 2.0.0 - active: - description: - - Define if the runners is immediately active after creation. - required: False - default: yes - type: bool - locked: - description: - - Determines if the runner is locked or not. - required: False - default: False - type: bool - access_level: - description: - - Determines if a runner can pick up jobs only from protected branches. - - If set to C(ref_protected), runner can pick up jobs only from protected branches. - - If set to C(not_protected), runner can pick up jobs from both protected and unprotected branches. - required: False - default: ref_protected - choices: ["ref_protected", "not_protected"] - type: str - maximum_timeout: - description: - - The maximum time that a runner has to complete a specific job. - required: False - default: 3600 - type: int - run_untagged: - description: - - Run untagged jobs or not. - required: False - default: yes - type: bool - tag_list: - description: The tags that apply to the runner. 
- required: False - default: [] - type: list - elements: str -''' - -EXAMPLES = ''' -- name: "Register runner" - community.general.gitlab_runner: - api_url: https://gitlab.example.com/ - api_token: "{{ access_token }}" - registration_token: 4gfdsg345 - description: Docker Machine t1 - state: present - active: True - tag_list: ['docker'] - run_untagged: False - locked: False - -- name: "Delete runner" - community.general.gitlab_runner: - api_url: https://gitlab.example.com/ - api_token: "{{ access_token }}" - description: Docker Machine t1 - state: absent - -- name: Delete an owned runner as a non-admin - community.general.gitlab_runner: - api_url: https://gitlab.example.com/ - api_token: "{{ access_token }}" - description: Docker Machine t1 - owned: yes - state: absent - -- name: Register runner for a specific project - community.general.gitlab_runner: - api_url: https://gitlab.example.com/ - api_token: "{{ access_token }}" - registration_token: 4gfdsg345 - description: MyProject runner - state: present - project: mygroup/mysubgroup/myproject -''' - -RETURN = ''' -msg: - description: Success or failure message - returned: always - type: str - sample: "Success" - -result: - description: json parsed response from the server - returned: always - type: dict - -error: - description: the error message returned by the GitLab API - returned: failed - type: str - sample: "400: path is already in use" - -runner: - description: API object - returned: always - type: dict -''' - -import traceback - -GITLAB_IMP_ERR = None -try: - import gitlab - HAS_GITLAB_PACKAGE = True -except Exception: - GITLAB_IMP_ERR = traceback.format_exc() - HAS_GITLAB_PACKAGE = False - -from ansible.module_utils.api import basic_auth_argument_spec -from ansible.module_utils.basic import AnsibleModule, missing_required_lib -from ansible.module_utils.common.text.converters import to_native - -from ansible_collections.community.general.plugins.module_utils.gitlab import auth_argument_spec, 
gitlab_authentication - -try: - cmp -except NameError: - def cmp(a, b): - return (a > b) - (a < b) - - -class GitLabRunner(object): - def __init__(self, module, gitlab_instance, project=None): - self._module = module - self._gitlab = gitlab_instance - # Whether to operate on GitLab-instance-wide or project-wide runners - # See https://gitlab.com/gitlab-org/gitlab-ce/issues/60774 - # for group runner token access - if project: - self._runners_endpoint = project.runners.list - elif module.params['owned']: - self._runners_endpoint = gitlab_instance.runners.list - else: - self._runners_endpoint = gitlab_instance.runners.all - - self.runner_object = None - - def create_or_update_runner(self, description, options): - changed = False - - # Because we have already call userExists in main() - if self.runner_object is None: - runner = self.create_runner({ - 'description': description, - 'active': options['active'], - 'token': options['registration_token'], - 'locked': options['locked'], - 'run_untagged': options['run_untagged'], - 'maximum_timeout': options['maximum_timeout'], - 'tag_list': options['tag_list'], - }) - changed = True - else: - changed, runner = self.update_runner(self.runner_object, { - 'active': options['active'], - 'locked': options['locked'], - 'run_untagged': options['run_untagged'], - 'maximum_timeout': options['maximum_timeout'], - 'access_level': options['access_level'], - 'tag_list': options['tag_list'], - }) - - self.runner_object = runner - if changed: - if self._module.check_mode: - self._module.exit_json(changed=True, msg="Successfully created or updated the runner %s" % description) - - try: - runner.save() - except Exception as e: - self._module.fail_json(msg="Failed to update runner: %s " % to_native(e)) - return True - else: - return False - - ''' - @param arguments Attributes of the runner - ''' - def create_runner(self, arguments): - if self._module.check_mode: - return True - - try: - runner = self._gitlab.runners.create(arguments) - except 
(gitlab.exceptions.GitlabCreateError) as e: - self._module.fail_json(msg="Failed to create runner: %s " % to_native(e)) - - return runner - - ''' - @param runner Runner object - @param arguments Attributes of the runner - ''' - def update_runner(self, runner, arguments): - changed = False - - for arg_key, arg_value in arguments.items(): - if arguments[arg_key] is not None: - if isinstance(arguments[arg_key], list): - list1 = getattr(runner, arg_key) - list1.sort() - list2 = arguments[arg_key] - list2.sort() - if cmp(list1, list2): - setattr(runner, arg_key, arguments[arg_key]) - changed = True - else: - if getattr(runner, arg_key) != arguments[arg_key]: - setattr(runner, arg_key, arguments[arg_key]) - changed = True - - return (changed, runner) - - ''' - @param description Description of the runner - ''' - def find_runner(self, description): - runners = self._runners_endpoint(as_list=False) - - for runner in runners: - # python-gitlab 2.2 through at least 2.5 returns a list of dicts for list() instead of a Runner - # object, so we need to handle both - if hasattr(runner, "description"): - if (runner.description == description): - return self._gitlab.runners.get(runner.id) - else: - if (runner['description'] == description): - return self._gitlab.runners.get(runner['id']) - - ''' - @param description Description of the runner - ''' - def exists_runner(self, description): - # When runner exists, object will be stored in self.runner_object. 
- runner = self.find_runner(description) - - if runner: - self.runner_object = runner - return True - return False - - def delete_runner(self): - if self._module.check_mode: - return True - - runner = self.runner_object - - return runner.delete() - - -def main(): - argument_spec = basic_auth_argument_spec() - argument_spec.update(auth_argument_spec()) - argument_spec.update(dict( - description=dict(type='str', required=True, aliases=["name"]), - active=dict(type='bool', default=True), - owned=dict(type='bool', default=False), - tag_list=dict(type='list', elements='str', default=[]), - run_untagged=dict(type='bool', default=True), - locked=dict(type='bool', default=False), - access_level=dict(type='str', default='ref_protected', choices=["not_protected", "ref_protected"]), - maximum_timeout=dict(type='int', default=3600), - registration_token=dict(type='str', no_log=True), - project=dict(type='str'), - state=dict(type='str', default="present", choices=["absent", "present"]), - )) - - module = AnsibleModule( - argument_spec=argument_spec, - mutually_exclusive=[ - ['api_username', 'api_token'], - ['api_username', 'api_oauth_token'], - ['api_username', 'api_job_token'], - ['api_token', 'api_oauth_token'], - ['api_token', 'api_job_token'], - ['project', 'owned'], - ], - required_together=[ - ['api_username', 'api_password'], - ], - required_one_of=[ - ['api_username', 'api_token', 'api_oauth_token', 'api_job_token'], - ], - required_if=[ - ('state', 'present', ['registration_token']), - ], - supports_check_mode=True, - ) - - state = module.params['state'] - runner_description = module.params['description'] - runner_active = module.params['active'] - tag_list = module.params['tag_list'] - run_untagged = module.params['run_untagged'] - runner_locked = module.params['locked'] - access_level = module.params['access_level'] - maximum_timeout = module.params['maximum_timeout'] - registration_token = module.params['registration_token'] - project = module.params['project'] - - 
if not HAS_GITLAB_PACKAGE: - module.fail_json(msg=missing_required_lib("python-gitlab"), exception=GITLAB_IMP_ERR) - - gitlab_instance = gitlab_authentication(module) - gitlab_project = None - if project: - try: - gitlab_project = gitlab_instance.projects.get(project) - except gitlab.exceptions.GitlabGetError as e: - module.fail_json(msg='No such a project %s' % project, exception=to_native(e)) - - gitlab_runner = GitLabRunner(module, gitlab_instance, gitlab_project) - runner_exists = gitlab_runner.exists_runner(runner_description) - - if state == 'absent': - if runner_exists: - gitlab_runner.delete_runner() - module.exit_json(changed=True, msg="Successfully deleted runner %s" % runner_description) - else: - module.exit_json(changed=False, msg="Runner deleted or does not exists") - - if state == 'present': - if gitlab_runner.create_or_update_runner(runner_description, { - "active": runner_active, - "tag_list": tag_list, - "run_untagged": run_untagged, - "locked": runner_locked, - "access_level": access_level, - "maximum_timeout": maximum_timeout, - "registration_token": registration_token, - }): - module.exit_json(changed=True, runner=gitlab_runner.runner_object._attrs, - msg="Successfully created or updated the runner %s" % runner_description) - else: - module.exit_json(changed=False, runner=gitlab_runner.runner_object._attrs, - msg="No need to update the runner %s" % runner_description) - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/monitoring/spectrum_device.py b/plugins/modules/spectrum_device.py similarity index 73% rename from plugins/modules/monitoring/spectrum_device.py rename to plugins/modules/spectrum_device.py index 77e3b15390..bbc6fe0ba4 100644 --- a/plugins/modules/monitoring/spectrum_device.py +++ b/plugins/modules/spectrum_device.py @@ -1,92 +1,93 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- -# (c) 2016, Renato Orgito -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# Copyright 
(c) 2016, Renato Orgito +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: spectrum_device -short_description: Creates/deletes devices in CA Spectrum. +short_description: Creates/deletes devices in CA Spectrum description: - - This module allows you to create and delete devices in CA Spectrum U(https://www.ca.com/us/products/ca-spectrum.html). - - Tested on CA Spectrum 9.4.2, 10.1.1 and 10.2.1 + - This module allows you to create and delete devices in CA Spectrum U(https://www.ca.com/us/products/ca-spectrum.html). + - Tested on CA Spectrum 9.4.2, 10.1.1 and 10.2.1. author: "Renato Orgito (@orgito)" +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: none options: - device: - type: str - aliases: [ host, name ] - required: true - description: - - IP address of the device. - - If a hostname is given, it will be resolved to the IP address. - community: - type: str - description: - - SNMP community used for device discovery. - - Required when C(state=present). - required: true - landscape: - type: str - required: true - description: - - Landscape handle of the SpectroServer to which add or remove the device. - state: - type: str - required: false - description: - - On C(present) creates the device when it does not exist. - - On C(absent) removes the device when it exists. - choices: ['present', 'absent'] - default: 'present' - url: - type: str - aliases: [ oneclick_url ] - required: true - description: - - HTTP, HTTPS URL of the Oneclick server in the form (http|https)://host.domain[:port] - url_username: - type: str - aliases: [ oneclick_user ] - required: true - description: - - Oneclick user name. 
- url_password: - type: str - aliases: [ oneclick_password ] - required: true - description: - - Oneclick user password. - use_proxy: - required: false - description: - - if C(no), it will not use a proxy, even if one is defined in an environment - variable on the target hosts. - default: 'yes' - type: bool - validate_certs: - required: false - description: - - If C(no), SSL certificates will not be validated. This should only be used - on personally controlled sites using self-signed certificates. - default: 'yes' - type: bool - agentport: - type: int - required: false - description: - - UDP port used for SNMP discovery. - default: 161 + device: + type: str + aliases: [host, name] + required: true + description: + - IP address of the device. + - If a hostname is given, it is resolved to the IP address. + community: + type: str + description: + - SNMP community used for device discovery. + - Required when O(state=present). + required: true + landscape: + type: str + required: true + description: + - Landscape handle of the SpectroServer to which add or remove the device. + state: + type: str + description: + - On V(present) creates the device when it does not exist. + - On V(absent) removes the device when it exists. + choices: ['present', 'absent'] + default: 'present' + url: + type: str + aliases: [oneclick_url] + required: true + description: + - HTTP, HTTPS URL of the Oneclick server in the form V((http|https\)://host.domain[:port]). + url_username: + type: str + aliases: [oneclick_user] + required: true + description: + - Oneclick user name. + url_password: + type: str + aliases: [oneclick_password] + required: true + description: + - Oneclick user password. + use_proxy: + description: + - If V(false), it does not use a proxy, even if one is defined in an environment variable on the target hosts. + default: true + type: bool + validate_certs: + description: + - If V(false), SSL certificates are not validated. 
This should only be used on personally controlled sites using self-signed + certificates. + default: true + type: bool + agentport: + type: int + required: false + description: + - UDP port used for SNMP discovery. + default: 161 notes: - - The devices will be created inside the I(Universe) container of the specified landscape. - - All the operations will be performed only on the specified landscape. -''' + - The devices are created inside the I(Universe) container of the specified landscape. + - All the operations are performed only on the specified landscape. +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: Add device to CA Spectrum local_action: module: spectrum_device @@ -107,17 +108,22 @@ EXAMPLES = ''' oneclick_url: http://oneclick.example.com:8080 oneclick_user: username oneclick_password: password - use_proxy: no + use_proxy: false state: absent -''' +""" -RETURN = ''' +RETURN = r""" device: - description: device data when state = present + description: Device data when O(state=present). 
returned: success type: dict - sample: {'model_handle': '0x1007ab', 'landscape': '0x100000', 'address': '10.10.5.1'} -''' + sample: + { + "model_handle": "0x1007ab", + "landscape": "0x100000", + "address": "10.10.5.1" + } +""" from socket import gethostbyname, gaierror import xml.etree.ElementTree as ET diff --git a/plugins/modules/monitoring/spectrum_model_attrs.py b/plugins/modules/spectrum_model_attrs.py similarity index 80% rename from plugins/modules/monitoring/spectrum_model_attrs.py rename to plugins/modules/spectrum_model_attrs.py index 231352acd6..acd07042c2 100644 --- a/plugins/modules/monitoring/spectrum_model_attrs.py +++ b/plugins/modules/spectrum_model_attrs.py @@ -1,112 +1,114 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- # -# (c) 2021, Tyler Gates +# Copyright (c) 2021, Tyler Gates # -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = r''' ---- +DOCUMENTATION = r""" module: spectrum_model_attrs -short_description: Enforce a model's attributes in CA Spectrum. +short_description: Enforce a model's attributes in CA Spectrum description: - - This module can be used to enforce a model's attributes in CA Spectrum. + - This module can be used to enforce a model's attributes in CA Spectrum. version_added: 2.5.0 author: - - Tyler Gates (@tgates81) + - Tyler Gates (@tgates81) notes: - - Tested on CA Spectrum version 10.4.2.0.189. - - Model creation and deletion are not possible with this module. For that use M(community.general.spectrum_device) instead. -requirements: - - 'python >= 2.7' + - Tested on CA Spectrum version 10.4.2.0.189. + - Model creation and deletion are not possible with this module. 
For that use M(community.general.spectrum_device) instead. +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: none options: - url: - description: - - URL of OneClick server. - type: str - required: true - url_username: - description: - - OneClick username. - type: str - required: true - aliases: [username] - url_password: - description: - - OneClick password. - type: str - required: true - aliases: [password] - use_proxy: - description: - - if C(no), it will not use a proxy, even if one is defined in - an environment variable on the target hosts. - default: yes - required: false - type: bool - name: - description: - - Model name. - type: str - required: true - type: - description: - - Model type. - type: str - required: true - validate_certs: - description: - - Validate SSL certificates. Only change this to C(false) if you can guarantee that you are talking to the correct endpoint and there is no - man-in-the-middle attack happening. - type: bool - default: yes - required: false - attributes: - description: - - A list of attribute names and values to enforce. - - All values and parameters are case sensitive and must be provided as strings only. - required: true - type: list - elements: dict - suboptions: - name: - description: - - Attribute name OR hex ID. 
- - 'Currently defined names are:' - - ' C(App_Manufacturer) (C(0x230683))' - - ' C(CollectionsModelNameString) (C(0x12adb))' - - ' C(Condition) (C(0x1000a))' - - ' C(Criticality) (C(0x1290c))' - - ' C(DeviceType) (C(0x23000e))' - - ' C(isManaged) (C(0x1295d))' - - ' C(Model_Class) (C(0x11ee8))' - - ' C(Model_Handle) (C(0x129fa))' - - ' C(Model_Name) (C(0x1006e))' - - ' C(Modeltype_Handle) (C(0x10001))' - - ' C(Modeltype_Name) (C(0x10000))' - - ' C(Network_Address) (C(0x12d7f))' - - ' C(Notes) (C(0x11564))' - - ' C(ServiceDesk_Asset_ID) (C(0x12db9))' - - ' C(TopologyModelNameString) (C(0x129e7))' - - ' C(sysDescr) (C(0x10052))' - - ' C(sysName) (C(0x10b5b))' - - ' C(Vendor_Name) (C(0x11570))' - - ' C(Description) (C(0x230017))' - - Hex IDs are the direct identifiers in Spectrum and will always work. - - 'To lookup hex IDs go to the UI: Locator -> Devices -> By Model Name -> -> Attributes tab.' - type: str - required: true - value: - description: - - Attribute value. Empty strings should be C("") or C(null). - type: str - required: true -''' + url: + description: + - URL of OneClick server. + type: str + required: true + url_username: + description: + - OneClick username. + type: str + required: true + aliases: [username] + url_password: + description: + - OneClick password. + type: str + required: true + aliases: [password] + use_proxy: + description: + - If V(false), it does not use a proxy, even if one is defined in an environment variable on the target hosts. + default: true + required: false + type: bool + name: + description: + - Model name. + type: str + required: true + type: + description: + - Model type. + type: str + required: true + validate_certs: + description: + - Validate SSL certificates. Only change this to V(false) if you can guarantee that you are talking to the correct endpoint + and there is no man-in-the-middle attack happening. 
+ type: bool + default: true + required: false + attributes: + description: + - A list of attribute names and values to enforce. + - All values and parameters are case sensitive and must be provided as strings only. + required: true + type: list + elements: dict + suboptions: + name: + description: + - Attribute name OR hex ID. + - 'Currently defined names are:' + - C(App_Manufacturer) (C(0x230683)); + - C(CollectionsModelNameString) (C(0x12adb)); + - C(Condition) (C(0x1000a)); + - C(Criticality) (C(0x1290c)); + - C(DeviceType) (C(0x23000e)); + - C(isManaged) (C(0x1295d)); + - C(Model_Class) (C(0x11ee8)); + - C(Model_Handle) (C(0x129fa)); + - C(Model_Name) (C(0x1006e)); + - C(Modeltype_Handle) (C(0x10001)); + - C(Modeltype_Name) (C(0x10000)); + - C(Network_Address) (C(0x12d7f)); + - C(Notes) (C(0x11564)); + - C(ServiceDesk_Asset_ID) (C(0x12db9)); + - C(TopologyModelNameString) (C(0x129e7)); + - C(sysDescr) (C(0x10052)); + - C(sysName) (C(0x10b5b)); + - C(Vendor_Name) (C(0x11570)); + - C(Description) (C(0x230017)). + - Hex IDs are the direct identifiers in Spectrum and always work. + - 'To lookup hex IDs go to the UI: Locator -> Devices -> By Model Name -> -> Attributes tab.' + type: str + required: true + value: + description: + - Attribute value. Empty strings should be V("") or V(null). 
+ type: str + required: true +""" -EXAMPLES = r''' +EXAMPLES = r""" - name: Enforce maintenance mode for modelxyz01 with a note about why community.general.spectrum_model_attrs: url: "http://oneclick.url.com" @@ -119,32 +121,31 @@ EXAMPLES = r''' - name: "isManaged" value: "false" - name: "Notes" - value: "MM set on {{ ansible_date_time.iso8601 }} via CO {{ CO }} by {{ tower_user_name | default(ansible_user_id) }}" + value: >- + MM set on {{ ansible_date_time.iso8601 }} via CO {{ CO }} + by {{ tower_user_name | default(ansible_user_id) }} delegate_to: localhost register: spectrum_model_attrs_status -''' +""" -RETURN = r''' +RETURN = r""" msg: - description: Informational message on the job result. - type: str - returned: always - sample: 'Success' + description: Informational message on the job result. + type: str + returned: always + sample: 'Success' changed_attrs: - description: Dictionary of changed name or hex IDs (whichever was specified) to their new corresponding values. - type: dict - returned: always - sample: { - "Notes": "MM set on 2021-02-03T22:04:02Z via CO CO9999 by tgates", - "isManaged": "true" - } -''' + description: Dictionary of changed name or hex IDs (whichever was specified) to their new corresponding values. 
+ type: dict + returned: always + sample: {"Notes": "MM set on 2021-02-03T22:04:02Z via CO CO9999 by tgates", "isManaged": "true"} +""" from ansible.module_utils.basic import AnsibleModule from ansible.module_utils.common.text.converters import to_native from ansible.module_utils.urls import fetch_url -from ansible.module_utils.six.moves.urllib.parse import quote +from urllib.parse import quote import json import re import xml.etree.ElementTree as ET diff --git a/plugins/modules/cloud/spotinst/spotinst_aws_elastigroup.py b/plugins/modules/spotinst_aws_elastigroup.py similarity index 69% rename from plugins/modules/cloud/spotinst/spotinst_aws_elastigroup.py rename to plugins/modules/spotinst_aws_elastigroup.py index da8f010229..237ffddcdd 100644 --- a/plugins/modules/cloud/spotinst/spotinst_aws_elastigroup.py +++ b/plugins/modules/spotinst_aws_elastigroup.py @@ -1,25 +1,28 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- # Copyright (c) 2017 Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import (absolute_import, division, print_function) +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later +from __future__ import annotations -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: spotinst_aws_elastigroup short_description: Create, update or delete Spotinst AWS Elastigroups author: Spotinst (@talzur) description: - - Can create, update, or delete Spotinst AWS Elastigroups - Launch configuration is part of the elastigroup configuration, - so no additional modules are necessary for handling the launch configuration. 
- You will have to have a credentials file in this location - /.spotinst/credentials - The credentials file must contain a row that looks like this - token = - Full documentation available at https://help.spotinst.com/hc/en-us/articles/115003530285-Ansible- + - Can create, update, or delete Spotinst AWS Elastigroups. Launch configuration is part of the elastigroup configuration, + so no additional modules are necessary for handling the launch configuration. You must have a credentials file in this + location - C($HOME/.spotinst/credentials). The credentials file must contain a row that looks like this C(token = ). + - Full documentation available at U(https://help.spotinst.com/hc/en-us/articles/115003530285-Ansible-). requirements: - - python >= 2.7 - spotinst_sdk >= 1.0.38 +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: none + diff_mode: + support: none options: credentials_path: @@ -31,55 +34,44 @@ account_id: description: - Optional parameter that allows to set an account-id inside the module configuration. - By default this is retrieved from the credentials path. + - By default this is retrieved from the credentials path. + type: str + + token: + description: + - A Personal API Access Token issued by Spotinst. + - When not specified, the module tries to obtain it, in that order, from environment variable E(SPOTINST_TOKEN), or + from the credentials path. type: str availability_vs_cost: description: - The strategy orientation. - - "The choices available are: C(availabilityOriented), C(costOriented), C(balanced)." + - 'The choices available are: V(availabilityOriented), V(costOriented), V(balanced).' 
required: true type: str availability_zones: description: - - A list of hash/dictionaries of Availability Zones that are configured in the elastigroup; - '[{"key":"value", "key":"value"}]'; - keys allowed are - name (String), - subnet_id (String), - placement_group_name (String), + - A list of hash/dictionaries of Availability Zones that are configured in the elastigroup; '[{"key":"value", "key":"value"}]'; + keys allowed are name (String), subnet_id (String), placement_group_name (String),. required: true type: list elements: dict block_device_mappings: description: - - A list of hash/dictionaries of Block Device Mappings for elastigroup instances; - You can specify virtual devices and EBS volumes.; - '[{"key":"value", "key":"value"}]'; - keys allowed are - device_name (List of Strings), - virtual_name (String), - no_device (String), - ebs (Object, expects the following keys- - delete_on_termination(Boolean), - encrypted(Boolean), - iops (Integer), - snapshot_id(Integer), - volume_type(String), - volume_size(Integer)) + - A list of hash/dictionaries of Block Device Mappings for elastigroup instances; You can specify virtual devices and + EBS volumes.; '[{"key":"value", "key":"value"}]'; keys allowed are device_name (List of Strings), virtual_name (String), + no_device (String), ebs (Object, expects the following keys- delete_on_termination(Boolean), encrypted(Boolean), iops + (Integer), snapshot_id(Integer), volume_type(String), volume_size(Integer)). type: list elements: dict chef: description: - - The Chef integration configuration.; - Expects the following keys - chef_server (String), - organization (String), - user (String), - pem_key (String), - chef_version (String) + - The Chef integration configuration.; Expects the following keys - chef_server (String), organization (String), user + (String), pem_key (String), chef_version (String). 
type: dict draining_timeout: @@ -89,42 +81,36 @@ options: ebs_optimized: description: - - Enable EBS optimization for supported instances which are not enabled by default.; - Note - additional charges will be applied. + - Enable EBS optimization for supported instances which are not enabled by default. Note - additional charges are applied. type: bool ebs_volume_pool: description: - - A list of hash/dictionaries of EBS devices to reattach to the elastigroup when available; - '[{"key":"value", "key":"value"}]'; - keys allowed are - - volume_ids (List of Strings), - device_name (String) + - A list of hash/dictionaries of EBS devices to reattach to the elastigroup when available; '[{"key":"value", "key":"value"}]'; + keys allowed are - volume_ids (List of Strings), device_name (String). type: list elements: dict ecs: description: - - The ECS integration configuration.; - Expects the following key - - cluster_name (String) + - The ECS integration configuration.; Expects the following key - cluster_name (String). type: dict elastic_ips: description: - - List of ElasticIps Allocation Ids (Example C(eipalloc-9d4e16f8)) to associate to the group instances + - List of ElasticIps Allocation IDs (example V(eipalloc-9d4e16f8)) to associate to the group instances. type: list elements: str fallback_to_od: description: - - In case of no spots available, Elastigroup will launch an On-demand instance instead + - In case of no spots available, Elastigroup launches an On-demand instance instead. type: bool health_check_grace_period: description: - The amount of time, in seconds, after the instance has launched to start and check its health. - - If not specified, it defaults to C(300). + - If not specified, it defaults to V(300). type: int health_check_unhealthy_duration_before_replacement: @@ -135,159 +121,134 @@ options: health_check_type: description: - The service to use for the health check. - - "The choices available are: C(ELB), C(HCS), C(TARGET_GROUP), C(MLB), C(EC2)." 
+ - 'The choices available are: V(ELB), V(HCS), V(TARGET_GROUP), V(MLB), V(EC2).' type: str iam_role_name: description: - - The instance profile iamRole name - - Only use iam_role_arn, or iam_role_name + - The instance profile iamRole name. + - Only use O(iam_role_arn) or O(iam_role_name). type: str iam_role_arn: description: - - The instance profile iamRole arn - - Only use iam_role_arn, or iam_role_name + - The instance profile iamRole arn. + - Only use O(iam_role_arn) or O(iam_role_name). type: str id: description: - - The group id if it already exists and you want to update, or delete it. - This will not work unless the uniqueness_by field is set to id. - When this is set, and the uniqueness_by field is set, the group will either be updated or deleted, but not created. + - The group ID if it already exists and you want to update, or delete it. This does not work unless the O(uniqueness_by) + field is set to ID. When this is set, and the O(uniqueness_by) field is set, the group is either updated or deleted, + but not created. type: str image_id: description: - - The image Id used to launch the instance.; - In case of conflict between Instance type and image type, an error will be returned + - The image ID used to launch the instance.; In case of conflict between Instance type and image type, an error is + returned. required: true type: str key_pair: description: - - Specify a Key Pair to attach to the instances + - Specify a Key Pair to attach to the instances. type: str kubernetes: description: - - The Kubernetes integration configuration. - Expects the following keys - - api_server (String), - token (String) + - The Kubernetes integration configuration. Expects the following keys - api_server (String), token (String). type: dict lifetime_period: description: - - Lifetime period + - Lifetime period. type: int load_balancers: description: - - List of classic ELB names + - List of classic ELB names. 
type: list elements: str max_size: description: - - The upper limit number of instances that you can scale up to + - The upper limit number of instances that you can scale up to. required: true type: int mesosphere: description: - - The Mesosphere integration configuration. - Expects the following key - - api_server (String) + - The Mesosphere integration configuration. Expects the following key - api_server (String). type: dict min_size: description: - - The lower limit number of instances that you can scale down to + - The lower limit number of instances that you can scale down to. required: true type: int monitoring: description: - - Describes whether instance Enhanced Monitoring is enabled + - Describes whether instance Enhanced Monitoring is enabled. type: str name: description: - - Unique name for elastigroup to be created, updated or deleted + - Unique name for elastigroup to be created, updated or deleted. required: true type: str network_interfaces: description: - - A list of hash/dictionaries of network interfaces to add to the elastigroup; - '[{"key":"value", "key":"value"}]'; - keys allowed are - - description (String), - device_index (Integer), - secondary_private_ip_address_count (Integer), - associate_public_ip_address (Boolean), - delete_on_termination (Boolean), - groups (List of Strings), - network_interface_id (String), - private_ip_address (String), - subnet_id (String), - associate_ipv6_address (Boolean), - private_ip_addresses (List of Objects, Keys are privateIpAddress (String, required) and primary (Boolean)) + - A list of hash/dictionaries of network interfaces to add to the elastigroup; '[{"key":"value", "key":"value"}]'; keys + allowed are - description (String), device_index (Integer), secondary_private_ip_address_count (Integer), associate_public_ip_address + (Boolean), delete_on_termination (Boolean), groups (List of Strings), network_interface_id (String), private_ip_address + (String), subnet_id (String), associate_ipv6_address 
(Boolean), private_ip_addresses (List of Objects, Keys are privateIpAddress + (String, required) and primary (Boolean)). type: list elements: dict on_demand_count: description: - - Required if risk is not set - - Number of on demand instances to launch. All other instances will be spot instances.; - Either set this parameter or the risk parameter + - Required if risk is not set. + - Number of on demand instances to launch. All other instances are spot instances.; Either set this parameter or the + O(risk) parameter. type: int on_demand_instance_type: description: - - On-demand instance type that will be provisioned + - On-demand instance type that is provisioned. type: str opsworks: description: - - The elastigroup OpsWorks integration configration.; - Expects the following key - - layer_id (String) + - The elastigroup OpsWorks integration configuration.; Expects the following key - layer_id (String). type: dict persistence: description: - - The Stateful elastigroup configration.; - Accepts the following keys - - should_persist_root_device (Boolean), - should_persist_block_devices (Boolean), - should_persist_private_ip (Boolean) + - The Stateful elastigroup configuration.; Accepts the following keys - should_persist_root_device (Boolean), should_persist_block_devices + (Boolean), should_persist_private_ip (Boolean). type: dict product: description: - Operation system type. - - "Available choices are: C(Linux/UNIX), C(SUSE Linux), C(Windows), C(Linux/UNIX (Amazon VPC)), C(SUSE Linux (Amazon VPC))." + - 'Available choices are: V(Linux/UNIX), V(SUSE Linux), V(Windows), V(Linux/UNIX (Amazon VPC)), V(SUSE Linux (Amazon + VPC)).' 
required: true type: str rancher: description: - - The Rancher integration configuration.; - Expects the following keys - - version (String), - access_key (String), - secret_key (String), - master_host (String) + - The Rancher integration configuration.; Expects the following keys - version (String), access_key (String), secret_key + (String), master_host (String). type: dict right_scale: description: - - The Rightscale integration configuration.; - Expects the following keys - - account_id (String), - refresh_token (String) + - The Rightscale integration configuration.; Expects the following keys - account_id (String), refresh_token (String). type: dict risk: @@ -297,64 +258,49 @@ options: roll_config: description: - - Roll configuration.; - If you would like the group to roll after updating, please use this feature. - Accepts the following keys - - batch_size_percentage(Integer, Required), - grace_period - (Integer, Required), - health_check_type(String, Optional) + - Roll configuration. + - If you would like the group to roll after updating, please use this feature. + - Accepts the following keys - batch_size_percentage(Integer, Required), grace_period - (Integer, Required), health_check_type(String, + Optional). type: dict scheduled_tasks: description: - - A list of hash/dictionaries of scheduled tasks to configure in the elastigroup; - '[{"key":"value", "key":"value"}]'; - keys allowed are - - adjustment (Integer), - scale_target_capacity (Integer), - scale_min_capacity (Integer), - scale_max_capacity (Integer), - adjustment_percentage (Integer), - batch_size_percentage (Integer), - cron_expression (String), - frequency (String), - grace_period (Integer), - task_type (String, required), - is_enabled (Boolean) + - A list of hash/dictionaries of scheduled tasks to configure in the elastigroup, as in V([{"key":"value", "key":"value"}]). 
+ - 'Keys allowed are: adjustment (Integer), scale_target_capacity (Integer), scale_min_capacity (Integer), scale_max_capacity + (Integer), adjustment_percentage (Integer), batch_size_percentage (Integer), cron_expression (String), frequency (String), + grace_period (Integer), task_type (String, required), is_enabled (Boolean).' type: list elements: dict security_group_ids: description: - - One or more security group IDs. ; - In case of update it will override the existing Security Group with the new given array + - One or more security group IDs. + - In case of update it overrides the existing Security Group with the new given array. required: true type: list elements: str shutdown_script: description: - - The Base64-encoded shutdown script that executes prior to instance termination. - Encode before setting. + - The Base64-encoded shutdown script that executes prior to instance termination. Encode before setting. type: str signals: description: - - A list of hash/dictionaries of signals to configure in the elastigroup; - keys allowed are - - name (String, required), - timeout (Integer) + - A list of hash/dictionaries of signals to configure in the elastigroup; keys allowed are - name (String, required), + timeout (Integer). type: list elements: dict spin_up_time: description: - - Spin up time, in seconds, for the instance + - Spin up time, in seconds, for the instance. type: int spot_instance_types: description: - - Spot instance type that will be provisioned. + - Spot instance type that is provisioned. required: true type: list elements: str @@ -364,108 +310,73 @@ options: - present - absent description: - - Create or delete the elastigroup + - Create or delete the elastigroup. default: present type: str tags: description: - - A list of tags to configure in the elastigroup. Please specify list of keys and values (key colon value); + - A list of tags to configure in the elastigroup. Please specify list of keys and values (key colon value). 
type: list elements: dict target: description: - - The number of instances to launch + - The number of instances to launch. required: true type: int target_group_arns: description: - - List of target group arns instances should be registered to + - List of target group arns instances should be registered to. type: list elements: str tenancy: description: - - Dedicated vs shared tenancy. - - "The available choices are: C(default), C(dedicated)." + - Dedicated or shared tenancy. + - 'The available choices are: V(default), V(dedicated).' type: str terminate_at_end_of_billing_hour: description: - - Terminate at the end of billing hour + - Terminate at the end of billing hour. type: bool unit: description: - The capacity unit to launch instances by. - - "The available choices are: C(instance), C(weight)." + - 'The available choices are: V(instance), V(weight).' type: str up_scaling_policies: description: - - A list of hash/dictionaries of scaling policies to configure in the elastigroup; - '[{"key":"value", "key":"value"}]'; - keys allowed are - - policy_name (String, required), - namespace (String, required), - metric_name (String, required), - dimensions (List of Objects, Keys allowed are name (String, required) and value (String)), - statistic (String, required) - evaluation_periods (String, required), - period (String, required), - threshold (String, required), - cooldown (String, required), - unit (String, required), - operator (String, required), - action_type (String, required), - adjustment (String), - min_target_capacity (String), - target (String), - maximum (String), - minimum (String) + - A list of hash/dictionaries of scaling policies to configure in the elastigroup; '[{"key":"value", "key":"value"}]'; + keys allowed are - policy_name (String, required), namespace (String, required), metric_name (String, required), dimensions + (List of Objects, Keys allowed are name (String, required) and value (String)), statistic (String, required) evaluation_periods + 
(String, required), period (String, required), threshold (String, required), cooldown (String, required), unit (String, + required), operator (String, required), action_type (String, required), adjustment (String), min_target_capacity (String), + target (String), maximum (String), minimum (String). type: list elements: dict down_scaling_policies: description: - - A list of hash/dictionaries of scaling policies to configure in the elastigroup; - '[{"key":"value", "key":"value"}]'; - keys allowed are - - policy_name (String, required), - namespace (String, required), - metric_name (String, required), - dimensions ((List of Objects), Keys allowed are name (String, required) and value (String)), - statistic (String, required), - evaluation_periods (String, required), - period (String, required), - threshold (String, required), - cooldown (String, required), - unit (String, required), - operator (String, required), - action_type (String, required), - adjustment (String), - max_target_capacity (String), - target (String), - maximum (String), - minimum (String) + - A list of hash/dictionaries of scaling policies to configure in the elastigroup; '[{"key":"value", "key":"value"}]'; + keys allowed are - policy_name (String, required), namespace (String, required), metric_name (String, required), dimensions + ((List of Objects), Keys allowed are name (String, required) and value (String)), statistic (String, required), evaluation_periods + (String, required), period (String, required), threshold (String, required), cooldown (String, required), unit (String, + required), operator (String, required), action_type (String, required), adjustment (String), max_target_capacity (String), + target (String), maximum (String), minimum (String). 
type: list elements: dict target_tracking_policies: description: - - A list of hash/dictionaries of target tracking policies to configure in the elastigroup; - '[{"key":"value", "key":"value"}]'; - keys allowed are - - policy_name (String, required), - namespace (String, required), - source (String, required), - metric_name (String, required), - statistic (String, required), - unit (String, required), - cooldown (String, required), - target (String, required) + - A list of hash/dictionaries of target tracking policies to configure in the elastigroup; '[{"key":"value", "key":"value"}]'; + keys allowed are - policy_name (String, required), namespace (String, required), source (String, required), metric_name + (String, required), statistic (String, required), unit (String, required), cooldown (String, required), target (String, + required). type: list elements: dict @@ -474,8 +385,8 @@ options: - id - name description: - - If your group names are not unique, you may use this feature to update or delete a specific group. - Whenever this property is set, you must set a group_id in order to update or delete a group, otherwise a group will be created. + - If your group names are not unique, you may use this feature to update or delete a specific group. Whenever this property + is set, you must set a group_id in order to update or delete a group, otherwise a group is created. default: name type: str @@ -486,55 +397,76 @@ options: utilize_reserved_instances: description: - - In case of any available Reserved Instances, - Elastigroup will utilize your reservations before purchasing Spot instances. + - In case of any available Reserved Instances, Elastigroup utilizes your reservations before purchasing Spot instances. type: bool wait_for_instances: description: - - Whether or not the elastigroup creation / update actions should wait for the instances to spin + - Whether or not the elastigroup creation / update actions should wait for the instances to spin. 
type: bool default: false wait_timeout: description: - - How long the module should wait for instances before failing the action.; - Only works if wait_for_instances is True. + - How long the module should wait for instances before failing the action. + - Only works if O(wait_for_instances=true). type: int -''' -EXAMPLES = ''' + do_not_update: + description: + - TODO document. + type: list + elements: str + default: [] + + multai_token: + description: + - Token used for Multai configuration. + type: str + + multai_load_balancers: + description: + - Configuration parameters for Multai load balancers. + type: list + elements: dict + + elastic_beanstalk: + description: + - Placeholder parameter for future implementation of Elastic Beanstalk configurations. + type: dict +""" +EXAMPLES = r""" # Basic configuration YAML example - hosts: localhost tasks: - name: Create elastigroup community.general.spotinst_aws_elastigroup: - state: present - risk: 100 - availability_vs_cost: balanced - availability_zones: - - name: us-west-2a - subnet_id: subnet-2b68a15c - image_id: ami-f173cc91 - key_pair: spotinst-oregon - max_size: 15 - min_size: 0 - target: 0 - unit: instance - monitoring: True - name: ansible-group - on_demand_instance_type: c3.large - product: Linux/UNIX - load_balancers: - - test-lb-1 - security_group_ids: - - sg-8f4b8fe9 - spot_instance_types: - - c3.large - do_not_update: - - image_id - - target + state: present + risk: 100 + availability_vs_cost: balanced + availability_zones: + - name: us-west-2a + subnet_id: subnet-2b68a15c + image_id: ami-f173cc91 + key_pair: spotinst-oregon + max_size: 15 + min_size: 0 + target: 0 + unit: instance + monitoring: true + name: ansible-group + on_demand_instance_type: c3.large + product: Linux/UNIX + load_balancers: + - test-lb-1 + security_group_ids: + - sg-8f4b8fe9 + spot_instance_types: + - c3.large + do_not_update: + - image_id + - target register: result - ansible.builtin.debug: var=result @@ -544,39 +476,39 @@ EXAMPLES = 
''' tasks: - name: Create elastigroup community.general.spotinst_aws_elastigroup: - state: present - account_id: act-1a9dd2b - risk: 100 - availability_vs_cost: balanced - availability_zones: - - name: us-west-2a - subnet_id: subnet-2b68a15c - tags: - - Environment: someEnvValue - - OtherTagKey: otherValue - image_id: ami-f173cc91 - key_pair: spotinst-oregon - max_size: 5 - min_size: 0 - target: 0 - unit: instance - monitoring: True - name: ansible-group-tal - on_demand_instance_type: c3.large - product: Linux/UNIX - security_group_ids: - - sg-8f4b8fe9 - block_device_mappings: - - device_name: '/dev/sda1' - ebs: - volume_size: 100 - volume_type: gp2 - spot_instance_types: - - c3.large - do_not_update: - - image_id - wait_for_instances: True - wait_timeout: 600 + state: present + account_id: act-1a9dd2b + risk: 100 + availability_vs_cost: balanced + availability_zones: + - name: us-west-2a + subnet_id: subnet-2b68a15c + tags: + - Environment: someEnvValue + - OtherTagKey: otherValue + image_id: ami-f173cc91 + key_pair: spotinst-oregon + max_size: 5 + min_size: 0 + target: 0 + unit: instance + monitoring: true + name: ansible-group-tal + on_demand_instance_type: c3.large + product: Linux/UNIX + security_group_ids: + - sg-8f4b8fe9 + block_device_mappings: + - device_name: '/dev/sda1' + ebs: + volume_size: 100 + volume_type: gp2 + spot_instance_types: + - c3.large + do_not_update: + - image_id + wait_for_instances: true + wait_timeout: 600 register: result - name: Store private ips to file @@ -591,43 +523,43 @@ EXAMPLES = ''' tasks: - name: Create elastigroup community.general.spotinst_aws_elastigroup: - state: present - account_id: act-1a9dd2b - risk: 100 - availability_vs_cost: balanced - availability_zones: - - name: us-west-2a - subnet_id: subnet-2b68a15c - tags: - - Environment: someEnvValue - - OtherTagKey: otherValue - image_id: ami-f173cc91 - key_pair: spotinst-oregon - max_size: 5 - min_size: 0 - target: 0 - unit: instance - monitoring: True - name: 
ansible-group-tal - on_demand_instance_type: c3.large - product: Linux/UNIX - security_group_ids: - - sg-8f4b8fe9 - block_device_mappings: - - device_name: '/dev/xvda' - ebs: - volume_size: 60 - volume_type: gp2 - - device_name: '/dev/xvdb' - ebs: - volume_size: 120 - volume_type: gp2 - spot_instance_types: - - c3.large - do_not_update: - - image_id - wait_for_instances: True - wait_timeout: 600 + state: present + account_id: act-1a9dd2b + risk: 100 + availability_vs_cost: balanced + availability_zones: + - name: us-west-2a + subnet_id: subnet-2b68a15c + tags: + - Environment: someEnvValue + - OtherTagKey: otherValue + image_id: ami-f173cc91 + key_pair: spotinst-oregon + max_size: 5 + min_size: 0 + target: 0 + unit: instance + monitoring: true + name: ansible-group-tal + on_demand_instance_type: c3.large + product: Linux/UNIX + security_group_ids: + - sg-8f4b8fe9 + block_device_mappings: + - device_name: '/dev/xvda' + ebs: + volume_size: 60 + volume_type: gp2 + - device_name: '/dev/xvdb' + ebs: + volume_size: 120 + volume_type: gp2 + spot_instance_types: + - c3.large + do_not_update: + - image_id + wait_for_instances: true + wait_timeout: 600 register: result - name: Store private ips to file @@ -641,36 +573,36 @@ EXAMPLES = ''' tasks: - name: Create elastigroup community.general.spotinst_aws_elastigroup: - state: present - risk: 100 - availability_vs_cost: balanced - availability_zones: - - name: us-west-2a - subnet_id: subnet-2b68a15c - image_id: ami-f173cc91 - key_pair: spotinst-oregon - max_size: 15 - min_size: 0 - target: 0 - unit: instance - block_device_mappings: - - device_name: '/dev/xvda' - virtual_name: ephemeral0 - - device_name: '/dev/xvdb/' - virtual_name: ephemeral1 - monitoring: True - name: ansible-group - on_demand_instance_type: c3.large - product: Linux/UNIX - load_balancers: - - test-lb-1 - security_group_ids: - - sg-8f4b8fe9 - spot_instance_types: - - c3.large - do_not_update: - - image_id - - target + state: present + risk: 100 + 
availability_vs_cost: balanced + availability_zones: + - name: us-west-2a + subnet_id: subnet-2b68a15c + image_id: ami-f173cc91 + key_pair: spotinst-oregon + max_size: 15 + min_size: 0 + target: 0 + unit: instance + block_device_mappings: + - device_name: '/dev/xvda' + virtual_name: ephemeral0 + - device_name: '/dev/xvdb/' + virtual_name: ephemeral1 + monitoring: true + name: ansible-group + on_demand_instance_type: c3.large + product: Linux/UNIX + load_balancers: + - test-lb-1 + security_group_ids: + - sg-8f4b8fe9 + spot_instance_types: + - c3.large + do_not_update: + - image_id + - target register: result - ansible.builtin.debug: var=result @@ -681,34 +613,34 @@ EXAMPLES = ''' tasks: - name: Create elastigroup community.general.spotinst_aws_elastigroup: - state: present - risk: 100 - availability_vs_cost: balanced - network_interfaces: - - associate_public_ip_address: true - device_index: 0 - availability_zones: - - name: us-west-2a - subnet_id: subnet-2b68a15c - image_id: ami-f173cc91 - key_pair: spotinst-oregon - max_size: 15 - min_size: 0 - target: 0 - unit: instance - monitoring: True - name: ansible-group - on_demand_instance_type: c3.large - product: Linux/UNIX - load_balancers: - - test-lb-1 - security_group_ids: - - sg-8f4b8fe9 - spot_instance_types: - - c3.large - do_not_update: - - image_id - - target + state: present + risk: 100 + availability_vs_cost: balanced + network_interfaces: + - associate_public_ip_address: true + device_index: 0 + availability_zones: + - name: us-west-2a + subnet_id: subnet-2b68a15c + image_id: ami-f173cc91 + key_pair: spotinst-oregon + max_size: 15 + min_size: 0 + target: 0 + unit: instance + monitoring: true + name: ansible-group + on_demand_instance_type: c3.large + product: Linux/UNIX + load_balancers: + - test-lb-1 + security_group_ids: + - sg-8f4b8fe9 + spot_instance_types: + - c3.large + do_not_update: + - image_id + - target register: result - ansible.builtin.debug: var=result @@ -719,73 +651,67 @@ EXAMPLES = ''' 
tasks: - name: Create elastigroup community.general.spotinst_aws_elastigroup: - account_id: act-92d45673 - state: present - risk: 100 - availability_vs_cost: balanced - availability_zones: - - name: us-west-2a - subnet_id: subnet-79da021e - image_id: ami-f173cc91 - fallback_to_od: true - tags: - - Creator: ValueOfCreatorTag - - Environment: ValueOfEnvironmentTag - key_pair: spotinst-labs-oregon - max_size: 10 - min_size: 0 - target: 2 - unit: instance - monitoring: True - name: ansible-group-1 - on_demand_instance_type: c3.large - product: Linux/UNIX - security_group_ids: - - sg-46cdc13d - spot_instance_types: - - c3.large - target_tracking_policies: - - policy_name: target-tracking-1 - namespace: AWS/EC2 - metric_name: CPUUtilization - statistic: average - unit: percent - target: 50 - cooldown: 120 - do_not_update: - - image_id + account_id: act-92d45673 + state: present + risk: 100 + availability_vs_cost: balanced + availability_zones: + - name: us-west-2a + subnet_id: subnet-79da021e + image_id: ami-f173cc91 + fallback_to_od: true + tags: + - Creator: ValueOfCreatorTag + - Environment: ValueOfEnvironmentTag + key_pair: spotinst-labs-oregon + max_size: 10 + min_size: 0 + target: 2 + unit: instance + monitoring: true + name: ansible-group-1 + on_demand_instance_type: c3.large + product: Linux/UNIX + security_group_ids: + - sg-46cdc13d + spot_instance_types: + - c3.large + target_tracking_policies: + - policy_name: target-tracking-1 + namespace: AWS/EC2 + metric_name: CPUUtilization + statistic: average + unit: percent + target: 50 + cooldown: 120 + do_not_update: + - image_id register: result - ansible.builtin.debug: var=result -''' +""" -RETURN = ''' ---- +RETURN = r""" instances: - description: List of active elastigroup instances and their details. 
- returned: success - type: dict - sample: [ - { - "spotInstanceRequestId": "sir-regs25zp", - "instanceId": "i-09640ad8678234c", - "instanceType": "m4.large", - "product": "Linux/UNIX", - "availabilityZone": "us-west-2b", - "privateIp": "180.0.2.244", - "createdAt": "2017-07-17T12:46:18.000Z", - "status": "fulfilled" - } - ] + description: List of active elastigroup instances and their details. + returned: success + type: dict + sample: + - "spotInstanceRequestId": "sir-regs25zp" + "instanceId": "i-09640ad8678234c" + "instanceType": "m4.large" + "product": "Linux/UNIX" + "availabilityZone": "us-west-2b" + "privateIp": "180.0.2.244" + "createdAt": "2017-07-17T12:46:18.000Z" + "status": "fulfilled" group_id: - description: Created / Updated group's ID. - returned: success - type: str - sample: "sig-12345" - -''' + description: Created / Updated group's ID. + returned: success + type: str + sample: "sig-12345" +""" HAS_SPOTINST_SDK = False -__metaclass__ = type import os import time @@ -1448,7 +1374,7 @@ def main(): block_device_mappings=dict(type='list', elements='dict'), chef=dict(type='dict'), credentials_path=dict(type='path', default="~/.spotinst/credentials"), - do_not_update=dict(default=[], type='list'), + do_not_update=dict(default=[], type='list', elements='str'), down_scaling_policies=dict(type='list', elements='dict'), draining_timeout=dict(type='int'), ebs_optimized=dict(type='bool'), @@ -1472,7 +1398,7 @@ def main(): mesosphere=dict(type='dict'), min_size=dict(type='int', required=True), monitoring=dict(type='str'), - multai_load_balancers=dict(type='list'), + multai_load_balancers=dict(type='list', elements='dict'), multai_token=dict(type='str', no_log=True), name=dict(type='str', required=True), network_interfaces=dict(type='list', elements='dict'), diff --git a/plugins/modules/storage/hpe3par/ss_3par_cpg.py b/plugins/modules/ss_3par_cpg.py similarity index 90% rename from plugins/modules/storage/hpe3par/ss_3par_cpg.py rename to 
plugins/modules/ss_3par_cpg.py index be4a6a02a2..6c6219ed64 100644 --- a/plugins/modules/storage/hpe3par/ss_3par_cpg.py +++ b/plugins/modules/ss_3par_cpg.py @@ -1,16 +1,13 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- -# Copyright: (c) 2018, Hewlett Packard Enterprise Development LP -# GNU General Public License v3.0+ -# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# Copyright (c) 2018, Hewlett Packard Enterprise Development LP +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = r''' ---- +DOCUMENTATION = r""" short_description: Manage HPE StoreServ 3PAR CPG author: - Farhan Nomani (@farhan7500) @@ -18,6 +15,11 @@ author: description: - Create and delete CPG on HPE 3PAR. module: ss_3par_cpg +attributes: + check_mode: + support: none + diff_mode: + support: none options: cpg_name: description: @@ -34,22 +36,20 @@ options: type: str domain: description: - - Specifies the name of the domain in which the object will reside. + - Specifies the name of the domain in which the object resides. type: str growth_increment: description: - - Specifies the growth increment(in MiB, GiB or TiB) the amount of logical disk storage - created on each auto-grow operation. + - Specifies the growth increment(in MiB, GiB or TiB) the amount of logical disk storage created on each auto-grow operation. type: str growth_limit: description: - - Specifies that the autogrow operation is limited to the specified - storage amount that sets the growth limit(in MiB, GiB or TiB). + - Specifies that the autogrow operation is limited to the specified storage amount that sets the growth limit (in MiB, + GiB or TiB). 
type: str growth_warning: description: - - Specifies that the threshold(in MiB, GiB or TiB) of used logical disk space when exceeded - results in a warning alert. + - Specifies that the threshold (in MiB, GiB or TiB) of used logical disk space when exceeded results in a warning alert. type: str high_availability: choices: @@ -57,8 +57,7 @@ options: - CAGE - MAG description: - - Specifies that the layout must support the failure of one port pair, - one cage, or one magazine. + - Specifies that the layout must support the failure of one port pair, one cage, or one magazine. type: str raid_type: choices: @@ -85,14 +84,14 @@ options: description: - Specifies whether the certificate needs to be validated while communicating. type: bool - default: no + default: false extends_documentation_fragment: -- community.general.hpe3par - -''' + - community.general.hpe3par + - community.general.attributes +""" -EXAMPLES = r''' +EXAMPLES = r""" - name: Create CPG sample_cpg community.general.ss_3par_cpg: storage_system_ip: 10.10.10.1 @@ -108,7 +107,7 @@ EXAMPLES = r''' set_size: 8 high_availability: MAG disk_type: FC - secure: no + secure: false - name: Delete CPG sample_cpg community.general.ss_3par_cpg: @@ -117,11 +116,11 @@ EXAMPLES = r''' storage_system_password: password state: absent cpg_name: sample_cpg - secure: no -''' + secure: false +""" -RETURN = r''' -''' +RETURN = r""" +""" from ansible.module_utils.basic import AnsibleModule from ansible_collections.community.general.plugins.module_utils.storage.hpe3par import hpe3par diff --git a/plugins/modules/system/ssh_config.py b/plugins/modules/ssh_config.py similarity index 53% rename from plugins/modules/system/ssh_config.py rename to plugins/modules/ssh_config.py index 18262a2aae..6844da92a7 100644 --- a/plugins/modules/system/ssh_config.py +++ b/plugins/modules/ssh_config.py @@ -1,48 +1,53 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- -# Copyright: (c) 2015, Björn Andersson -# Copyright: (c) 2021, Ansible Project -# 
Copyright: (c) 2021, Abhijeet Kasurde -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# Copyright (c) 2015, Björn Andersson +# Copyright (c) 2021, Ansible Project +# Copyright (c) 2021, Abhijeet Kasurde +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = r''' ---- + +DOCUMENTATION = r""" module: ssh_config short_description: Manage SSH config for user version_added: '2.0.0' description: - - Configures SSH hosts with special C(IdentityFile)s and hostnames. + - Configures SSH hosts with special C(IdentityFile)s and hostnames. author: - - Björn Andersson (@gaqzi) - - Abhijeet Kasurde (@Akasurde) + - Björn Andersson (@gaqzi) + - Abhijeet Kasurde (@Akasurde) +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: none options: state: description: - Whether a host entry should exist or not. default: present - choices: [ 'present', 'absent' ] + choices: ['present', 'absent'] type: str user: description: - Which user account this configuration file belongs to. - - If none given and I(ssh_config_file) is not specified, C(/etc/ssh/ssh_config) is used. + - If none given and O(ssh_config_file) is not specified, C(/etc/ssh/ssh_config) is used. - If a user is given, C(~/.ssh/config) is used. - - Mutually exclusive with I(ssh_config_file). + - Mutually exclusive with O(ssh_config_file). type: str group: description: - Which group this configuration file belongs to. - - If none given, I(user) is used. + - If none given, O(user) is used. type: str host: description: - The endpoint this configuration is valid for. 
- - Can be an actual address on the internet or an alias that will - connect to the value of I(hostname). + - It can be an actual address on the internet or an alias that connects to the value of O(hostname). required: true type: str hostname: @@ -59,10 +64,16 @@ options: type: str identity_file: description: - - The path to an identity file (SSH private key) that will be used - when connecting to this host. - - File need to exist and have mode C(0600) to be valid. + - The path to an identity file (SSH private key) that is used when connecting to this host. + - File need to exist and have mode V(0600) to be valid. type: path + identities_only: + description: + - Specifies that SSH should only use the configured authentication identity and certificate files (either the default + files, or those explicitly configured in the C(ssh_config) files or passed on the ssh command-line), even if C(ssh-agent) + or a C(PKCS11Provider) or C(SecurityKeyProvider) offers more identities. + type: bool + version_added: 8.2.0 user_known_hosts_file: description: - Sets the user known hosts file option. @@ -70,30 +81,74 @@ options: strict_host_key_checking: description: - Whether to strictly check the host key when doing connections to the remote host. - choices: [ 'yes', 'no', 'ask' ] + - The value V(accept-new) is supported since community.general 8.6.0. + choices: ['yes', 'no', 'ask', 'accept-new'] type: str proxycommand: description: - Sets the C(ProxyCommand) option. + - Mutually exclusive with O(proxyjump). type: str + proxyjump: + description: + - Sets the C(ProxyJump) option. + - Mutually exclusive with O(proxycommand). + type: str + version_added: 6.5.0 forward_agent: description: - Sets the C(ForwardAgent) option. type: bool version_added: 4.0.0 + add_keys_to_agent: + description: + - Sets the C(AddKeysToAgent) option. + type: bool + version_added: 8.2.0 ssh_config_file: description: - SSH config file. 
- - If I(user) and this option are not specified, C(/etc/ssh/ssh_config) is used. - - Mutually exclusive with I(user). + - If O(user) and this option are not specified, C(/etc/ssh/ssh_config) is used. + - Mutually exclusive with O(user). type: path + host_key_algorithms: + description: + - Sets the C(HostKeyAlgorithms) option. + type: str + version_added: 6.1.0 + controlmaster: + description: + - Sets the C(ControlMaster) option. + choices: ['yes', 'no', 'ask', 'auto', 'autoask'] + type: str + version_added: 8.1.0 + controlpath: + description: + - Sets the C(ControlPath) option. + type: str + version_added: 8.1.0 + controlpersist: + description: + - Sets the C(ControlPersist) option. + type: str + version_added: 8.1.0 + dynamicforward: + description: + - Sets the C(DynamicForward) option. + type: str + version_added: 10.1.0 + other_options: + description: + - Allows specifying arbitrary SSH config entry options using a dictionary. + - The key names must be lower case. Keys with upper case values are rejected. + - The values must be strings. Other values are rejected. + type: dict + version_added: 10.4.0 requirements: -- StormSSH -notes: -- Supports check mode. -''' + - paramiko +""" -EXAMPLES = r''' +EXAMPLES = r""" - name: Add a host in the configuration community.general.ssh_config: user: akasurde @@ -102,15 +157,26 @@ EXAMPLES = r''' identity_file: "/home/akasurde/.ssh/id_rsa" port: '2223' state: present + other_options: + serveraliveinterval: '30' + +- name: Add SSH config with key auto-added to agent + community.general.ssh_config: + user: devops + host: "example.com" + hostname: "staging.example.com" + identity_file: "/home/devops/.ssh/id_rsa" + add_keys_to_agent: true + state: present - name: Delete a host from the configuration community.general.ssh_config: ssh_config_file: "{{ ssh_config_test }}" host: "example.com" state: absent -''' +""" -RETURN = r''' +RETURN = r""" hosts_added: description: A list of host added. 
returned: success @@ -130,44 +196,60 @@ hosts_change_diff: description: A list of host diff changes. returned: on change type: list - sample: [ - { - "example.com": { - "new": { - "hostname": "github.com", - "identityfile": ["/tmp/test_ssh_config/fake_id_rsa"], - "port": "2224" - }, - "old": { - "hostname": "github.com", - "identityfile": ["/tmp/test_ssh_config/fake_id_rsa"], - "port": "2224" + sample: + [ + { + "example.com": { + "new": { + "hostname": "github.com", + "identityfile": [ + "/tmp/test_ssh_config/fake_id_rsa" + ], + "port": "2224" + }, + "old": { + "hostname": "github.com", + "identityfile": [ + "/tmp/test_ssh_config/fake_id_rsa" + ], + "port": "2224" + } } } - } - ] -''' + ] +""" import os -import traceback from copy import deepcopy -STORM_IMP_ERR = None -try: - from storm.parsers.ssh_config_parser import ConfigParser - HAS_STORM = True -except ImportError: - HAS_STORM = False - STORM_IMP_ERR = traceback.format_exc() - from ansible.module_utils.basic import AnsibleModule, missing_required_lib from ansible.module_utils.common.text.converters import to_native +from ansible_collections.community.general.plugins.module_utils._stormssh import ConfigParser, HAS_PARAMIKO, PARAMIKO_IMPORT_ERROR +from ansible_collections.community.general.plugins.module_utils.ssh import determine_config_file -class SSHConfig(): +def convert_bool(value): + if value is True: + return 'yes' + if value is False: + return 'no' + return None + + +def fix_bool_str(value): + if value == 'True': + return 'yes' + if value == 'False': + return 'no' + return value + + +class SSHConfig(object): def __init__(self, module): self.module = module + if not HAS_PARAMIKO: + module.fail_json(msg=missing_required_lib('PARAMIKO'), exception=PARAMIKO_IMPORT_ERROR) self.params = module.params self.user = self.params.get('user') self.group = self.params.get('group') or self.user @@ -182,10 +264,7 @@ class SSHConfig(): self.config.load() def check_ssh_config_path(self): - if self.user: - 
self.config_file = os.path.join(os.path.expanduser('~%s' % self.user), '.ssh', 'config') - elif self.config_file is None: - self.config_file = '/etc/ssh/ssh_config' + self.config_file = determine_config_file(self.user, self.config_file) # See if the identity file exists or not, relative to the config file if os.path.exists(self.config_file) and self.identity_file is not None: @@ -202,17 +281,31 @@ class SSHConfig(): hostname=self.params.get('hostname'), port=self.params.get('port'), identity_file=self.params.get('identity_file'), + identities_only=convert_bool(self.params.get('identities_only')), user=self.params.get('remote_user'), strict_host_key_checking=self.params.get('strict_host_key_checking'), user_known_hosts_file=self.params.get('user_known_hosts_file'), proxycommand=self.params.get('proxycommand'), + proxyjump=self.params.get('proxyjump'), + host_key_algorithms=self.params.get('host_key_algorithms'), + forward_agent=convert_bool(self.params.get('forward_agent')), + add_keys_to_agent=convert_bool(self.params.get('add_keys_to_agent')), + controlmaster=self.params.get('controlmaster'), + controlpath=self.params.get('controlpath'), + controlpersist=fix_bool_str(self.params.get('controlpersist')), + dynamicforward=self.params.get('dynamicforward'), ) - - # Convert True / False to 'yes' / 'no' for usage in ssh_config - if self.params['forward_agent'] is True: - args['forward_agent'] = 'yes' - if self.params['forward_agent'] is False: - args['forward_agent'] = 'no' + if self.params.get('other_options'): + for key, value in self.params.get('other_options').items(): + if key.lower() != key: + self.module.fail_json(msg="The other_options key {key!r} must be lower case".format(key=key)) + if key not in args: + if not isinstance(value, str): + self.module.fail_json(msg="The other_options value provided for key {key!r} must be a string, got {type}".format(key=key, + type=type(value))) + args[key] = value + else: + self.module.fail_json(msg="Multiple values provided 
for key {key!r}".format(key=key)) config_changed = False hosts_changed = [] @@ -255,7 +348,8 @@ class SSHConfig(): try: self.config.write_to_ssh_config() except PermissionError as perm_exec: - self.module.fail_json(msg="Failed to write to %s due to permission issue: %s" % (self.config_file, to_native(perm_exec))) + self.module.fail_json( + msg="Failed to write to %s due to permission issue: %s" % (self.config_file, to_native(perm_exec))) # Make sure we set the permission perm_mode = '0600' if self.config_file == '/etc/ssh/ssh_config': @@ -293,33 +387,36 @@ class SSHConfig(): def main(): module = AnsibleModule( argument_spec=dict( - group=dict(default=None, type='str'), + group=dict(type='str'), host=dict(type='str', required=True), hostname=dict(type='str'), + host_key_algorithms=dict(type='str', no_log=False), identity_file=dict(type='path'), + identities_only=dict(type='bool'), + other_options=dict(type='dict'), port=dict(type='str'), - proxycommand=dict(type='str', default=None), + proxycommand=dict(type='str'), + proxyjump=dict(type='str'), forward_agent=dict(type='bool'), + add_keys_to_agent=dict(type='bool'), remote_user=dict(type='str'), - ssh_config_file=dict(default=None, type='path'), + ssh_config_file=dict(type='path'), state=dict(type='str', default='present', choices=['present', 'absent']), - strict_host_key_checking=dict( - default=None, - choices=['yes', 'no', 'ask'] - ), - user=dict(default=None, type='str'), - user_known_hosts_file=dict(type='str', default=None), + strict_host_key_checking=dict(type='str', choices=['yes', 'no', 'ask', 'accept-new']), + controlmaster=dict(type='str', choices=['yes', 'no', 'ask', 'auto', 'autoask']), + controlpath=dict(type='str'), + controlpersist=dict(type='str'), + dynamicforward=dict(type='str'), + user=dict(type='str'), + user_known_hosts_file=dict(type='str'), ), supports_check_mode=True, mutually_exclusive=[ ['user', 'ssh_config_file'], + ['proxycommand', 'proxyjump'], ], ) - if not HAS_STORM: - 
module.fail_json(changed=False, msg=missing_required_lib("stormssh"), - exception=STORM_IMP_ERR) - ssh_config_obj = SSHConfig(module) ssh_config_obj.ensure_state() diff --git a/plugins/modules/remote_management/stacki/stacki_host.py b/plugins/modules/stacki_host.py similarity index 84% rename from plugins/modules/remote_management/stacki/stacki_host.py rename to plugins/modules/stacki_host.py index fda0c5d318..58312e8784 100644 --- a/plugins/modules/remote_management/stacki/stacki_host.py +++ b/plugins/modules/stacki_host.py @@ -1,40 +1,46 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- -# Copyright: (c) 2016, Hugh Ma -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# Copyright (c) 2016, Hugh Ma +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: stacki_host short_description: Add or remove host to stacki front-end description: - - Use this module to add or remove hosts to a stacki front-end via API. + - Use this module to add or remove hosts to a stacki front-end using API. - Information on stacki can be found at U(https://github.com/StackIQ/stacki). +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: none + diff_mode: + support: none options: name: description: - Name of the host to be added to Stacki. - required: True + required: true type: str stacki_user: description: - - Username for authenticating with Stacki API, but if not specified, the environment variable C(stacki_user) is used instead. - required: True + - Username for authenticating with Stacki API, but if not specified, the environment variable E(stacki_user) is used + instead. 
+ required: true type: str stacki_password: description: - - Password for authenticating with Stacki API, but if not - specified, the environment variable C(stacki_password) is used instead. - required: True + - Password for authenticating with Stacki API, but if not specified, the environment variable E(stacki_password) is + used instead. + required: true type: str stacki_endpoint: description: - URL for the Stacki API Endpoint. - required: True + required: true type: str prim_intf_mac: description: @@ -53,32 +59,34 @@ options: type: str force_install: description: - - Set value to C(true) to force node into install state if it already exists in stacki. + - Set value to V(true) to force node into install state if it already exists in stacki. type: bool - default: no + default: false state: description: - Set value to the desired state for the specified host. type: str - choices: [ absent, present ] + choices: [absent, present] default: present appliance: description: - - Applicance to be used in host creation. - - Required if I(state) is C(present) and host does not yet exist. + - Appliance to be used in host creation. + - Required if O(state=present) and host does not yet exist. type: str default: backend rack: description: - Rack to be used in host creation. - - Required if I(state) is C(present) and host does not yet exist. + - Required if O(state=present) and host does not yet exist. type: int + default: 0 rank: description: - Rank to be used in host creation. - In Stacki terminology, the rank is the position of the machine in a rack. - - Required if I(state) is C(present) and host does not yet exist. + - Required if O(state=present) and host does not yet exist. type: int + default: 0 network: description: - Network to be configured in the host. 
@@ -86,10 +94,10 @@ options: type: str default: private author: -- Hugh Ma (@bbyhuy) -''' + - Hugh Ma (@bbyhuy) +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: Add a host named test-1 community.general.stacki_host: name: test-1 @@ -107,32 +115,13 @@ EXAMPLES = ''' stacki_password: pwd stacki_endpoint: url state: absent -''' +""" -RETURN = ''' -changed: - description: response to whether or not the api call completed successfully - returned: always - type: bool - sample: true - -stdout: - description: the set of responses from the commands - returned: always - type: list - sample: ['...', '...'] - -stdout_lines: - description: the value of stdout split into a list - returned: always - type: list - sample: [['...', '...'], ['...'], ['...']] -''' import json from ansible.module_utils.basic import AnsibleModule, env_fallback -from ansible.module_utils.six.moves.urllib.parse import urlencode +from urllib.parse import urlencode from ansible.module_utils.urls import fetch_url @@ -278,7 +267,7 @@ def main(): for param in ['appliance', 'rack', 'rank', 'prim_intf', 'prim_intf_ip', 'network', 'prim_intf_mac']: if not module.params[param]: missing_params.append(param) - if len(missing_params) > 0: # @FIXME replace with required_if + if len(missing_params) > 0: module.fail_json(msg="missing required arguments: {0}".format(missing_params)) stacki.stack_add(result) diff --git a/plugins/modules/monitoring/statsd.py b/plugins/modules/statsd.py similarity index 83% rename from plugins/modules/monitoring/statsd.py rename to plugins/modules/statsd.py index b07851641b..c127cd42f1 100644 --- a/plugins/modules/monitoring/statsd.py +++ b/plugins/modules/statsd.py @@ -1,28 +1,33 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- -# Copyright: Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# Copyright Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# 
SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = ''' +DOCUMENTATION = r""" module: statsd short_description: Send metrics to StatsD version_added: 2.1.0 description: - The C(statsd) module sends metrics to StatsD. - For more information, see U(https://statsd-metrics.readthedocs.io/en/latest/). - - Supported metric types are C(counter) and C(gauge). - Currently unupported metric types are C(timer), C(set), and C(gaugedelta). + - Supported metric types are V(counter) and V(gauge). Currently unupported metric types are V(timer), V(set), and V(gaugedelta). author: "Mark Mercado (@mamercad)" requirements: - statsd +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: none + diff_mode: + support: none options: state: type: str description: - - State of the check, only C(present) makes sense. + - State of the check, only V(present) makes sense. choices: ["present"] default: present host: @@ -34,7 +39,7 @@ options: type: int default: 8125 description: - - The port on C(host) which StatsD is listening on. + - The port on O(host) which StatsD is listening on. protocol: type: str default: udp @@ -45,7 +50,7 @@ options: type: float default: 1.0 description: - - Sender timeout, only applicable if C(protocol) is C(tcp). + - Sender timeout, only applicable if O(protocol) is V(tcp). metric: type: str required: true @@ -61,6 +66,7 @@ options: type: str description: - The prefix to add to the metric. + default: '' value: type: int required: true @@ -70,10 +76,10 @@ options: type: bool default: false description: - - If the metric is of type C(gauge), change the value by C(delta). -''' + - If the metric is of type V(gauge), change the value by O(delta). 
+""" -EXAMPLES = ''' +EXAMPLES = r""" - name: Increment the metric my_counter by 1 community.general.statsd: host: localhost @@ -91,7 +97,7 @@ EXAMPLES = ''' metric: my_gauge metric_type: gauge value: 7 -''' +""" from ansible.module_utils.basic import (AnsibleModule, missing_required_lib) diff --git a/plugins/modules/monitoring/statusio_maintenance.py b/plugins/modules/statusio_maintenance.py similarity index 68% rename from plugins/modules/monitoring/statusio_maintenance.py rename to plugins/modules/statusio_maintenance.py index 10f733d4a8..43921488d9 100644 --- a/plugins/modules/monitoring/statusio_maintenance.py +++ b/plugins/modules/statusio_maintenance.py @@ -1,127 +1,129 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- -# (c) 2015, Benjamin Copeland (@bhcopeland) -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# Copyright (c) 2015, Benjamin Copeland (@bhcopeland) +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = ''' - +DOCUMENTATION = r""" module: statusio_maintenance short_description: Create maintenance windows for your status.io dashboard description: - - Creates a maintenance window for status.io - - Deletes a maintenance window for status.io + - Creates or deletes a maintenance window for status.io. notes: - - You can use the apiary API url (http://docs.statusio.apiary.io/) to - capture API traffic - - Use start_date and start_time with minutes to set future maintenance window + - You can use the apiary API URL (U(http://docs.statusio.apiary.io/)) to capture API traffic. + - Use start_date and start_time with minutes to set future maintenance window. 
author: Benjamin Copeland (@bhcopeland) +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: none options: - title: - type: str - description: - - A descriptive title for the maintenance window - default: "A new maintenance window" - desc: - type: str - description: - - Message describing the maintenance window - default: "Created by Ansible" - state: - type: str - description: - - Desired state of the package. - default: "present" - choices: ["present", "absent"] - api_id: - type: str - description: - - Your unique API ID from status.io - required: true - api_key: - type: str - description: - - Your unique API Key from status.io - required: true - statuspage: - type: str - description: - - Your unique StatusPage ID from status.io - required: true - url: - type: str - description: - - Status.io API URL. A private apiary can be used instead. - default: "https://api.status.io" - components: - type: list - elements: str - description: - - The given name of your component (server name) - aliases: ['component'] - containers: - type: list - elements: str - description: - - The given name of your container (data center) - aliases: ['container'] - all_infrastructure_affected: - description: - - If it affects all components and containers - type: bool - default: 'no' - automation: - description: - - Automatically start and end the maintenance window - type: bool - default: 'no' - maintenance_notify_now: - description: - - Notify subscribers now - type: bool - default: 'no' - maintenance_notify_72_hr: - description: - - Notify subscribers 72 hours before maintenance start time - type: bool - default: 'no' - maintenance_notify_24_hr: - description: - - Notify subscribers 24 hours before maintenance start time - type: bool - default: 'no' - maintenance_notify_1_hr: - description: - - Notify subscribers 1 hour before maintenance start time - type: bool - default: 'no' - maintenance_id: - type: str - 
description: - - The maintenance id number when deleting a maintenance window - minutes: - type: int - description: - - The length of time in UTC that the maintenance will run - (starting from playbook runtime) - default: 10 - start_date: - type: str - description: - - Date maintenance is expected to start (Month/Day/Year) (UTC) - - End Date is worked out from start_date + minutes - start_time: - type: str - description: - - Time maintenance is expected to start (Hour:Minutes) (UTC) - - End Time is worked out from start_time + minutes -''' + title: + type: str + description: + - A descriptive title for the maintenance window. + default: "A new maintenance window" + desc: + type: str + description: + - Message describing the maintenance window. + default: "Created by Ansible" + state: + type: str + description: + - Desired state of the package. + default: "present" + choices: ["present", "absent"] + api_id: + type: str + description: + - Your unique API ID from status.io. + required: true + api_key: + type: str + description: + - Your unique API Key from status.io. + required: true + statuspage: + type: str + description: + - Your unique StatusPage ID from status.io. + required: true + url: + type: str + description: + - Status.io API URL. A private apiary can be used instead. + default: "https://api.status.io" + components: + type: list + elements: str + description: + - The given name of your component (server name). + aliases: ['component'] + containers: + type: list + elements: str + description: + - The given name of your container (data center). + aliases: ['container'] + all_infrastructure_affected: + description: + - If it affects all components and containers. + type: bool + default: false + automation: + description: + - Automatically start and end the maintenance window. + type: bool + default: false + maintenance_notify_now: + description: + - Notify subscribers now. 
+ type: bool + default: false + maintenance_notify_72_hr: + description: + - Notify subscribers 72 hours before maintenance start time. + type: bool + default: false + maintenance_notify_24_hr: + description: + - Notify subscribers 24 hours before maintenance start time. + type: bool + default: false + maintenance_notify_1_hr: + description: + - Notify subscribers 1 hour before maintenance start time. + type: bool + default: false + maintenance_id: + type: str + description: + - The maintenance ID number when deleting a maintenance window. + minutes: + type: int + description: + - The duration of the maintenance window (starting from playbook runtime). + default: 10 + start_date: + type: str + description: + - Date maintenance is expected to start (Month/Day/Year) (UTC). + - End Date is worked out from O(start_date) + O(minutes). + start_time: + type: str + description: + - Time maintenance is expected to start (Hour:Minutes) (UTC). + - End Time is worked out from O(start_time) + O(minutes). +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: Create a maintenance window for 10 minutes on server1, with automation to stop the maintenance community.general.statusio_maintenance: title: Router Upgrade from ansible @@ -130,8 +132,8 @@ EXAMPLES = ''' api_id: api_id api_key: api_key statuspage: statuspage_id - maintenance_notify_1_hr: True - automation: True + maintenance_notify_1_hr: true + automation: true - name: Create a maintenance window for 60 minutes on server1 and server2 community.general.statusio_maintenance: @@ -144,8 +146,8 @@ EXAMPLES = ''' api_id: api_id api_key: api_key statuspage: statuspage_id - maintenance_notify_1_hr: True - automation: True + maintenance_notify_1_hr: true + automation: true delegate_to: localhost - name: Create a future maintenance window for 24 hours to all hosts inside the Primary Data Center @@ -168,10 +170,9 @@ EXAMPLES = ''' api_id: api_id api_key: api_key state: absent - -''' +""" # TODO: Add RETURN documentation. 
-RETURN = ''' # ''' +RETURN = """ # """ import datetime import json @@ -180,6 +181,10 @@ from ansible.module_utils.basic import AnsibleModule from ansible.module_utils.common.text.converters import to_native from ansible.module_utils.urls import open_url +from ansible_collections.community.general.plugins.module_utils.datetime import ( + now, +) + def get_api_auth_headers(api_id, api_key, url, statuspage): @@ -262,11 +267,11 @@ def get_date_time(start_date, start_time, minutes): except (NameError, ValueError): return 1, None, "Couldn't work out a valid date" else: - now = datetime.datetime.utcnow() - delta = now + datetime.timedelta(minutes=minutes) + now_t = now() + delta = now_t + datetime.timedelta(minutes=minutes) # start_date - returned_date.append(now.strftime("%m/%d/%Y")) - returned_date.append(now.strftime("%H:%M")) + returned_date.append(now_t.strftime("%m/%d/%Y")) + returned_date.append(now_t.strftime("%H:%M")) # end_date returned_date.append(delta.strftime("%m/%d/%Y")) returned_date.append(delta.strftime("%H:%M")) @@ -278,25 +283,24 @@ def create_maintenance(auth_headers, url, statuspage, host_ids, returned_date, maintenance_notify_now, maintenance_notify_72_hr, maintenance_notify_24_hr, maintenance_notify_1_hr): - returned_dates = [[x] for x in returned_date] component_id = [] container_id = [] for val in host_ids: component_id.append(val['component_id']) container_id.append(val['container_id']) + infrastructure_id = [i + '-' + j for i, j in zip(component_id, container_id)] try: values = json.dumps({ "statuspage_id": statuspage, - "components": component_id, - "containers": container_id, "all_infrastructure_affected": str(int(all_infrastructure_affected)), + "infrastructure_affected": infrastructure_id, "automation": str(int(automation)), "maintenance_name": title, "maintenance_details": desc, - "date_planned_start": returned_dates[0], - "time_planned_start": returned_dates[1], - "date_planned_end": returned_dates[2], - "time_planned_end": 
returned_dates[3], + "date_planned_start": returned_date[0], + "time_planned_start": returned_date[1], + "date_planned_end": returned_date[2], + "time_planned_end": returned_date[3], "maintenance_notify_now": str(int(maintenance_notify_now)), "maintenance_notify_72_hr": str(int(maintenance_notify_72_hr)), "maintenance_notify_24_hr": str(int(maintenance_notify_24_hr)), @@ -338,30 +342,22 @@ def main(): api_id=dict(required=True), api_key=dict(required=True, no_log=True), statuspage=dict(required=True), - state=dict(required=False, default='present', - choices=['present', 'absent']), - url=dict(default='https://api.status.io', required=False), - components=dict(type='list', elements='str', required=False, default=None, - aliases=['component']), - containers=dict(type='list', elements='str', required=False, default=None, - aliases=['container']), - all_infrastructure_affected=dict(type='bool', default=False, - required=False), - automation=dict(type='bool', default=False, required=False), - title=dict(required=False, default='A new maintenance window'), - desc=dict(required=False, default='Created by Ansible'), - minutes=dict(type='int', required=False, default=10), - maintenance_notify_now=dict(type='bool', default=False, - required=False), - maintenance_notify_72_hr=dict(type='bool', default=False, - required=False), - maintenance_notify_24_hr=dict(type='bool', default=False, - required=False), - maintenance_notify_1_hr=dict(type='bool', default=False, - required=False), - maintenance_id=dict(required=False, default=None), - start_date=dict(default=None, required=False), - start_time=dict(default=None, required=False) + state=dict(default='present', choices=['present', 'absent']), + url=dict(default='https://api.status.io'), + components=dict(type='list', elements='str', aliases=['component']), + containers=dict(type='list', elements='str', aliases=['container']), + all_infrastructure_affected=dict(type='bool', default=False), + automation=dict(type='bool', 
default=False), + title=dict(default='A new maintenance window'), + desc=dict(default='Created by Ansible'), + minutes=dict(type='int', default=10), + maintenance_notify_now=dict(type='bool', default=False), + maintenance_notify_72_hr=dict(type='bool', default=False), + maintenance_notify_24_hr=dict(type='bool', default=False), + maintenance_notify_1_hr=dict(type='bool', default=False), + maintenance_id=dict(), + start_date=dict(), + start_time=dict() ), supports_check_mode=True, ) diff --git a/plugins/modules/storage/zfs/zfs_facts.py b/plugins/modules/storage/zfs/zfs_facts.py deleted file mode 100644 index cb106de111..0000000000 --- a/plugins/modules/storage/zfs/zfs_facts.py +++ /dev/null @@ -1,254 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# (c) 2016, Adam Števko -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' ---- -module: zfs_facts -short_description: Gather facts about ZFS datasets. -description: - - Gather facts from ZFS dataset properties. -author: Adam Števko (@xen0l) -options: - name: - description: - - ZFS dataset name. - required: yes - aliases: [ "ds", "dataset" ] - type: str - recurse: - description: - - Specifies if properties for any children should be recursively - displayed. - type: bool - default: 'no' - parsable: - description: - - Specifies if property values should be displayed in machine - friendly format. - type: bool - default: 'no' - properties: - description: - - Specifies which dataset properties should be queried in comma-separated format. - For more information about dataset properties, check zfs(1M) man page. - default: all - type: str - type: - description: - - Specifies which datasets types to display. Multiple values have to be - provided in comma-separated form. 
- choices: [ 'all', 'filesystem', 'volume', 'snapshot', 'bookmark' ] - default: all - type: str - depth: - description: - - Specifies recursion depth. - type: int -''' - -EXAMPLES = ''' -- name: Gather facts about ZFS dataset rpool/export/home - community.general.zfs_facts: - dataset: rpool/export/home - -- name: Report space usage on ZFS filesystems under data/home - community.general.zfs_facts: - name: data/home - recurse: yes - type: filesystem - -- ansible.builtin.debug: - msg: 'ZFS dataset {{ item.name }} consumes {{ item.used }} of disk space.' - with_items: '{{ ansible_zfs_datasets }}' -''' - -RETURN = ''' -name: - description: ZFS dataset name - returned: always - type: str - sample: rpool/var/spool -parsable: - description: if parsable output should be provided in machine friendly format. - returned: if 'parsable' is set to True - type: bool - sample: True -recurse: - description: if we should recurse over ZFS dataset - returned: if 'recurse' is set to True - type: bool - sample: True -zfs_datasets: - description: ZFS dataset facts - returned: always - type: str - sample: - { - "aclinherit": "restricted", - "aclmode": "discard", - "atime": "on", - "available": "43.8G", - "canmount": "on", - "casesensitivity": "sensitive", - "checksum": "on", - "compression": "off", - "compressratio": "1.00x", - "copies": "1", - "creation": "Thu Jun 16 11:37 2016", - "dedup": "off", - "devices": "on", - "exec": "on", - "filesystem_count": "none", - "filesystem_limit": "none", - "logbias": "latency", - "logicalreferenced": "18.5K", - "logicalused": "3.45G", - "mlslabel": "none", - "mounted": "yes", - "mountpoint": "/rpool", - "name": "rpool", - "nbmand": "off", - "normalization": "none", - "org.openindiana.caiman:install": "ready", - "primarycache": "all", - "quota": "none", - "readonly": "off", - "recordsize": "128K", - "redundant_metadata": "all", - "refcompressratio": "1.00x", - "referenced": "29.5K", - "refquota": "none", - "refreservation": "none", - "reservation": 
"none", - "secondarycache": "all", - "setuid": "on", - "sharenfs": "off", - "sharesmb": "off", - "snapdir": "hidden", - "snapshot_count": "none", - "snapshot_limit": "none", - "sync": "standard", - "type": "filesystem", - "used": "4.41G", - "usedbychildren": "4.41G", - "usedbydataset": "29.5K", - "usedbyrefreservation": "0", - "usedbysnapshots": "0", - "utf8only": "off", - "version": "5", - "vscan": "off", - "written": "29.5K", - "xattr": "on", - "zoned": "off" - } -''' - -from collections import defaultdict - -from ansible.module_utils.basic import AnsibleModule -from ansible.module_utils.six import iteritems - - -SUPPORTED_TYPES = ['all', 'filesystem', 'volume', 'snapshot', 'bookmark'] - - -class ZFSFacts(object): - def __init__(self, module): - - self.module = module - - self.name = module.params['name'] - self.recurse = module.params['recurse'] - self.parsable = module.params['parsable'] - self.properties = module.params['properties'] - self.type = module.params['type'] - self.depth = module.params['depth'] - - self._datasets = defaultdict(dict) - self.facts = [] - - def dataset_exists(self): - cmd = [self.module.get_bin_path('zfs'), 'list', self.name] - - (rc, out, err) = self.module.run_command(cmd) - - if rc == 0: - return True - else: - return False - - def get_facts(self): - cmd = [self.module.get_bin_path('zfs'), 'get', '-H'] - if self.parsable: - cmd.append('-p') - if self.recurse: - cmd.append('-r') - if int(self.depth) != 0: - cmd.append('-d') - cmd.append('%s' % self.depth) - if self.type: - cmd.append('-t') - cmd.append(self.type) - cmd.extend(['-o', 'name,property,value', self.properties, self.name]) - - (rc, out, err) = self.module.run_command(cmd) - - if rc == 0: - for line in out.splitlines(): - dataset, property, value = line.split('\t') - - self._datasets[dataset].update({property: value}) - - for k, v in iteritems(self._datasets): - v.update({'name': k}) - self.facts.append(v) - - return {'ansible_zfs_datasets': self.facts} - else: - 
self.module.fail_json(msg='Error while trying to get facts about ZFS dataset: %s' % self.name, - stderr=err, - rc=rc) - - -def main(): - module = AnsibleModule( - argument_spec=dict( - name=dict(required=True, aliases=['ds', 'dataset'], type='str'), - recurse=dict(required=False, default=False, type='bool'), - parsable=dict(required=False, default=False, type='bool'), - properties=dict(required=False, default='all', type='str'), - type=dict(required=False, default='all', type='str', choices=SUPPORTED_TYPES), - depth=dict(required=False, default=0, type='int') - ), - supports_check_mode=True - ) - - zfs_facts = ZFSFacts(module) - - result = {} - result['changed'] = False - result['name'] = zfs_facts.name - - if zfs_facts.parsable: - result['parsable'] = zfs_facts.parsable - - if zfs_facts.recurse: - result['recurse'] = zfs_facts.recurse - - if zfs_facts.dataset_exists(): - result['ansible_facts'] = zfs_facts.get_facts() - else: - module.fail_json(msg='ZFS dataset %s does not exist!' % zfs_facts.name) - - module.exit_json(**result) - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/storage/zfs/zpool_facts.py b/plugins/modules/storage/zfs/zpool_facts.py deleted file mode 100644 index b7a66255c6..0000000000 --- a/plugins/modules/storage/zfs/zpool_facts.py +++ /dev/null @@ -1,194 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# (c) 2016, Adam Števko -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' ---- -module: zpool_facts -short_description: Gather facts about ZFS pools. -description: - - Gather facts from ZFS pool properties. -author: Adam Števko (@xen0l) -options: - name: - description: - - ZFS pool name. - type: str - aliases: [ "pool", "zpool" ] - required: false - parsable: - description: - - Specifies if property values should be displayed in machine - friendly format. 
- type: bool - default: False - required: false - properties: - description: - - Specifies which dataset properties should be queried in comma-separated format. - For more information about dataset properties, check zpool(1M) man page. - type: str - default: all - required: false -''' - -EXAMPLES = ''' -- name: Gather facts about ZFS pool rpool - community.general.zpool_facts: pool=rpool - -- name: Gather space usage about all imported ZFS pools - community.general.zpool_facts: properties='free,size' - -- name: Print gathered information - ansible.builtin.debug: - msg: 'ZFS pool {{ item.name }} has {{ item.free }} free space out of {{ item.size }}.' - with_items: '{{ ansible_zfs_pools }}' -''' - -RETURN = ''' -ansible_facts: - description: Dictionary containing all the detailed information about the ZFS pool facts - returned: always - type: complex - contains: - ansible_zfs_pools: - description: ZFS pool facts - returned: always - type: str - sample: - { - "allocated": "3.46G", - "altroot": "-", - "autoexpand": "off", - "autoreplace": "off", - "bootfs": "rpool/ROOT/openindiana", - "cachefile": "-", - "capacity": "6%", - "comment": "-", - "dedupditto": "0", - "dedupratio": "1.00x", - "delegation": "on", - "expandsize": "-", - "failmode": "wait", - "feature@async_destroy": "enabled", - "feature@bookmarks": "enabled", - "feature@edonr": "enabled", - "feature@embedded_data": "active", - "feature@empty_bpobj": "active", - "feature@enabled_txg": "active", - "feature@extensible_dataset": "enabled", - "feature@filesystem_limits": "enabled", - "feature@hole_birth": "active", - "feature@large_blocks": "enabled", - "feature@lz4_compress": "active", - "feature@multi_vdev_crash_dump": "enabled", - "feature@sha512": "enabled", - "feature@skein": "enabled", - "feature@spacemap_histogram": "active", - "fragmentation": "3%", - "free": "46.3G", - "freeing": "0", - "guid": "15729052870819522408", - "health": "ONLINE", - "leaked": "0", - "listsnapshots": "off", - "name": "rpool", - 
"readonly": "off", - "size": "49.8G", - "version": "-" - } -name: - description: ZFS pool name - returned: always - type: str - sample: rpool -parsable: - description: if parsable output should be provided in machine friendly format. - returned: if 'parsable' is set to True - type: bool - sample: True -''' - -from collections import defaultdict - -from ansible.module_utils.six import iteritems -from ansible.module_utils.basic import AnsibleModule - - -class ZPoolFacts(object): - def __init__(self, module): - - self.module = module - self.name = module.params['name'] - self.parsable = module.params['parsable'] - self.properties = module.params['properties'] - self._pools = defaultdict(dict) - self.facts = [] - - def pool_exists(self): - cmd = [self.module.get_bin_path('zpool'), 'list', self.name] - rc, dummy, dummy = self.module.run_command(cmd) - return rc == 0 - - def get_facts(self): - cmd = [self.module.get_bin_path('zpool'), 'get', '-H'] - if self.parsable: - cmd.append('-p') - cmd.append('-o') - cmd.append('name,property,value') - cmd.append(self.properties) - if self.name: - cmd.append(self.name) - - rc, out, err = self.module.run_command(cmd, check_rc=True) - - for line in out.splitlines(): - pool, prop, value = line.split('\t') - - self._pools[pool].update({prop: value}) - - for k, v in iteritems(self._pools): - v.update({'name': k}) - self.facts.append(v) - - return {'ansible_zfs_pools': self.facts} - - -def main(): - module = AnsibleModule( - argument_spec=dict( - name=dict(aliases=['pool', 'zpool'], type='str'), - parsable=dict(default=False, type='bool'), - properties=dict(default='all', type='str'), - ), - supports_check_mode=True - ) - - zpool_facts = ZPoolFacts(module) - - result = { - 'changed': False, - 'name': zpool_facts.name, - } - if zpool_facts.parsable: - result['parsable'] = zpool_facts.parsable - - if zpool_facts.name is not None: - if zpool_facts.pool_exists(): - result['ansible_facts'] = zpool_facts.get_facts() - else: - 
module.fail_json(msg='ZFS pool %s does not exist!' % zpool_facts.name) - else: - result['ansible_facts'] = zpool_facts.get_facts() - - module.exit_json(**result) - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/system/sudoers.py b/plugins/modules/sudoers.py similarity index 52% rename from plugins/modules/system/sudoers.py rename to plugins/modules/sudoers.py index 86d8306c26..0a40e5155e 100644 --- a/plugins/modules/system/sudoers.py +++ b/plugins/modules/sudoers.py @@ -1,16 +1,14 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- -# Copyright: (c) 2019, Jon Ellis (@JonEllis) -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# Copyright (c) 2019, Jon Ellis (@JonEllis) +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: sudoers short_description: Manage sudoers files version_added: "4.3.0" @@ -18,38 +16,63 @@ description: - This module allows for the manipulation of sudoers files. author: - "Jon Ellis (@JonEllis) " +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: none options: commands: description: - The commands allowed by the sudoers rule. - Multiple can be added by passing a list of commands. - - Use C(ALL) for all commands. + - Use V(ALL) for all commands. type: list elements: str group: description: - The name of the group for the sudoers rule. - - This option cannot be used in conjunction with I(user). + - This option cannot be used in conjunction with O(user). type: str name: required: true description: - The name of the sudoers rule. - - This will be used for the filename for the sudoers file managed by this rule. 
+ - This is used for the filename for the sudoers file managed by this rule. type: str + noexec: + description: + - Whether a command is prevented to run further commands itself. + default: false + type: bool + version_added: 8.4.0 nopassword: description: - - Whether a password will be required to run the sudo'd command. + - Whether a password is not required when command is run with sudo. default: true type: bool + setenv: + description: + - Whether to allow keeping the environment when command is run with sudo. + default: false + type: bool + version_added: 6.3.0 + host: + description: + - Specify the host the rule is for. + default: ALL + type: str + version_added: 6.2.0 runas: description: - - Specify the target user the command(s) will run as. + - Specify the target user the command(s) runs as. type: str version_added: 4.7.0 sudoers_path: description: - - The path which sudoers config files will be managed in. + - The path which sudoers config files are managed in. default: /etc/sudoers.d type: str state: @@ -63,11 +86,20 @@ options: user: description: - The name of the user for the sudoers rule. - - This option cannot be used in conjunction with I(group). + - This option cannot be used in conjunction with O(group). type: str -''' + validation: + description: + - If V(absent), the sudoers rule is added without validation. + - If V(detect) and C(visudo) is available, then the sudoers rule is validated by C(visudo). + - If V(required), C(visudo) must be available to validate the sudoers rule. 
+ type: str + default: detect + choices: [absent, detect, required] + version_added: 5.2.0 +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: Allow the backup user to sudo /usr/local/bin/backup community.general.sudoers: name: allow-backup @@ -85,10 +117,11 @@ EXAMPLES = ''' - name: >- Allow the monitoring group to run sudo /usr/local/bin/gather-app-metrics - without requiring a password + without requiring a password on the host called webserver community.general.sudoers: name: monitor-app group: monitoring + host: webserver commands: /usr/local/bin/gather-app-metrics - name: >- @@ -106,7 +139,23 @@ EXAMPLES = ''' community.general.sudoers: name: alice-service state: absent -''' + +- name: Allow alice to sudo /usr/local/bin/upload and keep env variables + community.general.sudoers: + name: allow-alice-upload + user: alice + commands: /usr/local/bin/upload + setenv: true + +- name: >- + Allow alice to sudo /usr/bin/less but prevent less from + running further commands itself + community.general.sudoers: + name: allow-alice-restricted-less + user: alice + commands: /usr/bin/less + noexec: true +""" import os from ansible.module_utils.basic import AnsibleModule @@ -115,17 +164,25 @@ from ansible.module_utils.common.text.converters import to_native class Sudoers(object): + FILE_MODE = 0o440 + def __init__(self, module): + self.module = module + self.check_mode = module.check_mode self.name = module.params['name'] self.user = module.params['user'] self.group = module.params['group'] self.state = module.params['state'] + self.noexec = module.params['noexec'] self.nopassword = module.params['nopassword'] + self.setenv = module.params['setenv'] + self.host = module.params['host'] self.runas = module.params['runas'] self.sudoers_path = module.params['sudoers_path'] self.file = os.path.join(self.sudoers_path, self.name) self.commands = module.params['commands'] + self.validation = module.params['validation'] def write(self): if self.check_mode: @@ -134,6 +191,8 @@ class 
Sudoers(object): with open(self.file, 'w') as f: f.write(self.content()) + os.chmod(self.file, self.FILE_MODE) + def delete(self): if self.check_mode: return @@ -145,7 +204,12 @@ class Sudoers(object): def matches(self): with open(self.file, 'r') as f: - return f.read() == self.content() + content_matches = f.read() == self.content() + + current_mode = os.stat(self.file).st_mode & 0o777 + mode_matches = current_mode == self.FILE_MODE + + return content_matches and mode_matches def content(self): if self.user: @@ -154,14 +218,43 @@ class Sudoers(object): owner = '%{group}'.format(group=self.group) commands_str = ', '.join(self.commands) + noexec_str = 'NOEXEC:' if self.noexec else '' nopasswd_str = 'NOPASSWD:' if self.nopassword else '' + setenv_str = 'SETENV:' if self.setenv else '' runas_str = '({runas})'.format(runas=self.runas) if self.runas is not None else '' - return "{owner} ALL={runas}{nopasswd} {commands}\n".format(owner=owner, runas=runas_str, nopasswd=nopasswd_str, commands=commands_str) + return "{owner} {host}={runas}{noexec}{nopasswd}{setenv} {commands}\n".format( + owner=owner, + host=self.host, + runas=runas_str, + noexec=noexec_str, + nopasswd=nopasswd_str, + setenv=setenv_str, + commands=commands_str + ) + + def validate(self): + if self.validation == 'absent': + return + + visudo_path = self.module.get_bin_path('visudo', required=self.validation == 'required') + if visudo_path is None: + return + + check_command = [visudo_path, '-c', '-f', '-'] + rc, stdout, stderr = self.module.run_command(check_command, data=self.content()) + + if rc != 0: + self.module.fail_json(msg='Failed to validate sudoers rule:\n{stdout}'.format(stdout=stdout or stderr), stdout=stdout, stderr=stderr) def run(self): - if self.state == 'absent' and self.exists(): - self.delete() - return True + if self.state == 'absent': + if self.exists(): + self.delete() + return True + else: + return False + + self.validate() if self.exists() and self.matches(): return False @@ -180,10 
+273,22 @@ def main(): 'name': { 'required': True, }, + 'noexec': { + 'type': 'bool', + 'default': False, + }, 'nopassword': { 'type': 'bool', 'default': True, }, + 'setenv': { + 'type': 'bool', + 'default': False, + }, + 'host': { + 'type': 'str', + 'default': 'ALL', + }, 'runas': { 'type': 'str', 'default': None, @@ -197,6 +302,10 @@ def main(): 'choices': ['present', 'absent'], }, 'user': {}, + 'validation': { + 'default': 'detect', + 'choices': ['absent', 'detect', 'required'] + }, } module = AnsibleModule( diff --git a/plugins/modules/web_infrastructure/supervisorctl.py b/plugins/modules/supervisorctl.py similarity index 76% rename from plugins/modules/web_infrastructure/supervisorctl.py rename to plugins/modules/supervisorctl.py index bc4ef19af1..e1aac7e37a 100644 --- a/plugins/modules/web_infrastructure/supervisorctl.py +++ b/plugins/modules/supervisorctl.py @@ -1,50 +1,61 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- -# (c) 2012, Matt Wright -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# Copyright (c) 2012, Matt Wright +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: supervisorctl -short_description: Manage the state of a program or group of programs running via supervisord +short_description: Manage the state of a program or group of programs managed by C(supervisord) description: - - Manage the state of a program or group of programs running via supervisord + - Manage the state of a program or group of programs managed by C(supervisord). 
+extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: none options: name: type: str description: - The name of the supervisord program or group to manage. - - The name will be taken as group name when it ends with a colon I(:) - - Group support is only available in Ansible version 1.6 or later. - - If I(name=all), all programs and program groups will be managed. + - The name is taken as group name when it ends with a colon V(:). + - If O(name=all), all programs and program groups are managed. required: true config: type: path description: - - The supervisor configuration file path + - The supervisor configuration file path. server_url: type: str description: - - URL on which supervisord server is listening + - URL on which supervisord server is listening. username: type: str description: - - username to use for authentication + - Username to use for authentication. password: type: str description: - - password to use for authentication + - Password to use for authentication. state: type: str description: - The desired state of program/group. required: true - choices: [ "present", "started", "stopped", "restarted", "absent", "signalled" ] + choices: ["present", "started", "stopped", "restarted", "absent", "signalled"] + stop_before_removing: + type: bool + description: + - Use O(stop_before_removing=true) to stop the program/group before removing it. + required: false + default: false + version_added: 7.5.0 signal: type: str description: @@ -52,18 +63,20 @@ options: supervisorctl_path: type: path description: - - path to supervisorctl executable + - Path to C(supervisorctl) executable. notes: - - When C(state) = I(present), the module will call C(supervisorctl reread) then C(supervisorctl add) if the program/group does not exist. - - When C(state) = I(restarted), the module will call C(supervisorctl update) then call C(supervisorctl restart). 
- - When C(state) = I(absent), the module will call C(supervisorctl reread) then C(supervisorctl remove) to remove the target program/group. -requirements: [ "supervisorctl" ] + - When O(state=present), the module calls C(supervisorctl reread) then C(supervisorctl add) if the program/group does not + exist. + - When O(state=restarted), the module calls C(supervisorctl update) then calls C(supervisorctl restart). + - When O(state=absent), the module calls C(supervisorctl reread) then C(supervisorctl remove) to remove the target program/group. + If the program/group is still running, the action fails. If you want to stop the program/group before removing, use O(stop_before_removing=true). +requirements: ["supervisorctl"] author: - - "Matt Wright (@mattupstate)" - - "Aaron Wang (@inetfuture) " -''' + - "Matt Wright (@mattupstate)" + - "Aaron Wang (@inetfuture) " +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: Manage the state of program to be in started state community.general.supervisorctl: name: my_app @@ -98,7 +111,7 @@ EXAMPLES = ''' community.general.supervisorctl: name: all state: restarted -''' +""" import os from ansible.module_utils.basic import AnsibleModule, is_executable @@ -113,6 +126,7 @@ def main(): password=dict(type='str', no_log=True), supervisorctl_path=dict(type='path'), state=dict(type='str', required=True, choices=['present', 'started', 'restarted', 'stopped', 'absent', 'signalled']), + stop_before_removing=dict(type='bool', default=False), signal=dict(type='str'), ) @@ -128,6 +142,7 @@ def main(): is_group = True name = name.rstrip(':') state = module.params['state'] + stop_before_removing = module.params.get('stop_before_removing') config = module.params.get('config') server_url = module.params.get('server_url') username = module.params.get('username') @@ -191,22 +206,27 @@ def main(): matched.append((process_name, status)) return matched - def take_action_on_processes(processes, status_filter, action, expected_result): + def 
take_action_on_processes(processes, status_filter, action, expected_result, exit_module=True): to_take_action_on = [] for process_name, status in processes: if status_filter(status): to_take_action_on.append(process_name) if len(to_take_action_on) == 0: + if not exit_module: + return module.exit_json(changed=False, name=name, state=state) if module.check_mode: + if not exit_module: + return module.exit_json(changed=True) for process_name in to_take_action_on: rc, out, err = run_supervisorctl(action, process_name, check_rc=True) if '%s: %s' % (process_name, expected_result) not in out: module.fail_json(msg=out) - module.exit_json(changed=True, name=name, state=state, affected=to_take_action_on) + if exit_module: + module.exit_json(changed=True, name=name, state=state, affected=to_take_action_on) if state == 'restarted': rc, out, err = run_supervisorctl('update', check_rc=True) @@ -222,6 +242,9 @@ def main(): if len(processes) == 0: module.exit_json(changed=False, name=name, state=state) + if stop_before_removing: + take_action_on_processes(processes, lambda s: s in ('RUNNING', 'STARTING'), 'stop', 'stopped', exit_module=False) + if module.check_mode: module.exit_json(changed=True) run_supervisorctl('reread', check_rc=True) diff --git a/plugins/modules/system/svc.py b/plugins/modules/svc.py similarity index 80% rename from plugins/modules/system/svc.py rename to plugins/modules/svc.py index 53f46f0440..a4ad991d63 100644 --- a/plugins/modules/system/svc.py +++ b/plugins/modules/svc.py @@ -1,59 +1,62 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- # -# Copyright: (c) 2015, Brian Coca -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# Copyright (c) 2015, Brian Coca +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ 
import annotations -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: svc author: -- Brian Coca (@bcoca) -short_description: Manage daemontools services + - Brian Coca (@bcoca) +short_description: Manage C(daemontools) services description: - - Controls daemontools services on remote hosts using the svc utility. + - Controls C(daemontools) services on remote hosts using the C(svc) utility. +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: none options: - name: - description: - - Name of the service to manage. - type: str - required: true - state: - description: - - C(Started)/C(stopped) are idempotent actions that will not run - commands unless necessary. C(restarted) will always bounce the - svc (svc -t) and C(killed) will always bounce the svc (svc -k). - C(reloaded) will send a sigusr1 (svc -1). - C(once) will run a normally downed svc once (svc -o), not really - an idempotent operation. - type: str - choices: [ killed, once, reloaded, restarted, started, stopped ] - downed: - description: - - Should a 'down' file exist or not, if it exists it disables auto startup. - Defaults to no. Downed does not imply stopped. - type: bool - enabled: - description: - - Whether the service is enabled or not, if disabled it also implies stopped. - Take note that a service can be enabled and downed (no auto restart). - type: bool - service_dir: - description: - - Directory svscan watches for services - type: str - default: /service - service_src: - description: - - Directory where services are defined, the source of symlinks to service_dir. - type: str - default: /etc/service -''' + name: + description: + - Name of the service to manage. + type: str + required: true + state: + description: + - V(started)/V(stopped) are idempotent actions that do not run commands unless necessary. + - V(restarted) always bounces the svc (svc -t) and V(killed) always bounces the svc (svc -k). 
+ - V(reloaded) sends a sigusr1 (svc -1). + - V(once) runs a normally downed svc once (svc -o), not really an idempotent operation. + type: str + choices: [killed, once, reloaded, restarted, started, stopped] + downed: + description: + - Should a C(down) file exist or not, if it exists it disables auto startup. Defaults to V(false). Downed does not imply + stopped. + type: bool + enabled: + description: + - Whether the service is enabled or not, if disabled it also implies O(state=stopped). Take note that a service can + be enabled and downed (no auto restart). + type: bool + service_dir: + description: + - Directory C(svscan) watches for services. + type: str + default: /service + service_src: + description: + - Directory where services are defined, the source of symlinks to O(service_dir). + type: str + default: /etc/service +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: Start svc dnscache, if not running community.general.svc: name: dnscache @@ -84,7 +87,7 @@ EXAMPLES = ''' name: dnscache state: reloaded service_dir: /var/service -''' +""" import os import re diff --git a/plugins/modules/packaging/os/svr4pkg.py b/plugins/modules/svr4pkg.py similarity index 80% rename from plugins/modules/packaging/os/svr4pkg.py rename to plugins/modules/svr4pkg.py index aa7a5c2e52..4f9a61e104 100644 --- a/plugins/modules/packaging/os/svr4pkg.py +++ b/plugins/modules/svr4pkg.py @@ -1,54 +1,60 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- -# (c) 2012, Boyd Adamson +# Copyright (c) 2012, Boyd Adamson # -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: svr4pkg short_description: Manage Solaris SVR4 
packages description: - - Manages SVR4 packages on Solaris 10 and 11. - - These were the native packages on Solaris <= 10 and are available - as a legacy feature in Solaris 11. - - Note that this is a very basic packaging system. It will not enforce - dependencies on install or remove. + - Manages SVR4 packages on Solaris 10 and 11. + - These were the native packages on Solaris <= 10 and are available as a legacy feature in Solaris 11. + - Note that this is a very basic packaging system. It does not enforce dependencies on install or remove. author: "Boyd Adamson (@brontitall)" +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: none options: name: description: - - Package name, e.g. C(SUNWcsr) + - Package name, for example V(SUNWcsr). required: true type: str state: description: - - Whether to install (C(present)), or remove (C(absent)) a package. - - If the package is to be installed, then I(src) is required. - - The SVR4 package system doesn't provide an upgrade operation. You need to uninstall the old, then install the new package. + - Whether to install (V(present)), or remove (V(absent)) a package. + - If the package is to be installed, then O(src) is required. + - The SVR4 package system does not provide an upgrade operation. You need to uninstall the old, then install the new + package. required: true choices: ["present", "absent"] type: str src: description: - - Specifies the location to install the package from. Required when C(state=present). - - "Can be any path acceptable to the C(pkgadd) command's C(-d) option. e.g.: C(somefile.pkg), C(/dir/with/pkgs), C(http:/server/mypkgs.pkg)." - - If using a file or directory, they must already be accessible by the host. See the M(ansible.builtin.copy) module for a way to get them there. + - Specifies the location to install the package from. Required when O(state=present). 
+ - "Can be any path acceptable to the C(pkgadd) command's C(-d) option. For example: V(somefile.pkg), V(/dir/with/pkgs), + V(http://server/mypkgs.pkg)." + - If using a file or directory, they must already be accessible by the host. See the M(ansible.builtin.copy) module + for a way to get them there. type: str proxy: description: - - HTTP[s] proxy to be used if C(src) is a URL. + - HTTP[s] proxy to be used if O(src) is a URL. type: str response_file: description: - - Specifies the location of a response file to be used if package expects input on install. (added in Ansible 1.4) + - Specifies the location of a response file to be used if package expects input on install. required: false type: str zone: @@ -65,9 +71,9 @@ options: required: false type: bool default: false -''' +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: Install a package from an already copied file community.general.svr4pkg: name: CSWcommon @@ -98,7 +104,7 @@ EXAMPLES = ''' name: FIREFOX state: absent category: true -''' +""" import os @@ -112,7 +118,7 @@ def package_installed(module, name, category): if category: cmd.append('-c') cmd.append(name) - rc, out, err = module.run_command(' '.join(cmd)) + rc, out, err = module.run_command(cmd) if rc == 0: return True else: @@ -184,10 +190,10 @@ def main(): argument_spec=dict( name=dict(required=True), state=dict(required=True, choices=['present', 'absent']), - src=dict(default=None), - proxy=dict(default=None), - response_file=dict(default=None), - zone=dict(required=False, default='all', choices=['current', 'all']), + src=dict(), + proxy=dict(), + response_file=dict(), + zone=dict(default='all', choices=['current', 'all']), category=dict(default=False, type='bool') ), supports_check_mode=True diff --git a/plugins/modules/packaging/os/swdepot.py b/plugins/modules/swdepot.py similarity index 73% rename from plugins/modules/packaging/os/swdepot.py rename to plugins/modules/swdepot.py index 7e9db8353b..8e0233b04f 100644 --- 
a/plugins/modules/packaging/os/swdepot.py +++ b/plugins/modules/swdepot.py @@ -1,44 +1,49 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- -# (c) 2013, Raul Melo +# Copyright (c) 2013, Raul Melo # Written by Raul Melo # Based on yum module written by Seth Vidal # -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: swdepot short_description: Manage packages with swdepot package manager (HP-UX) description: - - Will install, upgrade and remove packages with swdepot package manager (HP-UX) + - Installs, upgrades, and removes packages with C(swdepot) package manager (HP-UX). notes: [] author: "Raul Melo (@melodous)" +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: none options: - name: - description: - - package name. - aliases: [pkg] - required: true - type: str - state: - description: - - whether to install (C(present), C(latest)), or remove (C(absent)) a package. - required: true - choices: [ 'present', 'latest', 'absent'] - type: str - depot: - description: - - The source repository from which install or upgrade a package. - type: str -''' + name: + description: + - Package name. + aliases: [pkg] + required: true + type: str + state: + description: + - Whether to install (V(present), V(latest)), or remove (V(absent)) a package. + required: true + choices: ['present', 'latest', 'absent'] + type: str + depot: + description: + - The source repository from which install or upgrade a package. 
+ type: str +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: Install a package community.general.swdepot: name: unzip-6.0 @@ -55,12 +60,11 @@ EXAMPLES = ''' community.general.swdepot: name: unzip state: absent -''' +""" import re from ansible.module_utils.basic import AnsibleModule -from ansible.module_utils.six.moves import shlex_quote def compare_package(version1, version2): @@ -86,13 +90,13 @@ def compare_package(version1, version2): def query_package(module, name, depot=None): """ Returns whether a package is installed or not and version. """ - cmd_list = '/usr/sbin/swlist -a revision -l product' + cmd_list = ['/usr/sbin/swlist', '-a', 'revision', '-l', 'product'] if depot: - rc, stdout, stderr = module.run_command("%s -s %s %s | grep %s" % (cmd_list, shlex_quote(depot), shlex_quote(name), shlex_quote(name)), - use_unsafe_shell=True) - else: - rc, stdout, stderr = module.run_command("%s %s | grep %s" % (cmd_list, shlex_quote(name), shlex_quote(name)), use_unsafe_shell=True) + cmd_list.extend(['-s', depot]) + cmd_list.append(name) + rc, stdout, stderr = module.run_command(cmd_list) if rc == 0: + stdout = ''.join(line for line in stdout.splitlines(True) if name in line) version = re.sub(r"\s\s+|\t", " ", stdout).strip().split()[1] else: version = None @@ -104,7 +108,7 @@ def remove_package(module, name): """ Uninstall package if installed. 
""" cmd_remove = '/usr/sbin/swremove' - rc, stdout, stderr = module.run_command("%s %s" % (cmd_remove, name)) + rc, stdout, stderr = module.run_command([cmd_remove, name]) if rc == 0: return rc, stdout @@ -115,8 +119,8 @@ def remove_package(module, name): def install_package(module, depot, name): """ Install package if not already installed """ - cmd_install = '/usr/sbin/swinstall -x mount_all_filesystems=false' - rc, stdout, stderr = module.run_command("%s -s %s %s" % (cmd_install, depot, name)) + cmd_install = ['/usr/sbin/swinstall', '-x', 'mount_all_filesystems=false'] + rc, stdout, stderr = module.run_command(cmd_install + ["-s", depot, name]) if rc == 0: return rc, stdout else: @@ -128,7 +132,7 @@ def main(): argument_spec=dict( name=dict(aliases=['pkg'], required=True), state=dict(choices=['present', 'absent', 'latest'], required=True), - depot=dict(default=None, required=False) + depot=dict() ), supports_check_mode=True ) diff --git a/plugins/modules/packaging/os/swupd.py b/plugins/modules/swupd.py similarity index 80% rename from plugins/modules/packaging/os/swupd.py rename to plugins/modules/swupd.py index 6ededcad02..0ab529e3e7 100644 --- a/plugins/modules/packaging/os/swupd.py +++ b/plugins/modules/swupd.py @@ -1,38 +1,40 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- -# (c) 2017, Alberto Murillo +# Copyright (c) 2017, Alberto Murillo # -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: swupd -short_description: Manages updates and bundles in ClearLinux systems. 
+short_description: Manages updates and bundles in ClearLinux systems description: - - Manages updates and bundles with the swupd bundle manager, which is used by the - Clear Linux Project for Intel Architecture. + - Manages updates and bundles with the swupd bundle manager, which is used by the Clear Linux Project for Intel Architecture. author: Alberto Murillo (@albertomurillo) +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: none options: contenturl: description: - - URL pointing to the contents of available bundles. - If not specified, the contents are retrieved from clearlinux.org. + - URL pointing to the contents of available bundles. If not specified, the contents are retrieved from clearlinux.org. type: str format: description: - - The format suffix for version file downloads. For example [1,2,3,staging,etc]. + - The format suffix for version file downloads. For example V(1), V(2), V(3), and so on, or the special value V(staging). If not specified, the default format is used. type: str manifest: description: - - The manifest contains information about the bundles at certain version of the OS. - Specify a Manifest version to verify against that version or leave unspecified to - verify against the current version. + - The manifest contains information about the bundles at certain version of the OS. Specify a Manifest version to verify + against that version or leave unspecified to verify against the current version. aliases: [release, version] type: int name: @@ -42,8 +44,8 @@ options: type: str state: description: - - Indicates the desired (I)bundle state. C(present) ensures the bundle - is installed while C(absent) ensures the (I)bundle is not installed. + - Indicates the desired (I)bundle state. V(present) ensures the bundle is installed while V(absent) ensures the (I)bundle + is not installed. 
default: present choices: [present, absent] type: str @@ -54,7 +56,7 @@ options: default: false url: description: - - Overrides both I(contenturl) and I(versionurl). + - Overrides both O(contenturl) and O(versionurl). type: str verify: description: @@ -65,12 +67,12 @@ options: description: - URL for version string download. type: str -''' +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: Update the OS to the latest version community.general.swupd: - update: yes + update: true - name: Installs the "foo" bundle community.general.swupd: @@ -84,24 +86,14 @@ EXAMPLES = ''' - name: Check integrity of filesystem community.general.swupd: - verify: yes + verify: true - name: Downgrade OS to release 12920 community.general.swupd: - verify: yes + verify: true manifest: 12920 -''' +""" -RETURN = ''' -stdout: - description: stdout of swupd - returned: always - type: str -stderr: - description: stderr of swupd - returned: always - type: str -''' import os from ansible.module_utils.basic import AnsibleModule @@ -136,19 +128,19 @@ class Swupd(object): self.rc, self.stdout, self.stderr = self.module.run_command(cmd, check_rc=False) def _get_cmd(self, command): - cmd = "%s %s" % (self.swupd_cmd, command) + cmd = [self.swupd_cmd] + command if self.format: - cmd += " --format=%s" % self.format + cmd.append("--format=%s" % self.format) if self.manifest: - cmd += " --manifest=%s" % self.manifest + cmd.append("--manifest=%s" % self.manifest) if self.url: - cmd += " --url=%s" % self.url + cmd.append("--url=%s" % self.url) else: if self.contenturl and command != "check-update": - cmd += " --contenturl=%s" % self.contenturl + cmd.append("--contenturl=%s" % self.contenturl) if self.versionurl: - cmd += " --versionurl=%s" % self.versionurl + cmd.append("--versionurl=%s" % self.versionurl) return cmd @@ -161,7 +153,7 @@ class Swupd(object): return True def _needs_update(self): - cmd = self._get_cmd("check-update") + cmd = self._get_cmd(["check-update"]) self._run_cmd(cmd) if self.rc == 0: @@ 
-174,7 +166,7 @@ class Swupd(object): self.msg = "Failed to check for updates" def _needs_verify(self): - cmd = self._get_cmd("verify") + cmd = self._get_cmd(["verify"]) self._run_cmd(cmd) if self.rc != 0: @@ -195,7 +187,7 @@ class Swupd(object): self.msg = "Bundle %s is already installed" % bundle return - cmd = self._get_cmd("bundle-add %s" % bundle) + cmd = self._get_cmd(["bundle-add", bundle]) self._run_cmd(cmd) if self.rc == 0: @@ -215,7 +207,7 @@ class Swupd(object): self.msg = "Bundle %s not installed" return - cmd = self._get_cmd("bundle-remove %s" % bundle) + cmd = self._get_cmd(["bundle-remove", bundle]) self._run_cmd(cmd) if self.rc == 0: @@ -235,7 +227,7 @@ class Swupd(object): self.msg = "There are no updates available" return - cmd = self._get_cmd("update") + cmd = self._get_cmd(["update"]) self._run_cmd(cmd) if self.rc == 0: @@ -255,7 +247,7 @@ class Swupd(object): self.msg = "No files where changed" return - cmd = self._get_cmd("verify --fix") + cmd = self._get_cmd(["verify", "--fix"]) self._run_cmd(cmd) if self.rc == 0 and (self.FILES_REPLACED in self.stdout or self.FILES_FIXED in self.stdout or self.FILES_DELETED in self.stdout): diff --git a/plugins/modules/notification/syslogger.py b/plugins/modules/syslogger.py similarity index 67% rename from plugins/modules/notification/syslogger.py rename to plugins/modules/syslogger.py index 7627f35985..9922d4b579 100644 --- a/plugins/modules/notification/syslogger.py +++ b/plugins/modules/syslogger.py @@ -1,53 +1,74 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- -# Copyright: (c) 2017, Tim Rightnour -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# Copyright (c) 2017, Tim Rightnour +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations 
-DOCUMENTATION = r''' ---- +DOCUMENTATION = r""" module: syslogger short_description: Log messages in the syslog description: - - Uses syslog to add log entries to the host. + - Uses syslog to add log entries to the host. +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: none + diff_mode: + support: none options: - msg: - type: str - description: - - This is the message to place in syslog. - required: True - priority: - type: str - description: - - Set the log priority. - choices: [ "emerg", "alert", "crit", "err", "warning", "notice", "info", "debug" ] - default: "info" - facility: - type: str - description: - - Set the log facility. - choices: [ "kern", "user", "mail", "daemon", "auth", "lpr", "news", - "uucp", "cron", "syslog", "local0", "local1", "local2", - "local3", "local4", "local5", "local6", "local7" ] - default: "daemon" - log_pid: - description: - - Log the PID in brackets. - type: bool - default: False - ident: - description: - - Specify the name of application name which is sending the log to syslog. - type: str - default: 'ansible_syslogger' - version_added: '0.2.0' + msg: + type: str + description: + - This is the message to place in syslog. + required: true + priority: + type: str + description: + - Set the log priority. + choices: ["emerg", "alert", "crit", "err", "warning", "notice", "info", "debug"] + default: "info" + facility: + type: str + description: + - Set the log facility. + choices: + - kern + - user + - mail + - daemon + - auth + - lpr + - news + - uucp + - cron + - syslog + - local0 + - local1 + - local2 + - local3 + - local4 + - local5 + - local6 + - local7 + default: "daemon" + log_pid: + description: + - Log the PID in brackets. + type: bool + default: false + ident: + description: + - Specify the name of application name which is sending the log to syslog. 
+ type: str + default: 'ansible_syslogger' + version_added: '0.2.0' author: - - Tim Rightnour (@garbled1) -''' + - Tim Rightnour (@garbled1) +""" -EXAMPLES = r''' +EXAMPLES = r""" - name: Simple Usage community.general.syslogger: msg: "I will end up as daemon.info" @@ -64,36 +85,36 @@ EXAMPLES = r''' ident: "MyApp" msg: "I want to believe" priority: "alert" -''' +""" -RETURN = r''' +RETURN = r""" ident: - description: Name of application sending the message to log + description: Name of application sending the message to log. returned: always type: str sample: "ansible_syslogger" version_added: '0.2.0' priority: - description: Priority level + description: Priority level. returned: always type: str sample: "daemon" facility: - description: Syslog facility + description: Syslog facility. returned: always type: str sample: "info" log_pid: - description: Log PID status + description: Log PID status. returned: always type: bool - sample: True + sample: true msg: - description: Message sent to syslog + description: Message sent to syslog. 
returned: always type: str sample: "Hello from Ansible" -''' +""" import syslog import traceback @@ -144,17 +165,17 @@ def main(): module_args = dict( ident=dict(type='str', default='ansible_syslogger'), msg=dict(type='str', required=True), - priority=dict(type='str', required=False, + priority=dict(type='str', choices=["emerg", "alert", "crit", "err", "warning", "notice", "info", "debug"], default='info'), - facility=dict(type='str', required=False, + facility=dict(type='str', choices=["kern", "user", "mail", "daemon", "auth", "lpr", "news", "uucp", "cron", "syslog", "local0", "local1", "local2", "local3", "local4", "local5", "local6", "local7"], default='daemon'), - log_pid=dict(type='bool', required=False, default=False) + log_pid=dict(type='bool', default=False) ) module = AnsibleModule( diff --git a/plugins/modules/system/syspatch.py b/plugins/modules/syspatch.py similarity index 72% rename from plugins/modules/system/syspatch.py rename to plugins/modules/syspatch.py index 42cb17b8a3..a64c8a4bec 100644 --- a/plugins/modules/system/syspatch.py +++ b/plugins/modules/syspatch.py @@ -1,34 +1,40 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- -# Copyright: (c) 2019-2020, Andrew Klaus -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# Copyright (c) 2019-2020, Andrew Klaus +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = r''' ---- +DOCUMENTATION = r""" module: syspatch short_description: Manage OpenBSD system patches description: - - "Manage OpenBSD system patches using syspatch." + - Manage OpenBSD system patches using syspatch. 
+extends_documentation_fragment: + - community.general.attributes + +attributes: + check_mode: + support: full + diff_mode: + support: none options: - revert: - description: - - Revert system patches. - type: str - choices: [ all, one ] + revert: + description: + - Revert system patches. + type: str + choices: [all, one] author: - - Andrew Klaus (@precurse) -''' + - Andrew Klaus (@precurse) +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: Apply all available system patches community.general.syspatch: @@ -48,29 +54,15 @@ EXAMPLES = ''' - name: Reboot if patch requires it ansible.builtin.reboot: when: syspatch.reboot_needed -''' +""" -RETURN = r''' -rc: - description: The command return code (0 means success) - returned: always - type: int -stdout: - description: syspatch standard output. - returned: always - type: str - sample: "001_rip6cksum" -stderr: - description: syspatch standard error. - returned: always - type: str - sample: "syspatch: need root privileges" +RETURN = r""" reboot_needed: description: Whether or not a reboot is required after an update. returned: always type: bool - sample: True -''' + sample: true +""" from ansible.module_utils.basic import AnsibleModule @@ -95,7 +87,6 @@ def syspatch_run(module): cmd = module.get_bin_path('syspatch', True) changed = False reboot_needed = False - warnings = [] # Set safe defaults for run_flag and check_flag run_flag = ['-c'] @@ -137,11 +128,11 @@ def syspatch_run(module): # Kernel update applied reboot_needed = True elif out.lower().find('syspatch updated itself') >= 0: - warnings.append('Syspatch was updated. Please run syspatch again.') + module.warn('Syspatch was updated. 
Please run syspatch again.') # If no stdout, then warn user if len(out) == 0: - warnings.append('syspatch had suggested changes, but stdout was empty.') + module.warn('syspatch had suggested changes, but stdout was empty.') changed = True else: @@ -153,7 +144,6 @@ def syspatch_run(module): rc=rc, stderr=err, stdout=out, - warnings=warnings ) diff --git a/plugins/modules/sysrc.py b/plugins/modules/sysrc.py new file mode 100644 index 0000000000..1f34a90aaf --- /dev/null +++ b/plugins/modules/sysrc.py @@ -0,0 +1,215 @@ +#!/usr/bin/python + +# Copyright (c) 2019 David Lundgren +# +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + + +DOCUMENTATION = r""" +author: + - David Lundgren (@dlundgren) +module: sysrc +short_description: Manage FreeBSD using sysrc +version_added: '2.0.0' +description: + - Manages C(/etc/rc.conf) for FreeBSD. +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: none +options: + name: + description: + - Name of variable in C(/etc/rc.conf) to manage. + type: str + required: true + value: + description: + - The value to set when O(state=present). + - The value to add when O(state=value_present). + - The value to remove when O(state=value_absent). + type: str + state: + description: + - Use V(present) to add the variable. + - Use V(absent) to remove the variable. + - Use V(value_present) to add the value to the existing variable. + - Use V(value_absent) to remove the value from the existing variable. + type: str + default: "present" + choices: [absent, present, value_present, value_absent] + path: + description: + - Path to file to use instead of V(/etc/rc.conf). + type: str + default: "/etc/rc.conf" + delim: + description: + - Delimiter to be used instead of V(" ") (space). 
+ - Only used when O(state=value_present) or O(state=value_absent). + default: " " + type: str + jail: + description: + - Name or ID of the jail to operate on. + type: str +notes: + - The O(name) cannot contain periods as sysrc does not support OID style names. +""" + +EXAMPLES = r""" +# enable mysql in the /etc/rc.conf +- name: Configure mysql pid file + community.general.sysrc: + name: mysql_pidfile + value: "/var/run/mysqld/mysqld.pid" + +# enable accf_http kld in the boot loader +- name: Enable accf_http kld + community.general.sysrc: + name: accf_http_load + state: present + value: "YES" + path: /boot/loader.conf + +# add gif0 to cloned_interfaces +- name: Add gif0 interface + community.general.sysrc: + name: cloned_interfaces + state: value_present + value: "gif0" + +# enable nginx on a jail +- name: Enable nginx in test jail + community.general.sysrc: + name: nginx_enable + value: "YES" + jail: testjail +""" + + +from ansible_collections.community.general.plugins.module_utils.module_helper import StateModuleHelper + +import os +import re + + +class Sysrc(StateModuleHelper): + module = dict( + argument_spec=dict( + name=dict(type='str', required=True), + value=dict(type='str'), + state=dict(type='str', default='present', choices=['absent', 'present', 'value_present', 'value_absent']), + path=dict(type='str', default='/etc/rc.conf'), + delim=dict(type='str', default=' '), + jail=dict(type='str') + ), + supports_check_mode=True + ) + output_params = ('value',) + use_old_vardict = False + + def __init_module__(self): + # OID style names are not supported + if not re.match(r'^\w+$', self.vars.name, re.ASCII): + self.module.fail_json(msg="Name may only contain alpha-numeric and underscore characters") + + self.sysrc = self.module.get_bin_path('sysrc', True) + + def _contains(self): + value = self._get() + if value is None: + return False, None + + value = value.split(self.vars.delim) + + return self.vars.value in value, value + + def _get(self): + if not 
os.path.exists(self.vars.path): + return None + + (rc, out, err) = self._sysrc('-v', '-n', self.vars.name) + if "unknown variable" in err or "unknown variable" in out: + # Prior to FreeBSD 11.1 sysrc would write "unknown variable" to stdout and not stderr + # https://bugs.freebsd.org/bugzilla/show_bug.cgi?id=229806 + return None + + if out.startswith(self.vars.path): + return out.split(':', 1)[1].strip() + + return None + + def _modify(self, op, changed): + (rc, out, err) = self._sysrc("%s%s=%s%s" % (self.vars.name, op, self.vars.delim, self.vars.value)) + if out.startswith("%s:" % self.vars.name): + return changed(out.split(' -> ')[1].strip().split(self.vars.delim)) + + return False + + def _sysrc(self, *args): + cmd = [self.sysrc, '-f', self.vars.path] + if self.vars.jail: + cmd += ['-j', self.vars.jail] + cmd.extend(args) + + (rc, out, err) = self.module.run_command(cmd) + if "Permission denied" in err: + self.module.fail_json(msg="Permission denied for %s" % self.vars.path) + + return rc, out, err + + def state_absent(self): + if self._get() is None: + return + + if not self.check_mode: + self._sysrc('-x', self.vars.name) + + self.changed = True + + def state_present(self): + value = self._get() + if value == self.vars.value: + return + + if self.vars.value is None: + self.vars.set('value', value) + return + + if not self.check_mode: + self._sysrc("%s=%s" % (self.vars.name, self.vars.value)) + + self.changed = True + + def state_value_absent(self): + (contains, _unused) = self._contains() + if not contains: + return + + self.changed = self.check_mode or self._modify('-', lambda values: self.vars.value not in values) + + def state_value_present(self): + (contains, value) = self._contains() + if contains: + return + + if self.vars.value is None: + self.vars.set('value', value) + return + + self.changed = self.check_mode or self._modify('+', lambda values: self.vars.value in values) + + +def main(): + Sysrc.execute() + + +if __name__ == '__main__': + main() diff 
--git a/plugins/modules/system/alternatives.py b/plugins/modules/system/alternatives.py deleted file mode 100644 index ca075d69b4..0000000000 --- a/plugins/modules/system/alternatives.py +++ /dev/null @@ -1,218 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Copyright: (c) 2014, Gabe Mulley -# Copyright: (c) 2015, David Wittman -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = r''' ---- -module: alternatives -short_description: Manages alternative programs for common commands -description: - - Manages symbolic links using the 'update-alternatives' tool. - - Useful when multiple programs are installed but provide similar functionality (e.g. different editors). -author: - - David Wittman (@DavidWittman) - - Gabe Mulley (@mulby) -options: - name: - description: - - The generic name of the link. - type: str - required: true - path: - description: - - The path to the real executable that the link should point to. - type: path - required: true - link: - description: - - The path to the symbolic link that should point to the real executable. - - This option is always required on RHEL-based distributions. On Debian-based distributions this option is - required when the alternative I(name) is unknown to the system. - type: path - priority: - description: - - The priority of the alternative. - type: int - default: 50 - state: - description: - - C(present) - install the alternative (if not already installed), but do - not set it as the currently selected alternative for the group. - - C(selected) - install the alternative (if not already installed), and - set it as the currently selected alternative for the group. 
- choices: [ present, selected ] - default: selected - type: str - version_added: 4.8.0 -requirements: [ update-alternatives ] -''' - -EXAMPLES = r''' -- name: Correct java version selected - community.general.alternatives: - name: java - path: /usr/lib/jvm/java-7-openjdk-amd64/jre/bin/java - -- name: Alternatives link created - community.general.alternatives: - name: hadoop-conf - link: /etc/hadoop/conf - path: /etc/hadoop/conf.ansible - -- name: Make java 32 bit an alternative with low priority - community.general.alternatives: - name: java - path: /usr/lib/jvm/java-7-openjdk-i386/jre/bin/java - priority: -10 - -- name: Install Python 3.5 but do not select it - community.general.alternatives: - name: python - path: /usr/bin/python3.5 - link: /usr/bin/python - state: present -''' - -import os -import re -import subprocess - -from ansible.module_utils.basic import AnsibleModule - - -class AlternativeState: - PRESENT = "present" - SELECTED = "selected" - - @classmethod - def to_list(cls): - return [cls.PRESENT, cls.SELECTED] - - -def main(): - - module = AnsibleModule( - argument_spec=dict( - name=dict(type='str', required=True), - path=dict(type='path', required=True), - link=dict(type='path'), - priority=dict(type='int', default=50), - state=dict( - type='str', - choices=AlternativeState.to_list(), - default=AlternativeState.SELECTED, - ), - ), - supports_check_mode=True, - ) - - params = module.params - name = params['name'] - path = params['path'] - link = params['link'] - priority = params['priority'] - state = params['state'] - - UPDATE_ALTERNATIVES = module.get_bin_path('update-alternatives', True) - - current_path = None - all_alternatives = [] - - # Run `update-alternatives --display ` to find existing alternatives - (rc, display_output, dummy) = module.run_command( - ['env', 'LC_ALL=C', UPDATE_ALTERNATIVES, '--display', name] - ) - - if rc == 0: - # Alternatives already exist for this link group - # Parse the output to determine the current path of the 
symlink and - # available alternatives - current_path_regex = re.compile(r'^\s*link currently points to (.*)$', - re.MULTILINE) - alternative_regex = re.compile(r'^(\/.*)\s-\s(?:family\s\S+\s)?priority', re.MULTILINE) - - match = current_path_regex.search(display_output) - if match: - current_path = match.group(1) - all_alternatives = alternative_regex.findall(display_output) - - if not link: - # Read the current symlink target from `update-alternatives --query` - # in case we need to install the new alternative before setting it. - # - # This is only compatible on Debian-based systems, as the other - # alternatives don't have --query available - rc, query_output, dummy = module.run_command( - ['env', 'LC_ALL=C', UPDATE_ALTERNATIVES, '--query', name] - ) - if rc == 0: - for line in query_output.splitlines(): - if line.startswith('Link:'): - link = line.split()[1] - break - - changed = False - if current_path != path: - - # Check mode: expect a change if this alternative is not already - # installed, or if it is to be set as the current selection. 
- if module.check_mode: - module.exit_json( - changed=( - path not in all_alternatives or - state == AlternativeState.SELECTED - ), - current_path=current_path, - ) - - try: - # install the requested path if necessary - if path not in all_alternatives: - if not os.path.exists(path): - module.fail_json(msg="Specified path %s does not exist" % path) - if not link: - module.fail_json(msg="Needed to install the alternative, but unable to do so as we are missing the link") - - module.run_command( - [UPDATE_ALTERNATIVES, '--install', link, name, path, str(priority)], - check_rc=True - ) - changed = True - - # set the current selection to this path (if requested) - if state == AlternativeState.SELECTED: - module.run_command( - [UPDATE_ALTERNATIVES, '--set', name, path], - check_rc=True - ) - changed = True - - except subprocess.CalledProcessError as cpe: - module.fail_json(msg=str(dir(cpe))) - elif current_path == path and state == AlternativeState.PRESENT: - # Case where alternative is currently selected, but state is set - # to 'present'. In this case, we set to auto mode. 
- if module.check_mode: - module.exit_json(changed=True, current_path=current_path) - - changed = True - try: - module.run_command( - [UPDATE_ALTERNATIVES, '--auto', name], - check_rc=True, - ) - except subprocess.CalledProcessError as cpe: - module.fail_json(msg=str(dir(cpe))) - - module.exit_json(changed=changed) - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/system/facter.py b/plugins/modules/system/facter.py deleted file mode 100644 index abd2ebc3a7..0000000000 --- a/plugins/modules/system/facter.py +++ /dev/null @@ -1,72 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Copyright: (c) 2012, Michael DeHaan -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - -DOCUMENTATION = ''' ---- -module: facter -short_description: Runs the discovery program I(facter) on the remote system -description: - - Runs the C(facter) discovery program - (U(https://github.com/puppetlabs/facter)) on the remote system, returning - JSON data that can be useful for inventory purposes. -options: - arguments: - description: - - Specifies arguments for facter. 
- type: list - elements: str -requirements: - - facter - - ruby-json -author: - - Ansible Core Team - - Michael DeHaan -''' - -EXAMPLES = ''' -# Example command-line invocation -# ansible www.example.net -m facter - -- name: Execute facter no arguments - community.general.facter: - -- name: Execute facter with arguments - community.general.facter: - arguments: - - -p - - system_uptime - - timezone - - is_virtual -''' -import json - -from ansible.module_utils.basic import AnsibleModule - - -def main(): - module = AnsibleModule( - argument_spec=dict( - arguments=dict(required=False, type='list', elements='str') - ) - ) - - facter_path = module.get_bin_path( - 'facter', - opt_dirs=['/opt/puppetlabs/bin']) - - cmd = [facter_path, "--json"] - if module.params['arguments']: - cmd += module.params['arguments'] - - rc, out, err = module.run_command(cmd, check_rc=True) - module.exit_json(**json.loads(out)) - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/system/gconftool2.py b/plugins/modules/system/gconftool2.py deleted file mode 100644 index 86bb2f9259..0000000000 --- a/plugins/modules/system/gconftool2.py +++ /dev/null @@ -1,229 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Copyright: (c) 2016, Kenneth D. Evensen -# Copyright: (c) 2017, Abhijeet Kasurde -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - -DOCUMENTATION = ''' -module: gconftool2 -author: - - Kenneth D. Evensen (@kevensen) -short_description: Edit GNOME Configurations -description: - - This module allows for the manipulation of GNOME 2 Configuration via - gconftool-2. Please see the gconftool-2(1) man pages for more details. -options: - key: - type: str - description: - - A GConf preference key is an element in the GConf repository - that corresponds to an application preference. 
See man gconftool-2(1) - required: yes - value: - type: str - description: - - Preference keys typically have simple values such as strings, - integers, or lists of strings and integers. This is ignored if the state - is "get". See man gconftool-2(1) - value_type: - type: str - description: - - The type of value being set. This is ignored if the state is "get". - choices: [ bool, float, int, string ] - state: - type: str - description: - - The action to take upon the key/value. - required: yes - choices: [ absent, get, present ] - config_source: - type: str - description: - - Specify a configuration source to use rather than the default path. - See man gconftool-2(1) - direct: - description: - - Access the config database directly, bypassing server. If direct is - specified then the config_source must be specified as well. - See man gconftool-2(1) - type: bool - default: 'no' -''' - -EXAMPLES = """ -- name: Change the widget font to "Serif 12" - community.general.gconftool2: - key: "/desktop/gnome/interface/font_name" - value_type: "string" - value: "Serif 12" -""" - -RETURN = ''' - key: - description: The key specified in the module parameters - returned: success - type: str - sample: /desktop/gnome/interface/font_name - value_type: - description: The type of the value that was changed - returned: success - type: str - sample: string - value: - description: The value of the preference key after executing the module - returned: success - type: str - sample: "Serif 12" -... 
-''' - -from ansible.module_utils.basic import AnsibleModule - - -class GConf2Preference(object): - def __init__(self, ansible, key, value_type, value, - direct=False, config_source=""): - self.ansible = ansible - self.key = key - self.value_type = value_type - self.value = value - self.config_source = config_source - self.direct = direct - - def value_already_set(self): - return False - - def call(self, call_type, fail_onerr=True): - """ Helper function to perform gconftool-2 operations """ - config_source = [] - direct = [] - changed = False - out = '' - - # If the configuration source is different from the default, create - # the argument - if self.config_source is not None and len(self.config_source) > 0: - config_source = ["--config-source", self.config_source] - - # If direct is true, create the argument - if self.direct: - direct = ["--direct"] - - # Execute the call - cmd = ["gconftool-2"] - try: - # If the call is "get", then we don't need as many parameters and - # we can ignore some - if call_type == 'get': - cmd.extend(["--get", self.key]) - # Otherwise, we will use all relevant parameters - elif call_type == 'set': - cmd.extend(direct) - cmd.extend(config_source) - cmd.extend(["--type", self.value_type, "--{3}".format(call_type), self.key, self.value]) - elif call_type == 'unset': - cmd.extend(["--unset", self.key]) - - # Start external command - rc, out, err = self.ansible.run_command(cmd) - - if err and fail_onerr: - self.ansible.fail_json(msg='gconftool-2 failed with ' - 'error: %s' % (str(err))) - else: - changed = True - - except OSError as exception: - self.ansible.fail_json(msg='gconftool-2 failed with exception: ' - '%s' % exception) - return changed, out.rstrip() - - -def main(): - # Setup the Ansible module - module = AnsibleModule( - argument_spec=dict( - key=dict(type='str', required=True, no_log=False), - value_type=dict(type='str', choices=['bool', 'float', 'int', 'string']), - value=dict(type='str'), - state=dict(type='str', 
required=True, choices=['absent', 'get', 'present']), - direct=dict(type='bool', default=False), - config_source=dict(type='str'), - ), - supports_check_mode=True - ) - - state_values = {"present": "set", "absent": "unset", "get": "get"} - - # Assign module values to dictionary values - key = module.params['key'] - value_type = module.params['value_type'] - if module.params['value'].lower() == "true": - value = "true" - elif module.params['value'] == "false": - value = "false" - else: - value = module.params['value'] - - state = state_values[module.params['state']] - direct = module.params['direct'] - config_source = module.params['config_source'] - - # Initialize some variables for later - change = False - new_value = '' - - if state != "get": - if value is None or value == "": - module.fail_json(msg='State %s requires "value" to be set' - % str(state)) - elif value_type is None or value_type == "": - module.fail_json(msg='State %s requires "value_type" to be set' - % str(state)) - - if direct and config_source is None: - module.fail_json(msg='If "direct" is "yes" then the ' + - '"config_source" must be specified') - elif not direct and config_source is not None: - module.fail_json(msg='If the "config_source" is specified ' + - 'then "direct" must be "yes"') - - # Create a gconf2 preference - gconf_pref = GConf2Preference(module, key, value_type, - value, direct, config_source) - # Now we get the current value, if not found don't fail - dummy, current_value = gconf_pref.call("get", fail_onerr=False) - - # Check if the current value equals the value we want to set. If not, make - # a change - if current_value != value: - # If check mode, we know a change would have occurred. - if module.check_mode: - # So we will set the change to True - change = True - # And set the new_value to the value that would have been set - new_value = value - # If not check mode make the change. 
- else: - change, new_value = gconf_pref.call(state) - # If the value we want to set is the same as the current_value, we will - # set the new_value to the current_value for reporting - else: - new_value = current_value - - facts = dict(gconftool2={'changed': change, - 'key': key, - 'value_type': value_type, - 'new_value': new_value, - 'previous_value': current_value, - 'playbook_value': module.params['value']}) - - module.exit_json(changed=change, ansible_facts=facts) - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/system/interfaces_file.py b/plugins/modules/system/interfaces_file.py deleted file mode 100644 index 91cf74b426..0000000000 --- a/plugins/modules/system/interfaces_file.py +++ /dev/null @@ -1,372 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# -# Copyright: (c) 2016, Roman Belyakovsky -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' ---- -module: interfaces_file -short_description: Tweak settings in /etc/network/interfaces files -extends_documentation_fragment: files -description: - - Manage (add, remove, change) individual interface options in an interfaces-style file without having - to manage the file as a whole with, say, M(ansible.builtin.template) or M(ansible.builtin.assemble). Interface has to be presented in a file. 
- - Read information about interfaces from interfaces-styled files -options: - dest: - type: path - description: - - Path to the interfaces file - default: /etc/network/interfaces - iface: - type: str - description: - - Name of the interface, required for value changes or option remove - address_family: - type: str - description: - - Address family of the interface, useful if same interface name is used for both inet and inet6 - option: - type: str - description: - - Name of the option, required for value changes or option remove - value: - type: str - description: - - If I(option) is not presented for the I(interface) and I(state) is C(present) option will be added. - If I(option) already exists and is not C(pre-up), C(up), C(post-up) or C(down), it's value will be updated. - C(pre-up), C(up), C(post-up) and C(down) options can't be updated, only adding new options, removing existing - ones or cleaning the whole option set are supported - backup: - description: - - Create a backup file including the timestamp information so you can get - the original file back if you somehow clobbered it incorrectly. - type: bool - default: 'no' - state: - type: str - description: - - If set to C(absent) the option or section will be removed if present instead of created. 
- default: "present" - choices: [ "present", "absent" ] - -notes: - - If option is defined multiple times last one will be updated but all will be deleted in case of an absent state -requirements: [] -author: "Roman Belyakovsky (@hryamzik)" -''' - -RETURN = ''' -dest: - description: destination file/path - returned: success - type: str - sample: "/etc/network/interfaces" -ifaces: - description: interfaces dictionary - returned: success - type: complex - contains: - ifaces: - description: interface dictionary - returned: success - type: dict - contains: - eth0: - description: Name of the interface - returned: success - type: dict - contains: - address_family: - description: interface address family - returned: success - type: str - sample: "inet" - method: - description: interface method - returned: success - type: str - sample: "manual" - mtu: - description: other options, all values returned as strings - returned: success - type: str - sample: "1500" - pre-up: - description: list of C(pre-up) scripts - returned: success - type: list - sample: - - "route add -net 10.10.10.0/24 gw 10.10.10.1 dev eth1" - - "route add -net 10.10.11.0/24 gw 10.10.11.1 dev eth2" - up: - description: list of C(up) scripts - returned: success - type: list - sample: - - "route add -net 10.10.10.0/24 gw 10.10.10.1 dev eth1" - - "route add -net 10.10.11.0/24 gw 10.10.11.1 dev eth2" - post-up: - description: list of C(post-up) scripts - returned: success - type: list - sample: - - "route add -net 10.10.10.0/24 gw 10.10.10.1 dev eth1" - - "route add -net 10.10.11.0/24 gw 10.10.11.1 dev eth2" - down: - description: list of C(down) scripts - returned: success - type: list - sample: - - "route del -net 10.10.10.0/24 gw 10.10.10.1 dev eth1" - - "route del -net 10.10.11.0/24 gw 10.10.11.1 dev eth2" -... 
-''' - -EXAMPLES = ''' -- name: Set eth1 mtu configuration value to 8000 - community.general.interfaces_file: - dest: /etc/network/interfaces.d/eth1.cfg - iface: eth1 - option: mtu - value: 8000 - backup: yes - state: present - register: eth1_cfg -''' - -import os -import re -import tempfile - -from ansible.module_utils.basic import AnsibleModule -from ansible.module_utils.common.text.converters import to_bytes - - -def line_dict(line): - return {'line': line, 'line_type': 'unknown'} - - -def make_option_dict(line, iface, option, value, address_family): - return {'line': line, 'iface': iface, 'option': option, 'value': value, 'line_type': 'option', 'address_family': address_family} - - -def get_option_value(line): - patt = re.compile(r'^\s+(?P